aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm-project/clang
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2023-09-02 21:17:18 +0000
committerDimitry Andric <dim@FreeBSD.org>2023-12-08 17:34:50 +0000
commit06c3fb2749bda94cb5201f81ffdb8fa6c3161b2e (patch)
tree62f873df87c7c675557a179e0c4c83fe9f3087bc /contrib/llvm-project/clang
parentcf037972ea8863e2bab7461d77345367d2c1e054 (diff)
parent7fa27ce4a07f19b07799a767fc29416f3b625afb (diff)
Diffstat (limited to 'contrib/llvm-project/clang')
-rw-r--r--contrib/llvm-project/clang/include/clang-c/Index.h382
-rw-r--r--contrib/llvm-project/clang/include/clang-c/module.modulemap4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/APValue.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTConsumer.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTContext.h45
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTDiagnostic.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTImporter.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CommentSema.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ComparisonCategories.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Decl.h154
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclBase.h58
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclCXX.h96
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclObjC.h2
-rwxr-xr-xcontrib/llvm-project/clang/include/clang/AST/DeclTemplate.h36
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclarationName.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Expr.h545
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExprCXX.h38
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/IgnoreExpr.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Mangle.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h126
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/OperationKinds.def8
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h15
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/PropertiesBase.td58
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/RawCommentList.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h19
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Redeclarable.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Stmt.h51
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/StmtCXX.h31
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TemplateBase.h54
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TemplateName.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Type.h93
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TypeLoc.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TypeProperties.td10
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h106
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/IntervalPartition.h50
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ReachableCode.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTIL.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsage.h27
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def9
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/CFG.h47
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Arena.h147
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h25
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h34
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h232
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h437
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DebugSupport.h52
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Formula.h138
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Logger.h89
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MatchSwitch.h15
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/ChromiumCheckModel.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/NoopAnalysis.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/RecordOps.h76
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Solver.h24
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h81
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h19
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h226
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h27
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h68
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Support/BumpVector.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def10
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AddressSpaces.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Attr.td216
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td268
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h138
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Builtins.def65
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64.def20
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def17
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def3
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsNEON.def1
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def174
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def1457
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def98
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCVVector.def1
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsSME.def21
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def21
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def30
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def5
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def32
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h49
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Cuda.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DebugInfoOptions.h66
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td5
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td10
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td36
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticDocs.td6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td60
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td17
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td82
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td60
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td95
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td320
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerializationKinds.td11
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/ExceptionSpecificationType.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/FPOptions.def1
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Features.def6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/FileEntry.h36
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/FileManager.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h54
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LLVM.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangOptions.def11
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangOptions.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangStandard.h20
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangStandards.def34
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Linkage.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Module.h112
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensionTypes.def8
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def10
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/ParsedAttrInfo.h152
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def290
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/SourceManager.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Specifiers.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h25
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetID.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h51
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetOptions.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Thunk.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def35
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h15
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/WebAssemblyReferenceTypes.def40
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_neon.td6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_sme.td259
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_sve.td412
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_sve_sme_incl.td281
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/riscv_sifive_vector.td105
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td1956
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/riscv_vector_common.td246
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/BackendUtil.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/CodeGenAction.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Action.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Compilation.h11
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Distro.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Driver.h43
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Job.h26
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Multilib.h122
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/MultilibBuilder.h134
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/OffloadBundler.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Options.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Options.td830
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/ToolChain.h39
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Types.def1
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/API.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/APIIgnoresList.h20
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/AvailabilityInfo.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/DeclarationFragments.h29
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIActionBase.h54
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIVisitor.h639
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/FrontendActions.h62
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SerializerBase.h118
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h70
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/TypedefUnderlyingTypeResolver.h (renamed from contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.h)0
-rwxr-xr-xcontrib/llvm-project/clang/include/clang/Format/Format.h546
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h28
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h11
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/DependencyOutputOptions.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/LayoutOverrideSource.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/TextDiagnostic.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/Utils.h11
-rw-r--r--contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h97
-rw-r--r--contrib/llvm-project/clang/include/clang/Interpreter/Value.h208
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesScanner.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h47
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/Lexer.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h31
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/MacroInfo.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h72
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/MultipleIncludeOpt.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/Pragma.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h63
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/Token.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Parse/LoopHint.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/Parse/Parser.h118
-rw-r--r--contrib/llvm-project/clang/include/clang/Rewrite/Core/RewriteRope.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h29
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Designator.h201
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/EnterExpressionEvaluationContext.h69
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/HLSLExternalSemaSource.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/IdentifierResolver.h11
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Initialization.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Lookup.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Overload.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h233
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/RISCVIntrinsicManager.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Scope.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h16
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Sema.h600
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Template.h35
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/TemplateDeduction.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h33
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td54
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Taint.h54
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def9
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h20
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h21
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h27
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h176
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h22
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h33
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h72
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def1
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h16
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h26
-rw-r--r--contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Testing/TestAST.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h95
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h19
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderAnalysis.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderIncludes.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Inclusions/StandardLibrary.h37
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Inclusions/StdSymbolMap.inc1538
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Tooling.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/module.modulemap199
-rw-r--r--contrib/llvm-project/clang/include/module.modulemap205
-rw-r--r--contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h4
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTContext.cpp710
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTImporter.cpp347
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp45
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/AST/AttrImpl.cpp41
-rw-r--r--contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/AST/Decl.cpp296
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclBase.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclCXX.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp11
-rwxr-xr-xcontrib/llvm-project/clang/lib/AST/DeclTemplate.cpp101
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclarationName.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/AST/Expr.cpp339
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprCXX.cpp61
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprConstant.cpp341
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/AST/FormatString.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Boolean.h6
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp128
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h6
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp1103
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h123
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp228
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h5
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Context.cpp67
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Context.h17
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp53
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h23
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h12
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Floating.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Floating.h158
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Frame.h2
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Function.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Function.h40
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h71
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Integral.h31
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp186
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Interp.h513
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp67
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h17
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp82
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h11
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp39
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h37
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpState.h9
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td147
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp57
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Pointer.h23
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/PrimType.h31
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Primitives.h36
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Program.cpp31
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Program.h6
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Record.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Record.h8
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Source.h9
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/State.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/State.h6
-rw-r--r--contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp228
-rw-r--r--contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp173
-rw-r--r--contrib/llvm-project/clang/lib/AST/NSAPI.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp45
-rw-r--r--contrib/llvm-project/clang/lib/AST/ODRHash.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp72
-rw-r--r--contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp40
-rw-r--r--contrib/llvm-project/clang/lib/AST/Stmt.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtCXX.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtProfile.cpp88
-rw-r--r--contrib/llvm-project/clang/lib/AST/TemplateBase.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/AST/TemplateName.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/AST/Type.cpp182
-rw-r--r--contrib/llvm-project/clang/lib/AST/TypeLoc.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/AST/TypePrinter.cpp120
-rw-r--r--contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h2
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CFG.cpp441
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Arena.cpp98
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp60
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp401
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp744
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp208
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Formula.cpp82
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.cpp536
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.css142
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.html107
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.js219
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Logger.cpp108
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/ChromiumCheckModel.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp535
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/RecordOps.cpp117
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp591
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp307
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp333
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/IntervalPartition.cpp116
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp53
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp1917
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Attributes.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Builtins.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Cuda.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp44
-rw-r--r--contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/Basic/FileManager.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/Basic/LangOptions.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Basic/LangStandards.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Module.cpp65
-rw-r--r--contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp31
-rw-r--r--contrib/llvm-project/clang/lib/Basic/ParsedAttrInfo.cpp32
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Sarif.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Basic/SourceManager.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Basic/TargetID.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets.cpp474
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp204
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h38
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp186
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h23
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/ARC.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/ARM.h8
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AVR.h5
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/BPF.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h7
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Le64.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Le64.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp101
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/M68k.h6
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/MSP430.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Mips.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h8
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h100
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PPC.h54
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h9
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h63
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h19
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/TCE.h7
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/VE.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h30
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp89
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/X86.h43
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/XCore.h4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfo.cpp231
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h244
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.cpp452
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.h152
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Address.h108
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp186
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp146
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp37
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h6
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h49
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp1508
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp52
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h24
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp401
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCall.h12
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp146
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp351
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h52
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp147
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGException.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp346
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp40
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp95
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp92
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp133
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp38
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h6
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp1677
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h215
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp227
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h80
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp238
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp400
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp83
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGVTables.h10
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGValue.h39
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp159
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp103
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h92
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp538
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h31
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h7
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp227
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h14
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp91
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h9
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp393
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp101
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp12363
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h176
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp824
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/AMDGPU.cpp601
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/ARC.cpp158
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/ARM.cpp819
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/AVR.cpp154
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/BPF.cpp100
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/CSKY.cpp175
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp423
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/Lanai.cpp154
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/LoongArch.cpp449
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/M68k.cpp55
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/MSP430.cpp94
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/Mips.cpp441
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/NVPTX.cpp309
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/PNaCl.cpp109
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp972
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp519
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/SPIR.cpp218
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/Sparc.cpp409
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/SystemZ.cpp538
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/TCE.cpp82
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/VE.cpp71
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/WebAssembly.cpp173
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/X86.cpp3402
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/XCore.cpp662
-rw-r--r--contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Action.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Compilation.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Distro.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Driver.cpp409
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Job.cpp44
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Multilib.cpp334
-rw-r--r--contrib/llvm-project/clang/lib/Driver/MultilibBuilder.cpp197
-rw-r--r--contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp106
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChain.cpp157
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp147
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp103
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp58
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp157
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h19
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp120
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.h8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp38
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp342
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.h21
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp803
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp360
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h23
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp49
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h14
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp136
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h7
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp213
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h19
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp148
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h17
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp851
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp47
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.h3
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp57
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h24
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h3
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/LazyDetector.h45
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp87
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp65
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h10
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.h1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp78
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.cpp419
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.h95
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp41
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp112
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h3
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.h1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp310
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h56
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Types.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp152
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/API.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/APIIgnoresList.cpp31
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/AvailabilityInfo.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp57
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp194
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIVisitor.cpp560
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SerializerBase.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp107
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Format/BreakableToken.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp89
-rw-r--r--contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Format/Format.cpp286
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatToken.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatToken.h295
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp49
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h4
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatTokenSource.h267
-rw-r--r--contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Format/MacroExpander.cpp34
-rw-r--r--contrib/llvm-project/clang/lib/Format/Macros.h21
-rw-r--r--contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp544
-rw-r--r--contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h41
-rw-r--r--contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h2
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp613
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnnotator.h38
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp195
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp688
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h76
-rw-r--r--contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp195
-rw-r--r--contrib/llvm-project/clang/lib/Format/WhitespaceManager.h7
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ASTMerge.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp54
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp551
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/DiagnosticRenderer.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp57
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp49
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp90
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/LayoutOverrideSource.cpp104
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp31
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp37
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp677
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h191
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_hip_cmath.h2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h62
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h127
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h13
-rw-r--r--contrib/llvm-project/clang/lib/Headers/adxintrin.h203
-rw-r--r--contrib/llvm-project/clang/lib/Headers/altivec.h260
-rw-r--r--contrib/llvm-project/clang/lib/Headers/amxcomplexintrin.h169
-rw-r--r--contrib/llvm-project/clang/lib/Headers/arm_acle.h22
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx2intrin.h4117
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512fintrin.h24
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h20
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avxintrin.h27
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avxvnniint16intrin.h473
-rw-r--r--contrib/llvm-project/clang/lib/Headers/bmi2intrin.h200
-rw-r--r--contrib/llvm-project/clang/lib/Headers/clflushoptintrin.h9
-rw-r--r--contrib/llvm-project/clang/lib/Headers/clzerointrin.h12
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cpuid.h10
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/shared_ptr_base.h9
-rw-r--r--contrib/llvm-project/clang/lib/Headers/fmaintrin.h564
-rw-r--r--contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h257
-rw-r--r--contrib/llvm-project/clang/lib/Headers/immintrin.h124
-rw-r--r--contrib/llvm-project/clang/lib/Headers/limits.h6
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/ctype.h85
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/inttypes.h34
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/llvm-libc-decls/README.txt6
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdio.h34
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdlib.h42
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/string.h37
-rw-r--r--contrib/llvm-project/clang/lib/Headers/mwaitxintrin.h29
-rw-r--r--contrib/llvm-project/clang/lib/Headers/opencl-c-base.h3
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h1
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/pmmintrin.h18
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h3
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/rdseedintrin.h67
-rw-r--r--contrib/llvm-project/clang/lib/Headers/riscv_ntlh.h28
-rw-r--r--contrib/llvm-project/clang/lib/Headers/sha512intrin.h200
-rw-r--r--contrib/llvm-project/clang/lib/Headers/shaintrin.h128
-rw-r--r--contrib/llvm-project/clang/lib/Headers/sifive_vector.h (renamed from contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysisKinds.td)11
-rw-r--r--contrib/llvm-project/clang/lib/Headers/sm3intrin.h238
-rw-r--r--contrib/llvm-project/clang/lib/Headers/sm4intrin.h269
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdalign.h5
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdatomic.h11
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stddef.h5
-rw-r--r--contrib/llvm-project/clang/lib/Headers/wasm_simd128.h144
-rw-r--r--contrib/llvm-project/clang/lib/Headers/xsavecintrin.h50
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexBody.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexDecl.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Index/USRGeneration.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.cpp176
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.h51
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h6
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp149
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h19
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp564
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.cpp111
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.h54
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/Value.cpp266
-rw-r--r--contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp131
-rw-r--r--contrib/llvm-project/clang/lib/Lex/HeaderMap.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp159
-rw-r--r--contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Lexer.cpp38
-rw-r--r--contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp148
-rw-r--r--contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp281
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp220
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp30
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Pragma.cpp134
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp79
-rw-r--r--contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseAST.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp431
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp139
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp77
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp190
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseInit.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp104
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp68
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp49
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp47
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp134
-rw-r--r--contrib/llvm-project/clang/lib/Parse/Parser.cpp64
-rw-r--r--contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp77
-rw-r--r--contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp233
-rw-r--r--contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp147
-rw-r--r--contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/Sema/Scope.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Sema/Sema.cpp158
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp112
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCast.cpp70
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp1544
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp56
-rwxr-xr-xcontrib/llvm-project/clang/lib/Sema/SemaConcept.cpp225
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp168
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp420
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp410
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp573
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp1238
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp167
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaInit.cpp648
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp796
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp251
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaModule.cpp314
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp804
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp391
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp114
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp108
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp85
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp201
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp208
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp140
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp143
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaType.cpp305
-rw-r--r--contrib/llvm-project/clang/lib/Sema/TreeTransform.h448
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp371
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp270
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h3
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp58
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp311
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp166
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp34
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp362
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp106
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp356
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp46
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h8
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp188
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp37
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp85
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp32
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp97
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp1782
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp59
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp194
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp59
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/APSIntType.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp40
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp67
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp111
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp124
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp214
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp113
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp150
-rw-r--r--contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Testing/TestAST.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp145
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp124
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp59
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp30
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderAnalysis.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/CSymbolMap.inc (renamed from contrib/llvm-project/clang/include/clang/Tooling/Inclusions/CSymbolMap.inc)0
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp245
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSpecialSymbolMap.inc722
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSymbolMap.inc3819
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdTsSymbolMap.inc52
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp64
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Tooling.cpp46
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp2
-rw-r--r--contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArch.cpp138
-rw-r--r--contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp96
-rw-r--r--contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp122
-rw-r--r--contrib/llvm-project/clang/tools/clang-repl/ClangRepl.cpp82
-rw-r--r--contrib/llvm-project/clang/tools/driver/cc1_main.cpp28
-rw-r--r--contrib/llvm-project/clang/tools/driver/cc1as_main.cpp23
-rw-r--r--contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp17
-rw-r--r--contrib/llvm-project/clang/tools/driver/driver.cpp84
-rw-r--r--contrib/llvm-project/clang/tools/nvptx-arch/NVPTXArch.cpp37
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp212
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp8
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangOpcodesEmitter.cpp198
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp87
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp15
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp95
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp293
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/TableGen.cpp55
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h9
974 files changed, 79701 insertions, 39979 deletions
diff --git a/contrib/llvm-project/clang/include/clang-c/Index.h b/contrib/llvm-project/clang/include/clang-c/Index.h
index a3e54285f89f..601b91f67d65 100644
--- a/contrib/llvm-project/clang/include/clang-c/Index.h
+++ b/contrib/llvm-project/clang/include/clang-c/Index.h
@@ -34,7 +34,7 @@
* compatible, thus CINDEX_VERSION_MAJOR is expected to remain stable.
*/
#define CINDEX_VERSION_MAJOR 0
-#define CINDEX_VERSION_MINOR 63
+#define CINDEX_VERSION_MINOR 64
#define CINDEX_VERSION_ENCODE(major, minor) (((major)*10000) + ((minor)*1))
@@ -48,6 +48,10 @@
#define CINDEX_VERSION_STRING \
CINDEX_VERSION_STRINGIZE(CINDEX_VERSION_MAJOR, CINDEX_VERSION_MINOR)
+#ifndef __has_feature
+#define __has_feature(feature) 0
+#endif
+
LLVM_CLANG_C_EXTERN_C_BEGIN
/** \defgroup CINDEX libclang: C Interface to Clang
@@ -277,6 +281,22 @@ CINDEX_LINKAGE void clang_disposeIndex(CXIndex index);
typedef enum {
/**
+ * Use the default value of an option that may depend on the process
+ * environment.
+ */
+ CXChoice_Default = 0,
+ /**
+ * Enable the option.
+ */
+ CXChoice_Enabled = 1,
+ /**
+ * Disable the option.
+ */
+ CXChoice_Disabled = 2
+} CXChoice;
+
+typedef enum {
+ /**
* Used to indicate that no special CXIndex options are needed.
*/
CXGlobalOpt_None = 0x0,
@@ -310,8 +330,130 @@ typedef enum {
} CXGlobalOptFlags;
/**
+ * Index initialization options.
+ *
+ * 0 is the default value of each member of this struct except for Size.
+ * Initialize the struct in one of the following three ways to avoid adapting
+ * code each time a new member is added to it:
+ * \code
+ * CXIndexOptions Opts;
+ * memset(&Opts, 0, sizeof(Opts));
+ * Opts.Size = sizeof(CXIndexOptions);
+ * \endcode
+ * or explicitly initialize the first data member and zero-initialize the rest:
+ * \code
+ * CXIndexOptions Opts = { sizeof(CXIndexOptions) };
+ * \endcode
+ * or to prevent the -Wmissing-field-initializers warning for the above version:
+ * \code
+ * CXIndexOptions Opts{};
+ * Opts.Size = sizeof(CXIndexOptions);
+ * \endcode
+ */
+typedef struct CXIndexOptions {
+ /**
+ * The size of struct CXIndexOptions used for option versioning.
+ *
+ * Always initialize this member to sizeof(CXIndexOptions), or assign
+ * sizeof(CXIndexOptions) to it right after creating a CXIndexOptions object.
+ */
+ unsigned Size;
+ /**
+ * A CXChoice enumerator that specifies the indexing priority policy.
+ * \sa CXGlobalOpt_ThreadBackgroundPriorityForIndexing
+ */
+ unsigned char ThreadBackgroundPriorityForIndexing;
+ /**
+ * A CXChoice enumerator that specifies the editing priority policy.
+ * \sa CXGlobalOpt_ThreadBackgroundPriorityForEditing
+ */
+ unsigned char ThreadBackgroundPriorityForEditing;
+ /**
+ * \see clang_createIndex()
+ */
+ unsigned ExcludeDeclarationsFromPCH : 1;
+ /**
+ * \see clang_createIndex()
+ */
+ unsigned DisplayDiagnostics : 1;
+ /**
+ * Store PCH in memory. If zero, PCH are stored in temporary files.
+ */
+ unsigned StorePreamblesInMemory : 1;
+ unsigned /*Reserved*/ : 13;
+
+ /**
+ * The path to a directory, in which to store temporary PCH files. If null or
+ * empty, the default system temporary directory is used. These PCH files are
+ * deleted on clean exit but stay on disk if the program crashes or is killed.
+ *
+ * This option is ignored if \a StorePreamblesInMemory is non-zero.
+ *
+ * Libclang does not create the directory at the specified path in the file
+ * system. Therefore it must exist, or storing PCH files will fail.
+ */
+ const char *PreambleStoragePath;
+ /**
+ * Specifies a path which will contain log files for certain libclang
+ * invocations. A null value implies that libclang invocations are not logged.
+ */
+ const char *InvocationEmissionPath;
+} CXIndexOptions;
+
+/**
+ * Provides a shared context for creating translation units.
+ *
+ * Call this function instead of clang_createIndex() if you need to configure
+ * the additional options in CXIndexOptions.
+ *
+ * \returns The created index or null in case of error, such as an unsupported
+ * value of options->Size.
+ *
+ * For example:
+ * \code
+ * CXIndex createIndex(const char *ApplicationTemporaryPath) {
+ * const int ExcludeDeclarationsFromPCH = 1;
+ * const int DisplayDiagnostics = 1;
+ * CXIndex Idx;
+ * #if CINDEX_VERSION_MINOR >= 64
+ * CXIndexOptions Opts;
+ * memset(&Opts, 0, sizeof(Opts));
+ * Opts.Size = sizeof(CXIndexOptions);
+ * Opts.ThreadBackgroundPriorityForIndexing = 1;
+ * Opts.ExcludeDeclarationsFromPCH = ExcludeDeclarationsFromPCH;
+ * Opts.DisplayDiagnostics = DisplayDiagnostics;
+ * Opts.PreambleStoragePath = ApplicationTemporaryPath;
+ * Idx = clang_createIndexWithOptions(&Opts);
+ * if (Idx)
+ * return Idx;
+ * fprintf(stderr,
+ * "clang_createIndexWithOptions() failed. "
+ * "CINDEX_VERSION_MINOR = %d, sizeof(CXIndexOptions) = %u\n",
+ * CINDEX_VERSION_MINOR, Opts.Size);
+ * #else
+ * (void)ApplicationTemporaryPath;
+ * #endif
+ * Idx = clang_createIndex(ExcludeDeclarationsFromPCH, DisplayDiagnostics);
+ * clang_CXIndex_setGlobalOptions(
+ * Idx, clang_CXIndex_getGlobalOptions(Idx) |
+ * CXGlobalOpt_ThreadBackgroundPriorityForIndexing);
+ * return Idx;
+ * }
+ * \endcode
+ *
+ * \sa clang_createIndex()
+ */
+CINDEX_LINKAGE CXIndex
+clang_createIndexWithOptions(const CXIndexOptions *options);
+
+/**
* Sets general options associated with a CXIndex.
*
+ * This function is DEPRECATED. Set
+ * CXIndexOptions::ThreadBackgroundPriorityForIndexing and/or
+ * CXIndexOptions::ThreadBackgroundPriorityForEditing and call
+ * clang_createIndexWithOptions() instead.
+ *
* For example:
* \code
* CXIndex idx = ...;
@@ -327,6 +469,9 @@ CINDEX_LINKAGE void clang_CXIndex_setGlobalOptions(CXIndex, unsigned options);
/**
* Gets the general options associated with a CXIndex.
*
+ * This function allows to obtain the final option values used by libclang after
+ * specifying the option policies via CXChoice enumerators.
+ *
* \returns A bitmask of options, a bitwise OR of CXGlobalOpt_XXX flags that
* are associated with the given CXIndex object.
*/
@@ -335,6 +480,9 @@ CINDEX_LINKAGE unsigned clang_CXIndex_getGlobalOptions(CXIndex);
/**
* Sets the invocation emission path option in a CXIndex.
*
+ * This function is DEPRECATED. Set CXIndexOptions::InvocationEmissionPath and
+ * call clang_createIndexWithOptions() instead.
+ *
* The invocation emission path specifies a path which will contain log
* files for certain libclang invocations. A null value (default) implies that
* libclang invocations are not logged..
@@ -2787,10 +2935,15 @@ enum CXTypeKind {
CXType_OCLIntelSubgroupAVCImeResult = 169,
CXType_OCLIntelSubgroupAVCRefResult = 170,
CXType_OCLIntelSubgroupAVCSicResult = 171,
+ CXType_OCLIntelSubgroupAVCImeResultSingleReferenceStreamout = 172,
+ CXType_OCLIntelSubgroupAVCImeResultDualReferenceStreamout = 173,
+ CXType_OCLIntelSubgroupAVCImeSingleReferenceStreamin = 174,
+ CXType_OCLIntelSubgroupAVCImeDualReferenceStreamin = 175,
+
+ /* Old aliases for AVC OpenCL extension types. */
CXType_OCLIntelSubgroupAVCImeResultSingleRefStreamout = 172,
CXType_OCLIntelSubgroupAVCImeResultDualRefStreamout = 173,
CXType_OCLIntelSubgroupAVCImeSingleRefStreamin = 174,
-
CXType_OCLIntelSubgroupAVCImeDualRefStreamin = 175,
CXType_ExtVector = 176,
@@ -2888,9 +3041,25 @@ CINDEX_LINKAGE unsigned long long
clang_getEnumConstantDeclUnsignedValue(CXCursor C);
/**
- * Retrieve the bit width of a bit field declaration as an integer.
+ * Returns non-zero if the cursor specifies a Record member that is a bit-field.
+ */
+CINDEX_LINKAGE unsigned clang_Cursor_isBitField(CXCursor C);
+
+/**
+ * Retrieve the bit width of a bit-field declaration as an integer.
+ *
+ * If the cursor does not reference a bit-field, or if the bit-field's width
+ * expression cannot be evaluated, -1 is returned.
*
- * If a cursor that is not a bit field declaration is passed in, -1 is returned.
+ * For example:
+ * \code
+ * if (clang_Cursor_isBitField(Cursor)) {
+ * int Width = clang_getFieldDeclBitWidth(Cursor);
+ * if (Width != -1) {
+ * // The bit-field width is not value-dependent.
+ * }
+ * }
+ * \endcode
*/
CINDEX_LINKAGE int clang_getFieldDeclBitWidth(CXCursor C);
@@ -3520,12 +3689,6 @@ CINDEX_LINKAGE CXType clang_Type_getTemplateArgumentAsType(CXType T,
CINDEX_LINKAGE enum CXRefQualifierKind clang_Type_getCXXRefQualifier(CXType T);
/**
- * Returns non-zero if the cursor specifies a Record member that is a
- * bitfield.
- */
-CINDEX_LINKAGE unsigned clang_Cursor_isBitField(CXCursor C);
-
-/**
* Returns 1 if the base class specified by the cursor with kind
* CX_CXXBaseSpecifier is virtual.
*/
@@ -3697,8 +3860,6 @@ typedef enum CXChildVisitResult (*CXCursorVisitor)(CXCursor cursor,
CINDEX_LINKAGE unsigned clang_visitChildren(CXCursor parent,
CXCursorVisitor visitor,
CXClientData client_data);
-#ifdef __has_feature
-#if __has_feature(blocks)
/**
* Visitor invoked for each cursor found by a traversal.
*
@@ -3709,8 +3870,12 @@ CINDEX_LINKAGE unsigned clang_visitChildren(CXCursor parent,
* The visitor should return one of the \c CXChildVisitResult values
* to direct clang_visitChildrenWithBlock().
*/
+#if __has_feature(blocks)
typedef enum CXChildVisitResult (^CXCursorVisitorBlock)(CXCursor cursor,
CXCursor parent);
+#else
+typedef struct _CXChildVisitResult *CXCursorVisitorBlock;
+#endif
/**
* Visits the children of a cursor using the specified block. Behaves
@@ -3718,8 +3883,6 @@ typedef enum CXChildVisitResult (^CXCursorVisitorBlock)(CXCursor cursor,
*/
CINDEX_LINKAGE unsigned
clang_visitChildrenWithBlock(CXCursor parent, CXCursorVisitorBlock block);
-#endif
-#endif
/**
* @}
@@ -4344,6 +4507,51 @@ CINDEX_LINKAGE unsigned clang_CXXMethod_isCopyAssignmentOperator(CXCursor C);
CINDEX_LINKAGE unsigned clang_CXXMethod_isMoveAssignmentOperator(CXCursor C);
/**
+ * Determines if a C++ constructor or conversion function was declared
+ * explicit, returning 1 if such is the case and 0 otherwise.
+ *
+ * Constructors or conversion functions are declared explicit through
+ * the use of the explicit specifier.
+ *
+ * For example, the following constructor and conversion function are
+ * not explicit as they lack the explicit specifier:
+ *
+ * class Foo {
+ * Foo();
+ * operator int();
+ * };
+ *
+ * While the following constructor and conversion function are
+ * explicit as they are declared with the explicit specifier.
+ *
+ * class Foo {
+ * explicit Foo();
+ * explicit operator int();
+ * };
+ *
+ * This function will return 0 when given a cursor pointing to one of
+ * the former declarations and it will return 1 for a cursor pointing
+ * to the latter declarations.
+ *
+ * The explicit specifier allows the user to specify a
+ * conditional compile-time expression whose value decides
+ * whether the marked element is explicit or not.
+ *
+ * For example:
+ *
+ * constexpr bool foo(int i) { return i % 2 == 0; }
+ *
+ * class Foo {
+ * explicit(foo(1)) Foo();
+ * explicit(foo(2)) operator int();
+ * }
+ *
+ * This function will return 0 for the constructor and 1 for
+ * the conversion function.
+ */
+CINDEX_LINKAGE unsigned clang_CXXMethod_isExplicit(CXCursor C);
+
+/**
* Determine if a C++ record is abstract, i.e. whether a class or struct
* has a pure virtual member function.
*/
@@ -5675,11 +5883,12 @@ CINDEX_LINKAGE CXResult clang_findReferencesInFile(
CINDEX_LINKAGE CXResult clang_findIncludesInFile(
CXTranslationUnit TU, CXFile file, CXCursorAndRangeVisitor visitor);
-#ifdef __has_feature
#if __has_feature(blocks)
-
typedef enum CXVisitorResult (^CXCursorAndRangeVisitorBlock)(CXCursor,
CXSourceRange);
+#else
+typedef struct _CXCursorAndRangeVisitorBlock *CXCursorAndRangeVisitorBlock;
+#endif
CINDEX_LINKAGE
CXResult clang_findReferencesInFileWithBlock(CXCursor, CXFile,
@@ -5689,9 +5898,6 @@ CINDEX_LINKAGE
CXResult clang_findIncludesInFileWithBlock(CXTranslationUnit, CXFile,
CXCursorAndRangeVisitorBlock);
-#endif
-#endif
-
/**
* The client's data object that is associated with a CXFile.
*/
@@ -6305,6 +6511,144 @@ CINDEX_LINKAGE unsigned clang_Type_visitFields(CXType T, CXFieldVisitor visitor,
CXClientData client_data);
/**
+ * Describes the kind of binary operators.
+ */
+enum CXBinaryOperatorKind {
+ /** This value describes cursors which are not binary operators. */
+ CXBinaryOperator_Invalid,
+ /** C++ Pointer - to - member operator. */
+ CXBinaryOperator_PtrMemD,
+ /** C++ Pointer - to - member operator. */
+ CXBinaryOperator_PtrMemI,
+ /** Multiplication operator. */
+ CXBinaryOperator_Mul,
+ /** Division operator. */
+ CXBinaryOperator_Div,
+ /** Remainder operator. */
+ CXBinaryOperator_Rem,
+ /** Addition operator. */
+ CXBinaryOperator_Add,
+ /** Subtraction operator. */
+ CXBinaryOperator_Sub,
+ /** Bitwise shift left operator. */
+ CXBinaryOperator_Shl,
+ /** Bitwise shift right operator. */
+ CXBinaryOperator_Shr,
+ /** C++ three-way comparison (spaceship) operator. */
+ CXBinaryOperator_Cmp,
+ /** Less than operator. */
+ CXBinaryOperator_LT,
+ /** Greater than operator. */
+ CXBinaryOperator_GT,
+ /** Less or equal operator. */
+ CXBinaryOperator_LE,
+ /** Greater or equal operator. */
+ CXBinaryOperator_GE,
+ /** Equal operator. */
+ CXBinaryOperator_EQ,
+ /** Not equal operator. */
+ CXBinaryOperator_NE,
+ /** Bitwise AND operator. */
+ CXBinaryOperator_And,
+ /** Bitwise XOR operator. */
+ CXBinaryOperator_Xor,
+ /** Bitwise OR operator. */
+ CXBinaryOperator_Or,
+ /** Logical AND operator. */
+ CXBinaryOperator_LAnd,
+ /** Logical OR operator. */
+ CXBinaryOperator_LOr,
+ /** Assignment operator. */
+ CXBinaryOperator_Assign,
+ /** Multiplication assignment operator. */
+ CXBinaryOperator_MulAssign,
+ /** Division assignment operator. */
+ CXBinaryOperator_DivAssign,
+ /** Remainder assignment operator. */
+ CXBinaryOperator_RemAssign,
+ /** Addition assignment operator. */
+ CXBinaryOperator_AddAssign,
+ /** Subtraction assignment operator. */
+ CXBinaryOperator_SubAssign,
+ /** Bitwise shift left assignment operator. */
+ CXBinaryOperator_ShlAssign,
+ /** Bitwise shift right assignment operator. */
+ CXBinaryOperator_ShrAssign,
+ /** Bitwise AND assignment operator. */
+ CXBinaryOperator_AndAssign,
+ /** Bitwise XOR assignment operator. */
+ CXBinaryOperator_XorAssign,
+ /** Bitwise OR assignment operator. */
+ CXBinaryOperator_OrAssign,
+ /** Comma operator. */
+ CXBinaryOperator_Comma
+};
+
+/**
+ * Retrieve the spelling of a given CXBinaryOperatorKind.
+ */
+CINDEX_LINKAGE CXString
+clang_getBinaryOperatorKindSpelling(enum CXBinaryOperatorKind kind);
+
+/**
+ * Retrieve the binary operator kind of this cursor.
+ *
+ * If this cursor is not a binary operator then returns Invalid.
+ */
+CINDEX_LINKAGE enum CXBinaryOperatorKind
+clang_getCursorBinaryOperatorKind(CXCursor cursor);
+
+/**
+ * Describes the kind of unary operators.
+ */
+enum CXUnaryOperatorKind {
+ /** This value describes cursors which are not unary operators. */
+ CXUnaryOperator_Invalid,
+ /** Postfix increment operator. */
+ CXUnaryOperator_PostInc,
+ /** Postfix decrement operator. */
+ CXUnaryOperator_PostDec,
+ /** Prefix increment operator. */
+ CXUnaryOperator_PreInc,
+ /** Prefix decrement operator. */
+ CXUnaryOperator_PreDec,
+ /** Address of operator. */
+ CXUnaryOperator_AddrOf,
+ /** Dereference operator. */
+ CXUnaryOperator_Deref,
+ /** Plus operator. */
+ CXUnaryOperator_Plus,
+ /** Minus operator. */
+ CXUnaryOperator_Minus,
+ /** Not operator. */
+ CXUnaryOperator_Not,
+ /** LNot operator. */
+ CXUnaryOperator_LNot,
+ /** "__real expr" operator. */
+ CXUnaryOperator_Real,
+ /** "__imag expr" operator. */
+ CXUnaryOperator_Imag,
+ /** __extension__ marker operator. */
+ CXUnaryOperator_Extension,
+ /** C++ co_await operator. */
+ CXUnaryOperator_Coawait
+};
+
+/**
+ * Retrieve the spelling of a given CXUnaryOperatorKind.
+ */
+CINDEX_LINKAGE CXString
+clang_getUnaryOperatorKindSpelling(enum CXUnaryOperatorKind kind);
+
+/**
+ * Retrieve the unary operator kind of this cursor.
+ *
+ * If this cursor is not a unary operator then returns Invalid.
+ */
+CINDEX_LINKAGE enum CXUnaryOperatorKind
+clang_getCursorUnaryOperatorKind(CXCursor cursor);
+
+/**
* @}
*/
diff --git a/contrib/llvm-project/clang/include/clang-c/module.modulemap b/contrib/llvm-project/clang/include/clang-c/module.modulemap
deleted file mode 100644
index 95a59d62344c..000000000000
--- a/contrib/llvm-project/clang/include/clang-c/module.modulemap
+++ /dev/null
@@ -1,4 +0,0 @@
-module Clang_C {
- umbrella "."
- module * { export * }
-}
diff --git a/contrib/llvm-project/clang/include/clang/AST/APValue.h b/contrib/llvm-project/clang/include/clang/AST/APValue.h
index 4e22d6c8443c..286c1a1b0941 100644
--- a/contrib/llvm-project/clang/include/clang/AST/APValue.h
+++ b/contrib/llvm-project/clang/include/clang/AST/APValue.h
@@ -267,9 +267,9 @@ private:
};
struct LV;
struct Vec {
- APValue *Elts;
- unsigned NumElts;
- Vec() : Elts(nullptr), NumElts(0) {}
+ APValue *Elts = nullptr;
+ unsigned NumElts = 0;
+ Vec() = default;
~Vec() { delete[] Elts; }
};
struct Arr {
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTConsumer.h b/contrib/llvm-project/clang/include/clang/AST/ASTConsumer.h
index 21850e832ff1..ebcd8059284d 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTConsumer.h
@@ -33,12 +33,12 @@ namespace clang {
class ASTConsumer {
/// Whether this AST consumer also requires information about
/// semantic analysis.
- bool SemaConsumer;
+ bool SemaConsumer = false;
friend class SemaConsumer;
public:
- ASTConsumer() : SemaConsumer(false) { }
+ ASTConsumer() = default;
virtual ~ASTConsumer() {}
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTContext.h b/contrib/llvm-project/clang/include/clang/AST/ASTContext.h
index 0238371927e0..40cadd93158c 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTContext.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTContext.h
@@ -447,8 +447,8 @@ class ASTContext : public RefCountedBase<ASTContext> {
};
llvm::DenseMap<Module*, PerModuleInitializers*> ModuleInitializers;
- /// For module code-gen cases, this is the top-level module we are building.
- Module *TopLevelModule = nullptr;
+ /// This is the top-level (C++20) Named module we are building.
+ Module *CurrentCXXNamedModule = nullptr;
static constexpr unsigned ConstantArrayTypesLog2InitSize = 8;
static constexpr unsigned GeneralTypesLog2InitSize = 9;
@@ -1051,10 +1051,10 @@ public:
ArrayRef<Decl*> getModuleInitializers(Module *M);
/// Set the (C++20) module we are building.
- void setModuleForCodeGen(Module *M) { TopLevelModule = M; }
+ void setCurrentNamedModule(Module *M);
/// Get module under construction, nullptr if this is not a C++20 module.
- Module *getModuleForCodeGen() const { return TopLevelModule; }
+ Module *getCurrentNamedModule() const { return CurrentCXXNamedModule; }
TranslationUnitDecl *getTranslationUnitDecl() const {
return TUDecl->getMostRecentDecl();
@@ -1126,6 +1126,8 @@ public:
#define RVV_TYPE(Name, Id, SingletonId) \
CanQualType SingletonId;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) CanQualType SingletonId;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
// Types for deductions in C++0x [stmt.ranged]'s desugaring. Built on demand.
mutable QualType AutoDeductTy; // Deduction against 'auto'.
@@ -1470,9 +1472,15 @@ public:
/// Return the unique reference to a scalable vector type of the specified
/// element type and scalable number of elements.
+ /// For RISC-V, number of fields is also provided when it fetching for
+ /// tuple type.
///
/// \pre \p EltTy must be a built-in type.
- QualType getScalableVectorType(QualType EltTy, unsigned NumElts) const;
+ QualType getScalableVectorType(QualType EltTy, unsigned NumElts,
+ unsigned NumFields = 1) const;
+
+ /// Return a WebAssembly externref type.
+ QualType getWebAssemblyExternrefType() const;
/// Return the unique reference to a vector type of the specified
/// element type and size.
@@ -1710,6 +1718,10 @@ public:
/// C++11 deduction pattern for 'auto &&' type.
QualType getAutoRRefDeductType() const;
+ /// Remove any type constraints from a template parameter type, for
+ /// equivalence comparison of template parameters.
+ QualType getUnconstrainedType(QualType T) const;
+
/// C++17 deduced class template specialization type.
QualType getDeducedTemplateSpecializationType(TemplateName Template,
QualType DeducedType,
@@ -2243,6 +2255,17 @@ public:
/// false otherwise.
bool areLaxCompatibleSveTypes(QualType FirstType, QualType SecondType);
+ /// Return true if the given types are an RISC-V vector builtin type and a
+ /// VectorType that is a fixed-length representation of the RISC-V vector
+ /// builtin type for a specific vector-length.
+ bool areCompatibleRVVTypes(QualType FirstType, QualType SecondType);
+
+ /// Return true if the given vector types are lax-compatible RISC-V vector
+ /// types as defined by -flax-vector-conversions=, which permits implicit
+ /// conversions between vectors with different number of elements and/or
+ /// incompatible element types, false otherwise.
+ bool areLaxCompatibleRVVTypes(QualType FirstType, QualType SecondType);
+
/// Return true if the type has been explicitly qualified with ObjC ownership.
/// A type may be implicitly qualified with ownership under ObjC ARC, and in
/// some cases the compiler treats these differently.
@@ -2482,7 +2505,9 @@ public:
/// Return true if the specified type has unique object representations
/// according to (C++17 [meta.unary.prop]p9)
- bool hasUniqueObjectRepresentations(QualType Ty) const;
+ bool
+ hasUniqueObjectRepresentations(QualType Ty,
+ bool CheckIfTriviallyCopyable = true) const;
//===--------------------------------------------------------------------===//
// Type Operators
@@ -2647,11 +2672,6 @@ public:
/// template.
bool hasSameTemplateName(const TemplateName &X, const TemplateName &Y) const;
- /// Determine whether two Friend functions are different because constraints
- /// that refer to an enclosing template, according to [temp.friend] p9.
- bool FriendsDifferByConstraints(const FunctionDecl *X,
- const FunctionDecl *Y) const;
-
/// Determine whether the two declarations refer to the same entity.
bool isSameEntity(const NamedDecl *X, const NamedDecl *Y) const;
@@ -3042,7 +3062,7 @@ public:
}
GVALinkage GetGVALinkageForFunction(const FunctionDecl *FD) const;
- GVALinkage GetGVALinkageForVariable(const VarDecl *VD);
+ GVALinkage GetGVALinkageForVariable(const VarDecl *VD) const;
/// Determines if the decl can be CodeGen'ed or deserialized from PCH
/// lazily, only when used; this is only relevant for function or file scoped
@@ -3193,7 +3213,6 @@ private:
public:
ObjCEncOptions() : Bits(0) {}
- ObjCEncOptions(const ObjCEncOptions &RHS) : Bits(RHS.Bits) {}
#define OPT_LIST(V) \
V(ExpandPointedToStructures, 0) \
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTDiagnostic.h b/contrib/llvm-project/clang/include/clang/AST/ASTDiagnostic.h
index 4cd909751725..ef2224982862 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTDiagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTDiagnostic.h
@@ -34,7 +34,8 @@ namespace clang {
ArrayRef<intptr_t> QualTypeVals);
/// Returns a desugared version of the QualType, and marks ShouldAKA as true
- /// whenever we remove significant sugar from the type.
+ /// whenever we remove significant sugar from the type. Make sure ShouldAKA
+ /// is initialized before passing it in.
QualType desugarForDiagnostic(ASTContext &Context, QualType QT,
bool &ShouldAKA);
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTImporter.h b/contrib/llvm-project/clang/include/clang/AST/ASTImporter.h
index f851decd0965..4ffd91384657 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTImporter.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTImporter.h
@@ -258,6 +258,7 @@ class TypeSourceInfo;
FoundDeclsTy findDeclsInToCtx(DeclContext *DC, DeclarationName Name);
void AddToLookupTable(Decl *ToD);
+ llvm::Error ImportAttrs(Decl *ToD, Decl *FromD);
protected:
/// Can be overwritten by subclasses to implement their own import logic.
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h b/contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h
index a2c57aab89eb..d649ef6816ca 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h
@@ -104,7 +104,8 @@ public:
Visit(Comment, Comment);
// Decls within functions are visited by the body.
- if (!isa<FunctionDecl>(*D) && !isa<ObjCMethodDecl>(*D)) {
+ if (!isa<FunctionDecl>(*D) && !isa<ObjCMethodDecl>(*D) &&
+ !isa<BlockDecl>(*D)) {
if (Traversal != TK_AsIs) {
if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
auto SK = CTSD->getSpecializationKind();
@@ -384,7 +385,8 @@ public:
}
void VisitAttributedType(const AttributedType *T) {
// FIXME: AttrKind
- Visit(T->getModifiedType());
+ if (T->getModifiedType() != T->getEquivalentType())
+ Visit(T->getModifiedType());
}
void VisitBTFTagAttributedType(const BTFTagAttributedType *T) {
Visit(T->getWrappedType());
@@ -731,8 +733,11 @@ public:
}
void VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
- Visit(E->getControllingExpr());
- Visit(E->getControllingExpr()->getType()); // FIXME: remove
+ if (E->isExprPredicate()) {
+ Visit(E->getControllingExpr());
+ Visit(E->getControllingExpr()->getType()); // FIXME: remove
+ } else
+ Visit(E->getControllingType()->getType());
for (const auto Assoc : E->associations()) {
Visit(Assoc);
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h b/contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h
index 8713221a7378..78661823ca85 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h
@@ -78,9 +78,12 @@ public:
constexpr bool isNone() const { return KindId == NKI_None; }
/// Returns \c true if \c this is a base kind of (or same as) \c Other.
+ bool isBaseOf(ASTNodeKind Other) const;
+
+ /// Returns \c true if \c this is a base kind of (or same as) \c Other.
/// \param Distance If non-null, used to return the distance between \c this
/// and \c Other in the class hierarchy.
- bool isBaseOf(ASTNodeKind Other, unsigned *Distance = nullptr) const;
+ bool isBaseOf(ASTNodeKind Other, unsigned *Distance) const;
/// String representation of the kind.
StringRef asStringRef() const;
@@ -168,6 +171,10 @@ private:
/// Returns \c true if \c Base is a base kind of (or same as) \c
/// Derived.
+ static bool isBaseOf(NodeKindId Base, NodeKindId Derived);
+
+ /// Returns \c true if \c Base is a base kind of (or same as) \c
+ /// Derived.
/// \param Distance If non-null, used to return the distance between \c Base
/// and \c Derived in the class hierarchy.
static bool isBaseOf(NodeKindId Base, NodeKindId Derived, unsigned *Distance);
diff --git a/contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h b/contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h
index 946b9e318baa..eec2119f4a18 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h
@@ -315,7 +315,7 @@ public:
/// virtual function; in abstract classes, the final overrider for at
/// least one virtual function is a pure virtual function. Due to
/// multiple, virtual inheritance, it is possible for a class to have
-/// more than one final overrider. Athough this is an error (per C++
+/// more than one final overrider. Although this is an error (per C++
/// [class.virtual]p2), it is not considered an error here: the final
/// overrider map can represent multiple final overriders for a
/// method, and it is up to the client to determine whether they are
diff --git a/contrib/llvm-project/clang/include/clang/AST/CommentSema.h b/contrib/llvm-project/clang/include/clang/AST/CommentSema.h
index 9c2ca5f8e603..5d8df7dbf385 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CommentSema.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CommentSema.h
@@ -193,7 +193,7 @@ private:
void checkContainerDecl(const BlockCommandComment *Comment);
/// Resolve parameter names to parameter indexes in function declaration.
- /// Emit diagnostics about unknown parametrs.
+ /// Emit diagnostics about unknown parameters.
void resolveParamCommandIndexes(const FullComment *FC);
/// \returns \c true if the declaration that this comment is attached to
diff --git a/contrib/llvm-project/clang/include/clang/AST/ComparisonCategories.h b/contrib/llvm-project/clang/include/clang/AST/ComparisonCategories.h
index 1c94cee4b7c4..b4ad37e394ce 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ComparisonCategories.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ComparisonCategories.h
@@ -39,9 +39,8 @@ class NamespaceDecl;
/// An enumeration representing the different comparison categories
/// types.
///
-/// C++2a [cmp.categories.pre] The types weak_equality, strong_equality,
-/// partial_ordering, weak_ordering, and strong_ordering are collectively
-/// termed the comparison category types.
+/// C++20 [cmp.categories.pre] The types partial_ordering, weak_ordering, and
+/// strong_ordering are collectively termed the comparison category types.
enum class ComparisonCategoryType : unsigned char {
PartialOrdering,
WeakOrdering,
diff --git a/contrib/llvm-project/clang/include/clang/AST/Decl.h b/contrib/llvm-project/clang/include/clang/AST/Decl.h
index 32c5ab943e7e..788f6ab97b1b 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Decl.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Decl.h
@@ -395,9 +395,7 @@ public:
/// Get the linkage from a semantic point of view. Entities in
/// anonymous namespaces are external (in c++98).
- Linkage getFormalLinkage() const {
- return clang::getFormalLinkage(getLinkageInternal());
- }
+ Linkage getFormalLinkage() const;
/// True if this decl has external linkage.
bool hasExternalFormalLinkage() const {
@@ -902,7 +900,7 @@ struct EvaluatedStmt {
bool HasICEInit : 1;
bool CheckedForICEInit : 1;
- Stmt *Value;
+ LazyDeclStmtPtr Value;
APValue Evaluated;
EvaluatedStmt()
@@ -1360,12 +1358,15 @@ public:
EvaluatedStmt *getEvaluatedStmt() const;
/// Attempt to evaluate the value of the initializer attached to this
- /// declaration, and produce notes explaining why it cannot be evaluated or is
- /// not a constant expression. Returns a pointer to the value if evaluation
- /// succeeded, 0 otherwise.
+ /// declaration, and produce notes explaining why it cannot be evaluated.
+ /// Returns a pointer to the value if evaluation succeeded, 0 otherwise.
APValue *evaluateValue() const;
- APValue *evaluateValue(SmallVectorImpl<PartialDiagnosticAt> &Notes) const;
+private:
+ APValue *evaluateValueImpl(SmallVectorImpl<PartialDiagnosticAt> &Notes,
+ bool IsConstantInitialization) const;
+
+public:
/// Return the already-evaluated value of this variable's
/// initializer, or NULL if the value is not yet known. Returns pointer
/// to untyped APValue if the value could not be evaluated.
@@ -2167,7 +2168,7 @@ public:
/// declaration to the declaration that is a definition (if there is one).
///
/// \param CheckForPendingFriendDefinition If \c true, also check for friend
- /// declarations that were instantiataed from function definitions.
+ /// declarations that were instantiated from function definitions.
/// Such a declaration behaves as if it is a definition for the
/// purpose of redefinition checking, but isn't actually a "real"
/// definition until its body is instantiated.
@@ -2377,6 +2378,21 @@ public:
return getConstexprKind() == ConstexprSpecKind::Consteval;
}
+ void setBodyContainsImmediateEscalatingExpressions(bool Set) {
+ FunctionDeclBits.BodyContainsImmediateEscalatingExpression = Set;
+ }
+
+ bool BodyContainsImmediateEscalatingExpressions() const {
+ return FunctionDeclBits.BodyContainsImmediateEscalatingExpression;
+ }
+
+ bool isImmediateEscalating() const;
+
+ // The function is a C++ immediate function.
+ // This can be either a consteval function, or an immediate escalating
+ // function containing an immediate escalating expression.
+ bool isImmediateFunction() const;
+
/// Whether the instantiation of this function is pending.
/// This bit is set when the decision to instantiate this function is made
/// and unset if and when the function body is created. That leaves out
@@ -2536,6 +2552,10 @@ public:
->FunctionDeclBits.FriendConstraintRefersToEnclosingTemplate;
}
+ /// Determine whether a function is a friend function that cannot be
+ /// redeclared outside of its class, per C++ [temp.friend]p9.
+ bool isMemberLikeConstrainedFriend() const;
+
/// Gets the kind of multiversioning attribute this declaration has. Note that
/// this can return a value even if the function is not multiversion, such as
/// the case of 'target'.
@@ -2938,11 +2958,7 @@ public:
/// Represents a member of a struct/union/class.
class FieldDecl : public DeclaratorDecl, public Mergeable<FieldDecl> {
- unsigned BitField : 1;
- unsigned Mutable : 1;
- mutable unsigned CachedFieldIndex : 30;
-
- /// The kinds of value we can store in InitializerOrBitWidth.
+ /// The kinds of value we can store in StorageKind.
///
/// Note that this is compatible with InClassInitStyle except for
/// ISK_CapturedVLAType.
@@ -2965,10 +2981,15 @@ class FieldDecl : public DeclaratorDecl, public Mergeable<FieldDecl> {
ISK_CapturedVLAType,
};
+ unsigned BitField : 1;
+ unsigned Mutable : 1;
+ unsigned StorageKind : 2;
+ mutable unsigned CachedFieldIndex : 28;
+
/// If this is a bitfield with a default member initializer, this
/// structure is used to represent the two expressions.
- struct InitAndBitWidth {
- Expr *Init;
+ struct InitAndBitWidthStorage {
+ LazyDeclStmtPtr Init;
Expr *BitWidth;
};
@@ -2981,16 +3002,25 @@ class FieldDecl : public DeclaratorDecl, public Mergeable<FieldDecl> {
/// and attached.
// FIXME: Tail-allocate this to reduce the size of FieldDecl in the
// overwhelmingly common case that we have none of these things.
- llvm::PointerIntPair<void *, 2, InitStorageKind> InitStorage;
+ union {
+ // Active member if ISK is not ISK_CapturedVLAType and BitField is false.
+ LazyDeclStmtPtr Init;
+ // Active member if ISK is ISK_NoInit and BitField is true.
+ Expr *BitWidth;
+ // Active member if ISK is ISK_InClass*Init and BitField is true.
+ InitAndBitWidthStorage *InitAndBitWidth;
+ // Active member if ISK is ISK_CapturedVLAType.
+ const VariableArrayType *CapturedVLAType;
+ };
protected:
FieldDecl(Kind DK, DeclContext *DC, SourceLocation StartLoc,
- SourceLocation IdLoc, IdentifierInfo *Id,
- QualType T, TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
+ SourceLocation IdLoc, IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
InClassInitStyle InitStyle)
- : DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc),
- BitField(false), Mutable(Mutable), CachedFieldIndex(0),
- InitStorage(nullptr, (InitStorageKind) InitStyle) {
+ : DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc), BitField(false),
+ Mutable(Mutable), StorageKind((InitStorageKind)InitStyle),
+ CachedFieldIndex(0), Init() {
if (BW)
setBitWidth(BW);
}
@@ -3029,10 +3059,7 @@ public:
Expr *getBitWidth() const {
if (!BitField)
return nullptr;
- void *Ptr = InitStorage.getPointer();
- if (getInClassInitStyle())
- return static_cast<InitAndBitWidth*>(Ptr)->BitWidth;
- return static_cast<Expr*>(Ptr);
+ return hasInClassInitializer() ? InitAndBitWidth->BitWidth : BitWidth;
}
unsigned getBitWidthValue(const ASTContext &Ctx) const;
@@ -3043,11 +3070,11 @@ public:
assert(!hasCapturedVLAType() && !BitField &&
"bit width or captured type already set");
assert(Width && "no bit width specified");
- InitStorage.setPointer(
- InitStorage.getInt()
- ? new (getASTContext())
- InitAndBitWidth{getInClassInitializer(), Width}
- : static_cast<void*>(Width));
+ if (hasInClassInitializer())
+ InitAndBitWidth =
+ new (getASTContext()) InitAndBitWidthStorage{Init, Width};
+ else
+ BitWidth = Width;
BitField = true;
}
@@ -3055,7 +3082,11 @@ public:
// Note: used by some clients (i.e., do not remove it).
void removeBitWidth() {
assert(isBitField() && "no bitfield width to remove");
- InitStorage.setPointer(getInClassInitializer());
+ if (hasInClassInitializer()) {
+ // Read the old initializer before we change the active union member.
+ auto ExistingInit = InitAndBitWidth->Init;
+ Init = ExistingInit;
+ }
BitField = false;
}
@@ -3069,11 +3100,14 @@ public:
/// [[no_unique_address]] attribute.
bool isZeroSize(const ASTContext &Ctx) const;
+ /// Determine if this field is of potentially-overlapping class type, that
+ /// is, subobject with the [[no_unique_address]] attribute
+ bool isPotentiallyOverlapping() const;
+
/// Get the kind of (C++11) default member initializer that this field has.
InClassInitStyle getInClassInitStyle() const {
- InitStorageKind storageKind = InitStorage.getInt();
- return (storageKind == ISK_CapturedVLAType
- ? ICIS_NoInit : (InClassInitStyle) storageKind);
+ return (StorageKind == ISK_CapturedVLAType ? ICIS_NoInit
+ : (InClassInitStyle)StorageKind);
}
/// Determine whether this member has a C++11 default member initializer.
@@ -3081,44 +3115,44 @@ public:
return getInClassInitStyle() != ICIS_NoInit;
}
+ /// Determine whether getInClassInitializer() would return a non-null pointer
+ /// without deserializing the initializer.
+ bool hasNonNullInClassInitializer() const {
+ return hasInClassInitializer() && (BitField ? InitAndBitWidth->Init : Init);
+ }
+
/// Get the C++11 default member initializer for this member, or null if one
/// has not been set. If a valid declaration has a default member initializer,
/// but this returns null, then we have not parsed and attached it yet.
- Expr *getInClassInitializer() const {
- if (!hasInClassInitializer())
- return nullptr;
- void *Ptr = InitStorage.getPointer();
- if (BitField)
- return static_cast<InitAndBitWidth*>(Ptr)->Init;
- return static_cast<Expr*>(Ptr);
- }
+ Expr *getInClassInitializer() const;
/// Set the C++11 in-class initializer for this member.
- void setInClassInitializer(Expr *Init) {
- assert(hasInClassInitializer() && !getInClassInitializer());
- if (BitField)
- static_cast<InitAndBitWidth*>(InitStorage.getPointer())->Init = Init;
- else
- InitStorage.setPointer(Init);
- }
+ void setInClassInitializer(Expr *NewInit);
+
+private:
+ void setLazyInClassInitializer(LazyDeclStmtPtr NewInit);
+public:
/// Remove the C++11 in-class initializer from this member.
void removeInClassInitializer() {
assert(hasInClassInitializer() && "no initializer to remove");
- InitStorage.setPointerAndInt(getBitWidth(), ISK_NoInit);
+ StorageKind = ISK_NoInit;
+ if (BitField) {
+ // Read the bit width before we change the active union member.
+ Expr *ExistingBitWidth = InitAndBitWidth->BitWidth;
+ BitWidth = ExistingBitWidth;
+ }
}
/// Determine whether this member captures the variable length array
/// type.
bool hasCapturedVLAType() const {
- return InitStorage.getInt() == ISK_CapturedVLAType;
+ return StorageKind == ISK_CapturedVLAType;
}
/// Get the captured variable length array type.
const VariableArrayType *getCapturedVLAType() const {
- return hasCapturedVLAType() ? static_cast<const VariableArrayType *>(
- InitStorage.getPointer())
- : nullptr;
+ return hasCapturedVLAType() ? CapturedVLAType : nullptr;
}
/// Set the captured variable length array type for this field.
@@ -3691,6 +3725,7 @@ public:
return getExtInfo()->TemplParamLists[i];
}
+ using TypeDecl::printName;
void printName(raw_ostream &OS, const PrintingPolicy &Policy) const override;
void setTemplateParameterListsInfo(ASTContext &Context,
@@ -4305,6 +4340,7 @@ class TopLevelStmtDecl : public Decl {
friend class ASTDeclWriter;
Stmt *Statement = nullptr;
+ bool IsSemiMissing = false;
TopLevelStmtDecl(DeclContext *DC, SourceLocation L, Stmt *S)
: Decl(TopLevelStmt, DC, L), Statement(S) {}
@@ -4318,6 +4354,12 @@ public:
SourceRange getSourceRange() const override LLVM_READONLY;
Stmt *getStmt() { return Statement; }
const Stmt *getStmt() const { return Statement; }
+ void setStmt(Stmt *S) {
+ assert(IsSemiMissing && "Operation supported for printing values only!");
+ Statement = S;
+ }
+ bool isSemiMissing() const { return IsSemiMissing; }
+ void setSemiMissing(bool Missing = true) { IsSemiMissing = Missing; }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == TopLevelStmt; }
@@ -4704,7 +4746,7 @@ public:
static bool classofKind(Kind K) { return K == Import; }
};
-/// Represents a C++ Modules TS module export declaration.
+/// Represents a standard C++ module export declaration.
///
/// For example:
/// \code
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclBase.h b/contrib/llvm-project/clang/include/clang/AST/DeclBase.h
index 6134fdde8a2c..1b99709ca90d 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclBase.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclBase.h
@@ -644,6 +644,9 @@ public:
return getModuleOwnershipKind() > ModuleOwnershipKind::VisibleWhenImported;
}
+ /// Whether this declaration comes from another module unit.
+ bool isInAnotherModuleUnit() const;
+
/// FIXME: Implement discarding declarations actually in global module
/// fragment. See [module.global.frag]p3,4 for details.
bool isDiscardedInGlobalModuleFragment() const { return false; }
@@ -810,7 +813,7 @@ public:
}
/// Get the module that owns this declaration for linkage purposes.
- /// There only ever is such a module under the C++ Modules TS.
+ /// There only ever is such a standard C++ module.
///
/// \param IgnoreLinkage Ignore the linkage of the entity; assume that
/// all declarations in a global module fragment are unowned.
@@ -1172,6 +1175,12 @@ public:
}
}
+ /// Clears the namespace of this declaration.
+ ///
+ /// This is useful if we want this declaration to be available for
+ /// redeclaration lookup but otherwise hidden for ordinary name lookups.
+ void clearIdentifierNamespace() { IdentifierNamespace = 0; }
+
enum FriendObjectKind {
FOK_None, ///< Not a friend object.
FOK_Declared, ///< A friend of a previously-declared entity.
@@ -1227,6 +1236,10 @@ public:
/// have a FunctionType.
const FunctionType *getFunctionType(bool BlocksToo = true) const;
+ // Looks through the Decl's underlying type to determine if it's a
+ // function pointer type.
+ bool isFunctionPointerType() const;
+
private:
void setAttrsImpl(const AttrVec& Attrs, ASTContext &Ctx);
void setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
@@ -1369,6 +1382,13 @@ public:
}
};
+/// Only used by CXXDeductionGuideDecl.
+enum class DeductionCandidate : unsigned char {
+ Normal,
+ Copy,
+ Aggregate,
+};
+
/// DeclContext - This is used only as base class of specific decl types that
/// can act as declaration contexts. These decls are (only the top classes
/// that directly derive from DeclContext are mentioned, not their subclasses):
@@ -1389,6 +1409,8 @@ public:
class DeclContext {
/// For makeDeclVisibleInContextImpl
friend class ASTDeclReader;
+ /// For checking the new bits in the Serialization part.
+ friend class ASTDeclWriter;
/// For reconcileExternalVisibleStorage, CreateStoredDeclsMap,
/// hasNeedToReconcileExternalVisibleStorage
friend class ExternalASTSource;
@@ -1595,7 +1617,7 @@ class DeclContext {
uint64_t : NumDeclContextBits;
/// Kind of initializer,
- /// function call or omp_priv<init_expr> initializtion.
+ /// function call or omp_priv<init_expr> initialization.
uint64_t InitializerKind : 2;
};
@@ -1605,10 +1627,10 @@ class DeclContext {
/// Stores the bits used by FunctionDecl.
/// If modified NumFunctionDeclBits and the accessor
/// methods in FunctionDecl and CXXDeductionGuideDecl
- /// (for IsCopyDeductionCandidate) should be updated appropriately.
+ /// (for DeductionCandidateKind) should be updated appropriately.
class FunctionDeclBitfields {
friend class FunctionDecl;
- /// For IsCopyDeductionCandidate
+ /// For DeductionCandidateKind
friend class CXXDeductionGuideDecl;
/// For the bits in DeclContextBitfields.
uint64_t : NumDeclContextBits;
@@ -1644,6 +1666,8 @@ class DeclContext {
/// Kind of contexpr specifier as defined by ConstexprSpecKind.
uint64_t ConstexprKind : 2;
+ uint64_t BodyContainsImmediateEscalatingExpression : 1;
+
uint64_t InstantiationIsPending : 1;
/// Indicates if the function uses __try.
@@ -1661,10 +1685,10 @@ class DeclContext {
/// function using attribute 'target'.
uint64_t IsMultiVersion : 1;
- /// [C++17] Only used by CXXDeductionGuideDecl. Indicates that
- /// the Deduction Guide is the implicitly generated 'copy
- /// deduction candidate' (is used during overload resolution).
- uint64_t IsCopyDeductionCandidate : 1;
+ /// Only used by CXXDeductionGuideDecl. Indicates the kind
+ /// of the Deduction Guide that is implicitly generated
+ /// (used during overload resolution).
+ uint64_t DeductionCandidateKind : 2;
/// Store the ODRHash after first calculation.
uint64_t HasODRHash : 1;
@@ -1678,7 +1702,7 @@ class DeclContext {
};
/// Number of non-inherited bits in FunctionDeclBitfields.
- enum { NumFunctionDeclBits = 29 };
+ enum { NumFunctionDeclBits = 30 };
/// Stores the bits used by CXXConstructorDecl. If modified
/// NumCXXConstructorDeclBits and the accessor
@@ -1690,12 +1714,12 @@ class DeclContext {
/// For the bits in FunctionDeclBitfields.
uint64_t : NumFunctionDeclBits;
- /// 22 bits to fit in the remaining available space.
+ /// 21 bits to fit in the remaining available space.
/// Note that this makes CXXConstructorDeclBitfields take
/// exactly 64 bits and thus the width of NumCtorInitializers
/// will need to be shrunk if some bit is added to NumDeclContextBitfields,
/// NumFunctionDeclBitfields or CXXConstructorDeclBitfields.
- uint64_t NumCtorInitializers : 19;
+ uint64_t NumCtorInitializers : 18;
uint64_t IsInheritingConstructor : 1;
/// Whether this constructor has a trail-allocated explicit specifier.
@@ -2525,10 +2549,8 @@ public:
D == LastDecl);
}
- bool setUseQualifiedLookup(bool use = true) const {
- bool old_value = DeclContextBits.UseQualifiedLookup;
+ void setUseQualifiedLookup(bool use = true) const {
DeclContextBits.UseQualifiedLookup = use;
- return old_value;
}
bool shouldUseQualifiedLookup() const {
@@ -2589,14 +2611,6 @@ private:
void reconcileExternalVisibleStorage() const;
bool LoadLexicalDeclsFromExternalStorage() const;
- /// Makes a declaration visible within this context, but
- /// suppresses searches for external declarations with the same
- /// name.
- ///
- /// Analogous to makeDeclVisibleInContext, but for the exclusive
- /// use of addDeclInternal().
- void makeDeclVisibleInContextInternal(NamedDecl *D);
-
StoredDeclsMap *CreateStoredDeclsMap(ASTContext &C) const;
void loadLazyLocalLexicalLookups();
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h b/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h
index 11276c77490c..afec8150c2c9 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h
@@ -395,7 +395,7 @@ private:
unsigned NumCaptures : 15;
/// The number of explicit captures in this lambda.
- unsigned NumExplicitCaptures : 13;
+ unsigned NumExplicitCaptures : 12;
/// Has known `internal` linkage.
unsigned HasKnownInternalLinkage : 1;
@@ -404,6 +404,10 @@ private:
/// mangling in the Itanium C++ ABI.
unsigned ManglingNumber : 31;
+ /// The index of this lambda within its context declaration. This is not in
+ /// general the same as the mangling number.
+ unsigned IndexInContext;
+
/// The declaration that provides context for this lambda, if the
/// actual DeclContext does not suffice. This is used for lambdas that
/// occur within default arguments of function parameters within the class
@@ -424,7 +428,7 @@ private:
: DefinitionData(D), DependencyKind(DK), IsGenericLambda(IsGeneric),
CaptureDefault(CaptureDefault), NumCaptures(0),
NumExplicitCaptures(0), HasKnownInternalLinkage(0), ManglingNumber(0),
- MethodTyInfo(Info) {
+ IndexInContext(0), MethodTyInfo(Info) {
IsLambda = true;
// C++1z [expr.prim.lambda]p4:
@@ -1092,6 +1096,11 @@ public:
unsigned capture_size() const { return getLambdaData().NumCaptures; }
+ const LambdaCapture *getCapture(unsigned I) const {
+ assert(isLambda() && I < capture_size() && "invalid index for capture");
+ return captures_begin() + I;
+ }
+
using conversion_iterator = UnresolvedSetIterator;
conversion_iterator conversion_begin() const {
@@ -1160,6 +1169,10 @@ public:
///
/// \note This does NOT include a check for union-ness.
bool isEmpty() const { return data().Empty; }
+ /// Marks this record as empty. This is used by DWARFASTParserClang
+ /// when parsing records with empty fields having [[no_unique_address]]
+ /// attribute
+ void markEmpty() { data().Empty = true; }
void setInitMethod(bool Val) { data().HasInitMethod = Val; }
bool hasInitMethod() const { return data().HasInitMethod; }
@@ -1437,7 +1450,7 @@ public:
}
/// Notify the class that this destructor is now selected.
- ///
+ ///
/// Important properties of the class depend on destructor properties. Since
/// C++20, it is possible to have multiple destructor declarations in a class
/// out of which one will be selected at the end.
@@ -1763,18 +1776,31 @@ public:
/// the declaration context suffices.
Decl *getLambdaContextDecl() const;
- /// Set the mangling number and context declaration for a lambda
- /// class.
- void setLambdaMangling(unsigned ManglingNumber, Decl *ContextDecl,
- bool HasKnownInternalLinkage = false) {
+ /// Retrieve the index of this lambda within the context declaration returned
+ /// by getLambdaContextDecl().
+ unsigned getLambdaIndexInContext() const {
assert(isLambda() && "Not a lambda closure type!");
- getLambdaData().ManglingNumber = ManglingNumber;
- getLambdaData().ContextDecl = ContextDecl;
- getLambdaData().HasKnownInternalLinkage = HasKnownInternalLinkage;
+ return getLambdaData().IndexInContext;
}
- /// Set the device side mangling number.
- void setDeviceLambdaManglingNumber(unsigned Num) const;
+ /// Information about how a lambda is numbered within its context.
+ struct LambdaNumbering {
+ Decl *ContextDecl = nullptr;
+ unsigned IndexInContext = 0;
+ unsigned ManglingNumber = 0;
+ unsigned DeviceManglingNumber = 0;
+ bool HasKnownInternalLinkage = false;
+ };
+
+ /// Set the mangling numbers and context declaration for a lambda class.
+ void setLambdaNumbering(LambdaNumbering Numbering);
+
+ // Get the mangling numbers and context declaration for a lambda class.
+ LambdaNumbering getLambdaNumbering() const {
+ return {getLambdaContextDecl(), getLambdaIndexInContext(),
+ getLambdaManglingNumber(), getDeviceLambdaManglingNumber(),
+ hasKnownLambdaInternalLinkage()};
+ }
/// Retrieve the device side mangling number.
unsigned getDeviceLambdaManglingNumber() const;
@@ -1826,6 +1852,20 @@ public:
return getLambdaData().MethodTyInfo;
}
+ void setLambdaTypeInfo(TypeSourceInfo *TS) {
+ assert(DefinitionData && DefinitionData->IsLambda &&
+ "setting lambda property of non-lambda class");
+ auto &DL = static_cast<LambdaDefinitionData &>(*DefinitionData);
+ DL.MethodTyInfo = TS;
+ }
+
+ void setLambdaIsGeneric(bool IsGeneric) {
+ assert(DefinitionData && DefinitionData->IsLambda &&
+ "setting lambda property of non-lambda class");
+ auto &DL = static_cast<LambdaDefinitionData &>(*DefinitionData);
+ DL.IsGenericLambda = IsGeneric;
+ }
+
// Determine whether this type is an Interface Like type for
// __interface inheritance purposes.
bool isInterfaceLike() const;
@@ -1902,13 +1942,13 @@ private:
ExplicitSpecifier ES,
const DeclarationNameInfo &NameInfo, QualType T,
TypeSourceInfo *TInfo, SourceLocation EndLocation,
- CXXConstructorDecl *Ctor)
+ CXXConstructorDecl *Ctor, DeductionCandidate Kind)
: FunctionDecl(CXXDeductionGuide, C, DC, StartLoc, NameInfo, T, TInfo,
SC_None, false, false, ConstexprSpecKind::Unspecified),
Ctor(Ctor), ExplicitSpec(ES) {
if (EndLocation.isValid())
setRangeEnd(EndLocation);
- setIsCopyDeductionCandidate(false);
+ setDeductionCandidateKind(Kind);
}
CXXConstructorDecl *Ctor;
@@ -1923,7 +1963,8 @@ public:
Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
ExplicitSpecifier ES, const DeclarationNameInfo &NameInfo, QualType T,
TypeSourceInfo *TInfo, SourceLocation EndLocation,
- CXXConstructorDecl *Ctor = nullptr);
+ CXXConstructorDecl *Ctor = nullptr,
+ DeductionCandidate Kind = DeductionCandidate::Normal);
static CXXDeductionGuideDecl *CreateDeserialized(ASTContext &C, unsigned ID);
@@ -1940,16 +1981,15 @@ public:
/// Get the constructor from which this deduction guide was generated, if
/// this is an implicit deduction guide.
- CXXConstructorDecl *getCorrespondingConstructor() const {
- return Ctor;
- }
+ CXXConstructorDecl *getCorrespondingConstructor() const { return Ctor; }
- void setIsCopyDeductionCandidate(bool isCDC = true) {
- FunctionDeclBits.IsCopyDeductionCandidate = isCDC;
+ void setDeductionCandidateKind(DeductionCandidate K) {
+ FunctionDeclBits.DeductionCandidateKind = static_cast<unsigned char>(K);
}
- bool isCopyDeductionCandidate() const {
- return FunctionDeclBits.IsCopyDeductionCandidate;
+ DeductionCandidate getDeductionCandidateKind() const {
+ return static_cast<DeductionCandidate>(
+ FunctionDeclBits.DeductionCandidateKind);
}
// Implement isa/cast/dyncast/etc.
@@ -3970,12 +4010,12 @@ public:
/// Represents a C++11 static_assert declaration.
class StaticAssertDecl : public Decl {
llvm::PointerIntPair<Expr *, 1, bool> AssertExprAndFailed;
- StringLiteral *Message;
+ Expr *Message;
SourceLocation RParenLoc;
StaticAssertDecl(DeclContext *DC, SourceLocation StaticAssertLoc,
- Expr *AssertExpr, StringLiteral *Message,
- SourceLocation RParenLoc, bool Failed)
+ Expr *AssertExpr, Expr *Message, SourceLocation RParenLoc,
+ bool Failed)
: Decl(StaticAssert, DC, StaticAssertLoc),
AssertExprAndFailed(AssertExpr, Failed), Message(Message),
RParenLoc(RParenLoc) {}
@@ -3987,15 +4027,15 @@ public:
static StaticAssertDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StaticAssertLoc,
- Expr *AssertExpr, StringLiteral *Message,
+ Expr *AssertExpr, Expr *Message,
SourceLocation RParenLoc, bool Failed);
static StaticAssertDecl *CreateDeserialized(ASTContext &C, unsigned ID);
Expr *getAssertExpr() { return AssertExprAndFailed.getPointer(); }
const Expr *getAssertExpr() const { return AssertExprAndFailed.getPointer(); }
- StringLiteral *getMessage() { return Message; }
- const StringLiteral *getMessage() const { return Message; }
+ Expr *getMessage() { return Message; }
+ const Expr *getMessage() const { return Message; }
bool isFailed() const { return AssertExprAndFailed.getInt(); }
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h b/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h
index 3d650b82f2b9..ee8ec7a6a016 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h
@@ -172,7 +172,7 @@ private:
Selector SelInfo, QualType T, TypeSourceInfo *ReturnTInfo,
DeclContext *contextDecl, bool isInstance = true,
bool isVariadic = false, bool isPropertyAccessor = false,
- bool isSynthesizedAccessorStub = false,
+ bool isSynthesizedAccessorStub = false,
bool isImplicitlyDeclared = false, bool isDefined = false,
ImplementationControl impControl = None,
bool HasRelatedResultType = false);
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h b/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
index ae2542f4f231..7cd505218f2b 100755
--- a/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
@@ -117,6 +117,8 @@ public:
SourceLocation RAngleLoc,
Expr *RequiresClause);
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &C) const;
+
/// Iterates through the template parameters in this list.
using iterator = NamedDecl **;
@@ -616,7 +618,7 @@ public:
static void
Profile(llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs,
- ASTContext &Context) {
+ const ASTContext &Context) {
ID.AddInteger(TemplateArgs.size());
for (const TemplateArgument &TemplateArg : TemplateArgs)
TemplateArg.Profile(ID, Context);
@@ -850,7 +852,7 @@ protected:
/// template.
///
/// This pointer refers to the template arguments (there are as
- /// many template arguments as template parameaters) for the
+ /// many template arguments as template parameters) for the
/// template, and is allocated lazily, since most templates do not
/// require the use of this information.
TemplateArgument *InjectedArgs = nullptr;
@@ -2081,7 +2083,7 @@ public:
static void
Profile(llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs,
- ASTContext &Context) {
+ const ASTContext &Context) {
ID.AddInteger(TemplateArgs.size());
for (const TemplateArgument &TemplateArg : TemplateArgs)
TemplateArg.Profile(ID, Context);
@@ -2257,7 +2259,7 @@ public:
static void
Profile(llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs,
- TemplateParameterList *TPL, ASTContext &Context);
+ TemplateParameterList *TPL, const ASTContext &Context);
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -2307,9 +2309,15 @@ protected:
return static_cast<Common *>(RedeclarableTemplateDecl::getCommonPtr());
}
+ void setCommonPtr(Common *C) {
+ RedeclarableTemplateDecl::Common = C;
+ }
+
public:
+
friend class ASTDeclReader;
friend class ASTDeclWriter;
+ friend class TemplateDeclInstantiator;
/// Load any lazily-loaded specializations from the external source.
void LoadLazySpecializations() const;
@@ -2926,13 +2934,7 @@ public:
return ExplicitInfo ? ExplicitInfo->TemplateKeywordLoc : SourceLocation();
}
- SourceRange getSourceRange() const override LLVM_READONLY {
- if (isExplicitSpecialization()) {
- if (const ASTTemplateArgumentListInfo *Info = getTemplateArgsInfo())
- return SourceRange(getOuterLocStart(), Info->getRAngleLoc());
- }
- return VarDecl::getSourceRange();
- }
+ SourceRange getSourceRange() const override LLVM_READONLY;
void Profile(llvm::FoldingSetNodeID &ID) const {
Profile(ID, TemplateArgs->asArray(), getASTContext());
@@ -2940,7 +2942,7 @@ public:
static void Profile(llvm::FoldingSetNodeID &ID,
ArrayRef<TemplateArgument> TemplateArgs,
- ASTContext &Context) {
+ const ASTContext &Context) {
ID.AddInteger(TemplateArgs.size());
for (const TemplateArgument &TemplateArg : TemplateArgs)
TemplateArg.Profile(ID, Context);
@@ -3091,13 +3093,7 @@ public:
return First->InstantiatedFromMember.setInt(true);
}
- SourceRange getSourceRange() const override LLVM_READONLY {
- if (isExplicitSpecialization()) {
- if (const ASTTemplateArgumentListInfo *Info = getTemplateArgsAsWritten())
- return SourceRange(getOuterLocStart(), Info->getRAngleLoc());
- }
- return VarDecl::getSourceRange();
- }
+ SourceRange getSourceRange() const override LLVM_READONLY;
void Profile(llvm::FoldingSetNodeID &ID) const {
Profile(ID, getTemplateArgs().asArray(), getTemplateParameters(),
@@ -3106,7 +3102,7 @@ public:
static void
Profile(llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs,
- TemplateParameterList *TPL, ASTContext &Context);
+ TemplateParameterList *TPL, const ASTContext &Context);
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclarationName.h b/contrib/llvm-project/clang/include/clang/AST/DeclarationName.h
index 1496e213dd76..b06931ea3e41 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclarationName.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclarationName.h
@@ -118,14 +118,14 @@ class alignas(IdentifierInfoAlignment) CXXLiteralOperatorIdName
friend class clang::DeclarationName;
friend class clang::DeclarationNameTable;
- IdentifierInfo *ID;
+ const IdentifierInfo *ID;
/// Extra information associated with this operator name that
/// can be used by the front end. All bits are really needed
/// so it is not possible to stash something in the low order bits.
void *FETokenInfo;
- CXXLiteralOperatorIdName(IdentifierInfo *II)
+ CXXLiteralOperatorIdName(const IdentifierInfo *II)
: DeclarationNameExtra(CXXLiteralOperatorName), ID(II),
FETokenInfo(nullptr) {}
@@ -478,7 +478,7 @@ public:
/// If this name is the name of a literal operator,
/// retrieve the identifier associated with it.
- IdentifierInfo *getCXXLiteralIdentifier() const {
+ const IdentifierInfo *getCXXLiteralIdentifier() const {
if (getNameKind() == CXXLiteralOperatorName) {
assert(getPtr() && "getCXXLiteralIdentifier on a null DeclarationName!");
return castAsCXXLiteralOperatorIdName()->ID;
@@ -650,7 +650,7 @@ public:
}
/// Get the name of the literal operator function with II as the identifier.
- DeclarationName getCXXLiteralOperatorName(IdentifierInfo *II);
+ DeclarationName getCXXLiteralOperatorName(const IdentifierInfo *II);
};
/// DeclarationNameLoc - Additional source/type location info
@@ -763,7 +763,7 @@ public:
};
/// DeclarationNameInfo - A collector data type for bundling together
-/// a DeclarationName and the correspnding source/type location info.
+/// a DeclarationName and the corresponding source/type location info.
struct DeclarationNameInfo {
private:
/// Name - The declaration name, also encoding name kind.
diff --git a/contrib/llvm-project/clang/include/clang/AST/Expr.h b/contrib/llvm-project/clang/include/clang/AST/Expr.h
index b47e2aa4688b..f9795b6386c4 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Expr.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Expr.h
@@ -135,8 +135,8 @@ protected:
void setDependence(ExprDependence Deps) {
ExprBits.Dependent = static_cast<unsigned>(Deps);
}
- friend class ASTImporter; // Sets dependence dircetly.
- friend class ASTStmtReader; // Sets dependence dircetly.
+ friend class ASTImporter; // Sets dependence directly.
+ friend class ASTStmtReader; // Sets dependence directly.
public:
QualType getType() const { return TR; }
@@ -171,7 +171,7 @@ public:
}
/// Determines whether the type of this expression depends on
- /// - a template paramter (C++ [temp.dep.expr], which means that its type
+ /// - a template parameter (C++ [temp.dep.expr], which means that its type
/// could change from one template instantiation to the next)
/// - or an error
///
@@ -593,12 +593,12 @@ public:
struct EvalStatus {
/// Whether the evaluated expression has side effects.
/// For example, (f() && 0) can be folded, but it still has side effects.
- bool HasSideEffects;
+ bool HasSideEffects = false;
/// Whether the evaluation hit undefined behavior.
/// For example, 1.0 / 0.0 can be folded to Inf, but has undefined behavior.
/// Likewise, INT_MAX + 1 can be folded to INT_MIN, but has UB.
- bool HasUndefinedBehavior;
+ bool HasUndefinedBehavior = false;
/// Diag - If this is non-null, it will be filled in with a stack of notes
/// indicating why evaluation failed (or why it failed to produce a constant
@@ -607,10 +607,9 @@ public:
/// foldable. If the expression is foldable, but not a constant expression,
/// the notes will describes why it isn't a constant expression. If the
/// expression *is* a constant expression, no notes will be produced.
- SmallVectorImpl<PartialDiagnosticAt> *Diag;
+ SmallVectorImpl<PartialDiagnosticAt> *Diag = nullptr;
- EvalStatus()
- : HasSideEffects(false), HasUndefinedBehavior(false), Diag(nullptr) {}
+ EvalStatus() = default;
// hasSideEffects - Return true if the evaluated expression has
// side effects.
@@ -665,8 +664,8 @@ public:
SideEffectsKind AllowSideEffects = SE_NoSideEffects,
bool InConstantContext = false) const;
- /// EvaluateAsFloat - Return true if this is a constant which we can fold and
- /// convert to a fixed point value.
+ /// EvaluateAsFixedPoint - Return true if this is a constant which we can fold
+ /// and convert to a fixed point value.
bool EvaluateAsFixedPoint(EvalResult &Result, const ASTContext &Ctx,
SideEffectsKind AllowSideEffects = SE_NoSideEffects,
bool InConstantContext = false) const;
@@ -714,7 +713,8 @@ public:
/// notes will be produced if the expression is not a constant expression.
bool EvaluateAsInitializer(APValue &Result, const ASTContext &Ctx,
const VarDecl *VD,
- SmallVectorImpl<PartialDiagnosticAt> &Notes) const;
+ SmallVectorImpl<PartialDiagnosticAt> &Notes,
+ bool IsConstantInitializer) const;
/// EvaluateWithSubstitution - Evaluate an expression as if from the context
/// of a call to the given function with the given arguments, inside an
@@ -762,6 +762,11 @@ public:
/// strlen, false otherwise.
bool tryEvaluateStrLen(uint64_t &Result, ASTContext &Ctx) const;
+ bool EvaluateCharRangeAsString(std::string &Result,
+ const Expr *SizeExpression,
+ const Expr *PtrExpression, ASTContext &Ctx,
+ EvalResult &Status) const;
+
/// Enumeration used to describe the kind of Null pointer constant
/// returned from \c isNullPointerConstant().
enum NullPointerConstantKind {
@@ -819,7 +824,7 @@ public:
/// member expression.
static QualType findBoundMemberType(const Expr *expr);
- /// Skip past any invisble AST nodes which might surround this
+ /// Skip past any invisible AST nodes which might surround this
/// statement, such as ExprWithCleanups or ImplicitCastExpr nodes,
/// but also injected CXXMemberExpr and CXXConstructExpr which represent
/// implicit conversions.
@@ -923,7 +928,7 @@ public:
return const_cast<Expr *>(this)->IgnoreParenLValueCasts();
}
- /// Skip past any parenthese and casts which do not change the value
+ /// Skip past any parentheses and casts which do not change the value
/// (including ptr->int casts of the same size) until reaching a fixed point.
/// Skips:
/// * What IgnoreParens() skips
@@ -1436,6 +1441,14 @@ public:
return DeclRefExprBits.RefersToEnclosingVariableOrCapture;
}
+ bool isImmediateEscalating() const {
+ return DeclRefExprBits.IsImmediateEscalating;
+ }
+
+ void setIsImmediateEscalating(bool Set) {
+ DeclRefExprBits.IsImmediateEscalating = Set;
+ }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclRefExprClass;
}
@@ -1796,7 +1809,7 @@ class StringLiteral final
/// * An array of getByteLength() char used to store the string data.
public:
- enum StringKind { Ordinary, Wide, UTF8, UTF16, UTF32 };
+ enum StringKind { Ordinary, Wide, UTF8, UTF16, UTF32, Unevaluated };
private:
unsigned numTrailingObjects(OverloadToken<unsigned>) const { return 1; }
@@ -1858,7 +1871,7 @@ public:
unsigned CharByteWidth);
StringRef getString() const {
- assert(getCharByteWidth() == 1 &&
+ assert((isUnevaluated() || getCharByteWidth() == 1) &&
"This function is used in places that assume strings use char");
return StringRef(getStrDataAsChar(), getByteLength());
}
@@ -1898,6 +1911,7 @@ public:
bool isUTF8() const { return getKind() == UTF8; }
bool isUTF16() const { return getKind() == UTF16; }
bool isUTF32() const { return getKind() == UTF32; }
+ bool isUnevaluated() const { return getKind() == Unevaluated; }
bool isPascal() const { return StringLiteralBits.IsPascal; }
bool containsNonAscii() const {
@@ -1992,7 +2006,7 @@ public:
private:
PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
- StringLiteral *SL);
+ bool IsTransparent, StringLiteral *SL);
explicit PredefinedExpr(EmptyShell Empty, bool HasFunctionName);
@@ -2007,8 +2021,12 @@ private:
public:
/// Create a PredefinedExpr.
+ ///
+ /// If IsTransparent, the PredefinedExpr is transparently handled as a
+ /// StringLiteral.
static PredefinedExpr *Create(const ASTContext &Ctx, SourceLocation L,
- QualType FNTy, IdentKind IK, StringLiteral *SL);
+ QualType FNTy, IdentKind IK, bool IsTransparent,
+ StringLiteral *SL);
/// Create an empty PredefinedExpr.
static PredefinedExpr *CreateEmpty(const ASTContext &Ctx,
@@ -2018,6 +2036,8 @@ public:
return static_cast<IdentKind>(PredefinedExprBits.Kind);
}
+ bool isTransparent() const { return PredefinedExprBits.IsTransparent; }
+
SourceLocation getLocation() const { return PredefinedExprBits.Loc; }
void setLocation(SourceLocation L) { PredefinedExprBits.Loc = L; }
@@ -2233,14 +2253,14 @@ public:
bool canOverflow() const { return UnaryOperatorBits.CanOverflow; }
void setCanOverflow(bool C) { UnaryOperatorBits.CanOverflow = C; }
- // Get the FP contractability status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FP contractability status of this operator. Only meaningful for
+ /// operations on floating point types.
bool isFPContractableWithinStatement(const LangOptions &LO) const {
return getFPFeaturesInEffect(LO).allowFPContractWithinStatement();
}
- // Get the FENV_ACCESS status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FENV_ACCESS status of this operator. Only meaningful for
+ /// operations on floating point types.
bool isFEnvAccessOn(const LangOptions &LO) const {
return getFPFeaturesInEffect(LO).getAllowFEnvAccess();
}
@@ -2325,8 +2345,8 @@ protected:
void setStoredFPFeatures(FPOptionsOverride F) { getTrailingFPFeatures() = F; }
public:
- // Get the FP features status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FP features status of this operator. Only meaningful for
+ /// operations on floating point types.
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const {
if (UnaryOperatorBits.HasFPFeatures)
return getStoredFPFeatures().applyOverrides(LO);
@@ -2814,7 +2834,7 @@ class CallExpr : public Expr {
/// The number of arguments in the call expression.
unsigned NumArgs;
- /// The location of the right parenthese. This has a different meaning for
+ /// The location of the right parentheses. This has a different meaning for
/// the derived classes of CallExpr.
SourceLocation RParenLoc;
@@ -3082,8 +3102,8 @@ public:
*getTrailingFPFeatures() = F;
}
- // Get the FP features status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FP features status of this operator. Only meaningful for
+ /// operations on floating point types.
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const {
if (hasStoredFPFeatures())
return getStoredFPFeatures().applyOverrides(LO);
@@ -3573,8 +3593,8 @@ public:
return *getTrailingFPFeatures();
}
- // Get the FP features status of this operation. Only meaningful for
- // operations on floating point types.
+ /// Get the FP features status of this operation. Only meaningful for
+ /// operations on floating point types.
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const {
if (hasStoredFPFeatures())
return getStoredFPFeatures().applyOverrides(LO);
@@ -3971,11 +3991,12 @@ public:
return isShiftAssignOp(getOpcode());
}
- // Return true if a binary operator using the specified opcode and operands
- // would match the 'p = (i8*)nullptr + n' idiom for casting a pointer-sized
- // integer to a pointer.
+ /// Return true if a binary operator using the specified opcode and operands
+ /// would match the 'p = (i8*)nullptr + n' idiom for casting a pointer-sized
+ /// integer to a pointer.
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc,
- Expr *LHS, Expr *RHS);
+ const Expr *LHS,
+ const Expr *RHS);
static bool classof(const Stmt *S) {
return S->getStmtClass() >= firstBinaryOperatorConstant &&
@@ -4006,8 +4027,8 @@ public:
*getTrailingFPFeatures() = F;
}
- // Get the FP features status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FP features status of this operator. Only meaningful for
+ /// operations on floating point types.
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const {
if (BinaryOperatorBits.HasFPFeatures)
return getStoredFPFeatures().applyOverrides(LO);
@@ -4021,14 +4042,14 @@ public:
return FPOptionsOverride();
}
- // Get the FP contractability status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FP contractability status of this operator. Only meaningful for
+ /// operations on floating point types.
bool isFPContractableWithinStatement(const LangOptions &LO) const {
return getFPFeaturesInEffect(LO).allowFPContractWithinStatement();
}
- // Get the FENV_ACCESS status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FENV_ACCESS status of this operator. Only meaningful for
+ /// operations on floating point types.
bool isFEnvAccessOn(const LangOptions &LO) const {
return getFPFeaturesInEffect(LO).getAllowFEnvAccess();
}
@@ -4124,17 +4145,17 @@ protected:
: Expr(SC, Empty) { }
public:
- // getCond - Return the expression representing the condition for
- // the ?: operator.
+ /// getCond - Return the expression representing the condition for
+ /// the ?: operator.
Expr *getCond() const;
- // getTrueExpr - Return the subexpression representing the value of
- // the expression if the condition evaluates to true.
+ /// getTrueExpr - Return the subexpression representing the value of
+ /// the expression if the condition evaluates to true.
Expr *getTrueExpr() const;
- // getFalseExpr - Return the subexpression representing the value of
- // the expression if the condition evaluates to false. This is
- // the same as getRHS.
+ /// getFalseExpr - Return the subexpression representing the value of
+ /// the expression if the condition evaluates to false. This is
+ /// the same as getRHS.
Expr *getFalseExpr() const;
SourceLocation getQuestionLoc() const { return QuestionLoc; }
@@ -4169,17 +4190,17 @@ public:
explicit ConditionalOperator(EmptyShell Empty)
: AbstractConditionalOperator(ConditionalOperatorClass, Empty) { }
- // getCond - Return the expression representing the condition for
- // the ?: operator.
+ /// getCond - Return the expression representing the condition for
+ /// the ?: operator.
Expr *getCond() const { return cast<Expr>(SubExprs[COND]); }
- // getTrueExpr - Return the subexpression representing the value of
- // the expression if the condition evaluates to true.
+ /// getTrueExpr - Return the subexpression representing the value of
+ /// the expression if the condition evaluates to true.
Expr *getTrueExpr() const { return cast<Expr>(SubExprs[LHS]); }
- // getFalseExpr - Return the subexpression representing the value of
- // the expression if the condition evaluates to false. This is
- // the same as getRHS.
+ /// getFalseExpr - Return the subexpression representing the value of
+ /// the expression if the condition evaluates to false. This is
+ /// the same as getRHS.
Expr *getFalseExpr() const { return cast<Expr>(SubExprs[RHS]); }
Expr *getLHS() const { return cast<Expr>(SubExprs[LHS]); }
@@ -4684,13 +4705,22 @@ public:
};
/// Represents a function call to one of __builtin_LINE(), __builtin_COLUMN(),
-/// __builtin_FUNCTION(), __builtin_FILE(), or __builtin_source_location().
+/// __builtin_FUNCTION(), __builtin_FUNCSIG(), __builtin_FILE(),
+/// __builtin_FILE_NAME() or __builtin_source_location().
class SourceLocExpr final : public Expr {
SourceLocation BuiltinLoc, RParenLoc;
DeclContext *ParentContext;
public:
- enum IdentKind { Function, File, Line, Column, SourceLocStruct };
+ enum IdentKind {
+ Function,
+ FuncSig,
+ File,
+ FileName,
+ Line,
+ Column,
+ SourceLocStruct
+ };
SourceLocExpr(const ASTContext &Ctx, IdentKind Type, QualType ResultTy,
SourceLocation BLoc, SourceLocation RParenLoc,
@@ -4714,7 +4744,9 @@ public:
bool isIntType() const {
switch (getIdentKind()) {
case File:
+ case FileName:
case Function:
+ case FuncSig:
case SourceLocStruct:
return false;
case Line:
@@ -4900,6 +4932,13 @@ public:
/// has been set.
bool hasArrayFiller() const { return getArrayFiller(); }
+ /// Determine whether this initializer list contains a designated initializer.
+ bool hasDesignatedInit() const {
+ return std::any_of(begin(), end(), [](const Stmt *S) {
+ return isa<DesignatedInitExpr>(S);
+ });
+ }
+
/// If this initializes a union, specifies which field in the
/// union to initialize.
///
@@ -4928,8 +4967,8 @@ public:
return LBraceLoc.isValid() && RBraceLoc.isValid();
}
- // Is this an initializer for an array of characters, initialized by a string
- // literal or an @encode?
+ /// Is this an initializer for an array of characters, initialized by a string
+ /// literal or an @encode?
bool isStringLiteralInit() const;
/// Is this a transparent initializer list (that is, an InitListExpr that is
@@ -5068,37 +5107,6 @@ private:
NumDesignators(0), NumSubExprs(NumSubExprs), Designators(nullptr) { }
public:
- /// A field designator, e.g., ".x".
- struct FieldDesignator {
- /// Refers to the field that is being initialized. The low bit
- /// of this field determines whether this is actually a pointer
- /// to an IdentifierInfo (if 1) or a FieldDecl (if 0). When
- /// initially constructed, a field designator will store an
- /// IdentifierInfo*. After semantic analysis has resolved that
- /// name, the field designator will instead store a FieldDecl*.
- uintptr_t NameOrField;
-
- /// The location of the '.' in the designated initializer.
- SourceLocation DotLoc;
-
- /// The location of the field name in the designated initializer.
- SourceLocation FieldLoc;
- };
-
- /// An array or GNU array-range designator, e.g., "[9]" or "[10..15]".
- struct ArrayOrRangeDesignator {
- /// Location of the first index expression within the designated
- /// initializer expression's list of subexpressions.
- unsigned Index;
- /// The location of the '[' starting the array range designator.
- SourceLocation LBracketLoc;
- /// The location of the ellipsis separating the start and end
- /// indices. Only valid for GNU array-range designators.
- SourceLocation EllipsisLoc;
- /// The location of the ']' terminating the array range designator.
- SourceLocation RBracketLoc;
- };
-
/// Represents a single C99 designator.
///
/// @todo This class is infuriatingly similar to clang::Designator,
@@ -5106,118 +5114,177 @@ public:
/// keep us from reusing it. Try harder, later, to rectify these
/// differences.
class Designator {
+ /// A field designator, e.g., ".x".
+ struct FieldDesignatorInfo {
+ /// Refers to the field that is being initialized. The low bit
+ /// of this field determines whether this is actually a pointer
+ /// to an IdentifierInfo (if 1) or a FieldDecl (if 0). When
+ /// initially constructed, a field designator will store an
+ /// IdentifierInfo*. After semantic analysis has resolved that
+ /// name, the field designator will instead store a FieldDecl*.
+ uintptr_t NameOrField;
+
+ /// The location of the '.' in the designated initializer.
+ SourceLocation DotLoc;
+
+ /// The location of the field name in the designated initializer.
+ SourceLocation FieldLoc;
+
+ FieldDesignatorInfo(const IdentifierInfo *II, SourceLocation DotLoc,
+ SourceLocation FieldLoc)
+ : NameOrField(reinterpret_cast<uintptr_t>(II) | 0x1), DotLoc(DotLoc),
+ FieldLoc(FieldLoc) {}
+ };
+
+ /// An array or GNU array-range designator, e.g., "[9]" or "[10...15]".
+ struct ArrayOrRangeDesignatorInfo {
+ /// Location of the first index expression within the designated
+ /// initializer expression's list of subexpressions.
+ unsigned Index;
+
+ /// The location of the '[' starting the array range designator.
+ SourceLocation LBracketLoc;
+
+ /// The location of the ellipsis separating the start and end
+ /// indices. Only valid for GNU array-range designators.
+ SourceLocation EllipsisLoc;
+
+ /// The location of the ']' terminating the array range designator.
+ SourceLocation RBracketLoc;
+
+ ArrayOrRangeDesignatorInfo(unsigned Index, SourceLocation LBracketLoc,
+ SourceLocation RBracketLoc)
+ : Index(Index), LBracketLoc(LBracketLoc), RBracketLoc(RBracketLoc) {}
+
+ ArrayOrRangeDesignatorInfo(unsigned Index,
+ SourceLocation LBracketLoc,
+ SourceLocation EllipsisLoc,
+ SourceLocation RBracketLoc)
+ : Index(Index), LBracketLoc(LBracketLoc), EllipsisLoc(EllipsisLoc),
+ RBracketLoc(RBracketLoc) {}
+ };
+
/// The kind of designator this describes.
- enum {
+ enum DesignatorKind {
FieldDesignator,
ArrayDesignator,
ArrayRangeDesignator
- } Kind;
+ };
+
+ DesignatorKind Kind;
union {
/// A field designator, e.g., ".x".
- struct FieldDesignator Field;
+ struct FieldDesignatorInfo FieldInfo;
+
/// An array or GNU array-range designator, e.g., "[9]" or "[10..15]".
- struct ArrayOrRangeDesignator ArrayOrRange;
+ struct ArrayOrRangeDesignatorInfo ArrayOrRangeInfo;
};
- friend class DesignatedInitExpr;
+
+ Designator(DesignatorKind Kind) : Kind(Kind) {}
public:
Designator() {}
- /// Initializes a field designator.
- Designator(const IdentifierInfo *FieldName, SourceLocation DotLoc,
- SourceLocation FieldLoc)
- : Kind(FieldDesignator) {
- new (&Field) DesignatedInitExpr::FieldDesignator;
- Field.NameOrField = reinterpret_cast<uintptr_t>(FieldName) | 0x01;
- Field.DotLoc = DotLoc;
- Field.FieldLoc = FieldLoc;
- }
-
- /// Initializes an array designator.
- Designator(unsigned Index, SourceLocation LBracketLoc,
- SourceLocation RBracketLoc)
- : Kind(ArrayDesignator) {
- new (&ArrayOrRange) DesignatedInitExpr::ArrayOrRangeDesignator;
- ArrayOrRange.Index = Index;
- ArrayOrRange.LBracketLoc = LBracketLoc;
- ArrayOrRange.EllipsisLoc = SourceLocation();
- ArrayOrRange.RBracketLoc = RBracketLoc;
- }
-
- /// Initializes a GNU array-range designator.
- Designator(unsigned Index, SourceLocation LBracketLoc,
- SourceLocation EllipsisLoc, SourceLocation RBracketLoc)
- : Kind(ArrayRangeDesignator) {
- new (&ArrayOrRange) DesignatedInitExpr::ArrayOrRangeDesignator;
- ArrayOrRange.Index = Index;
- ArrayOrRange.LBracketLoc = LBracketLoc;
- ArrayOrRange.EllipsisLoc = EllipsisLoc;
- ArrayOrRange.RBracketLoc = RBracketLoc;
- }
-
bool isFieldDesignator() const { return Kind == FieldDesignator; }
bool isArrayDesignator() const { return Kind == ArrayDesignator; }
bool isArrayRangeDesignator() const { return Kind == ArrayRangeDesignator; }
- IdentifierInfo *getFieldName() const;
+ //===------------------------------------------------------------------===//
+ // FieldDesignatorInfo
+
+ /// Creates a field designator.
+ static Designator CreateFieldDesignator(const IdentifierInfo *FieldName,
+ SourceLocation DotLoc,
+ SourceLocation FieldLoc) {
+ Designator D(FieldDesignator);
+ new (&D.FieldInfo) FieldDesignatorInfo(FieldName, DotLoc, FieldLoc);
+ return D;
+ }
+
+ const IdentifierInfo *getFieldName() const;
- FieldDecl *getField() const {
- assert(Kind == FieldDesignator && "Only valid on a field designator");
- if (Field.NameOrField & 0x01)
+ FieldDecl *getFieldDecl() const {
+ assert(isFieldDesignator() && "Only valid on a field designator");
+ if (FieldInfo.NameOrField & 0x01)
return nullptr;
- else
- return reinterpret_cast<FieldDecl *>(Field.NameOrField);
+ return reinterpret_cast<FieldDecl *>(FieldInfo.NameOrField);
}
- void setField(FieldDecl *FD) {
- assert(Kind == FieldDesignator && "Only valid on a field designator");
- Field.NameOrField = reinterpret_cast<uintptr_t>(FD);
+ void setFieldDecl(FieldDecl *FD) {
+ assert(isFieldDesignator() && "Only valid on a field designator");
+ FieldInfo.NameOrField = reinterpret_cast<uintptr_t>(FD);
}
SourceLocation getDotLoc() const {
- assert(Kind == FieldDesignator && "Only valid on a field designator");
- return Field.DotLoc;
+ assert(isFieldDesignator() && "Only valid on a field designator");
+ return FieldInfo.DotLoc;
}
SourceLocation getFieldLoc() const {
- assert(Kind == FieldDesignator && "Only valid on a field designator");
- return Field.FieldLoc;
+ assert(isFieldDesignator() && "Only valid on a field designator");
+ return FieldInfo.FieldLoc;
}
- SourceLocation getLBracketLoc() const {
- assert((Kind == ArrayDesignator || Kind == ArrayRangeDesignator) &&
+ //===------------------------------------------------------------------===//
+ // ArrayOrRangeDesignator
+
+ /// Creates an array designator.
+ static Designator CreateArrayDesignator(unsigned Index,
+ SourceLocation LBracketLoc,
+ SourceLocation RBracketLoc) {
+ Designator D(ArrayDesignator);
+ new (&D.ArrayOrRangeInfo) ArrayOrRangeDesignatorInfo(Index, LBracketLoc,
+ RBracketLoc);
+ return D;
+ }
+
+ /// Creates a GNU array-range designator.
+ static Designator CreateArrayRangeDesignator(unsigned Index,
+ SourceLocation LBracketLoc,
+ SourceLocation EllipsisLoc,
+ SourceLocation RBracketLoc) {
+ Designator D(ArrayRangeDesignator);
+ new (&D.ArrayOrRangeInfo) ArrayOrRangeDesignatorInfo(Index, LBracketLoc,
+ EllipsisLoc,
+ RBracketLoc);
+ return D;
+ }
+
+ unsigned getArrayIndex() const {
+ assert((isArrayDesignator() || isArrayRangeDesignator()) &&
"Only valid on an array or array-range designator");
- return ArrayOrRange.LBracketLoc;
+ return ArrayOrRangeInfo.Index;
}
- SourceLocation getRBracketLoc() const {
- assert((Kind == ArrayDesignator || Kind == ArrayRangeDesignator) &&
+ SourceLocation getLBracketLoc() const {
+ assert((isArrayDesignator() || isArrayRangeDesignator()) &&
"Only valid on an array or array-range designator");
- return ArrayOrRange.RBracketLoc;
+ return ArrayOrRangeInfo.LBracketLoc;
}
SourceLocation getEllipsisLoc() const {
- assert(Kind == ArrayRangeDesignator &&
+ assert(isArrayRangeDesignator() &&
"Only valid on an array-range designator");
- return ArrayOrRange.EllipsisLoc;
+ return ArrayOrRangeInfo.EllipsisLoc;
}
- unsigned getFirstExprIndex() const {
- assert((Kind == ArrayDesignator || Kind == ArrayRangeDesignator) &&
+ SourceLocation getRBracketLoc() const {
+ assert((isArrayDesignator() || isArrayRangeDesignator()) &&
"Only valid on an array or array-range designator");
- return ArrayOrRange.Index;
+ return ArrayOrRangeInfo.RBracketLoc;
}
SourceLocation getBeginLoc() const LLVM_READONLY {
- if (Kind == FieldDesignator)
- return getDotLoc().isInvalid()? getFieldLoc() : getDotLoc();
- else
- return getLBracketLoc();
+ if (isFieldDesignator())
+ return getDotLoc().isInvalid() ? getFieldLoc() : getDotLoc();
+ return getLBracketLoc();
}
+
SourceLocation getEndLoc() const LLVM_READONLY {
- return Kind == FieldDesignator ? getFieldLoc() : getRBracketLoc();
+ return isFieldDesignator() ? getFieldLoc() : getRBracketLoc();
}
+
SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getBeginLoc(), getEndLoc());
}
@@ -5627,6 +5694,12 @@ public:
/// which names a dependent type in its association list is result-dependent,
/// which means that the choice of result expression is dependent.
/// Result-dependent generic associations are both type- and value-dependent.
+///
+/// We also allow an extended form in both C and C++ where the controlling
+/// predicate for the selection expression is a type rather than an expression.
+/// This type argument form does not perform any conversions for the
+/// controlling type, which makes it suitable for use with qualified type
+/// associations, which is not possible with the expression form.
class GenericSelectionExpr final
: public Expr,
private llvm::TrailingObjects<GenericSelectionExpr, Stmt *,
@@ -5639,31 +5712,67 @@ class GenericSelectionExpr final
/// expression in the case where the generic selection expression is not
/// result-dependent. The result index is equal to ResultDependentIndex
/// if and only if the generic selection expression is result-dependent.
- unsigned NumAssocs, ResultIndex;
+ unsigned NumAssocs : 15;
+ unsigned ResultIndex : 15; // NB: ResultDependentIndex is tied to this width.
+ unsigned IsExprPredicate : 1;
enum : unsigned {
- ResultDependentIndex = std::numeric_limits<unsigned>::max(),
- ControllingIndex = 0,
- AssocExprStartIndex = 1
+ ResultDependentIndex = 0x7FFF
};
+ unsigned getIndexOfControllingExpression() const {
+ // If controlled by an expression, the first offset into the Stmt *
+ // trailing array is the controlling expression, the associated expressions
+ // follow this.
+ assert(isExprPredicate() && "Asking for the controlling expression of a "
+ "selection expr predicated by a type");
+ return 0;
+ }
+
+ unsigned getIndexOfControllingType() const {
+ // If controlled by a type, the first offset into the TypeSourceInfo *
+ // trailing array is the controlling type, the associated types follow this.
+ assert(isTypePredicate() && "Asking for the controlling type of a "
+ "selection expr predicated by an expression");
+ return 0;
+ }
+
+ unsigned getIndexOfStartOfAssociatedExprs() const {
+ // If the predicate is a type, then the associated expressions are the only
+ // Stmt * in the trailing array, otherwise we need to offset past the
+ // predicate expression.
+ return (int)isExprPredicate();
+ }
+
+ unsigned getIndexOfStartOfAssociatedTypes() const {
+ // If the predicate is a type, then the associated types follow it in the
+ // trailing array. Otherwise, the associated types are the only
+ // TypeSourceInfo * in the trailing array.
+ return (int)isTypePredicate();
+ }
+
+
/// The location of the "default" and of the right parenthesis.
SourceLocation DefaultLoc, RParenLoc;
// GenericSelectionExpr is followed by several trailing objects.
// They are (in order):
//
- // * A single Stmt * for the controlling expression.
+ // * A single Stmt * for the controlling expression or a TypeSourceInfo * for
+ // the controlling type, depending on the result of isTypePredicate() or
+ // isExprPredicate().
// * An array of getNumAssocs() Stmt * for the association expressions.
// * An array of getNumAssocs() TypeSourceInfo *, one for each of the
// association expressions.
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
// Add one to account for the controlling expression; the remainder
// are the associated expressions.
- return 1 + getNumAssocs();
+ return getNumAssocs() + (int)isExprPredicate();
}
unsigned numTrailingObjects(OverloadToken<TypeSourceInfo *>) const {
- return getNumAssocs();
+ // Add one to account for the controlling type predicate, the remainder
+ // are the associated types.
+ return getNumAssocs() + (int)isTypePredicate();
}
template <bool Const> class AssociationIteratorTy;
@@ -5744,7 +5853,8 @@ class GenericSelectionExpr final
bool operator==(AssociationIteratorTy Other) const { return E == Other.E; }
}; // class AssociationIterator
- /// Build a non-result-dependent generic selection expression.
+ /// Build a non-result-dependent generic selection expression accepting an
+ /// expression predicate.
GenericSelectionExpr(const ASTContext &Context, SourceLocation GenericLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> AssocTypes,
@@ -5753,7 +5863,8 @@ class GenericSelectionExpr final
bool ContainsUnexpandedParameterPack,
unsigned ResultIndex);
- /// Build a result-dependent generic selection expression.
+ /// Build a result-dependent generic selection expression accepting an
+ /// expression predicate.
GenericSelectionExpr(const ASTContext &Context, SourceLocation GenericLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> AssocTypes,
@@ -5761,11 +5872,31 @@ class GenericSelectionExpr final
SourceLocation RParenLoc,
bool ContainsUnexpandedParameterPack);
+ /// Build a non-result-dependent generic selection expression accepting a
+ /// type predicate.
+ GenericSelectionExpr(const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType,
+ ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack,
+ unsigned ResultIndex);
+
+ /// Build a result-dependent generic selection expression accepting a type
+ /// predicate.
+ GenericSelectionExpr(const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType,
+ ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack);
+
/// Build an empty generic selection expression for deserialization.
explicit GenericSelectionExpr(EmptyShell Empty, unsigned NumAssocs);
public:
- /// Create a non-result-dependent generic selection expression.
+ /// Create a non-result-dependent generic selection expression accepting an
+ /// expression predicate.
static GenericSelectionExpr *
Create(const ASTContext &Context, SourceLocation GenericLoc,
Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> AssocTypes,
@@ -5773,13 +5904,31 @@ public:
SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack,
unsigned ResultIndex);
- /// Create a result-dependent generic selection expression.
+ /// Create a result-dependent generic selection expression accepting an
+ /// expression predicate.
static GenericSelectionExpr *
Create(const ASTContext &Context, SourceLocation GenericLoc,
Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> AssocTypes,
ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack);
+ /// Create a non-result-dependent generic selection expression accepting a
+ /// type predicate.
+ static GenericSelectionExpr *
+ Create(const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack,
+ unsigned ResultIndex);
+
+ /// Create a result-dependent generic selection expression accepting a type
+ /// predicate
+ static GenericSelectionExpr *
+ Create(const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack);
+
/// Create an empty generic selection expression for deserialization.
static GenericSelectionExpr *CreateEmpty(const ASTContext &Context,
unsigned NumAssocs);
@@ -5807,32 +5956,56 @@ public:
/// Whether this generic selection is result-dependent.
bool isResultDependent() const { return ResultIndex == ResultDependentIndex; }
+ /// Whether this generic selection uses an expression as its controlling
+ /// argument.
+ bool isExprPredicate() const { return IsExprPredicate; }
+ /// Whether this generic selection uses a type as its controlling argument.
+ bool isTypePredicate() const { return !IsExprPredicate; }
+
/// Return the controlling expression of this generic selection expression.
+ /// Only valid to call if the selection expression used an expression as its
+ /// controlling argument.
Expr *getControllingExpr() {
- return cast<Expr>(getTrailingObjects<Stmt *>()[ControllingIndex]);
+ return cast<Expr>(
+ getTrailingObjects<Stmt *>()[getIndexOfControllingExpression()]);
}
const Expr *getControllingExpr() const {
- return cast<Expr>(getTrailingObjects<Stmt *>()[ControllingIndex]);
+ return cast<Expr>(
+ getTrailingObjects<Stmt *>()[getIndexOfControllingExpression()]);
+ }
+
+ /// Return the controlling type of this generic selection expression. Only
+ /// valid to call if the selection expression used a type as its controlling
+ /// argument.
+ TypeSourceInfo *getControllingType() {
+ return getTrailingObjects<TypeSourceInfo *>()[getIndexOfControllingType()];
+ }
+ const TypeSourceInfo* getControllingType() const {
+ return getTrailingObjects<TypeSourceInfo *>()[getIndexOfControllingType()];
}
/// Return the result expression of this controlling expression. Defined if
/// and only if the generic selection expression is not result-dependent.
Expr *getResultExpr() {
return cast<Expr>(
- getTrailingObjects<Stmt *>()[AssocExprStartIndex + getResultIndex()]);
+ getTrailingObjects<Stmt *>()[getIndexOfStartOfAssociatedExprs() +
+ getResultIndex()]);
}
const Expr *getResultExpr() const {
return cast<Expr>(
- getTrailingObjects<Stmt *>()[AssocExprStartIndex + getResultIndex()]);
+ getTrailingObjects<Stmt *>()[getIndexOfStartOfAssociatedExprs() +
+ getResultIndex()]);
}
ArrayRef<Expr *> getAssocExprs() const {
return {reinterpret_cast<Expr *const *>(getTrailingObjects<Stmt *>() +
- AssocExprStartIndex),
+ getIndexOfStartOfAssociatedExprs()),
NumAssocs};
}
ArrayRef<TypeSourceInfo *> getAssocTypeSourceInfos() const {
- return {getTrailingObjects<TypeSourceInfo *>(), NumAssocs};
+ return {getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes(),
+ NumAssocs};
}
/// Return the Ith association expression with its TypeSourceInfo,
@@ -5841,23 +6014,30 @@ public:
assert(I < getNumAssocs() &&
"Out-of-range index in GenericSelectionExpr::getAssociation!");
return Association(
- cast<Expr>(getTrailingObjects<Stmt *>()[AssocExprStartIndex + I]),
- getTrailingObjects<TypeSourceInfo *>()[I],
+ cast<Expr>(
+ getTrailingObjects<Stmt *>()[getIndexOfStartOfAssociatedExprs() +
+ I]),
+ getTrailingObjects<
+ TypeSourceInfo *>()[getIndexOfStartOfAssociatedTypes() + I],
!isResultDependent() && (getResultIndex() == I));
}
ConstAssociation getAssociation(unsigned I) const {
assert(I < getNumAssocs() &&
"Out-of-range index in GenericSelectionExpr::getAssociation!");
return ConstAssociation(
- cast<Expr>(getTrailingObjects<Stmt *>()[AssocExprStartIndex + I]),
- getTrailingObjects<TypeSourceInfo *>()[I],
+ cast<Expr>(
+ getTrailingObjects<Stmt *>()[getIndexOfStartOfAssociatedExprs() +
+ I]),
+ getTrailingObjects<
+ TypeSourceInfo *>()[getIndexOfStartOfAssociatedTypes() + I],
!isResultDependent() && (getResultIndex() == I));
}
association_range associations() {
AssociationIterator Begin(getTrailingObjects<Stmt *>() +
- AssocExprStartIndex,
- getTrailingObjects<TypeSourceInfo *>(),
+ getIndexOfStartOfAssociatedExprs(),
+ getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes(),
/*Offset=*/0, ResultIndex);
AssociationIterator End(Begin.E + NumAssocs, Begin.TSI + NumAssocs,
/*Offset=*/NumAssocs, ResultIndex);
@@ -5866,8 +6046,9 @@ public:
const_association_range associations() const {
ConstAssociationIterator Begin(getTrailingObjects<Stmt *>() +
- AssocExprStartIndex,
- getTrailingObjects<TypeSourceInfo *>(),
+ getIndexOfStartOfAssociatedExprs(),
+ getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes(),
/*Offset=*/0, ResultIndex);
ConstAssociationIterator End(Begin.E + NumAssocs, Begin.TSI + NumAssocs,
/*Offset=*/NumAssocs, ResultIndex);
@@ -6180,11 +6361,11 @@ public:
return getSubExprsBuffer() + getNumSubExprs();
}
- llvm::iterator_range<semantics_iterator> semantics() {
- return llvm::make_range(semantics_begin(), semantics_end());
+ ArrayRef<Expr*> semantics() {
+ return ArrayRef(semantics_begin(), semantics_end());
}
- llvm::iterator_range<const_semantics_iterator> semantics() const {
- return llvm::make_range(semantics_begin(), semantics_end());
+ ArrayRef<const Expr*> semantics() const {
+ return ArrayRef(semantics_begin(), semantics_end());
}
Expr *getSemanticExpr(unsigned index) {
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExprCXX.h b/contrib/llvm-project/clang/include/clang/AST/ExprCXX.h
index 032fd199b030..f5e805257ce5 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExprCXX.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExprCXX.h
@@ -1681,6 +1681,14 @@ public:
getArgs()[Arg] = ArgExpr;
}
+ bool isImmediateEscalating() const {
+ return CXXConstructExprBits.IsImmediateEscalating;
+ }
+
+ void setIsImmediateEscalating(bool Set) {
+ CXXConstructExprBits.IsImmediateEscalating = Set;
+ }
+
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
SourceRange getParenOrBraceRange() const { return ParenOrBraceRange; }
@@ -2321,7 +2329,7 @@ public:
/// This might return std::nullopt even if isArray() returns true,
/// since there might not be an array size expression.
- /// If the result is not-None, it will never wrap a nullptr.
+ /// If the result is not std::nullopt, it will never wrap a nullptr.
std::optional<Expr *> getArraySize() {
if (!isArray())
return std::nullopt;
@@ -2335,7 +2343,7 @@ public:
/// This might return std::nullopt even if isArray() returns true,
/// since there might not be an array size expression.
- /// If the result is not-None, it will never wrap a nullptr.
+ /// If the result is not std::nullopt, it will never wrap a nullptr.
std::optional<const Expr *> getArraySize() const {
if (!isArray())
return std::nullopt;
@@ -3504,8 +3512,9 @@ class CXXUnresolvedConstructExpr final
friend class ASTStmtReader;
friend TrailingObjects;
- /// The type being constructed.
- TypeSourceInfo *TSI;
+ /// The type being constructed, and whether the construct expression models
+ /// list initialization or not.
+ llvm::PointerIntPair<TypeSourceInfo *, 1> TypeAndInitForm;
/// The location of the left parentheses ('(').
SourceLocation LParenLoc;
@@ -3515,30 +3524,31 @@ class CXXUnresolvedConstructExpr final
CXXUnresolvedConstructExpr(QualType T, TypeSourceInfo *TSI,
SourceLocation LParenLoc, ArrayRef<Expr *> Args,
- SourceLocation RParenLoc);
+ SourceLocation RParenLoc, bool IsListInit);
CXXUnresolvedConstructExpr(EmptyShell Empty, unsigned NumArgs)
- : Expr(CXXUnresolvedConstructExprClass, Empty), TSI(nullptr) {
+ : Expr(CXXUnresolvedConstructExprClass, Empty) {
CXXUnresolvedConstructExprBits.NumArgs = NumArgs;
}
public:
- static CXXUnresolvedConstructExpr *Create(const ASTContext &Context,
- QualType T, TypeSourceInfo *TSI,
- SourceLocation LParenLoc,
- ArrayRef<Expr *> Args,
- SourceLocation RParenLoc);
+ static CXXUnresolvedConstructExpr *
+ Create(const ASTContext &Context, QualType T, TypeSourceInfo *TSI,
+ SourceLocation LParenLoc, ArrayRef<Expr *> Args,
+ SourceLocation RParenLoc, bool IsListInit);
static CXXUnresolvedConstructExpr *CreateEmpty(const ASTContext &Context,
unsigned NumArgs);
/// Retrieve the type that is being constructed, as specified
/// in the source code.
- QualType getTypeAsWritten() const { return TSI->getType(); }
+ QualType getTypeAsWritten() const { return getTypeSourceInfo()->getType(); }
/// Retrieve the type source information for the type being
/// constructed.
- TypeSourceInfo *getTypeSourceInfo() const { return TSI; }
+ TypeSourceInfo *getTypeSourceInfo() const {
+ return TypeAndInitForm.getPointer();
+ }
/// Retrieve the location of the left parentheses ('(') that
/// precedes the argument list.
@@ -3553,7 +3563,7 @@ public:
/// Determine whether this expression models list-initialization.
/// If so, there will be exactly one subexpression, which will be
/// an InitListExpr.
- bool isListInitialization() const { return LParenLoc.isInvalid(); }
+ bool isListInitialization() const { return TypeAndInitForm.getInt(); }
/// Retrieve the number of arguments.
unsigned getNumArgs() const { return CXXUnresolvedConstructExprBits.NumArgs; }
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h b/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h
index 746a5b2fbfc6..d900e980852b 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h
@@ -82,12 +82,6 @@ public:
static ConceptSpecializationExpr *
Create(const ASTContext &C, ConceptDecl *NamedConcept,
- ImplicitConceptSpecializationDecl *SpecDecl,
- const ConstraintSatisfaction *Satisfaction, bool Dependent,
- bool ContainsUnexpandedParameterPack);
-
- static ConceptSpecializationExpr *
- Create(const ASTContext &C, ConceptDecl *NamedConcept,
const ASTTemplateArgumentListInfo *ArgsAsWritten,
ImplicitConceptSpecializationDecl *SpecDecl,
const ConstraintSatisfaction *Satisfaction, bool Dependent,
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h b/contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h
index 65f8ae61fe48..8e573965b0a3 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h
@@ -371,7 +371,7 @@ public:
/// \param Source the external AST source.
///
/// \returns a pointer to the AST node.
- T* get(ExternalASTSource *Source) const {
+ T *get(ExternalASTSource *Source) const {
if (isOffset()) {
assert(Source &&
"Cannot deserialize a lazy pointer without an AST source");
@@ -379,6 +379,14 @@ public:
}
return reinterpret_cast<T*>(Ptr);
}
+
+ /// Retrieve the address of the AST node pointer. Deserializes the pointee if
+ /// necessary.
+ T **getAddressOfPointer(ExternalASTSource *Source) const {
+ // Ensure the integer is in pointer form.
+ (void)get(Source);
+ return reinterpret_cast<T**>(&Ptr);
+ }
};
/// A lazy value (of type T) that is within an AST node of type Owner,
diff --git a/contrib/llvm-project/clang/include/clang/AST/IgnoreExpr.h b/contrib/llvm-project/clang/include/clang/AST/IgnoreExpr.h
index a7e9b07bef6c..917bada61fa6 100644
--- a/contrib/llvm-project/clang/include/clang/AST/IgnoreExpr.h
+++ b/contrib/llvm-project/clang/include/clang/AST/IgnoreExpr.h
@@ -23,7 +23,8 @@ namespace detail {
inline Expr *IgnoreExprNodesImpl(Expr *E) { return E; }
template <typename FnTy, typename... FnTys>
Expr *IgnoreExprNodesImpl(Expr *E, FnTy &&Fn, FnTys &&... Fns) {
- return IgnoreExprNodesImpl(Fn(E), std::forward<FnTys>(Fns)...);
+ return IgnoreExprNodesImpl(std::forward<FnTy>(Fn)(E),
+ std::forward<FnTys>(Fns)...);
}
} // namespace detail
@@ -165,6 +166,11 @@ inline Expr *IgnoreParensSingleStep(Expr *E) {
return CE->getChosenSubExpr();
}
+ else if (auto *PE = dyn_cast<PredefinedExpr>(E)) {
+ if (PE->isTransparent() && PE->getFunctionName())
+ return PE->getFunctionName();
+ }
+
return E;
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/Mangle.h b/contrib/llvm-project/clang/include/clang/AST/Mangle.h
index 9662a33c61cb..c04bcc7f01cb 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Mangle.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Mangle.h
@@ -140,7 +140,8 @@ public:
unsigned ManglingNumber,
raw_ostream &) = 0;
virtual void mangleCXXRTTI(QualType T, raw_ostream &) = 0;
- virtual void mangleCXXRTTIName(QualType T, raw_ostream &) = 0;
+ virtual void mangleCXXRTTIName(QualType T, raw_ostream &,
+ bool NormalizeIntegers = false) = 0;
virtual void mangleStringLiteral(const StringLiteral *SL, raw_ostream &) = 0;
virtual void mangleMSGuidDecl(const MSGuidDecl *GD, raw_ostream&);
@@ -177,7 +178,8 @@ public:
/// or type uniquing.
/// TODO: Extend this to internal types by generating names that are unique
/// across translation units so it can be used with LTO.
- virtual void mangleTypeName(QualType T, raw_ostream &) = 0;
+ virtual void mangleTypeName(QualType T, raw_ostream &,
+ bool NormalizeIntegers = false) = 0;
/// @}
};
diff --git a/contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h b/contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h
index a4a6ce4c2708..1313c94eb122 100644
--- a/contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h
+++ b/contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h
@@ -27,6 +27,9 @@ class VarDecl;
/// Keeps track of the mangled names of lambda expressions and block
/// literals within a particular context.
class MangleNumberingContext {
+ // The index of the next lambda we encounter in this context.
+ unsigned LambdaIndex = 0;
+
public:
virtual ~MangleNumberingContext() {}
@@ -55,6 +58,11 @@ public:
/// given call operator within the device context. No device number is
/// assigned if there's no device numbering context is associated.
virtual unsigned getDeviceManglingNumber(const CXXMethodDecl *) { return 0; }
+
+ // Retrieve the index of the next lambda appearing in this context, which is
+ // used for deduplicating lambdas across modules. Note that this is a simple
+ // sequence number and is not ABI-dependent.
+ unsigned getNextLambdaIndex() { return LambdaIndex++; }
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h b/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h
index 627e9025c112..0bea21270692 100644
--- a/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h
+++ b/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h
@@ -9046,6 +9046,132 @@ public:
Expr *getSize() const { return getStmtAs<Expr>(); }
};
+/// This represents the 'doacross' clause for the '#pragma omp ordered'
+/// directive.
+///
+/// \code
+/// #pragma omp ordered doacross(sink: i-1, j-1)
+/// \endcode
+/// In this example directive '#pragma omp ordered' with clause 'doacross' with
+/// a dependence-type 'sink' and loop-iteration vector expressions i-1 and j-1.
+class OMPDoacrossClause final
+ : public OMPVarListClause<OMPDoacrossClause>,
+ private llvm::TrailingObjects<OMPDoacrossClause, Expr *> {
+ friend class OMPClauseReader;
+ friend OMPVarListClause;
+ friend TrailingObjects;
+
+ /// Dependence type (sink or source).
+ OpenMPDoacrossClauseModifier DepType = OMPC_DOACROSS_unknown;
+
+ /// Dependence type location.
+ SourceLocation DepLoc;
+
+ /// Colon location.
+ SourceLocation ColonLoc;
+
+ /// Number of loops, associated with the doacross clause.
+ unsigned NumLoops = 0;
+
+ /// Build clause with number of expressions \a N.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ /// \param N Number of expressions in the clause.
+ /// \param NumLoops Number of loops associated with the clause.
+ OMPDoacrossClause(SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, unsigned N, unsigned NumLoops)
+ : OMPVarListClause<OMPDoacrossClause>(llvm::omp::OMPC_doacross, StartLoc,
+ LParenLoc, EndLoc, N),
+ NumLoops(NumLoops) {}
+
+ /// Build an empty clause.
+ ///
+ /// \param N Number of expressions in the clause.
+ /// \param NumLoops Number of loops associated with the clause.
+ explicit OMPDoacrossClause(unsigned N, unsigned NumLoops)
+ : OMPVarListClause<OMPDoacrossClause>(llvm::omp::OMPC_doacross,
+ SourceLocation(), SourceLocation(),
+ SourceLocation(), N),
+ NumLoops(NumLoops) {}
+
+ /// Set dependence type.
+ void setDependenceType(OpenMPDoacrossClauseModifier M) { DepType = M; }
+
+ /// Set dependence type location.
+ void setDependenceLoc(SourceLocation Loc) { DepLoc = Loc; }
+
+ /// Set colon location.
+ void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
+
+public:
+ /// Creates clause with a list of expressions \a VL.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ /// \param DepType The dependence type.
+ /// \param DepLoc Location of the dependence type.
+ /// \param ColonLoc Location of ':'.
+ /// \param VL List of references to the expressions.
+ /// \param NumLoops Number of loops that associated with the clause.
+ static OMPDoacrossClause *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, OpenMPDoacrossClauseModifier DepType,
+ SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL,
+ unsigned NumLoops);
+
+ /// Creates an empty clause with \a N expressions.
+ ///
+ /// \param C AST context.
+ /// \param N The number of expressions.
+ /// \param NumLoops Number of loops that is associated with this clause.
+ static OMPDoacrossClause *CreateEmpty(const ASTContext &C, unsigned N,
+ unsigned NumLoops);
+
+ /// Get dependence type.
+ OpenMPDoacrossClauseModifier getDependenceType() const { return DepType; }
+
+ /// Get dependence type location.
+ SourceLocation getDependenceLoc() const { return DepLoc; }
+
+ /// Get colon location.
+ SourceLocation getColonLoc() const { return ColonLoc; }
+
+ /// Get number of loops associated with the clause.
+ unsigned getNumLoops() const { return NumLoops; }
+
+ /// Set the loop data.
+ void setLoopData(unsigned NumLoop, Expr *Cnt);
+
+ /// Get the loop data.
+ Expr *getLoopData(unsigned NumLoop);
+ const Expr *getLoopData(unsigned NumLoop) const;
+
+ child_range children() {
+ return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
+ reinterpret_cast<Stmt **>(varlist_end()));
+ }
+
+ const_child_range children() const {
+ auto Children = const_cast<OMPDoacrossClause *>(this)->children();
+ return const_child_range(Children.begin(), Children.end());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_doacross;
+ }
+};
+
} // namespace clang
#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/OperationKinds.def b/contrib/llvm-project/clang/include/clang/AST/OperationKinds.def
index b05b9d81569e..96b5a4db55e0 100644
--- a/contrib/llvm-project/clang/include/clang/AST/OperationKinds.def
+++ b/contrib/llvm-project/clang/include/clang/AST/OperationKinds.def
@@ -362,8 +362,8 @@ CAST_OPERATION(IntToOCLSampler)
//===- Binary Operations -------------------------------------------------===//
// Operators listed in order of precedence.
-// Note that additions to this should also update the StmtVisitor class and
-// BinaryOperator::getOverloadedOperator.
+// Note that additions to this should also update the StmtVisitor class,
+// BinaryOperator::getOverloadedOperator and CXBinaryOperatorKind enum.
// [C++ 5.5] Pointer-to-member operators.
BINARY_OPERATION(PtrMemD, ".*")
@@ -415,8 +415,8 @@ BINARY_OPERATION(Comma, ",")
//===- Unary Operations ---------------------------------------------------===//
-// Note that additions to this should also update the StmtVisitor class and
-// UnaryOperator::getOverloadedOperator.
+// Note that additions to this should also update the StmtVisitor class,
+// UnaryOperator::getOverloadedOperator and CXUnaryOperatorKind enum.
// [C99 6.5.2.4] Postfix increment and decrement
UNARY_OPERATION(PostInc, "++")
diff --git a/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h b/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h
index 5aeaca7beda2..8a0bc6dfb57b 100644
--- a/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h
+++ b/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h
@@ -60,14 +60,15 @@ struct PrintingPolicy {
: Indentation(2), SuppressSpecifiers(false),
SuppressTagKeyword(LO.CPlusPlus), IncludeTagDefinition(false),
SuppressScope(false), SuppressUnwrittenScope(false),
- SuppressInlineNamespace(true), SuppressInitializers(false),
- ConstantArraySizeAsWritten(false), AnonymousTagLocations(true),
- SuppressStrongLifetime(false), SuppressLifetimeQualifiers(false),
+ SuppressInlineNamespace(true), SuppressElaboration(false),
+ SuppressInitializers(false), ConstantArraySizeAsWritten(false),
+ AnonymousTagLocations(true), SuppressStrongLifetime(false),
+ SuppressLifetimeQualifiers(false),
SuppressTemplateArgsInCXXConstructors(false),
SuppressDefaultTemplateArgs(true), Bool(LO.Bool),
Nullptr(LO.CPlusPlus11 || LO.C2x), NullptrTypeInNamespace(LO.CPlusPlus),
- Restrict(LO.C99), Alignof(LO.CPlusPlus11),
- UnderscoreAlignof(LO.C11), UseVoidForZeroParams(!LO.CPlusPlus),
+ Restrict(LO.C99), Alignof(LO.CPlusPlus11), UnderscoreAlignof(LO.C11),
+ UseVoidForZeroParams(!LO.CPlusPlus),
SplitTemplateClosers(!LO.CPlusPlus11), TerseOutput(false),
PolishForDeclaration(false), Half(LO.Half),
MSWChar(LO.MicrosoftExt && !LO.WChar), IncludeNewlines(true),
@@ -139,6 +140,10 @@ struct PrintingPolicy {
/// removed.
unsigned SuppressInlineNamespace : 1;
+ /// Ignore qualifiers and tag keywords as specified by elaborated type sugar,
+ /// instead letting the underlying type print as normal.
+ unsigned SuppressElaboration : 1;
+
/// Suppress printing of variable initializers.
///
/// This flag is used when printing the loop variable in a for-range
diff --git a/contrib/llvm-project/clang/include/clang/AST/PropertiesBase.td b/contrib/llvm-project/clang/include/clang/AST/PropertiesBase.td
index c2823c660f36..c6fe790e1964 100644
--- a/contrib/llvm-project/clang/include/clang/AST/PropertiesBase.td
+++ b/contrib/llvm-project/clang/include/clang/AST/PropertiesBase.td
@@ -450,10 +450,13 @@ let Class = PropertyTypeCase<APValue, "LValue"> in {
lvalueBase ? lvalueBase.dyn_cast<const Expr *>() : nullptr;
bool lvalueBaseIsExpr = (bool) expr;
bool lvalueBaseIsTypeInfo = lvalueBase.is<TypeInfoLValue>();
+ bool lvalueBaseIsDynamicAlloc = lvalueBase.is<DynamicAllocLValue>();
QualType elemTy;
if (lvalueBase) {
if (lvalueBaseIsTypeInfo) {
elemTy = lvalueBase.getTypeInfoType();
+ } else if (lvalueBaseIsDynamicAlloc) {
+ elemTy = lvalueBase.getDynamicAllocType();
} else if (lvalueBaseIsExpr) {
elemTy = expr->getType();
} else {
@@ -473,6 +476,9 @@ let Class = PropertyTypeCase<APValue, "LValue"> in {
def : Property<"isTypeInfo", Bool> {
let Read = [{ lvalueBaseIsTypeInfo }];
}
+ def : Property<"isDynamicAlloc", Bool> {
+ let Read = [{ lvalueBaseIsDynamicAlloc }];
+ }
def : Property<"hasBase", Bool> {
let Read = [{ static_cast<bool>(lvalueBase) }];
}
@@ -485,9 +491,17 @@ let Class = PropertyTypeCase<APValue, "LValue"> in {
QualType(node.getLValueBase().get<TypeInfoLValue>().getType(), 0)
}];
}
+ def : Property<"dynamicAlloc", UInt32> {
+ let Conditional = [{ hasBase && isDynamicAlloc }];
+ let Read = [{ node.getLValueBase().get<DynamicAllocLValue>().getIndex() }];
+ }
def : Property<"type", QualType> {
- let Conditional = [{ hasBase && isTypeInfo }];
- let Read = [{ node.getLValueBase().getTypeInfoType() }];
+ let Conditional = [{ hasBase && (isTypeInfo || isDynamicAlloc) }];
+ let Read = [{
+ isTypeInfo
+ ? node.getLValueBase().getTypeInfoType()
+ : node.getLValueBase().getDynamicAllocType()
+ }];
}
def : Property<"callIndex", UInt32> {
let Conditional = [{ hasBase && !isTypeInfo }];
@@ -502,7 +516,7 @@ let Class = PropertyTypeCase<APValue, "LValue"> in {
let Read = [{ const_cast<Expr *>(expr) }];
}
def : Property<"decl", DeclRef> {
- let Conditional = [{ hasBase && !isTypeInfo && !isExpr }];
+ let Conditional = [{ hasBase && !isTypeInfo && !isDynamicAlloc && !isExpr }];
let Read = [{ lvalueBase.get<const ValueDecl *>() }];
}
def : Property<"offsetQuantity", UInt32> {
@@ -521,6 +535,9 @@ let Class = PropertyTypeCase<APValue, "LValue"> in {
if (isTypeInfo) {
base = APValue::LValueBase::getTypeInfo(
TypeInfoLValue(typeInfo->getTypePtr()), *type);
+ } else if (isDynamicAlloc) {
+ base = APValue::LValueBase::getDynamicAlloc(
+ DynamicAllocLValue(*dynamicAlloc), *type);
} else if (isExpr) {
base = APValue::LValueBase(cast<Expr>(*stmt),
*callIndex, *version);
@@ -745,8 +762,11 @@ let Class = PropertyTypeCase<TemplateArgument, "Type"> in {
def : Property<"type", QualType> {
let Read = [{ node.getAsType() }];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
- return TemplateArgument(type);
+ return TemplateArgument(type, /* isNullPtr */ false, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "Declaration"> in {
@@ -756,16 +776,22 @@ let Class = PropertyTypeCase<TemplateArgument, "Declaration"> in {
def : Property<"parameterType", QualType> {
let Read = [{ node.getParamTypeForDecl() }];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
- return TemplateArgument(declaration, parameterType);
+ return TemplateArgument(declaration, parameterType, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "NullPtr"> in {
def : Property<"type", QualType> {
let Read = [{ node.getNullPtrType() }];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
- return TemplateArgument(type, /*nullptr*/ true);
+ return TemplateArgument(type, /*nullptr*/ true, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "Integral"> in {
@@ -775,16 +801,22 @@ let Class = PropertyTypeCase<TemplateArgument, "Integral"> in {
def : Property<"type", QualType> {
let Read = [{ node.getIntegralType() }];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
- return TemplateArgument(ctx, value, type);
+ return TemplateArgument(ctx, value, type, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "Template"> in {
def : Property<"name", TemplateName> {
let Read = [{ node.getAsTemplateOrTemplatePattern() }];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
- return TemplateArgument(name);
+ return TemplateArgument(name, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "TemplateExpansion"> in {
@@ -798,19 +830,25 @@ let Class = PropertyTypeCase<TemplateArgument, "TemplateExpansion"> in {
[](unsigned i) { return uint32_t(i); })
}];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
auto numExpansionsUnsigned = llvm::transformOptional(
numExpansions, [](uint32_t i) { return unsigned(i); });
- return TemplateArgument(name, numExpansionsUnsigned);
+ return TemplateArgument(name, numExpansionsUnsigned, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "Expression"> in {
def : Property<"expression", ExprRef> {
let Read = [{ node.getAsExpr() }];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
- return TemplateArgument(expression);
+ return TemplateArgument(expression, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "Pack"> in {
diff --git a/contrib/llvm-project/clang/include/clang/AST/RawCommentList.h b/contrib/llvm-project/clang/include/clang/AST/RawCommentList.h
index 1bb8d7ce40a9..2f44a77d4503 100644
--- a/contrib/llvm-project/clang/include/clang/AST/RawCommentList.h
+++ b/contrib/llvm-project/clang/include/clang/AST/RawCommentList.h
@@ -115,6 +115,17 @@ public:
return extractBriefText(Context);
}
+ bool hasUnsupportedSplice(const SourceManager &SourceMgr) const {
+ if (!isInvalid())
+ return false;
+ StringRef Text = getRawText(SourceMgr);
+ if (Text.size() < 6 || Text[0] != '/')
+ return false;
+ if (Text[1] == '*')
+ return Text[Text.size() - 1] != '/' || Text[Text.size() - 2] != '*';
+ return Text[1] != '/';
+ }
+
/// Returns sanitized comment text, suitable for presentation in editor UIs.
/// E.g. will transform:
/// // This is a long multiline comment.
@@ -162,7 +173,7 @@ private:
SourceRange Range;
mutable StringRef RawText;
- mutable const char *BriefText;
+ mutable const char *BriefText = nullptr;
mutable bool RawTextValid : 1; ///< True if RawText is valid
mutable bool BriefTextValid : 1; ///< True if BriefText is valid
diff --git a/contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h b/contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h
index 5802e905be46..604875cd6337 100644
--- a/contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -2553,7 +2553,11 @@ bool RecursiveASTVisitor<Derived>::TraverseInitListExpr(
// are interleaved. We also need to watch out for null types (default
// generic associations).
DEF_TRAVERSE_STMT(GenericSelectionExpr, {
- TRY_TO(TraverseStmt(S->getControllingExpr()));
+ if (S->isExprPredicate())
+ TRY_TO(TraverseStmt(S->getControllingExpr()));
+ else
+ TRY_TO(TraverseTypeLoc(S->getControllingType()->getTypeLoc()));
+
for (const GenericSelectionExpr::Association Assoc : S->associations()) {
if (TypeSourceInfo *TSI = Assoc.getTypeSourceInfo())
TRY_TO(TraverseTypeLoc(TSI->getTypeLoc()));
@@ -2723,7 +2727,11 @@ DEF_TRAVERSE_STMT(CXXDefaultArgExpr, {
TRY_TO(TraverseStmt(S->getExpr()));
})
-DEF_TRAVERSE_STMT(CXXDefaultInitExpr, {})
+DEF_TRAVERSE_STMT(CXXDefaultInitExpr, {
+ if (getDerived().shouldVisitImplicitCode())
+ TRY_TO(TraverseStmt(S->getExpr()));
+})
+
DEF_TRAVERSE_STMT(CXXDeleteExpr, {})
DEF_TRAVERSE_STMT(ExprWithCleanups, {})
DEF_TRAVERSE_STMT(CXXInheritedCtorInitExpr, {})
@@ -3860,6 +3868,13 @@ bool RecursiveASTVisitor<Derived>::VisitOMPXDynCGroupMemClause(
return true;
}
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPDoacrossClause(
+ OMPDoacrossClause *C) {
+ TRY_TO(VisitOMPClauseList(C));
+ return true;
+}
+
// FIXME: look at the following tricky-seeming exprs to see if we
// need to recurse on anything. These are ones that have methods
// returning decls or qualtypes or nestednamespecifier -- though I'm
diff --git a/contrib/llvm-project/clang/include/clang/AST/Redeclarable.h b/contrib/llvm-project/clang/include/clang/AST/Redeclarable.h
index 58ec07973920..091bb886f2d4 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Redeclarable.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Redeclarable.h
@@ -240,7 +240,7 @@ public:
class redecl_iterator {
/// Current - The current declaration.
decl_type *Current = nullptr;
- decl_type *Starter;
+ decl_type *Starter = nullptr;
bool PassedFirst = false;
public:
diff --git a/contrib/llvm-project/clang/include/clang/AST/Stmt.h b/contrib/llvm-project/clang/include/clang/AST/Stmt.h
index b70cf3aec5d6..87ffebc00d7b 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Stmt.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Stmt.h
@@ -364,6 +364,10 @@ protected:
/// for the predefined identifier.
unsigned HasFunctionName : 1;
+ /// True if this PredefinedExpr should be treated as a StringLiteral (for
+ /// MSVC compatibility).
+ unsigned IsTransparent : 1;
+
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
@@ -380,6 +384,7 @@ protected:
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
+ unsigned IsImmediateEscalating : 1;
/// The location of the declaration name itself.
SourceLocation Loc;
@@ -588,10 +593,8 @@ protected:
unsigned : NumExprBits;
- // These don't need to be particularly wide, because they're
- // strictly limited by the forms of expressions we permit.
- unsigned NumSubExprs : 8;
- unsigned ResultIndex : 32 - 8 - NumExprBits;
+ unsigned NumSubExprs : 16;
+ unsigned ResultIndex : 16;
};
class SourceLocExprBitfields {
@@ -823,6 +826,7 @@ protected:
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
+ unsigned IsImmediateEscalating : 1;
SourceLocation Loc;
};
@@ -978,7 +982,7 @@ protected:
SourceLocation RequiresKWLoc;
};
- //===--- C++ Coroutines TS bitfields classes ---===//
+ //===--- C++ Coroutines bitfields classes ---===//
class CoawaitExprBitfields {
friend class CoawaitExpr;
@@ -1082,7 +1086,7 @@ protected:
LambdaExprBitfields LambdaExprBits;
RequiresExprBitfields RequiresExprBits;
- // C++ Coroutines TS expressions
+ // C++ Coroutines expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
@@ -1291,8 +1295,13 @@ public:
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
+ /// \param ProfileLambdaExpr whether or not to profile lambda expressions.
+ /// When false, the lambda expressions are never considered to be equal to
+ /// other lambda expressions. When true, the lambda expressions with the same
+ /// implementation will be considered to be the same. ProfileLambdaExpr should
+ /// only be true when we try to merge two declarations within modules.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- bool Canonical) const;
+ bool Canonical, bool ProfileLambdaExpr = false) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
@@ -2092,6 +2101,11 @@ public:
: nullptr;
}
+ void setConditionVariableDeclStmt(DeclStmt *CondVar) {
+ assert(hasVarStorage());
+ getTrailingObjects<Stmt *>()[varOffset()] = CondVar;
+ }
+
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
@@ -2324,6 +2338,11 @@ public:
: nullptr;
}
+ void setConditionVariableDeclStmt(DeclStmt *CondVar) {
+ assert(hasVarStorage());
+ getTrailingObjects<Stmt *>()[varOffset()] = CondVar;
+ }
+
SwitchCase *getSwitchCaseList() { return FirstCase; }
const SwitchCase *getSwitchCaseList() const { return FirstCase; }
void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }
@@ -2487,6 +2506,11 @@ public:
: nullptr;
}
+ void setConditionVariableDeclStmt(DeclStmt *CondVar) {
+ assert(hasVarStorage());
+ getTrailingObjects<Stmt *>()[varOffset()] = CondVar;
+ }
+
SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }
@@ -2576,6 +2600,8 @@ public:
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
+ friend class ASTStmtReader;
+
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation LParenLoc, RParenLoc;
@@ -2603,10 +2629,18 @@ public:
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
+ DeclStmt *getConditionVariableDeclStmt() {
+ return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
+ }
+
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}
+ void setConditionVariableDeclStmt(DeclStmt *CondVar) {
+ SubExprs[CONDVAR] = CondVar;
+ }
+
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }
@@ -3558,8 +3592,11 @@ public:
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
+ Capture() = default;
+
public:
friend class ASTStmtReader;
+ friend class CapturedStmt;
/// Create a new capture.
///
diff --git a/contrib/llvm-project/clang/include/clang/AST/StmtCXX.h b/contrib/llvm-project/clang/include/clang/AST/StmtCXX.h
index 2c71f8676896..8b4ef24ed376 100644
--- a/contrib/llvm-project/clang/include/clang/AST/StmtCXX.h
+++ b/contrib/llvm-project/clang/include/clang/AST/StmtCXX.h
@@ -75,7 +75,8 @@ class CXXTryStmt final : public Stmt,
unsigned NumHandlers;
size_t numTrailingObjects(OverloadToken<Stmt *>) const { return NumHandlers; }
- CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock, ArrayRef<Stmt*> handlers);
+ CXXTryStmt(SourceLocation tryLoc, CompoundStmt *tryBlock,
+ ArrayRef<Stmt *> handlers);
CXXTryStmt(EmptyShell Empty, unsigned numHandlers)
: Stmt(CXXTryStmtClass), NumHandlers(numHandlers) { }
@@ -84,7 +85,7 @@ class CXXTryStmt final : public Stmt,
public:
static CXXTryStmt *Create(const ASTContext &C, SourceLocation tryLoc,
- Stmt *tryBlock, ArrayRef<Stmt*> handlers);
+ CompoundStmt *tryBlock, ArrayRef<Stmt *> handlers);
static CXXTryStmt *Create(const ASTContext &C, EmptyShell Empty,
unsigned numHandlers);
@@ -326,6 +327,7 @@ class CoroutineBodyStmt final
OnFallthrough, ///< Handler for control flow falling off the body.
Allocate, ///< Coroutine frame memory allocation.
Deallocate, ///< Coroutine frame memory deallocation.
+ ResultDecl, ///< Declaration holding the result of get_return_object.
ReturnValue, ///< Return value for thunk function: p.get_return_object().
ReturnStmt, ///< Return statement for the thunk function.
ReturnStmtOnAllocFailure, ///< Return statement if allocation failed.
@@ -352,6 +354,7 @@ public:
Stmt *OnFallthrough = nullptr;
Expr *Allocate = nullptr;
Expr *Deallocate = nullptr;
+ Stmt *ResultDecl = nullptr;
Expr *ReturnValue = nullptr;
Stmt *ReturnStmt = nullptr;
Stmt *ReturnStmtOnAllocFailure = nullptr;
@@ -372,9 +375,10 @@ public:
}
/// Retrieve the body of the coroutine as written. This will be either
- /// a CompoundStmt or a TryStmt.
- Stmt *getBody() const {
- return getStoredStmts()[SubStmt::Body];
+ /// a CompoundStmt. If the coroutine is in function-try-block, we will
+ /// wrap the CXXTryStmt into a CompoundStmt to keep consistency.
+ CompoundStmt *getBody() const {
+ return cast<CompoundStmt>(getStoredStmts()[SubStmt::Body]);
}
Stmt *getPromiseDeclStmt() const {
@@ -404,13 +408,13 @@ public:
Expr *getDeallocate() const {
return cast_or_null<Expr>(getStoredStmts()[SubStmt::Deallocate]);
}
+ Stmt *getResultDecl() const { return getStoredStmts()[SubStmt::ResultDecl]; }
Expr *getReturnValueInit() const {
return cast<Expr>(getStoredStmts()[SubStmt::ReturnValue]);
}
Expr *getReturnValue() const {
- assert(getReturnStmt());
- auto *RS = cast<clang::ReturnStmt>(getReturnStmt());
- return RS->getRetValue();
+ auto *RS = dyn_cast_or_null<clang::ReturnStmt>(getReturnStmt());
+ return RS ? RS->getRetValue() : nullptr;
}
Stmt *getReturnStmt() const { return getStoredStmts()[SubStmt::ReturnStmt]; }
Stmt *getReturnStmtOnAllocFailure() const {
@@ -439,6 +443,17 @@ public:
NumParams);
}
+ child_range childrenExclBody() {
+ return child_range(getStoredStmts() + SubStmt::Body + 1,
+ getStoredStmts() + SubStmt::FirstParamMove + NumParams);
+ }
+
+ const_child_range childrenExclBody() const {
+ return const_child_range(getStoredStmts() + SubStmt::Body + 1,
+ getStoredStmts() + SubStmt::FirstParamMove +
+ NumParams);
+ }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == CoroutineBodyStmtClass;
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h b/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h
index 66aef2121d81..8e6b4d819740 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h
@@ -103,12 +103,14 @@ private:
/// The kind of template argument we're storing.
struct DA {
- unsigned Kind;
+ unsigned Kind : 31;
+ unsigned IsDefaulted : 1;
void *QT;
ValueDecl *D;
};
struct I {
- unsigned Kind;
+ unsigned Kind : 31;
+ unsigned IsDefaulted : 1;
// We store a decomposed APSInt with the data allocated by ASTContext if
// BitWidth > 64. The memory may be shared between multiple
// TemplateArgument instances.
@@ -124,17 +126,20 @@ private:
void *Type;
};
struct A {
- unsigned Kind;
+ unsigned Kind : 31;
+ unsigned IsDefaulted : 1;
unsigned NumArgs;
const TemplateArgument *Args;
};
struct TA {
- unsigned Kind;
+ unsigned Kind : 31;
+ unsigned IsDefaulted : 1;
unsigned NumExpansions;
void *Name;
};
struct TV {
- unsigned Kind;
+ unsigned Kind : 31;
+ unsigned IsDefaulted : 1;
uintptr_t V;
};
union {
@@ -147,27 +152,31 @@ private:
public:
/// Construct an empty, invalid template argument.
- constexpr TemplateArgument() : TypeOrValue({Null, 0}) {}
+ constexpr TemplateArgument() : TypeOrValue({Null, 0, /* IsDefaulted */ 0}) {}
/// Construct a template type argument.
- TemplateArgument(QualType T, bool isNullPtr = false) {
+ TemplateArgument(QualType T, bool isNullPtr = false,
+ bool IsDefaulted = false) {
TypeOrValue.Kind = isNullPtr ? NullPtr : Type;
+ TypeOrValue.IsDefaulted = IsDefaulted;
TypeOrValue.V = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
}
/// Construct a template argument that refers to a
/// declaration, which is either an external declaration or a
/// template declaration.
- TemplateArgument(ValueDecl *D, QualType QT) {
+ TemplateArgument(ValueDecl *D, QualType QT, bool IsDefaulted = false) {
assert(D && "Expected decl");
DeclArg.Kind = Declaration;
+ DeclArg.IsDefaulted = IsDefaulted;
DeclArg.QT = QT.getAsOpaquePtr();
DeclArg.D = D;
}
/// Construct an integral constant template argument. The memory to
/// store the value is allocated with Ctx.
- TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value, QualType Type);
+ TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value, QualType Type,
+ bool IsDefaulted = false);
/// Construct an integral constant template argument with the same
/// value as Other but a different type.
@@ -184,8 +193,12 @@ public:
/// is taken.
///
/// \param Name The template name.
- TemplateArgument(TemplateName Name) {
+ ///
+ /// \param IsDefaulted If 'true', implies that this TemplateArgument
+ /// corresponds to a default template parameter
+ TemplateArgument(TemplateName Name, bool IsDefaulted = false) {
TemplateArg.Kind = Template;
+ TemplateArg.IsDefaulted = IsDefaulted;
TemplateArg.Name = Name.getAsVoidPointer();
TemplateArg.NumExpansions = 0;
}
@@ -201,8 +214,13 @@ public:
///
/// \param NumExpansions The number of expansions that will be generated by
/// instantiating
- TemplateArgument(TemplateName Name, std::optional<unsigned> NumExpansions) {
+ ///
+ /// \param IsDefaulted If 'true', implies that this TemplateArgument
+ /// corresponds to a default template parameter
+ TemplateArgument(TemplateName Name, std::optional<unsigned> NumExpansions,
+ bool IsDefaulted = false) {
TemplateArg.Kind = TemplateExpansion;
+ TemplateArg.IsDefaulted = IsDefaulted;
TemplateArg.Name = Name.getAsVoidPointer();
if (NumExpansions)
TemplateArg.NumExpansions = *NumExpansions + 1;
@@ -215,8 +233,9 @@ public:
/// This form of template argument only occurs in template argument
/// lists used for dependent types and for expression; it will not
/// occur in a non-dependent, canonical template argument list.
- TemplateArgument(Expr *E) {
+ TemplateArgument(Expr *E, bool IsDefaulted = false) {
TypeOrValue.Kind = Expression;
+ TypeOrValue.IsDefaulted = IsDefaulted;
TypeOrValue.V = reinterpret_cast<uintptr_t>(E);
}
@@ -226,12 +245,11 @@ public:
/// outlives the TemplateArgument itself.
explicit TemplateArgument(ArrayRef<TemplateArgument> Args) {
this->Args.Kind = Pack;
+ this->Args.IsDefaulted = false;
this->Args.Args = Args.data();
this->Args.NumArgs = Args.size();
}
- TemplateArgument(TemplateName, bool) = delete;
-
static TemplateArgument getEmptyPack() {
return TemplateArgument(std::nullopt);
}
@@ -334,6 +352,14 @@ public:
Integer.Type = T.getAsOpaquePtr();
}
+ /// Set to 'true' if this TemplateArgument corresponds to a
+ /// default template parameter.
+ void setIsDefaulted(bool v) { TypeOrValue.IsDefaulted = v; }
+
+ /// If returns 'true', this TemplateArgument corresponds to a
+ /// default template parameter.
+ bool getIsDefaulted() const { return (bool)TypeOrValue.IsDefaulted; }
+
/// If this is a non-type template argument, get its type. Otherwise,
/// returns a null QualType.
QualType getNonTypeTemplateArgumentType() const;
diff --git a/contrib/llvm-project/clang/include/clang/AST/TemplateName.h b/contrib/llvm-project/clang/include/clang/AST/TemplateName.h
index 2f108ea00726..d56361b50059 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TemplateName.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TemplateName.h
@@ -351,9 +351,7 @@ public:
/// error.
void dump() const;
- void Profile(llvm::FoldingSetNodeID &ID) {
- ID.AddPointer(Storage.getOpaqueValue());
- }
+ void Profile(llvm::FoldingSetNodeID &ID);
/// Retrieve the template name as a void pointer.
void *getAsVoidPointer() const { return Storage.getOpaqueValue(); }
diff --git a/contrib/llvm-project/clang/include/clang/AST/Type.h b/contrib/llvm-project/clang/include/clang/AST/Type.h
index 180251d7f6bd..8d20d088bb63 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Type.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Type.h
@@ -899,12 +899,24 @@ public:
/// Return true if this is a trivially relocatable type.
bool isTriviallyRelocatableType(const ASTContext &Context) const;
+ /// Return true if this is a trivially equality comparable type.
+ bool isTriviallyEqualityComparableType(const ASTContext &Context) const;
+
/// Returns true if it is a class and it might be dynamic.
bool mayBeDynamicClass() const;
/// Returns true if it is not a class or if the class might not be dynamic.
bool mayBeNotDynamicClass() const;
+ /// Returns true if it is a WebAssembly Reference Type.
+ bool isWebAssemblyReferenceType() const;
+
+ /// Returns true if it is a WebAssembly Externref Type.
+ bool isWebAssemblyExternrefType() const;
+
+ /// Returns true if it is a WebAssembly Funcref Type.
+ bool isWebAssemblyFuncrefType() const;
+
// Don't promise in the API that anything besides 'const' can be
// easily added.
@@ -945,7 +957,6 @@ public:
void removeLocalConst();
void removeLocalVolatile();
void removeLocalRestrict();
- void removeLocalCVRQualifiers(unsigned Mask);
void removeLocalFastQualifiers() { Value.setInt(0); }
void removeLocalFastQualifiers(unsigned Mask) {
@@ -1647,7 +1658,8 @@ protected:
unsigned : NumTypeBits;
/// The kind (BuiltinType::Kind) of builtin type this is.
- unsigned Kind : 8;
+ static constexpr unsigned NumOfBuiltinTypeBits = 9;
+ unsigned Kind : NumOfBuiltinTypeBits;
};
/// FunctionTypeBitfields store various bits belonging to FunctionProtoType.
@@ -1767,7 +1779,7 @@ protected:
/// The kind of vector, either a generic vector type or some
/// target-specific vector type such as for AltiVec or Neon.
- unsigned VecKind : 3;
+ unsigned VecKind : 4;
/// The number of elements in the vector.
uint32_t NumElements;
};
@@ -2029,6 +2041,17 @@ public:
/// Returns true for SVE scalable vector types.
bool isSVESizelessBuiltinType() const;
+ /// Returns true for RVV scalable vector types.
+ bool isRVVSizelessBuiltinType() const;
+
+ /// Check if this is a WebAssembly Externref Type.
+ bool isWebAssemblyExternrefType() const;
+
+ /// Returns true if this is a WebAssembly table type: either an array of
+ /// reference types, or a pointer to a reference type (which can only be
+ /// created by array to pointer decay).
+ bool isWebAssemblyTableType() const;
+
/// Determines if this is a sizeless type supported by the
/// 'arm_sve_vector_bits' type attribute, which can be applied to a single
/// SVE vector or predicate, excluding tuple types such as svint32x4_t.
@@ -2039,6 +2062,16 @@ public:
/// 'arm_sve_vector_bits' type attribute as VectorType.
QualType getSveEltType(const ASTContext &Ctx) const;
+ /// Determines if this is a sizeless type supported by the
+ /// 'riscv_rvv_vector_bits' type attribute, which can be applied to a single
+ /// RVV vector or mask.
+ bool isRVVVLSBuiltinType() const;
+
+ /// Returns the representative type for the element of an RVV builtin type.
+ /// This is used to represent fixed-length RVV vectors created with the
+ /// 'riscv_rvv_vector_bits' type attribute as VectorType.
+ QualType getRVVEltType(const ASTContext &Ctx) const;
+
/// Types are partitioned into 3 broad categories (C99 6.2.5p1):
/// object types, function types, and incomplete types.
@@ -2273,8 +2306,12 @@ public:
/// Check if the type is the CUDA device builtin texture type.
bool isCUDADeviceBuiltinTextureType() const;
+ bool isRVVType(unsigned ElementCount) const;
+
bool isRVVType() const;
+ bool isRVVType(unsigned Bitwidth, bool IsFloat) const;
+
/// Return the implicit lifetime for this type, which must not be dependent.
Qualifiers::ObjCLifetime getObjCARCImplicitLifetime() const;
@@ -2640,6 +2677,9 @@ public:
// RVV Types
#define RVV_TYPE(Name, Id, SingletonId) Id,
#include "clang/Basic/RISCVVTypes.def"
+// WebAssembly reference types
+#define WASM_TYPE(Name, Id, SingletonId) Id,
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
// All other builtin types
#define BUILTIN_TYPE(Id, SingletonId) Id,
#define LAST_BUILTIN_TYPE(Id) LastKind = Id
@@ -2653,6 +2693,10 @@ private:
: Type(Builtin, QualType(),
K == Dependent ? TypeDependence::DependentInstantiation
: TypeDependence::None) {
+ static_assert(Kind::LastKind <
+ (1 << BuiltinTypeBitfields::NumOfBuiltinTypeBits) &&
+ "Defined builtin type exceeds the allocated space for serial "
+ "numbering");
BuiltinTypeBits.Kind = K;
}
@@ -2688,6 +2732,8 @@ public:
bool isSVEBool() const { return getKind() == Kind::SveBool; }
+ bool isSVECount() const { return getKind() == Kind::SveCount; }
+
/// Determines whether the given kind corresponds to a placeholder type.
static bool isPlaceholderTypeKind(Kind K) {
return K >= Overload;
@@ -3385,7 +3431,10 @@ public:
SveFixedLengthDataVector,
/// is AArch64 SVE fixed-length predicate vector
- SveFixedLengthPredicateVector
+ SveFixedLengthPredicateVector,
+
+ /// is RISC-V RVV fixed-length data vector
+ RVVFixedLengthDataVector,
};
protected:
@@ -3924,7 +3973,7 @@ public:
/// The number of types in the exception specification.
/// A whole unsigned is not needed here and according to
/// [implimits] 8 bits would be enough here.
- unsigned NumExceptionType = 0;
+ uint16_t NumExceptionType = 0;
};
protected:
@@ -4920,6 +4969,8 @@ public:
bool isMSTypeSpec() const;
+ bool isWebAssemblyFuncrefSpec() const;
+
bool isCallingConv() const;
std::optional<NullabilityKind> getImmediateNullability() const;
@@ -6608,7 +6659,7 @@ class alignas(8) TypeSourceInfo {
QualType Ty;
- TypeSourceInfo(QualType ty) : Ty(ty) {}
+ TypeSourceInfo(QualType ty, size_t DataSize); // implemented in TypeLoc.h
public:
/// Return the type wrapped by this type source info.
@@ -6749,15 +6800,6 @@ inline void QualType::removeLocalVolatile() {
removeLocalFastQualifiers(Qualifiers::Volatile);
}
-inline void QualType::removeLocalCVRQualifiers(unsigned Mask) {
- assert(!(Mask & ~Qualifiers::CVRMask) && "mask has non-CVR bits");
- static_assert((int)Qualifiers::CVRMask == (int)Qualifiers::FastMask,
- "Fast bits differ from CVR bits!");
-
- // Fast path: we don't need to touch the slow qualifiers.
- removeLocalFastQualifiers(Mask);
-}
-
/// Check if this type has any address space qualifier.
inline bool QualType::hasAddressSpace() const {
return getQualifiers().hasAddressSpace();
@@ -7153,6 +7195,27 @@ inline bool Type::isRVVType() const {
false; // end of boolean or operation.
}
+inline bool Type::isRVVType(unsigned ElementCount) const {
+ bool Ret = false;
+#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
+ IsFP) \
+ if (NumEls == ElementCount) \
+ Ret |= isSpecificBuiltinType(BuiltinType::Id);
+#include "clang/Basic/RISCVVTypes.def"
+ return Ret;
+}
+
+inline bool Type::isRVVType(unsigned Bitwidth, bool IsFloat) const {
+ bool Ret = false;
+#define RVV_TYPE(Name, Id, SingletonId)
+#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
+ IsFP) \
+ if (ElBits == Bitwidth && IsFloat == IsFP) \
+ Ret |= isSpecificBuiltinType(BuiltinType::Id);
+#include "clang/Basic/RISCVVTypes.def"
+ return Ret;
+}
+
inline bool Type::isTemplateTypeParmType() const {
return isa<TemplateTypeParmType>(CanonicalType);
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h b/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h
index 72ed3cb752e0..27f714b7c983 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h
@@ -240,6 +240,11 @@ private:
static SourceRange getLocalSourceRangeImpl(TypeLoc TL);
};
+inline TypeSourceInfo::TypeSourceInfo(QualType ty, size_t DataSize) : Ty(ty) {
+ // Init data attached to the object. See getTypeLoc.
+ memset(this + 1, 0, DataSize);
+}
+
/// Return the TypeLoc for a type source info.
inline TypeLoc TypeSourceInfo::getTypeLoc() const {
// TODO: is this alignment already sufficient?
@@ -2102,7 +2107,7 @@ struct AutoTypeLocInfo : TypeSpecLocInfo {
NestedNameSpecifierLoc NestedNameSpec;
SourceLocation TemplateKWLoc;
SourceLocation ConceptNameLoc;
- NamedDecl *FoundDecl;
+ NamedDecl *FoundDecl = nullptr;
SourceLocation LAngleLoc;
SourceLocation RAngleLoc;
diff --git a/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td b/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td
index aca445fbe6ce..3cc826c1463a 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td
+++ b/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td
@@ -11,7 +11,7 @@ include "clang/Basic/TypeNodes.td"
let Class = ComplexType in {
def : Property<"elementType", QualType> {
- let Read = [{ node->getElementType() }];
+ let Read = [{ node->getElementType() }];
}
def : Creator<[{ return ctx.getComplexType(elementType); }]>;
@@ -591,7 +591,7 @@ let Class = ParenType in {
def : Creator<[{
return ctx.getParenType(innerType);
- }]>;
+ }]>;
}
let Class = MacroQualifiedType in {
@@ -813,6 +813,10 @@ let Class = BuiltinType in {
case BuiltinType::ID: return ctx.SINGLETON_ID;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(NAME, ID, SINGLETON_ID) \
+ case BuiltinType::ID: return ctx.SINGLETON_ID;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+
#define BUILTIN_TYPE(ID, SINGLETON_ID) \
case BuiltinType::ID: return ctx.SINGLETON_ID;
#include "clang/AST/BuiltinTypes.def"
@@ -889,7 +893,7 @@ let Class = ObjCInterfaceType in {
let Class = ObjCTypeParamType in {
def : Property<"declaration", ObjCTypeParamDeclRef> {
let Read = [{ node->getDecl() }];
- }
+ }
def : Property<"qualifiers", Array<ObjCProtocolDeclRef>> {
let Read = [{ node->getProtocols() }];
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h b/contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h
index 17b47f6ab96b..ee31be969b6e 100644
--- a/contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h
+++ b/contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h
@@ -114,9 +114,17 @@ public:
I.I->set(New, AS);
}
- void erase(unsigned I) { decls()[I] = decls().pop_back_val(); }
+ void erase(unsigned I) {
+ auto val = decls().pop_back_val();
+ if (I < size())
+ decls()[I] = val;
+ }
- void erase(iterator I) { *I.I = decls().pop_back_val(); }
+ void erase(iterator I) {
+ auto val = decls().pop_back_val();
+ if (I != end())
+ *I.I = val;
+ }
void setAccess(iterator I, AccessSpecifier AS) { I.I->setAccess(AS); }
diff --git a/contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h b/contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h
index e451f3f861b7..1bf7d0467aa3 100644
--- a/contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h
+++ b/contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h
@@ -563,8 +563,6 @@ private:
llvm::DenseMap<const CXXRecordDecl *, std::unique_ptr<VirtualBaseInfo>>
VBaseInfo;
- void enumerateVFPtrs(const CXXRecordDecl *ForClass, VPtrInfoVector &Result);
-
void computeVTableRelatedInformation(const CXXRecordDecl *RD) override;
void dumpMethodLocations(const CXXRecordDecl *RD,
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h
index 98b727e6940b..f49204a3d906 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h
@@ -81,6 +81,7 @@
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
@@ -1333,6 +1334,16 @@ extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
+/// Matches concept declarations.
+///
+/// Example matches integral
+/// \code
+/// template<typename T>
+/// concept integral = std::is_integral_v<T>;
+/// \endcode
+extern const internal::VariadicDynCastAllOfMatcher<Decl, ConceptDecl>
+ conceptDecl;
+
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
@@ -1970,6 +1981,45 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
+/// Matches a loop initializing the elements of an array in a number of contexts:
+/// * in the implicit copy/move constructor for a class with an array member
+/// * when a lambda-expression captures an array by value
+/// * when a decomposition declaration decomposes an array
+///
+/// Given
+/// \code
+/// void testLambdaCapture() {
+/// int a[10];
+/// auto Lam1 = [a]() {
+/// return;
+/// };
+/// }
+/// \endcode
+/// arrayInitLoopExpr() matches the implicit loop that initializes each element of
+/// the implicit array field inside the lambda object, that represents the array `a`
+/// captured by value.
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArrayInitLoopExpr>
+ arrayInitLoopExpr;
+
+/// The arrayInitLoopExpr consists of two subexpressions: a common expression
+/// (the source array) that is evaluated once up-front, and a per-element initializer
+/// that runs once for each array element. Within the per-element initializer,
+/// the current index may be obtained via an ArrayInitIndexExpr.
+///
+/// Given
+/// \code
+/// void testStructBinding() {
+/// int a[2] = {1, 2};
+/// auto [x, y] = a;
+/// }
+/// \endcode
+/// arrayInitIndexExpr() matches the array index that implicitly iterates
+/// over the array `a` to copy each element to the anonymous array
+/// that backs the structured binding `[x, y]` elements of which are
+/// referred to by their aliases `x` and `y`.
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArrayInitIndexExpr>
+ arrayInitIndexExpr;
+
/// Matches array subscript expressions.
///
/// Given
@@ -2450,6 +2500,17 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, DependentCoawaitExpr>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoyieldExpr>
coyieldExpr;
+/// Matches coroutine body statements.
+///
+/// coroutineBodyStmt() matches the coroutine below
+/// \code
+/// generator<int> gen() {
+/// co_return;
+/// }
+/// \endcode
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoroutineBodyStmt>
+ coroutineBodyStmt;
+
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
@@ -4419,6 +4480,33 @@ AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
return NumArgs == N;
}
+/// Checks that a call expression or a constructor call expression has at least
+/// the specified number of arguments (including absent default arguments).
+///
+/// Example matches f(0, 0) and g(0, 0, 0)
+/// (matcher = callExpr(argumentCountAtLeast(2)))
+/// \code
+/// void f(int x, int y);
+/// void g(int x, int y, int z);
+/// f(0, 0);
+/// g(0, 0, 0);
+/// \endcode
+AST_POLYMORPHIC_MATCHER_P(argumentCountAtLeast,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(
+ CallExpr, CXXConstructExpr,
+ CXXUnresolvedConstructExpr, ObjCMessageExpr),
+ unsigned, N) {
+ unsigned NumArgs = Node.getNumArgs();
+ if (!Finder->isTraversalIgnoringImplicitNodes())
+ return NumArgs >= N;
+ while (NumArgs) {
+ if (!isa<CXXDefaultArgExpr>(Node.getArg(NumArgs - 1)))
+ break;
+ --NumArgs;
+ }
+ return NumArgs >= N;
+}
+
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
@@ -5460,9 +5548,10 @@ AST_MATCHER_P(ArraySubscriptExpr, hasBase,
return false;
}
-/// Matches a 'for', 'while', 'do' statement or a function definition that has
-/// a given body. Note that in case of functions this matcher only matches the
-/// definition itself and not the other declarations of the same function.
+/// Matches a 'for', 'while', 'do' statement or a function or coroutine
+/// definition that has a given body. Note that in case of functions or
+/// coroutines this matcher only matches the definition itself and not the
+/// other declarations of the same function or coroutine.
///
/// Given
/// \code
@@ -5483,12 +5572,11 @@ AST_MATCHER_P(ArraySubscriptExpr, hasBase,
/// with compoundStmt()
/// matching '{}'
/// but does not match 'void f();'
-AST_POLYMORPHIC_MATCHER_P(hasBody,
- AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
- WhileStmt,
- CXXForRangeStmt,
- FunctionDecl),
- internal::Matcher<Stmt>, InnerMatcher) {
+AST_POLYMORPHIC_MATCHER_P(
+ hasBody,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt, WhileStmt, CXXForRangeStmt,
+ FunctionDecl, CoroutineBodyStmt),
+ internal::Matcher<Stmt>, InnerMatcher) {
if (Finder->isTraversalIgnoringImplicitNodes() && isDefaultedHelper(&Node))
return false;
const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
index 3f6f364d6505..960d59a747fc 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
@@ -28,9 +28,9 @@ namespace ast_matchers {
namespace dynamic {
struct SourceLocation {
- SourceLocation() : Line(), Column() {}
- unsigned Line;
- unsigned Column;
+ SourceLocation() = default;
+ unsigned Line = 0;
+ unsigned Column = 0;
};
struct SourceRange {
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h
index b65a3967cd50..6a1528a2da24 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h
@@ -28,7 +28,7 @@ class Stmt;
/// \enum IfThen -- then branch of the if statement has no call.
/// \enum IfElse -- else branch of the if statement has no call.
/// \enum Switch -- one of the switch cases doesn't have a call.
-/// \enum SwitchSkipped -- there is no call if none of the cases appies.
+/// \enum SwitchSkipped -- there is no call if none of the cases applies.
/// \enum LoopEntered -- no call when the loop is entered.
/// \enum LoopSkipped -- no call when the loop is not entered.
/// \enum FallbackReason -- fallback case when we were not able to figure out
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h
index 24702567ab6c..3e2788cac3c9 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h
@@ -155,6 +155,10 @@ namespace consumed {
ConsumedStateMap(const ConsumedStateMap &Other)
: Reachable(Other.Reachable), From(Other.From), VarMap(Other.VarMap) {}
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ ConsumedStateMap &operator=(const ConsumedStateMap &) = delete;
+
/// Warn if any of the parameters being tracked are not in the state
/// they were declared to be in upon return from a function.
void checkParamsForReturnTypestate(SourceLocation BlameLoc,
@@ -240,7 +244,7 @@ namespace consumed {
ConsumedBlockInfo BlockInfo;
std::unique_ptr<ConsumedStateMap> CurrStates;
- ConsumedState ExpectedReturnState;
+ ConsumedState ExpectedReturnState = CS_None;
void determineExpectedReturnState(AnalysisDeclContext &AC,
const FunctionDecl *D);
@@ -258,7 +262,7 @@ namespace consumed {
/// Check a function's CFG for consumed violations.
///
/// We traverse the blocks in the CFG, keeping track of the state of each
- /// value who's type has uniquness annotations. If methods are invoked in
+ /// value whose type has uniqueness annotations. If methods are invoked in
/// the wrong state a warning is issued. Each block in the CFG is traversed
/// exactly once.
void run(AnalysisDeclContext &AC);
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/IntervalPartition.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/IntervalPartition.h
new file mode 100644
index 000000000000..cc04bab7bf6c
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/IntervalPartition.h
@@ -0,0 +1,50 @@
+//===- IntervalPartition.h - CFG Partitioning into Intervals -----*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functionality for partitioning a CFG into intervals. The
+// concepts and implementations are based on the presentation in "Compilers" by
+// Aho, Sethi and Ullman (the "dragon book"), pages 664-666.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_ANALYSES_INTERVALPARTITION_H
+#define LLVM_CLANG_ANALYSIS_ANALYSES_INTERVALPARTITION_H
+
+#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/DenseSet.h"
+#include <vector>
+
+namespace clang {
+
+// An interval is a strongly-connected component of the CFG along with a
+// trailing acyclic structure. The _header_ of the interval is either the CFG
+// entry block or has at least one predecessor outside of the interval. All
+// other blocks in the interval have only predecessors also in the interval.
+struct CFGInterval {
+ CFGInterval(const CFGBlock *Header) : Header(Header), Blocks({Header}) {}
+
+ // The block from which the interval was constructed. Is either the CFG entry
+ // block or has at least one predecessor outside the interval.
+ const CFGBlock *Header;
+
+ llvm::SmallDenseSet<const CFGBlock *> Blocks;
+
+ // Successor blocks of the *interval*: blocks outside the interval that are
+ // reachable (in one edge) from within the interval.
+ llvm::SmallDenseSet<const CFGBlock *> Successors;
+};
+
+CFGInterval buildInterval(const CFG &Cfg, const CFGBlock &Header);
+
+// Partitions `Cfg` into intervals and constructs a graph of the intervals,
+// based on the edges between nodes in these intervals.
+std::vector<CFGInterval> partitionIntoIntervals(const CFG &Cfg);
+
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_ANALYSES_INTERVALPARTITION_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ReachableCode.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ReachableCode.h
index 514b9458d331..f1b63f74b6c8 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ReachableCode.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ReachableCode.h
@@ -48,11 +48,9 @@ class Callback {
virtual void anchor();
public:
virtual ~Callback() {}
- virtual void HandleUnreachable(UnreachableKind UK,
- SourceLocation L,
- SourceRange ConditionVal,
- SourceRange R1,
- SourceRange R2) = 0;
+ virtual void HandleUnreachable(UnreachableKind UK, SourceLocation L,
+ SourceRange ConditionVal, SourceRange R1,
+ SourceRange R2, bool HasFallThroughAttr) = 0;
};
/// ScanReachableFromBlock - Mark all blocks reachable from Start.
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
index 9c73d65db266..9d28325c1ea6 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
@@ -484,8 +484,6 @@ private:
SMap.insert(std::make_pair(S, E));
}
- til::SExpr *getCurrentLVarDefinition(const ValueDecl *VD);
-
til::SExpr *addStatement(til::SExpr *E, const Stmt *S,
const ValueDecl *VD = nullptr);
til::SExpr *lookupVarDecl(const ValueDecl *VD);
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTIL.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTIL.h
index eac402a79106..65dd66ee093f 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTIL.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTIL.h
@@ -319,6 +319,7 @@ public:
protected:
SExpr(TIL_Opcode Op) : Opcode(Op) {}
SExpr(const SExpr &E) : Opcode(E.Opcode), Flags(E.Flags) {}
+ SExpr &operator=(const SExpr &) = delete;
const TIL_Opcode Opcode;
unsigned char Reserved = 0;
@@ -488,6 +489,10 @@ public:
Undefined(const Stmt *S = nullptr) : SExpr(COP_Undefined), Cstmt(S) {}
Undefined(const Undefined &U) : SExpr(U), Cstmt(U.Cstmt) {}
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ Undefined &operator=(const Undefined &) = delete;
+
static bool classof(const SExpr *E) { return E->opcode() == COP_Undefined; }
template <class V>
@@ -566,6 +571,10 @@ public:
LiteralT(T Dat) : Literal(ValueType::getValueType<T>()), Val(Dat) {}
LiteralT(const LiteralT<T> &L) : Literal(L), Val(L.Val) {}
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ LiteralT &operator=(const LiteralT<T> &) = delete;
+
T value() const { return Val;}
T& value() { return Val; }
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h
index 7792707e5025..ac7b24cdb4a6 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h
@@ -240,6 +240,10 @@ class CopyOnWriteVector {
VectorData() = default;
VectorData(const VectorData &VD) : Vect(VD.Vect) {}
+
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ VectorData &operator=(const VectorData &) = delete;
};
public:
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsage.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsage.h
index e3f87cd0f366..6766ba8ec277 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsage.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsage.h
@@ -19,6 +19,8 @@
namespace clang {
+using DefMapTy = llvm::DenseMap<const VarDecl *, std::vector<const VarDecl *>>;
+
/// The interface that lets the caller handle unsafe buffer usage analysis
/// results by overriding this class's handle... methods.
class UnsafeBufferUsageHandler {
@@ -34,15 +36,32 @@ public:
virtual void handleUnsafeOperation(const Stmt *Operation,
bool IsRelatedToDecl) = 0;
- /// Invoked when a fix is suggested against a variable.
- virtual void handleFixableVariable(const VarDecl *Variable,
- FixItList &&List) = 0;
+ /// Invoked when a fix is suggested against a variable. This function groups
+ /// all variables that must be fixed together (i.e. their types must be changed to the
+ /// same target type to prevent type mismatches) into a single fixit.
+ virtual void handleUnsafeVariableGroup(const VarDecl *Variable,
+ const DefMapTy &VarGrpMap,
+ FixItList &&Fixes) = 0;
+
+ /// Returns whether `Loc` is within a region that opts out of safe-buffer checks:
+ virtual bool isSafeBufferOptOut(const SourceLocation &Loc) const = 0;
+
+ virtual std::string
+ getUnsafeBufferUsageAttributeTextAt(SourceLocation Loc,
+ StringRef WSSuffix = "") const = 0;
};
// This function invokes the analysis and allows the caller to react to it
// through the handler class.
-void checkUnsafeBufferUsage(const Decl *D, UnsafeBufferUsageHandler &Handler);
+void checkUnsafeBufferUsage(const Decl *D, UnsafeBufferUsageHandler &Handler,
+ bool EmitSuggestions);
+namespace internal {
+// Tests if any two `FixItHint`s in `FixIts` conflict. Two `FixItHint`s
+// conflict if they have overlapping source ranges.
+bool anyConflict(const llvm::SmallVectorImpl<FixItHint> &FixIts,
+ const SourceManager &SM);
+} // namespace internal
} // end namespace clang
#endif /* LLVM_CLANG_ANALYSIS_ANALYSES_UNSAFEBUFFERUSAGE_H */
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def
index d10d95e5b1ba..ff687a0d178b 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def
@@ -29,6 +29,15 @@ WARNING_GADGET(Increment)
WARNING_GADGET(Decrement)
WARNING_GADGET(ArraySubscript)
WARNING_GADGET(PointerArithmetic)
+WARNING_GADGET(UnsafeBufferUsageAttr)
+FIXABLE_GADGET(ULCArraySubscript) // `DRE[any]` in an Unspecified Lvalue Context
+FIXABLE_GADGET(DerefSimplePtrArithFixable)
+FIXABLE_GADGET(PointerDereference)
+FIXABLE_GADGET(UPCAddressofArraySubscript) // '&DRE[any]' in an Unspecified Pointer Context
+FIXABLE_GADGET(UPCStandalonePointer)
+FIXABLE_GADGET(UPCPreIncrement) // '++Ptr' in an Unspecified Pointer Context
+FIXABLE_GADGET(PointerAssignment)
+FIXABLE_GADGET(PointerInit)
#undef FIXABLE_GADGET
#undef WARNING_GADGET
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h b/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h
index ce60ad56af4e..a517a4e757c9 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h
@@ -331,7 +331,7 @@ public:
unsigned getIndex() const { return Index; }
CFGElement getCallSiteCFGElement() const { return (*Block)[Index]; }
-
+
void Profile(llvm::FoldingSetNodeID &ID) override;
static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ADC,
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h b/contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h
index eaa6472433dd..52be29cb7885 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h
@@ -40,6 +40,9 @@ public:
/// Remove copy constructor to avoid accidental copying.
BodyFarm(const BodyFarm &other) = delete;
+ /// Delete copy assignment operator.
+ BodyFarm &operator=(const BodyFarm &other) = delete;
+
private:
typedef llvm::DenseMap<const Decl *, std::optional<Stmt *>> BodyMap;
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/CFG.h b/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
index bd5658cbdea3..eacebe176dda 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
@@ -1122,19 +1122,10 @@ public:
Elements.push_back(CFGScopeBegin(VD, S), C);
}
- void prependScopeBegin(const VarDecl *VD, const Stmt *S,
- BumpVectorContext &C) {
- Elements.insert(Elements.rbegin(), 1, CFGScopeBegin(VD, S), C);
- }
-
void appendScopeEnd(const VarDecl *VD, const Stmt *S, BumpVectorContext &C) {
Elements.push_back(CFGScopeEnd(VD, S), C);
}
- void prependScopeEnd(const VarDecl *VD, const Stmt *S, BumpVectorContext &C) {
- Elements.insert(Elements.rbegin(), 1, CFGScopeEnd(VD, S), C);
- }
-
void appendBaseDtor(const CXXBaseSpecifier *BS, BumpVectorContext &C) {
Elements.push_back(CFGBaseDtor(BS), C);
}
@@ -1162,44 +1153,6 @@ public:
void appendDeleteDtor(CXXRecordDecl *RD, CXXDeleteExpr *DE, BumpVectorContext &C) {
Elements.push_back(CFGDeleteDtor(RD, DE), C);
}
-
- // Destructors must be inserted in reversed order. So insertion is in two
- // steps. First we prepare space for some number of elements, then we insert
- // the elements beginning at the last position in prepared space.
- iterator beginAutomaticObjDtorsInsert(iterator I, size_t Cnt,
- BumpVectorContext &C) {
- return iterator(Elements.insert(I.base(), Cnt,
- CFGAutomaticObjDtor(nullptr, nullptr), C));
- }
- iterator insertAutomaticObjDtor(iterator I, VarDecl *VD, Stmt *S) {
- *I = CFGAutomaticObjDtor(VD, S);
- return ++I;
- }
-
- // Scope leaving must be performed in reversed order. So insertion is in two
- // steps. First we prepare space for some number of elements, then we insert
- // the elements beginning at the last position in prepared space.
- iterator beginLifetimeEndsInsert(iterator I, size_t Cnt,
- BumpVectorContext &C) {
- return iterator(
- Elements.insert(I.base(), Cnt, CFGLifetimeEnds(nullptr, nullptr), C));
- }
- iterator insertLifetimeEnds(iterator I, VarDecl *VD, Stmt *S) {
- *I = CFGLifetimeEnds(VD, S);
- return ++I;
- }
-
- // Scope leaving must be performed in reversed order. So insertion is in two
- // steps. First we prepare space for some number of elements, then we insert
- // the elements beginning at the last position in prepared space.
- iterator beginScopeEndInsert(iterator I, size_t Cnt, BumpVectorContext &C) {
- return iterator(
- Elements.insert(I.base(), Cnt, CFGScopeEnd(nullptr, nullptr), C));
- }
- iterator insertScopeEnd(iterator I, VarDecl *VD, Stmt *S) {
- *I = CFGScopeEnd(VD, S);
- return ++I;
- }
};
/// CFGCallback defines methods that should be called when a logical
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h b/contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h
index 999ac5da8acb..78f8d1155501 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h
@@ -66,7 +66,7 @@ public:
/// Determine if a declaration should be included in the graph.
static bool includeInGraph(const Decl *D);
- /// Determine if a declaration should be included in the graph for the
+ /// Determine if a declaration should be included in the graph for the
/// purposes of being a callee. This is similar to includeInGraph except
/// it permits declarations, not just definitions.
static bool includeCalleeInGraph(const Decl *D);
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Arena.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Arena.h
new file mode 100644
index 000000000000..373697dc7379
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Arena.h
@@ -0,0 +1,147 @@
+//===-- Arena.h -------------------------------*- C++ -------------------*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE__ARENA_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE__ARENA_H
+
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include <vector>
+
+namespace clang::dataflow {
+
+/// The Arena owns the objects that model data within an analysis.
+/// For example, `Value`, `StorageLocation`, `Atom`, and `Formula`.
+class Arena {
+public:
+ Arena() : True(makeAtom()), False(makeAtom()) {}
+ Arena(const Arena &) = delete;
+ Arena &operator=(const Arena &) = delete;
+
+ /// Creates a `T` (some subclass of `StorageLocation`), forwarding `args` to
+ /// the constructor, and returns a reference to it.
+ ///
+ /// The `DataflowAnalysisContext` takes ownership of the created object. The
+ /// object will be destroyed when the `DataflowAnalysisContext` is destroyed.
+ template <typename T, typename... Args>
+ std::enable_if_t<std::is_base_of<StorageLocation, T>::value, T &>
+ create(Args &&...args) {
+ // Note: If allocation of individual `StorageLocation`s turns out to be
+ // costly, consider creating specializations of `create<T>` for commonly
+ // used `StorageLocation` subclasses and make them use a `BumpPtrAllocator`.
+ return *cast<T>(
+ Locs.emplace_back(std::make_unique<T>(std::forward<Args>(args)...))
+ .get());
+ }
+
+ /// Creates a `T` (some subclass of `Value`), forwarding `args` to the
+ /// constructor, and returns a reference to it.
+ ///
+ /// The `DataflowAnalysisContext` takes ownership of the created object. The
+ /// object will be destroyed when the `DataflowAnalysisContext` is destroyed.
+ template <typename T, typename... Args>
+ std::enable_if_t<std::is_base_of<Value, T>::value, T &>
+ create(Args &&...args) {
+ // Note: If allocation of individual `Value`s turns out to be costly,
+ // consider creating specializations of `create<T>` for commonly used
+ // `Value` subclasses and make them use a `BumpPtrAllocator`.
+ return *cast<T>(
+ Vals.emplace_back(std::make_unique<T>(std::forward<Args>(args)...))
+ .get());
+ }
+
+ /// Creates a BoolValue wrapping a particular formula.
+ ///
+ /// Passing in the same formula will result in the same BoolValue.
+ /// FIXME: Interning BoolValues but not other Values is inconsistent.
+ /// Decide whether we want Value interning or not.
+ BoolValue &makeBoolValue(const Formula &);
+
+ /// Creates a fresh atom and wraps it in an AtomicBoolValue.
+ /// FIXME: For now, identical-address AtomicBoolValue <=> identical atom.
+ /// Stop relying on pointer identity and remove this guarantee.
+ AtomicBoolValue &makeAtomValue() {
+ return cast<AtomicBoolValue>(makeBoolValue(makeAtomRef(makeAtom())));
+ }
+
+ /// Creates a fresh Top boolean value.
+ TopBoolValue &makeTopValue() {
+ // No need for deduplicating: there's no way to create aliasing Tops.
+ return create<TopBoolValue>(makeAtomRef(makeAtom()));
+ }
+
+ /// Returns a symbolic integer value that models an integer literal equal to
+ /// `Value`. These literals are the same every time.
+ /// Integer literals are not typed; the type is determined by the `Expr` that
+ /// an integer literal is associated with.
+ IntegerValue &makeIntLiteral(llvm::APInt Value);
+
+ // Factories for boolean formulas.
+ // Formulas are interned: passing the same arguments return the same result.
+ // For commutative operations like And/Or, interning ignores order.
+ // Simplifications are applied: makeOr(X, X) => X, etc.
+
+ /// Returns a formula for the conjunction of `LHS` and `RHS`.
+ const Formula &makeAnd(const Formula &LHS, const Formula &RHS);
+
+ /// Returns a formula for the disjunction of `LHS` and `RHS`.
+ const Formula &makeOr(const Formula &LHS, const Formula &RHS);
+
+ /// Returns a formula for the negation of `Val`.
+ const Formula &makeNot(const Formula &Val);
+
+ /// Returns a formula for `LHS => RHS`.
+ const Formula &makeImplies(const Formula &LHS, const Formula &RHS);
+
+ /// Returns a formula for `LHS <=> RHS`.
+ const Formula &makeEquals(const Formula &LHS, const Formula &RHS);
+
+ /// Returns a formula for the variable A.
+ const Formula &makeAtomRef(Atom A);
+
+ /// Returns a formula for a literal true/false.
+ const Formula &makeLiteral(bool Value) {
+ return makeAtomRef(Value ? True : False);
+ }
+
+ /// Returns a new atomic boolean variable, distinct from any other.
+ Atom makeAtom() { return static_cast<Atom>(NextAtom++); };
+
+ /// Creates a fresh flow condition and returns a token that identifies it. The
+ /// token can be used to perform various operations on the flow condition such
+ /// as adding constraints to it, forking it, joining it with another flow
+ /// condition, or checking implications.
+ Atom makeFlowConditionToken() { return makeAtom(); }
+
+private:
+ llvm::BumpPtrAllocator Alloc;
+
+ // Storage for the state of a program.
+ std::vector<std::unique_ptr<StorageLocation>> Locs;
+ std::vector<std::unique_ptr<Value>> Vals;
+
+ // Indices that are used to avoid recreating the same integer literals and
+ // composite boolean values.
+ llvm::DenseMap<llvm::APInt, IntegerValue *> IntegerLiterals;
+ using FormulaPair = std::pair<const Formula *, const Formula *>;
+ llvm::DenseMap<FormulaPair, const Formula *> Ands;
+ llvm::DenseMap<FormulaPair, const Formula *> Ors;
+ llvm::DenseMap<const Formula *, const Formula *> Nots;
+ llvm::DenseMap<FormulaPair, const Formula *> Implies;
+ llvm::DenseMap<FormulaPair, const Formula *> Equals;
+ llvm::DenseMap<Atom, const Formula *> AtomRefs;
+
+ llvm::DenseMap<const Formula *, BoolValue *> FormulaValues;
+ unsigned NextAtom = 0;
+
+ Atom True, False;
+};
+
+} // namespace clang::dataflow
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE__ARENA_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h
index e641468f77d0..bb36ed237c1e 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h
@@ -18,6 +18,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Stmt.h"
#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Error.h"
#include <memory>
@@ -30,8 +31,18 @@ namespace dataflow {
/// analysis.
class ControlFlowContext {
public:
+ /// Builds a ControlFlowContext from a `FunctionDecl`.
+ /// `Func.hasBody()` must be true, and `Func.isTemplated()` must be false.
+ static llvm::Expected<ControlFlowContext> build(const FunctionDecl &Func);
+
+ /// Builds a ControlFlowContext from an AST node. `D` is the function in which
+ /// `S` resides. `D.isTemplated()` must be false.
+ static llvm::Expected<ControlFlowContext> build(const Decl &D, Stmt &S,
+ ASTContext &C);
+
/// Builds a ControlFlowContext from an AST node. `D` is the function in which
- /// `S` resides and must not be null.
+ /// `S` resides. `D` must not be null and `D->isTemplated()` must be false.
+ LLVM_DEPRECATED("Use the version that takes a const Decl & instead", "")
static llvm::Expected<ControlFlowContext> build(const Decl *D, Stmt &S,
ASTContext &C);
@@ -47,18 +58,26 @@ public:
return StmtToBlock;
}
+ /// Returns whether `B` is reachable from the entry block.
+ bool isBlockReachable(const CFGBlock &B) const {
+ return BlockReachable[B.getBlockID()];
+ }
+
private:
// FIXME: Once the deprecated `build` method is removed, mark `D` as "must not
// be null" and add an assertion.
ControlFlowContext(const Decl *D, std::unique_ptr<CFG> Cfg,
- llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock)
+ llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock,
+ llvm::BitVector BlockReachable)
: ContainingDecl(D), Cfg(std::move(Cfg)),
- StmtToBlock(std::move(StmtToBlock)) {}
+ StmtToBlock(std::move(StmtToBlock)),
+ BlockReachable(std::move(BlockReachable)) {}
/// The `Decl` containing the statement used to construct the CFG.
const Decl *ContainingDecl;
std::unique_ptr<CFG> Cfg;
llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock;
+ llvm::BitVector BlockReachable;
};
} // namespace dataflow
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h
index 517dd3c9997f..33eb42897b8a 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h
@@ -41,7 +41,7 @@ namespace dataflow {
/// must provide the following public members:
/// * `LatticeT initialElement()` - returns a lattice element that models the
/// initial state of a basic block;
-/// * `void transfer(const CFGElement *, LatticeT &, Environment &)` - applies
+/// * `void transfer(const CFGElement &, LatticeT &, Environment &)` - applies
/// the analysis transfer function for a given CFG element and lattice
/// element.
///
@@ -61,6 +61,7 @@ namespace dataflow {
/// argument by computing their least upper bound, modifies the object if
/// necessary, and returns an effect indicating whether any changes were
/// made to it;
+/// FIXME: make it `static LatticeT join(const LatticeT&, const LatticeT&)`
/// * `bool operator==(const LatticeT &) const` - returns true if and only if
/// the object is equal to the argument.
///
@@ -98,11 +99,13 @@ public:
return {static_cast<Derived *>(this)->initialElement()};
}
- LatticeJoinEffect joinTypeErased(TypeErasedLattice &E1,
+ TypeErasedLattice joinTypeErased(const TypeErasedLattice &E1,
const TypeErasedLattice &E2) final {
- Lattice &L1 = llvm::any_cast<Lattice &>(E1.Value);
+ // FIXME: change the signature of join() to avoid copying here.
+ Lattice L1 = llvm::any_cast<const Lattice &>(E1.Value);
const Lattice &L2 = llvm::any_cast<const Lattice &>(E2.Value);
- return L1.join(L2);
+ L1.join(L2);
+ return {std::move(L1)};
}
LatticeJoinEffect widenTypeErased(TypeErasedLattice &Current,
@@ -119,7 +122,7 @@ public:
return L1 == L2;
}
- void transferTypeErased(const CFGElement *Element, TypeErasedLattice &E,
+ void transferTypeErased(const CFGElement &Element, TypeErasedLattice &E,
Environment &Env) final {
Lattice &L = llvm::any_cast<Lattice &>(E.Value);
static_cast<Derived *>(this)->transfer(Element, L, Env);
@@ -205,8 +208,10 @@ runDataflowAnalysis(
const TypeErasedDataflowAnalysisState &State) {
auto *Lattice =
llvm::any_cast<typename AnalysisT::Lattice>(&State.Lattice.Value);
+ // FIXME: we should not be copying the environment here!
+ // Ultimately the PostVisitCFG only gets a const reference anyway.
PostVisitCFG(Element, DataflowAnalysisState<typename AnalysisT::Lattice>{
- *Lattice, State.Env});
+ *Lattice, State.Env.fork()});
};
}
@@ -222,14 +227,15 @@ runDataflowAnalysis(
llvm::transform(
std::move(*TypeErasedBlockStates), std::back_inserter(BlockStates),
[](auto &OptState) {
- return llvm::transformOptional(std::move(OptState), [](auto &&State) {
- return DataflowAnalysisState<typename AnalysisT::Lattice>{
- llvm::any_cast<typename AnalysisT::Lattice>(
- std::move(State.Lattice.Value)),
- std::move(State.Env)};
- });
+ return llvm::transformOptional(
+ std::move(OptState), [](TypeErasedDataflowAnalysisState &&State) {
+ return DataflowAnalysisState<typename AnalysisT::Lattice>{
+ llvm::any_cast<typename AnalysisT::Lattice>(
+ std::move(State.Lattice.Value)),
+ std::move(State.Env)};
+ });
});
- return BlockStates;
+ return std::move(BlockStates);
}
/// Abstract base class for dataflow "models": reusable analysis components that
@@ -238,7 +244,7 @@ runDataflowAnalysis(
class DataflowModel : public Environment::ValueModel {
public:
/// Return value indicates whether the model processed the `Element`.
- virtual bool transfer(const CFGElement *Element, Environment &Env) = 0;
+ virtual bool transfer(const CFGElement &Element, Environment &Env) = 0;
};
} // namespace dataflow
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
index 74748c8723ce..e5c325b876bd 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
@@ -18,12 +18,14 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/TypeOrdering.h"
+#include "clang/Analysis/FlowSensitive/Arena.h"
#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
#include "clang/Analysis/FlowSensitive/Solver.h"
#include "clang/Analysis/FlowSensitive/StorageLocation.h"
#include "clang/Analysis/FlowSensitive/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
#include <memory>
@@ -34,6 +36,7 @@
namespace clang {
namespace dataflow {
+class Logger;
/// Skip past nodes that the CFG does not emit. These nodes are invisible to
/// flow-sensitive analysis, and should be ignored as they will effectively not
@@ -48,8 +51,12 @@ namespace dataflow {
const Expr &ignoreCFGOmittedNodes(const Expr &E);
const Stmt &ignoreCFGOmittedNodes(const Stmt &S);
+/// A set of `FieldDecl *`. Use `SmallSetVector` to guarantee deterministic
+/// iteration order.
+using FieldSet = llvm::SmallSetVector<const FieldDecl *, 4>;
+
/// Returns the set of all fields in the type.
-llvm::DenseSet<const FieldDecl *> getObjectFields(QualType Type);
+FieldSet getObjectFields(QualType Type);
struct ContextSensitiveOptions {
/// The maximum depth to analyze. A value of zero is equivalent to disabling
@@ -67,6 +74,11 @@ public:
/// fundamentally limited: some constructs, such as recursion, are
/// explicitly unsupported.
std::optional<ContextSensitiveOptions> ContextSensitiveOpts;
+
+ /// If provided, analysis details will be recorded here.
+ /// (This is always non-null within an AnalysisContext, the framework
+ /// provides a fallback no-op logger).
+ Logger *Log = nullptr;
};
/// Constructs a dataflow analysis context.
@@ -76,37 +88,9 @@ public:
/// `S` must not be null.
DataflowAnalysisContext(std::unique_ptr<Solver> S,
Options Opts = Options{
- /*ContextSensitiveOpts=*/std::nullopt})
- : S(std::move(S)), TrueVal(createAtomicBoolValue()),
- FalseVal(createAtomicBoolValue()), Opts(Opts) {
- assert(this->S != nullptr);
- }
-
- /// Takes ownership of `Loc` and returns a reference to it.
- ///
- /// Requirements:
- ///
- /// `Loc` must not be null.
- template <typename T>
- std::enable_if_t<std::is_base_of<StorageLocation, T>::value, T &>
- takeOwnership(std::unique_ptr<T> Loc) {
- assert(Loc != nullptr);
- Locs.push_back(std::move(Loc));
- return *cast<T>(Locs.back().get());
- }
-
- /// Takes ownership of `Val` and returns a reference to it.
- ///
- /// Requirements:
- ///
- /// `Val` must not be null.
- template <typename T>
- std::enable_if_t<std::is_base_of<Value, T>::value, T &>
- takeOwnership(std::unique_ptr<T> Val) {
- assert(Val != nullptr);
- Vals.push_back(std::move(Val));
- return *cast<T>(Vals.back().get());
- }
+ /*ContextSensitiveOpts=*/std::nullopt,
+ /*Logger=*/nullptr});
+ ~DataflowAnalysisContext();
/// Returns a new storage location appropriate for `Type`.
///
@@ -125,15 +109,14 @@ public:
///
/// `D` must not be assigned a storage location.
void setStorageLocation(const ValueDecl &D, StorageLocation &Loc) {
- assert(DeclToLoc.find(&D) == DeclToLoc.end());
+ assert(!DeclToLoc.contains(&D));
DeclToLoc[&D] = &Loc;
}
/// Returns the storage location assigned to `D` or null if `D` has no
/// assigned storage location.
StorageLocation *getStorageLocation(const ValueDecl &D) const {
- auto It = DeclToLoc.find(&D);
- return It == DeclToLoc.end() ? nullptr : It->second;
+ return DeclToLoc.lookup(&D);
}
/// Assigns `Loc` as the storage location of `E`.
@@ -143,15 +126,14 @@ public:
/// `E` must not be assigned a storage location.
void setStorageLocation(const Expr &E, StorageLocation &Loc) {
const Expr &CanonE = ignoreCFGOmittedNodes(E);
- assert(ExprToLoc.find(&CanonE) == ExprToLoc.end());
+ assert(!ExprToLoc.contains(&CanonE));
ExprToLoc[&CanonE] = &Loc;
}
/// Returns the storage location assigned to `E` or null if `E` has no
/// assigned storage location.
StorageLocation *getStorageLocation(const Expr &E) const {
- auto It = ExprToLoc.find(&ignoreCFGOmittedNodes(E));
- return It == ExprToLoc.end() ? nullptr : It->second;
+ return ExprToLoc.lookup(&ignoreCFGOmittedNodes(E));
}
/// Returns a pointer value that represents a null pointer. Calls with
@@ -159,112 +141,33 @@ public:
/// A null `PointeeType` can be used for the pointee of `std::nullptr_t`.
PointerValue &getOrCreateNullPointerValue(QualType PointeeType);
- /// Returns a symbolic boolean value that models a boolean literal equal to
- /// `Value`.
- AtomicBoolValue &getBoolLiteralValue(bool Value) const {
- return Value ? TrueVal : FalseVal;
- }
-
- /// Creates an atomic boolean value.
- AtomicBoolValue &createAtomicBoolValue() {
- return takeOwnership(std::make_unique<AtomicBoolValue>());
- }
-
- /// Creates a Top value for booleans. Each instance is unique and can be
- /// assigned a distinct truth value during solving.
- ///
- /// FIXME: `Top iff Top` is true when both Tops are identical (by pointer
- /// equality), but not when they are distinct values. We should improve the
- /// implementation so that `Top iff Top` has a consistent meaning, regardless
- /// of the identity of `Top`. Moreover, I think the meaning should be
- /// `false`.
- TopBoolValue &createTopBoolValue() {
- return takeOwnership(std::make_unique<TopBoolValue>());
- }
-
- /// Returns a boolean value that represents the conjunction of `LHS` and
- /// `RHS`. Subsequent calls with the same arguments, regardless of their
- /// order, will return the same result. If the given boolean values represent
- /// the same value, the result will be the value itself.
- BoolValue &getOrCreateConjunction(BoolValue &LHS, BoolValue &RHS);
-
- /// Returns a boolean value that represents the disjunction of `LHS` and
- /// `RHS`. Subsequent calls with the same arguments, regardless of their
- /// order, will return the same result. If the given boolean values represent
- /// the same value, the result will be the value itself.
- BoolValue &getOrCreateDisjunction(BoolValue &LHS, BoolValue &RHS);
-
- /// Returns a boolean value that represents the negation of `Val`. Subsequent
- /// calls with the same argument will return the same result.
- BoolValue &getOrCreateNegation(BoolValue &Val);
-
- /// Returns a boolean value that represents `LHS => RHS`. Subsequent calls
- /// with the same arguments, will return the same result. If the given boolean
- /// values represent the same value, the result will be a value that
- /// represents the true boolean literal.
- BoolValue &getOrCreateImplication(BoolValue &LHS, BoolValue &RHS);
-
- /// Returns a boolean value that represents `LHS <=> RHS`. Subsequent calls
- /// with the same arguments, regardless of their order, will return the same
- /// result. If the given boolean values represent the same value, the result
- /// will be a value that represents the true boolean literal.
- BoolValue &getOrCreateIff(BoolValue &LHS, BoolValue &RHS);
-
- /// Creates a fresh flow condition and returns a token that identifies it. The
- /// token can be used to perform various operations on the flow condition such
- /// as adding constraints to it, forking it, joining it with another flow
- /// condition, or checking implications.
- AtomicBoolValue &makeFlowConditionToken();
-
/// Adds `Constraint` to the flow condition identified by `Token`.
- void addFlowConditionConstraint(AtomicBoolValue &Token,
- BoolValue &Constraint);
+ void addFlowConditionConstraint(Atom Token, const Formula &Constraint);
/// Creates a new flow condition with the same constraints as the flow
/// condition identified by `Token` and returns its token.
- AtomicBoolValue &forkFlowCondition(AtomicBoolValue &Token);
+ Atom forkFlowCondition(Atom Token);
/// Creates a new flow condition that represents the disjunction of the flow
/// conditions identified by `FirstToken` and `SecondToken`, and returns its
/// token.
- AtomicBoolValue &joinFlowConditions(AtomicBoolValue &FirstToken,
- AtomicBoolValue &SecondToken);
-
- // FIXME: This function returns the flow condition expressed directly as its
- // constraints: (C1 AND C2 AND ...). This differs from the general approach in
- // the framework where a flow condition is represented as a token (an atomic
- // boolean) with dependencies and constraints tracked in `FlowConditionDeps`
- // and `FlowConditionConstraints`: (FC <=> C1 AND C2 AND ...).
- // Consider if we should make the representation of flow condition consistent,
- // returning an atomic boolean token with separate constraints instead.
- //
- /// Builds and returns the logical formula defining the flow condition
- /// identified by `Token`. If a value in the formula is present as a key in
- /// `Substitutions`, it will be substituted with the value it maps to.
- /// As an example, say we have flow condition tokens FC1, FC2, FC3 and
- /// FlowConditionConstraints: { FC1: C1,
- /// FC2: C2,
- /// FC3: (FC1 v FC2) ^ C3 }
- /// buildAndSubstituteFlowCondition(FC3, {{C1 -> C1'}}) will return a value
- /// corresponding to (C1' v C2) ^ C3.
- BoolValue &buildAndSubstituteFlowCondition(
- AtomicBoolValue &Token,
- llvm::DenseMap<AtomicBoolValue *, BoolValue *> Substitutions);
+ Atom joinFlowConditions(Atom FirstToken, Atom SecondToken);
/// Returns true if and only if the constraints of the flow condition
/// identified by `Token` imply that `Val` is true.
- bool flowConditionImplies(AtomicBoolValue &Token, BoolValue &Val);
+ bool flowConditionImplies(Atom Token, const Formula &);
/// Returns true if and only if the constraints of the flow condition
/// identified by `Token` are always true.
- bool flowConditionIsTautology(AtomicBoolValue &Token);
+ bool flowConditionIsTautology(Atom Token);
/// Returns true if `Val1` is equivalent to `Val2`.
/// Note: This function doesn't take into account constraints on `Val1` and
/// `Val2` imposed by the flow condition.
- bool equivalentBoolValues(BoolValue &Val1, BoolValue &Val2);
+ bool equivalentFormulas(const Formula &Val1, const Formula &Val2);
- LLVM_DUMP_METHOD void dumpFlowCondition(AtomicBoolValue &Token);
+ LLVM_DUMP_METHOD void dumpFlowCondition(Atom Token,
+ llvm::raw_ostream &OS = llvm::dbgs());
/// Returns the `ControlFlowContext` registered for `F`, if any. Otherwise,
/// returns null.
@@ -272,6 +175,20 @@ public:
const Options &getOptions() { return Opts; }
+ Arena &arena() { return *A; }
+
+ /// Returns the outcome of satisfiability checking on `Constraints`.
+ ///
+ /// Flow conditions are not incorporated, so they may need to be manually
+ /// included in `Constraints` to provide contextually-accurate results, e.g.
+ /// if any definitions or relationships of the values in `Constraints` have
+ /// been stored in flow conditions.
+ Solver::Result querySolver(llvm::SetVector<const Formula *> Constraints);
+
+ /// Returns the fields of `Type`, limited to the set of fields modeled by this
+ /// context.
+ FieldSet getModeledFields(QualType Type);
+
private:
friend class Environment;
@@ -287,56 +204,25 @@ private:
};
// Extends the set of modeled field declarations.
- void addModeledFields(const llvm::DenseSet<const FieldDecl *> &Fields);
-
- /// Returns the fields of `Type`, limited to the set of fields modeled by this
- /// context.
- llvm::DenseSet<const FieldDecl *> getReferencedFields(QualType Type);
+ void addModeledFields(const FieldSet &Fields);
/// Adds all constraints of the flow condition identified by `Token` and all
/// of its transitive dependencies to `Constraints`. `VisitedTokens` is used
/// to track tokens of flow conditions that were already visited by recursive
/// calls.
void addTransitiveFlowConditionConstraints(
- AtomicBoolValue &Token, llvm::DenseSet<BoolValue *> &Constraints,
- llvm::DenseSet<AtomicBoolValue *> &VisitedTokens);
-
- /// Returns the outcome of satisfiability checking on `Constraints`.
- /// Possible outcomes are:
- /// - `Satisfiable`: A satisfying assignment exists and is returned.
- /// - `Unsatisfiable`: A satisfying assignment does not exist.
- /// - `TimedOut`: The search for a satisfying assignment was not completed.
- Solver::Result querySolver(llvm::DenseSet<BoolValue *> Constraints);
+ Atom Token, llvm::SetVector<const Formula *> &Constraints,
+ llvm::DenseSet<Atom> &VisitedTokens);
/// Returns true if the solver is able to prove that there is no satisfying
/// assignment for `Constraints`
- bool isUnsatisfiable(llvm::DenseSet<BoolValue *> Constraints) {
+ bool isUnsatisfiable(llvm::SetVector<const Formula *> Constraints) {
return querySolver(std::move(Constraints)).getStatus() ==
Solver::Result::Status::Unsatisfiable;
}
- /// Returns a boolean value as a result of substituting `Val` and its sub
- /// values based on entries in `SubstitutionsCache`. Intermediate results are
- /// stored in `SubstitutionsCache` to avoid reprocessing values that have
- /// already been visited.
- BoolValue &substituteBoolValue(
- BoolValue &Val,
- llvm::DenseMap<BoolValue *, BoolValue *> &SubstitutionsCache);
-
- /// Builds and returns the logical formula defining the flow condition
- /// identified by `Token`, sub values may be substituted based on entries in
- /// `SubstitutionsCache`. Intermediate results are stored in
- /// `SubstitutionsCache` to avoid reprocessing values that have already been
- /// visited.
- BoolValue &buildAndSubstituteFlowConditionWithCache(
- AtomicBoolValue &Token,
- llvm::DenseMap<BoolValue *, BoolValue *> &SubstitutionsCache);
-
std::unique_ptr<Solver> S;
-
- // Storage for the state of a program.
- std::vector<std::unique_ptr<StorageLocation>> Locs;
- std::vector<std::unique_ptr<Value>> Vals;
+ std::unique_ptr<Arena> A;
// Maps from program declarations and statements to storage locations that are
// assigned to them. These assignments are global (aggregated across all basic
@@ -355,23 +241,8 @@ private:
llvm::DenseMap<QualType, PointerValue *, NullableQualTypeDenseMapInfo>
NullPointerVals;
- AtomicBoolValue &TrueVal;
- AtomicBoolValue &FalseVal;
-
Options Opts;
- // Indices that are used to avoid recreating the same composite boolean
- // values.
- llvm::DenseMap<std::pair<BoolValue *, BoolValue *>, ConjunctionValue *>
- ConjunctionVals;
- llvm::DenseMap<std::pair<BoolValue *, BoolValue *>, DisjunctionValue *>
- DisjunctionVals;
- llvm::DenseMap<BoolValue *, NegationValue *> NegationVals;
- llvm::DenseMap<std::pair<BoolValue *, BoolValue *>, ImplicationValue *>
- ImplicationVals;
- llvm::DenseMap<std::pair<BoolValue *, BoolValue *>, BiconditionalValue *>
- BiconditionalVals;
-
// Flow conditions are tracked symbolically: each unique flow condition is
// associated with a fresh symbolic variable (token), bound to the clause that
// defines the flow condition. Conceptually, each binding corresponds to an
@@ -384,14 +255,15 @@ private:
// Flow conditions depend on other flow conditions if they are created using
// `forkFlowCondition` or `joinFlowConditions`. The graph of flow condition
// dependencies is stored in the `FlowConditionDeps` map.
- llvm::DenseMap<AtomicBoolValue *, llvm::DenseSet<AtomicBoolValue *>>
- FlowConditionDeps;
- llvm::DenseMap<AtomicBoolValue *, BoolValue *> FlowConditionConstraints;
+ llvm::DenseMap<Atom, llvm::DenseSet<Atom>> FlowConditionDeps;
+ llvm::DenseMap<Atom, const Formula *> FlowConditionConstraints;
llvm::DenseMap<const FunctionDecl *, ControlFlowContext> FunctionContexts;
// Fields modeled by environments covered by this context.
- llvm::DenseSet<const FieldDecl *> ModeledFields;
+ FieldSet ModeledFields;
+
+ std::unique_ptr<Logger> LogOwner; // If created via flags.
};
} // namespace dataflow
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
index e457430a5e64..5cf52ad3d722 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
@@ -22,10 +22,14 @@
#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Analysis/FlowSensitive/Logger.h"
#include "clang/Analysis/FlowSensitive/StorageLocation.h"
#include "clang/Analysis/FlowSensitive/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <memory>
#include <type_traits>
@@ -44,9 +48,6 @@ enum class SkipPast {
None,
/// An optional reference should be skipped past.
Reference,
- /// An optional reference should be skipped past, then an optional pointer
- /// should be skipped past.
- ReferenceThenPointer,
};
/// Indicates the result of a tentative comparison.
@@ -161,8 +162,8 @@ public:
/// the state of a program.
explicit Environment(DataflowAnalysisContext &DACtx);
- Environment(const Environment &Other);
- Environment &operator=(const Environment &Other);
+ // Copy-constructor is private, Environments should not be copied. See fork().
+ Environment &operator=(const Environment &Other) = delete;
Environment(Environment &&Other) = default;
Environment &operator=(Environment &&Other) = default;
@@ -177,9 +178,15 @@ public:
/// with a symbolic representation of the `this` pointee.
Environment(DataflowAnalysisContext &DACtx, const DeclContext &DeclCtx);
- const DataflowAnalysisContext::Options &getAnalysisOptions() {
- return DACtx->getOptions();
- }
+ /// Returns a new environment that is a copy of this one.
+ ///
+ /// The state of the program is initially the same, but can be mutated without
+ /// affecting the original.
+ ///
+ /// However the original should not be further mutated, as this may interfere
+ /// with the fork. (In practice, values are stored independently, but the
+ /// forked flow condition references the original).
+ Environment fork() const;
/// Creates and returns an environment to use for an inline analysis of the
/// callee. Uses the storage location from each argument in the `Call` as the
@@ -197,7 +204,8 @@ public:
/// Moves gathered information back into `this` from a `CalleeEnv` created via
/// `pushCall`.
- void popCall(const Environment &CalleeEnv);
+ void popCall(const CallExpr *Call, const Environment &CalleeEnv);
+ void popCall(const CXXConstructExpr *Call, const Environment &CalleeEnv);
/// Returns true if and only if the environment is equivalent to `Other`, i.e
/// the two environments:
@@ -212,17 +220,15 @@ public:
bool equivalentTo(const Environment &Other,
Environment::ValueModel &Model) const;
- /// Joins the environment with `Other` by taking the intersection of storage
- /// locations and values that are stored in them. Distinct values that are
- /// assigned to the same storage locations in the environment and `Other` are
- /// merged using `Model`.
+ /// Joins two environments by taking the intersection of storage locations and
+ /// values that are stored in them. Distinct values that are assigned to the
+ /// same storage locations in `EnvA` and `EnvB` are merged using `Model`.
///
/// Requirements:
///
- /// `Other` and `this` must use the same `DataflowAnalysisContext`.
- LatticeJoinEffect join(const Environment &Other,
- Environment::ValueModel &Model);
-
+ /// `EnvA` and `EnvB` must use the same `DataflowAnalysisContext`.
+ static Environment join(const Environment &EnvA, const Environment &EnvB,
+ Environment::ValueModel &Model);
/// Widens the environment point-wise, using `PrevEnv` as needed to inform the
/// approximation.
@@ -259,101 +265,290 @@ public:
///
/// Requirements:
///
- /// `D` must not be assigned a storage location in the environment.
+ /// `D` must not already have a storage location in the environment.
+ ///
+ /// If `D` has reference type, `Loc` must refer directly to the referenced
+ /// object (if any), not to a `ReferenceValue`, and it is not permitted to
+ /// later change `Loc` to refer to a `ReferenceValue.`
void setStorageLocation(const ValueDecl &D, StorageLocation &Loc);
- /// Returns the storage location assigned to `D` in the environment, applying
- /// the `SP` policy for skipping past indirections, or null if `D` isn't
- /// assigned a storage location in the environment.
- StorageLocation *getStorageLocation(const ValueDecl &D, SkipPast SP) const;
+ /// Returns the storage location assigned to `D` in the environment, or null
+ /// if `D` isn't assigned a storage location in the environment.
+ ///
+ /// Note that if `D` has reference type, the storage location that is returned
+ /// refers directly to the referenced object, not a `ReferenceValue`.
+ StorageLocation *getStorageLocation(const ValueDecl &D) const;
/// Assigns `Loc` as the storage location of `E` in the environment.
///
+ /// This function is deprecated; prefer `setStorageLocationStrict()`.
+ /// For details, see https://discourse.llvm.org/t/70086.
+ ///
/// Requirements:
///
/// `E` must not be assigned a storage location in the environment.
void setStorageLocation(const Expr &E, StorageLocation &Loc);
+ /// Assigns `Loc` as the storage location of the glvalue `E` in the
+ /// environment.
+ ///
+ /// This function is the preferred alternative to
+ /// `setStorageLocation(const Expr &, StorageLocation &)`. Once the migration
+ /// to strict handling of value categories is complete (see
+ /// https://discourse.llvm.org/t/70086), `setStorageLocation()` will be
+ /// removed and this function will be renamed to `setStorageLocation()`.
+ ///
+ /// Requirements:
+ ///
+ /// `E` must not be assigned a storage location in the environment.
+ /// `E` must be a glvalue or a `BuiltinType::BuiltinFn`
+ void setStorageLocationStrict(const Expr &E, StorageLocation &Loc);
+
/// Returns the storage location assigned to `E` in the environment, applying
/// the `SP` policy for skipping past indirections, or null if `E` isn't
/// assigned a storage location in the environment.
+ ///
+ /// This function is deprecated; prefer `getStorageLocationStrict()`.
+ /// For details, see https://discourse.llvm.org/t/70086.
StorageLocation *getStorageLocation(const Expr &E, SkipPast SP) const;
+ /// Returns the storage location assigned to the glvalue `E` in the
+ /// environment, or null if `E` isn't assigned a storage location in the
+ /// environment.
+ ///
+ /// If the storage location for `E` is associated with a
+ /// `ReferenceValue RefVal`, returns `RefVal.getReferentLoc()` instead.
+ ///
+ /// This function is the preferred alternative to
+ /// `getStorageLocation(const Expr &, SkipPast)`. Once the migration
+ /// to strict handling of value categories is complete (see
+ /// https://discourse.llvm.org/t/70086), `getStorageLocation()` will be
+ /// removed and this function will be renamed to `getStorageLocation()`.
+ ///
+ /// Requirements:
+ /// `E` must be a glvalue or a `BuiltinType::BuiltinFn`
+ StorageLocation *getStorageLocationStrict(const Expr &E) const;
+
/// Returns the storage location assigned to the `this` pointee in the
/// environment or null if the `this` pointee has no assigned storage location
/// in the environment.
- StorageLocation *getThisPointeeStorageLocation() const;
+ AggregateStorageLocation *getThisPointeeStorageLocation() const;
- /// Returns the storage location of the return value or null, if unset.
- StorageLocation *getReturnStorageLocation() const;
+ /// Returns the location of the result object for a record-type prvalue.
+ ///
+ /// In C++, prvalues of record type serve only a limited purpose: They can
+ /// only be used to initialize a result object (e.g. a variable or a
+ /// temporary). This function returns the location of that result object.
+ ///
+ /// When creating a prvalue of record type, we already need the storage
+ /// location of the result object to pass in `this`, even though prvalues are
+ /// otherwise not associated with storage locations.
+ ///
+ /// FIXME: Currently, this simply returns a stable storage location for `E`,
+ /// but this doesn't do the right thing in scenarios like the following:
+ /// ```
+ /// MyClass c = some_condition()? MyClass(foo) : MyClass(bar);
+ /// ```
+ /// Here, `MyClass(foo)` and `MyClass(bar)` will have two different storage
+ /// locations, when in fact their storage locations should be the same.
+ /// Eventually, we want to propagate storage locations from result objects
+ /// down to the prvalues that initialize them, similar to the way that this is
+ /// done in Clang's CodeGen.
+ ///
+ /// Requirements:
+ /// `E` must be a prvalue of record type.
+ AggregateStorageLocation &getResultObjectLocation(const Expr &RecordPRValue);
+
+ /// Returns the return value of the current function. This can be null if:
+ /// - The function has a void return type
+ /// - No return value could be determined for the function, for example
+ /// because it calls a function without a body.
+ ///
+ /// Requirements:
+ /// The current function must have a non-reference return type.
+ Value *getReturnValue() const {
+ assert(getCurrentFunc() != nullptr &&
+ !getCurrentFunc()->getReturnType()->isReferenceType());
+ return ReturnVal;
+ }
+
+ /// Returns the storage location for the reference returned by the current
+ /// function. This can be null if function doesn't return a single consistent
+ /// reference.
+ ///
+ /// Requirements:
+ /// The current function must have a reference return type.
+ StorageLocation *getReturnStorageLocation() const {
+ assert(getCurrentFunc() != nullptr &&
+ getCurrentFunc()->getReturnType()->isReferenceType());
+ return ReturnLoc;
+ }
+
+ /// Sets the return value of the current function.
+ ///
+ /// Requirements:
+ /// The current function must have a non-reference return type.
+ void setReturnValue(Value *Val) {
+ assert(getCurrentFunc() != nullptr &&
+ !getCurrentFunc()->getReturnType()->isReferenceType());
+ ReturnVal = Val;
+ }
+
+ /// Sets the storage location for the reference returned by the current
+ /// function.
+ ///
+ /// Requirements:
+ /// The current function must have a reference return type.
+ void setReturnStorageLocation(StorageLocation *Loc) {
+ assert(getCurrentFunc() != nullptr &&
+ getCurrentFunc()->getReturnType()->isReferenceType());
+ ReturnLoc = Loc;
+ }
/// Returns a pointer value that represents a null pointer. Calls with
/// `PointeeType` that are canonically equivalent will return the same result.
PointerValue &getOrCreateNullPointerValue(QualType PointeeType);
/// Creates a value appropriate for `Type`, if `Type` is supported, otherwise
- /// return null. If `Type` is a pointer or reference type, creates all the
- /// necessary storage locations and values for indirections until it finds a
+ /// returns null.
+ ///
+ /// If `Type` is a pointer or reference type, creates all the necessary
+ /// storage locations and values for indirections until it finds a
/// non-pointer/non-reference type.
///
+ /// If `Type` is a class, struct, or union type, calls `setValue()` to
+ /// associate the `StructValue` with its storage location
+ /// (`StructValue::getAggregateLoc()`).
+ ///
+ /// If `Type` is one of the following types, this function will always return
+ /// a non-null pointer:
+ /// - `bool`
+ /// - Any integer type
+ /// - Any class, struct, or union type
+ ///
/// Requirements:
///
/// `Type` must not be null.
Value *createValue(QualType Type);
+ /// Creates an object (i.e. a storage location with an associated value) of
+ /// type `Ty`. If `InitExpr` is non-null and has a value associated with it,
+ /// initializes the object with this value. Otherwise, initializes the object
+ /// with a value created using `createValue()`.
+ StorageLocation &createObject(QualType Ty, const Expr *InitExpr = nullptr) {
+ return createObjectInternal(nullptr, Ty, InitExpr);
+ }
+
+ /// Creates an object for the variable declaration `D`. If `D` has an
+ /// initializer and this initializer is associated with a value, initializes
+ /// the object with this value. Otherwise, initializes the object with a
+ /// value created using `createValue()`. Uses the storage location returned by
+ /// `DataflowAnalysisContext::getStableStorageLocation(D)`.
+ StorageLocation &createObject(const VarDecl &D) {
+ return createObjectInternal(&D, D.getType(), D.getInit());
+ }
+
+ /// Creates an object for the variable declaration `D`. If `InitExpr` is
+ /// non-null and has a value associated with it, initializes the object with
+ /// this value. Otherwise, initializes the object with a value created using
+ /// `createValue()`. Uses the storage location returned by
+ /// `DataflowAnalysisContext::getStableStorageLocation(D)`.
+ StorageLocation &createObject(const VarDecl &D, const Expr *InitExpr) {
+ return createObjectInternal(&D, D.getType(), InitExpr);
+ }
+
/// Assigns `Val` as the value of `Loc` in the environment.
void setValue(const StorageLocation &Loc, Value &Val);
+ /// Clears any association between `Loc` and a value in the environment.
+ void clearValue(const StorageLocation &Loc) { LocToVal.erase(&Loc); }
+
+ /// Assigns `Val` as the value of the prvalue `E` in the environment.
+ ///
+ /// If `E` is not yet associated with a storage location, associates it with
+ /// a newly created storage location. In any case, associates the storage
+ /// location of `E` with `Val`.
+ ///
+ /// Once the migration to strict handling of value categories is complete
+ /// (see https://discourse.llvm.org/t/70086), this function will be renamed to
+ /// `setValue()`. At this point, prvalue expressions will be associated
+ /// directly with `Value`s, and the legacy behavior of associating prvalue
+ /// expressions with storage locations (as described above) will be
+ /// eliminated.
+ ///
+ /// Requirements:
+ ///
+ /// `E` must be a prvalue
+ /// `Val` must not be a `ReferenceValue`
+ /// If `Val` is a `StructValue`, its `AggregateStorageLocation` must be the
+ /// same as that of any `StructValue` that has already been associated with
+ /// `E`. This is to guarantee that the result object initialized by a prvalue
+ /// `StructValue` has a durable storage location.
+ void setValueStrict(const Expr &E, Value &Val);
+
/// Returns the value assigned to `Loc` in the environment or null if `Loc`
/// isn't assigned a value in the environment.
Value *getValue(const StorageLocation &Loc) const;
/// Equivalent to `getValue(getStorageLocation(D, SP), SkipPast::None)` if `D`
/// is assigned a storage location in the environment, otherwise returns null.
- Value *getValue(const ValueDecl &D, SkipPast SP) const;
+ Value *getValue(const ValueDecl &D) const;
/// Equivalent to `getValue(getStorageLocation(E, SP), SkipPast::None)` if `E`
/// is assigned a storage location in the environment, otherwise returns null.
+ ///
+ /// This function is deprecated; prefer `getValueStrict()`. For details, see
+ /// https://discourse.llvm.org/t/70086.
Value *getValue(const Expr &E, SkipPast SP) const;
- /// Transfers ownership of `Loc` to the analysis context and returns a
- /// reference to it.
+ /// Returns the `Value` assigned to the prvalue `E` in the environment, or
+ /// null if `E` isn't assigned a value in the environment.
+ ///
+ /// This function is the preferred alternative to
+ /// `getValue(const Expr &, SkipPast)`. Once the migration to strict handling
+ /// of value categories is complete (see https://discourse.llvm.org/t/70086),
+ /// `getValue()` will be removed and this function will be renamed to
+ /// `getValue()`.
///
/// Requirements:
///
- /// `Loc` must not be null.
- template <typename T>
- std::enable_if_t<std::is_base_of<StorageLocation, T>::value, T &>
- takeOwnership(std::unique_ptr<T> Loc) {
- return DACtx->takeOwnership(std::move(Loc));
- }
+ /// `E` must be a prvalue
+ Value *getValueStrict(const Expr &E) const;
- /// Transfers ownership of `Val` to the analysis context and returns a
- /// reference to it.
- ///
- /// Requirements:
+ // FIXME: should we deprecate the following & call arena().create() directly?
+
+ /// Creates a `T` (some subclass of `Value`), forwarding `args` to the
+ /// constructor, and returns a reference to it.
///
- /// `Val` must not be null.
- template <typename T>
+ /// The analysis context takes ownership of the created object. The object
+ /// will be destroyed when the analysis context is destroyed.
+ template <typename T, typename... Args>
std::enable_if_t<std::is_base_of<Value, T>::value, T &>
- takeOwnership(std::unique_ptr<T> Val) {
- return DACtx->takeOwnership(std::move(Val));
+ create(Args &&...args) {
+ return arena().create<T>(std::forward<Args>(args)...);
+ }
+
+ /// Returns a symbolic integer value that models an integer literal equal to
+ /// `Value`
+ IntegerValue &getIntLiteralValue(llvm::APInt Value) const {
+ return arena().makeIntLiteral(Value);
}
/// Returns a symbolic boolean value that models a boolean literal equal to
/// `Value`
AtomicBoolValue &getBoolLiteralValue(bool Value) const {
- return DACtx->getBoolLiteralValue(Value);
+ return cast<AtomicBoolValue>(
+ arena().makeBoolValue(arena().makeLiteral(Value)));
}
/// Returns an atomic boolean value.
BoolValue &makeAtomicBoolValue() const {
- return DACtx->createAtomicBoolValue();
+ return arena().makeAtomValue();
}
/// Returns a unique instance of boolean Top.
BoolValue &makeTopBoolValue() const {
- return DACtx->createTopBoolValue();
+ return arena().makeTopValue();
}
/// Returns a boolean value that represents the conjunction of `LHS` and
@@ -361,7 +556,8 @@ public:
/// order, will return the same result. If the given boolean values represent
/// the same value, the result will be the value itself.
BoolValue &makeAnd(BoolValue &LHS, BoolValue &RHS) const {
- return DACtx->getOrCreateConjunction(LHS, RHS);
+ return arena().makeBoolValue(
+ arena().makeAnd(LHS.formula(), RHS.formula()));
}
/// Returns a boolean value that represents the disjunction of `LHS` and
@@ -369,13 +565,14 @@ public:
/// order, will return the same result. If the given boolean values represent
/// the same value, the result will be the value itself.
BoolValue &makeOr(BoolValue &LHS, BoolValue &RHS) const {
- return DACtx->getOrCreateDisjunction(LHS, RHS);
+ return arena().makeBoolValue(
+ arena().makeOr(LHS.formula(), RHS.formula()));
}
/// Returns a boolean value that represents the negation of `Val`. Subsequent
/// calls with the same argument will return the same result.
BoolValue &makeNot(BoolValue &Val) const {
- return DACtx->getOrCreateNegation(Val);
+ return arena().makeBoolValue(arena().makeNot(Val.formula()));
}
/// Returns a boolean value represents `LHS` => `RHS`. Subsequent calls with
@@ -383,7 +580,8 @@ public:
/// values represent the same value, the result will be a value that
/// represents the true boolean literal.
BoolValue &makeImplication(BoolValue &LHS, BoolValue &RHS) const {
- return DACtx->getOrCreateImplication(LHS, RHS);
+ return arena().makeBoolValue(
+ arena().makeImplies(LHS.formula(), RHS.formula()));
}
/// Returns a boolean value represents `LHS` <=> `RHS`. Subsequent calls with
@@ -391,48 +589,61 @@ public:
/// result. If the given boolean values represent the same value, the result
/// will be a value that represents the true boolean literal.
BoolValue &makeIff(BoolValue &LHS, BoolValue &RHS) const {
- return DACtx->getOrCreateIff(LHS, RHS);
+ return arena().makeBoolValue(
+ arena().makeEquals(LHS.formula(), RHS.formula()));
}
- /// Returns the token that identifies the flow condition of the environment.
- AtomicBoolValue &getFlowConditionToken() const { return *FlowConditionToken; }
-
- /// Builds and returns the logical formula defining the flow condition
- /// identified by `Token`. If a value in the formula is present as a key in
- /// `Substitutions`, it will be substituted with the value it maps to.
- BoolValue &buildAndSubstituteFlowCondition(
- AtomicBoolValue &Token,
- llvm::DenseMap<AtomicBoolValue *, BoolValue *> Substitutions) {
- return DACtx->buildAndSubstituteFlowCondition(Token,
- std::move(Substitutions));
- }
+ /// Returns a boolean variable that identifies the flow condition (FC).
+ ///
+ /// The flow condition is a set of facts that are necessarily true when the
+ /// program reaches the current point, expressed as boolean formulas.
+ /// The flow condition token is equivalent to the AND of these facts.
+ ///
+ /// These may e.g. constrain the value of certain variables. A pointer
+ /// variable may have a consistent modeled PointerValue throughout, but at a
+ /// given point the Environment may tell us that the value must be non-null.
+ ///
+ /// The FC is necessary but not sufficient for this point to be reachable.
+ /// In particular, where the FC token appears in flow conditions of successor
+ /// environments, it means "point X may have been reached", not
+ /// "point X was reached".
+ Atom getFlowConditionToken() const { return FlowConditionToken; }
- /// Adds `Val` to the set of clauses that constitute the flow condition.
- void addToFlowCondition(BoolValue &Val);
+ /// Record a fact that must be true if this point in the program is reached.
+ void addToFlowCondition(const Formula &);
- /// Returns true if and only if the clauses that constitute the flow condition
- /// imply that `Val` is true.
- bool flowConditionImplies(BoolValue &Val) const;
+ /// Returns true if the formula is always true when this point is reached.
+ /// Returns false if the formula may be false, or if the flow condition isn't
+ /// sufficiently precise to prove that it is true.
+ bool flowConditionImplies(const Formula &) const;
/// Returns the `DeclContext` of the block being analysed, if any. Otherwise,
/// returns null.
const DeclContext *getDeclCtx() const { return CallStack.back(); }
+ /// Returns the function currently being analyzed, or null if the code being
+ /// analyzed isn't part of a function.
+ const FunctionDecl *getCurrentFunc() const {
+ return dyn_cast<FunctionDecl>(getDeclCtx());
+ }
+
/// Returns whether this `Environment` can be extended to analyze the given
/// `Callee` (i.e. if `pushCall` can be used), with recursion disallowed and a
/// given `MaxDepth`.
bool canDescend(unsigned MaxDepth, const DeclContext *Callee) const;
- /// Returns the `ControlFlowContext` registered for `F`, if any. Otherwise,
- /// returns null.
- const ControlFlowContext *getControlFlowContext(const FunctionDecl *F) {
- return DACtx->getControlFlowContext(F);
- }
+ /// Returns the `DataflowAnalysisContext` used by the environment.
+ DataflowAnalysisContext &getDataflowAnalysisContext() const { return *DACtx; }
+
+ Arena &arena() const { return DACtx->arena(); }
LLVM_DUMP_METHOD void dump() const;
LLVM_DUMP_METHOD void dump(raw_ostream &OS) const;
private:
+ // The copy-constructor is for use in fork() only.
+ Environment(const Environment &) = default;
+
/// Creates a value appropriate for `Type`, if `Type` is supported, otherwise
/// return null.
///
@@ -448,6 +659,19 @@ private:
llvm::DenseSet<QualType> &Visited,
int Depth, int &CreatedValuesCount);
+ /// Creates a storage location for `Ty`. Also creates and associates a value
+ /// with the storage location, unless values of this type are not supported or
+ /// we hit one of the limits at which we stop producing values (controlled by
+ /// `Visited`, `Depth`, and `CreatedValuesCount`).
+ StorageLocation &createLocAndMaybeValue(QualType Ty,
+ llvm::DenseSet<QualType> &Visited,
+ int Depth, int &CreatedValuesCount);
+
+ /// Shared implementation of `createObject()` overloads.
+ /// `D` and `InitExpr` may be null.
+ StorageLocation &createObjectInternal(const VarDecl *D, QualType Ty,
+ const Expr *InitExpr);
+
StorageLocation &skip(StorageLocation &Loc, SkipPast SP) const;
const StorageLocation &skip(const StorageLocation &Loc, SkipPast SP) const;
@@ -457,26 +681,29 @@ private:
void pushCallInternal(const FunctionDecl *FuncDecl,
ArrayRef<const Expr *> Args);
- /// Assigns storage locations and values to all variables in `Vars`.
- void initVars(llvm::DenseSet<const VarDecl *> Vars);
+ /// Assigns storage locations and values to all global variables, fields
+ /// and functions referenced in `FuncDecl`. `FuncDecl` must have a body.
+ void initFieldsGlobalsAndFuncs(const FunctionDecl *FuncDecl);
// `DACtx` is not null and not owned by this object.
DataflowAnalysisContext *DACtx;
-
- // FIXME: move the fields `CallStack`, `ReturnLoc` and `ThisPointeeLoc` into a
- // separate call-context object, shared between environments in the same call.
+ // FIXME: move the fields `CallStack`, `ReturnVal`, `ReturnLoc` and
+ // `ThisPointeeLoc` into a separate call-context object, shared between
+ // environments in the same call.
// https://github.com/llvm/llvm-project/issues/59005
// `DeclContext` of the block being analysed if provided.
std::vector<const DeclContext *> CallStack;
- // In a properly initialized `Environment`, `ReturnLoc` should only be null if
- // its `DeclContext` could not be cast to a `FunctionDecl`.
+ // Value returned by the function (if it has non-reference return type).
+ Value *ReturnVal = nullptr;
+ // Storage location of the reference returned by the function (if it has
+ // reference return type).
StorageLocation *ReturnLoc = nullptr;
// The storage location of the `this` pointee. Should only be null if the
// function being analyzed is only a function and not a method.
- StorageLocation *ThisPointeeLoc = nullptr;
+ AggregateStorageLocation *ThisPointeeLoc = nullptr;
// Maps from program declarations and statements to storage locations that are
// assigned to them. Unlike the maps in `DataflowAnalysisContext`, these
@@ -484,17 +711,47 @@ private:
// block.
llvm::DenseMap<const ValueDecl *, StorageLocation *> DeclToLoc;
llvm::DenseMap<const Expr *, StorageLocation *> ExprToLoc;
+ // We preserve insertion order so that join/widen process values in
+ // deterministic sequence. This in turn produces deterministic SAT formulas.
+ llvm::MapVector<const StorageLocation *, Value *> LocToVal;
- llvm::DenseMap<const StorageLocation *, Value *> LocToVal;
+ Atom FlowConditionToken;
+};
- // Maps locations of struct members to symbolic values of the structs that own
- // them and the decls of the struct members.
- llvm::DenseMap<const StorageLocation *,
- std::pair<StructValue *, const ValueDecl *>>
- MemberLocToStruct;
+/// Returns the storage location for the implicit object of a
+/// `CXXMemberCallExpr`, or null if none is defined in the environment.
+/// Dereferences the pointer if the member call expression was written using
+/// `->`.
+AggregateStorageLocation *
+getImplicitObjectLocation(const CXXMemberCallExpr &MCE, const Environment &Env);
+
+/// Returns the storage location for the base object of a `MemberExpr`, or null
+/// if none is defined in the environment. Dereferences the pointer if the
+/// member expression was written using `->`.
+AggregateStorageLocation *getBaseObjectLocation(const MemberExpr &ME,
+ const Environment &Env);
+
+/// Returns the fields of `RD` that are initialized by an `InitListExpr`, in the
+/// order in which they appear in `InitListExpr::inits()`.
+std::vector<FieldDecl *> getFieldsForInitListExpr(const RecordDecl *RD);
+
+/// Associates a new `StructValue` with `Loc` and returns the new value.
+/// It is not defined whether the field values remain the same or not.
+///
+/// This function is primarily intended for use by checks that set custom
+/// properties on `StructValue`s to model the state of these values. Such checks
+/// should avoid modifying the properties of an existing `StructValue` because
+/// these changes would be visible to other `Environment`s that share the same
+/// `StructValue`. Instead, call `refreshStructValue()`, then set the properties
+/// on the new `StructValue` that it returns. Typical usage:
+///
+/// refreshStructValue(Loc, Env).setProperty("my_prop", MyPropValue);
+StructValue &refreshStructValue(AggregateStorageLocation &Loc,
+ Environment &Env);
- AtomicBoolValue *FlowConditionToken;
-};
+/// Associates a new `StructValue` with `Expr` and returns the new value.
+/// See also documentation for the overload above.
+StructValue &refreshStructValue(const Expr &Expr, Environment &Env);
} // namespace dataflow
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DebugSupport.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DebugSupport.h
index ca50ffc5f5c8..6b9f3681490a 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DebugSupport.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DebugSupport.h
@@ -19,7 +19,6 @@
#include "clang/Analysis/FlowSensitive/Solver.h"
#include "clang/Analysis/FlowSensitive/Value.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
namespace clang {
@@ -28,60 +27,9 @@ namespace dataflow {
/// Returns a string representation of a value kind.
llvm::StringRef debugString(Value::Kind Kind);
-/// Returns a string representation of a boolean assignment to true or false.
-llvm::StringRef debugString(Solver::Result::Assignment Assignment);
-
/// Returns a string representation of the result status of a SAT check.
llvm::StringRef debugString(Solver::Result::Status Status);
-/// Returns a string representation for the boolean value `B`.
-///
-/// Atomic booleans appearing in the boolean value `B` are assigned to labels
-/// either specified in `AtomNames` or created by default rules as B0, B1, ...
-///
-/// Requirements:
-///
-/// Names assigned to atoms should not be repeated in `AtomNames`.
-std::string debugString(
- const BoolValue &B,
- llvm::DenseMap<const AtomicBoolValue *, std::string> AtomNames = {{}});
-
-/// Returns a string representation for `Constraints` - a collection of boolean
-/// formulas.
-///
-/// Atomic booleans appearing in the boolean value `Constraints` are assigned to
-/// labels either specified in `AtomNames` or created by default rules as B0,
-/// B1, ...
-///
-/// Requirements:
-///
-/// Names assigned to atoms should not be repeated in `AtomNames`.
-std::string debugString(
- const llvm::DenseSet<BoolValue *> &Constraints,
- llvm::DenseMap<const AtomicBoolValue *, std::string> AtomNames = {{}});
-
-/// Returns a string representation for `Constraints` - a collection of boolean
-/// formulas and the `Result` of satisfiability checking.
-///
-/// Atomic booleans appearing in `Constraints` and `Result` are assigned to
-/// labels either specified in `AtomNames` or created by default rules as B0,
-/// B1, ...
-///
-/// Requirements:
-///
-/// Names assigned to atoms should not be repeated in `AtomNames`.
-std::string debugString(
- ArrayRef<BoolValue *> Constraints, const Solver::Result &Result,
- llvm::DenseMap<const AtomicBoolValue *, std::string> AtomNames = {{}});
-inline std::string debugString(
- const llvm::DenseSet<BoolValue *> &Constraints,
- const Solver::Result &Result,
- llvm::DenseMap<const AtomicBoolValue *, std::string> AtomNames = {{}}) {
- std::vector<BoolValue *> ConstraintsVec(Constraints.begin(),
- Constraints.end());
- return debugString(ConstraintsVec, Result, std::move(AtomNames));
-}
-
} // namespace dataflow
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Formula.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Formula.h
new file mode 100644
index 000000000000..64fe8f5b630a
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Formula.h
@@ -0,0 +1,138 @@
+//===- Formula.h - Boolean formulas -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_FORMULA_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_FORMULA_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <string>
+#include <type_traits>
+
+namespace clang::dataflow {
+
+/// Identifies an atomic boolean variable such as "V1".
+///
+/// This often represents an assertion that is interesting to the analysis but
+/// cannot immediately be proven true or false. For example:
+/// - V1 may mean "the program reaches this point",
+/// - V2 may mean "the parameter was null"
+///
+/// We can use these variables in formulas to describe relationships we know
+/// to be true: "if the parameter was null, the program reaches this point".
+/// We also express hypotheses as formulas, and use a SAT solver to check
+/// whether they are consistent with the known facts.
+enum class Atom : unsigned {};
+
+/// A boolean expression such as "true" or "V1 & !V2".
+/// Expressions may refer to boolean atomic variables. These should take a
+/// consistent true/false value across the set of formulas being considered.
+///
+/// (Formulas are always expressions in terms of boolean variables rather than
+/// e.g. integers because our underlying model is SAT rather than e.g. SMT).
+///
+/// Simple formulas such as "true" and "V1" are self-contained.
+/// Compound formulas connect other formulas, e.g. "(V1 & V2) || V3" is an 'or'
+/// formula, with pointers to its operands "(V1 & V2)" and "V3" stored as
+/// trailing objects.
+/// For this reason, Formulas are Arena-allocated and over-aligned.
+class Formula;
+class alignas(const Formula *) Formula {
+public:
+ enum Kind : unsigned {
+ /// A reference to an atomic boolean variable.
+ /// We name these e.g. "V3", where 3 == atom identity == Value.
+ AtomRef,
+ // FIXME: add const true/false rather than modeling them as variables
+
+ Not, /// True if its only operand is false
+
+ // These kinds connect two operands LHS and RHS
+ And, /// True if LHS and RHS are both true
+ Or, /// True if either LHS or RHS is true
+ Implies, /// True if LHS is false or RHS is true
+ Equal, /// True if LHS and RHS have the same truth value
+ };
+ Kind kind() const { return FormulaKind; }
+
+ Atom getAtom() const {
+ assert(kind() == AtomRef);
+ return static_cast<Atom>(Value);
+ }
+
+ ArrayRef<const Formula *> operands() const {
+ return ArrayRef(reinterpret_cast<Formula *const *>(this + 1),
+ numOperands(kind()));
+ }
+
+ using AtomNames = llvm::DenseMap<Atom, std::string>;
+ // Produce a stable human-readable representation of this formula.
+ // For example: (V3 | !(V1 & V2))
+ // If AtomNames is provided, these override the default V0, V1... names.
+ void print(llvm::raw_ostream &OS, const AtomNames * = nullptr) const;
+
+ // Allocate Formulas using Arena rather than calling this function directly.
+ static Formula &create(llvm::BumpPtrAllocator &Alloc, Kind K,
+ ArrayRef<const Formula *> Operands,
+ unsigned Value = 0);
+
+private:
+ Formula() = default;
+ Formula(const Formula &) = delete;
+ Formula &operator=(const Formula &) = delete;
+
+ static unsigned numOperands(Kind K) {
+ switch (K) {
+ case AtomRef:
+ return 0;
+ case Not:
+ return 1;
+ case And:
+ case Or:
+ case Implies:
+ case Equal:
+ return 2;
+ }
+ llvm_unreachable("Unhandled Formula::Kind enum");
+ }
+
+ Kind FormulaKind;
+ // Some kinds of formula have scalar values, e.g. AtomRef's atom number.
+ unsigned Value;
+};
+
+// The default names of atoms are V0, V1 etc in order of creation.
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, Atom A) {
+ return OS << 'V' << static_cast<unsigned>(A);
+}
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Formula &F) {
+ F.print(OS);
+ return OS;
+}
+
+} // namespace clang::dataflow
+namespace llvm {
+template <> struct DenseMapInfo<clang::dataflow::Atom> {
+ using Atom = clang::dataflow::Atom;
+ using Underlying = std::underlying_type_t<Atom>;
+
+ static inline Atom getEmptyKey() { return Atom(Underlying(-1)); }
+ static inline Atom getTombstoneKey() { return Atom(Underlying(-2)); }
+ static unsigned getHashValue(const Atom &Val) {
+ return DenseMapInfo<Underlying>::getHashValue(Underlying(Val));
+ }
+ static bool isEqual(const Atom &LHS, const Atom &RHS) { return LHS == RHS; }
+};
+} // namespace llvm
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Logger.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Logger.h
new file mode 100644
index 000000000000..6836488003a9
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Logger.h
@@ -0,0 +1,89 @@
+//===-- Logger.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_LOGGER_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_LOGGER_H
+
+#include "clang/Analysis/CFG.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+
+namespace clang::dataflow {
+// Forward declarations so we can use Logger anywhere in the framework.
+class ControlFlowContext;
+class TypeErasedDataflowAnalysis;
+struct TypeErasedDataflowAnalysisState;
+
+/// A logger is notified as the analysis progresses.
+/// It can produce a report of the analysis's findings and how it came to them.
+///
+/// The framework reports key structural events (e.g. traversal of blocks).
+/// The specific analysis can add extra details to be presented in context.
+class Logger {
+public:
+ /// Returns a dummy logger that does nothing.
+ static Logger &null();
+ /// A logger that simply writes messages to the specified ostream in real
+ /// time.
+ static std::unique_ptr<Logger> textual(llvm::raw_ostream &);
+ /// A logger that builds an HTML UI to inspect the analysis results.
+ /// Each function's analysis is written to a stream obtained from the factory.
+ static std::unique_ptr<Logger>
+ html(std::function<std::unique_ptr<llvm::raw_ostream>()>);
+
+ virtual ~Logger() = default;
+
+ /// Called by the framework as we start analyzing a new function or statement.
+ /// Forms a pair with endAnalysis().
+ virtual void beginAnalysis(const ControlFlowContext &,
+ TypeErasedDataflowAnalysis &) {}
+ virtual void endAnalysis() {}
+
+ // At any time during the analysis, we're computing the state for some target
+ // program point.
+
+ /// Called when we start (re-)processing a block in the CFG.
+ /// The target program point is the entry to the specified block.
+ /// Calls to log() describe transferBranch(), join() etc.
+ virtual void enterBlock(const CFGBlock &) {}
+ /// Called when we start processing an element in the current CFG block.
+ /// The target program point is after the specified element.
+ /// Calls to log() describe the transfer() function.
+ virtual void enterElement(const CFGElement &) {}
+
+ /// Records the analysis state computed for the current program point.
+ virtual void recordState(TypeErasedDataflowAnalysisState &) {}
+ /// Records that the analysis state for the current block is now final.
+ virtual void blockConverged() {}
+
+ /// Called by the framework or user code to report some event.
+ /// The event is associated with the current context (program point).
+ /// The Emit function produces the log message. It may or may not be called,
+ /// depending on if the logger is interested; it should have no side effects.
+ void log(llvm::function_ref<void(llvm::raw_ostream &)> Emit) {
+ if (!ShouldLogText)
+ return;
+ std::string S;
+ llvm::raw_string_ostream OS(S);
+ Emit(OS);
+ logText(S);
+ }
+
+protected:
+ /// ShouldLogText should be false for trivial loggers that ignore logText().
+ /// This allows log() to skip evaluating its Emit function.
+ Logger(bool ShouldLogText = true) : ShouldLogText(ShouldLogText) {}
+
+private:
+ bool ShouldLogText;
+ virtual void logText(llvm::StringRef) {}
+};
+
+} // namespace clang::dataflow
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MatchSwitch.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MatchSwitch.h
index 37894ab37dd8..9a298478c510 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MatchSwitch.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MatchSwitch.h
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the `MatchSwitch` abstraction for building a "switch"
+// This file defines the `ASTMatchSwitch` abstraction for building a "switch"
// statement, where each case of the switch is defined by an AST matcher. The
// cases are considered in order, like pattern matching in functional
// languages.
@@ -17,8 +17,7 @@
//
//===----------------------------------------------------------------------===//
//
-// FIXME: Rename to ASTMatchSwitch.h and update documentation when all usages of
-// `MatchSwitch` are updated to `ASTMatchSwitch<Stmt>`
+// FIXME: Rename to ASTMatchSwitch.h
#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_MATCHSWITCH_H_
#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_MATCHSWITCH_H_
@@ -69,11 +68,6 @@ template <typename BaseT, typename State, typename Result = void>
using ASTMatchSwitch =
std::function<Result(const BaseT &, ASTContext &, State &)>;
-// FIXME: Remove this alias when all usages of `MatchSwitch` are updated to
-// `ASTMatchSwitch<Stmt>`.
-template <typename State, typename Result = void>
-using MatchSwitch = ASTMatchSwitch<Stmt, State, Result>;
-
/// Collects cases of a "match switch": a collection of matchers paired with
/// callbacks, which together define a switch that can be applied to a node
/// whose type derives from `BaseT`. This structure can simplify the definition
@@ -171,11 +165,6 @@ private:
std::vector<MatchSwitchAction<BaseT, State, Result>> Actions;
};
-// FIXME: Remove this alias when all usages of `MatchSwitchBuilder` are updated
-// to `ASTMatchSwitchBuilder<Stmt>`.
-template <typename State, typename Result = void>
-using MatchSwitchBuilder = ASTMatchSwitchBuilder<Stmt, State, Result>;
-
} // namespace dataflow
} // namespace clang
#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_MATCHSWITCH_H_
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/ChromiumCheckModel.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/ChromiumCheckModel.h
index e65f40b0b726..b4315e41d79f 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/ChromiumCheckModel.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/ChromiumCheckModel.h
@@ -13,7 +13,6 @@
#define CLANG_ANALYSIS_FLOWSENSITIVE_MODELS_CHROMIUMCHECKMODEL_H
#include "clang/AST/DeclCXX.h"
-#include "clang/AST/Stmt.h"
#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "llvm/ADT/DenseSet.h"
@@ -26,7 +25,7 @@ namespace dataflow {
class ChromiumCheckModel : public DataflowModel {
public:
ChromiumCheckModel() = default;
- bool transfer(const CFGElement *Element, Environment &Env) override;
+ bool transfer(const CFGElement &Element, Environment &Env) override;
private:
/// Declarations for `::logging::CheckError::.*Check`, lazily initialized.
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.h
index 2d52ee5fc846..23dfdd49e94d 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.h
@@ -52,7 +52,7 @@ public:
static NoopLattice initialElement() { return {}; }
- void transfer(const CFGElement *Elt, NoopLattice &L, Environment &Env);
+ void transfer(const CFGElement &Elt, NoopLattice &L, Environment &Env);
ComparisonResult compare(QualType Type, const Value &Val1,
const Environment &Env1, const Value &Val2,
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/NoopAnalysis.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/NoopAnalysis.h
index bf27ec3a58dd..e28a7f902faf 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/NoopAnalysis.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/NoopAnalysis.h
@@ -38,7 +38,7 @@ public:
static NoopLattice initialElement() { return {}; }
- void transfer(const CFGElement *E, NoopLattice &L, Environment &Env) {}
+ void transfer(const CFGElement &E, NoopLattice &L, Environment &Env) {}
};
} // namespace dataflow
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/RecordOps.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/RecordOps.h
new file mode 100644
index 000000000000..c9c302b9199b
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/RecordOps.h
@@ -0,0 +1,76 @@
+//===-- RecordOps.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Operations on records (structs, classes, and unions).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_RECORDOPS_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_RECORDOPS_H
+
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+
+namespace clang {
+namespace dataflow {
+
+/// Copies a record (struct, class, or union) from `Src` to `Dst`.
+///
+/// This performs a deep copy, i.e. it copies every field and recurses on
+/// fields of record type. It also copies properties from the `StructValue`
+/// associated with `Src` to the `StructValue` associated with `Dst` (if these
+/// `StructValue`s exist).
+///
+/// If there is a `StructValue` associated with `Dst` in the environment, this
+/// function creates a new `StructValue` and associates it with `Dst`; clients
+/// need to be aware of this and must not assume that the `StructValue`
+/// associated with `Dst` remains the same after the call.
+///
+/// We create a new `StructValue` rather than modifying properties on the old
+/// `StructValue` because the old `StructValue` may be shared with other
+/// `Environment`s, and we don't want changes to properties to be visible there.
+///
+/// Requirements:
+///
+/// `Src` and `Dst` must have the same canonical unqualified type.
+void copyRecord(AggregateStorageLocation &Src, AggregateStorageLocation &Dst,
+ Environment &Env);
+
+/// Returns whether the records `Loc1` and `Loc2` are equal.
+///
+/// Values for `Loc1` are retrieved from `Env1`, and values for `Loc2` are
+/// retrieved from `Env2`. A convenience overload retrieves values for `Loc1`
+/// and `Loc2` from the same environment.
+///
+/// This performs a deep comparison, i.e. it compares every field and recurses
+/// on fields of record type. Fields of reference type compare equal if they
+/// refer to the same storage location. If `StructValue`s are associated with
+/// `Loc1` and `Loc2`, it also compares the properties on those `StructValue`s.
+///
+/// Note on how to interpret the result:
+/// - If this returns true, the records are guaranteed to be equal at runtime.
+/// - If this returns false, the records may still be equal at runtime; our
+/// analysis merely cannot guarantee that they will be equal.
+///
+/// Requirements:
+///
+/// `Src` and `Dst` must have the same canonical unqualified type.
+bool recordsEqual(const AggregateStorageLocation &Loc1, const Environment &Env1,
+ const AggregateStorageLocation &Loc2,
+ const Environment &Env2);
+
+inline bool recordsEqual(const AggregateStorageLocation &Loc1,
+ const AggregateStorageLocation &Loc2,
+ const Environment &Env) {
+ return recordsEqual(Loc1, Env, Loc2, Env);
+}
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_RECORDOPS_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Solver.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Solver.h
index e4d450c8d12b..079f6802f241 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Solver.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Solver.h
@@ -14,10 +14,12 @@
#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_SOLVER_H
#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_SOLVER_H
-#include "clang/Analysis/FlowSensitive/Value.h"
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DenseSet.h"
#include <optional>
+#include <vector>
namespace clang {
namespace dataflow {
@@ -45,8 +47,7 @@ public:
/// Constructs a result indicating that the queried boolean formula is
/// satisfiable. The result will hold a solution found by the solver.
- static Result
- Satisfiable(llvm::DenseMap<AtomicBoolValue *, Assignment> Solution) {
+ static Result Satisfiable(llvm::DenseMap<Atom, Assignment> Solution) {
return Result(Status::Satisfiable, std::move(Solution));
}
@@ -64,19 +65,17 @@ public:
/// Returns a truth assignment to boolean values that satisfies the queried
/// boolean formula if available. Otherwise, an empty optional is returned.
- std::optional<llvm::DenseMap<AtomicBoolValue *, Assignment>>
- getSolution() const {
+ std::optional<llvm::DenseMap<Atom, Assignment>> getSolution() const {
return Solution;
}
private:
- Result(
- enum Status SATCheckStatus,
- std::optional<llvm::DenseMap<AtomicBoolValue *, Assignment>> Solution)
+ Result(Status SATCheckStatus,
+ std::optional<llvm::DenseMap<Atom, Assignment>> Solution)
: SATCheckStatus(SATCheckStatus), Solution(std::move(Solution)) {}
Status SATCheckStatus;
- std::optional<llvm::DenseMap<AtomicBoolValue *, Assignment>> Solution;
+ std::optional<llvm::DenseMap<Atom, Assignment>> Solution;
};
virtual ~Solver() = default;
@@ -87,9 +86,12 @@ public:
/// Requirements:
///
/// All elements in `Vals` must not be null.
- virtual Result solve(llvm::DenseSet<BoolValue *> Vals) = 0;
+ virtual Result solve(llvm::ArrayRef<const Formula *> Vals) = 0;
};
+llvm::raw_ostream &operator<<(llvm::raw_ostream &, const Solver::Result &);
+llvm::raw_ostream &operator<<(llvm::raw_ostream &, Solver::Result::Assignment);
+
} // namespace dataflow
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h
index f7ea7eb174c5..62e3d5e59c65 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h
@@ -17,6 +17,10 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Debug.h"
+#include <cassert>
+
+#define DEBUG_TYPE "dataflow"
namespace clang {
namespace dataflow {
@@ -29,7 +33,9 @@ class StorageLocation {
public:
enum class Kind { Scalar, Aggregate };
- StorageLocation(Kind LocKind, QualType Type) : LocKind(LocKind), Type(Type) {}
+ StorageLocation(Kind LocKind, QualType Type) : LocKind(LocKind), Type(Type) {
+ assert(Type.isNull() || !Type->isReferenceType());
+ }
// Non-copyable because addresses of storage locations are used as their
// identities throughout framework and user code. The framework is responsible
@@ -65,36 +71,89 @@ public:
/// struct with public members. The child map is flat, so when used for a struct
/// or class type, all accessible members of base struct and class types are
/// directly accesible as children of this location.
+///
+/// The storage location for a field of reference type may be null. This
+/// typically occurs in one of two situations:
+/// - The record has not been fully initialized.
+/// - The maximum depth for modelling a self-referential data structure has been
+/// reached.
+/// Storage locations for fields of all other types must be non-null.
+///
/// FIXME: Currently, the storage location of unions is modelled the same way as
/// that of structs or classes. Eventually, we need to change this modelling so
/// that all of the members of a given union have the same storage location.
class AggregateStorageLocation final : public StorageLocation {
public:
- explicit AggregateStorageLocation(QualType Type)
- : AggregateStorageLocation(
- Type, llvm::DenseMap<const ValueDecl *, StorageLocation *>()) {}
+ using FieldToLoc = llvm::DenseMap<const ValueDecl *, StorageLocation *>;
- AggregateStorageLocation(
- QualType Type,
- llvm::DenseMap<const ValueDecl *, StorageLocation *> Children)
- : StorageLocation(Kind::Aggregate, Type), Children(std::move(Children)) {}
+ explicit AggregateStorageLocation(QualType Type)
+ : AggregateStorageLocation(Type, FieldToLoc()) {}
+
+ AggregateStorageLocation(QualType Type, FieldToLoc TheChildren)
+ : StorageLocation(Kind::Aggregate, Type),
+ Children(std::move(TheChildren)) {
+ assert(!Type.isNull());
+ assert(Type->isRecordType());
+ assert([this] {
+ for (auto [Field, Loc] : Children) {
+ if (!Field->getType()->isReferenceType() && Loc == nullptr)
+ return false;
+ }
+ return true;
+ }());
+ }
static bool classof(const StorageLocation *Loc) {
return Loc->getKind() == Kind::Aggregate;
}
/// Returns the child storage location for `D`.
- StorageLocation &getChild(const ValueDecl &D) const {
+ ///
+ /// May return null if `D` has reference type; guaranteed to return non-null
+ /// in all other cases.
+ ///
+ /// Note that it is an error to call this with a field that does not exist.
+ /// The function does not return null in this case.
+ StorageLocation *getChild(const ValueDecl &D) const {
auto It = Children.find(&D);
+ LLVM_DEBUG({
+ if (It == Children.end()) {
+ llvm::dbgs() << "Couldn't find child " << D.getNameAsString()
+ << " on StorageLocation " << this << " of type "
+ << getType() << "\n";
+ llvm::dbgs() << "Existing children:\n";
+ for ([[maybe_unused]] auto [Field, Loc] : Children) {
+ llvm::dbgs() << Field->getNameAsString() << "\n";
+ }
+ }
+ });
assert(It != Children.end());
- return *It->second;
+ return It->second;
+ }
+
+ /// Changes the child storage location for a field `D` of reference type.
+ /// All other fields cannot change their storage location and always retain
+ /// the storage location passed to the `AggregateStorageLocation` constructor.
+ ///
+ /// Requirements:
+ ///
+ /// `D` must have reference type.
+ void setChild(const ValueDecl &D, StorageLocation *Loc) {
+ assert(D.getType()->isReferenceType());
+ Children[&D] = Loc;
+ }
+
+ llvm::iterator_range<FieldToLoc::const_iterator> children() const {
+ return {Children.begin(), Children.end()};
}
private:
- llvm::DenseMap<const ValueDecl *, StorageLocation *> Children;
+ FieldToLoc Children;
};
} // namespace dataflow
} // namespace clang
+#undef DEBUG_TYPE
+
#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_STORAGELOCATION_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h
index 78a426ed94dd..58bb77c4905c 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h
@@ -17,6 +17,7 @@
#include "clang/AST/Stmt.h"
#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
namespace clang {
namespace dataflow {
@@ -24,12 +25,18 @@ namespace dataflow {
/// Maps statements to the environments of basic blocks that contain them.
class StmtToEnvMap {
public:
- virtual ~StmtToEnvMap() = default;
-
- /// Returns the environment of the basic block that contains `S` or nullptr if
- /// there isn't one.
- /// FIXME: Ensure that the result can't be null and return a const reference.
- virtual const Environment *getEnvironment(const Stmt &S) const = 0;
+ StmtToEnvMap(const ControlFlowContext &CFCtx,
+ llvm::ArrayRef<std::optional<TypeErasedDataflowAnalysisState>>
+ BlockToState)
+ : CFCtx(CFCtx), BlockToState(BlockToState) {}
+
+ /// Returns the environment of the basic block that contains `S`.
+ /// The result is guaranteed never to be null.
+ const Environment *getEnvironment(const Stmt &S) const;
+
+private:
+ const ControlFlowContext &CFCtx;
+ llvm::ArrayRef<std::optional<TypeErasedDataflowAnalysisState>> BlockToState;
};
/// Evaluates `S` and updates `Env` accordingly.
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h
index 1d7962e9f67a..88a33d19f7d8 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h
@@ -72,7 +72,7 @@ public:
/// Joins two type-erased lattice elements by computing their least upper
/// bound. Places the join result in the left element and returns an effect
/// indicating whether any changes were made to it.
- virtual LatticeJoinEffect joinTypeErased(TypeErasedLattice &,
+ virtual TypeErasedLattice joinTypeErased(const TypeErasedLattice &,
const TypeErasedLattice &) = 0;
/// Chooses a lattice element that approximates the current element at a
@@ -96,7 +96,7 @@ public:
/// Applies the analysis transfer function for a given control flow graph
/// element and type-erased lattice element.
- virtual void transferTypeErased(const CFGElement *, TypeErasedLattice &,
+ virtual void transferTypeErased(const CFGElement &, TypeErasedLattice &,
Environment &) = 0;
/// Applies the analysis transfer function for a given edge from a CFG block
@@ -104,6 +104,7 @@ public:
/// @param Stmt The condition which is responsible for the split in the CFG.
/// @param Branch True if the edge goes to the basic block where the
/// condition is true.
+ // FIXME: Change `Stmt` argument to a reference.
virtual void transferBranchTypeErased(bool Branch, const Stmt *,
TypeErasedLattice &, Environment &) = 0;
@@ -125,6 +126,10 @@ struct TypeErasedDataflowAnalysisState {
TypeErasedDataflowAnalysisState(TypeErasedLattice Lattice, Environment Env)
: Lattice(std::move(Lattice)), Env(std::move(Env)) {}
+
+ TypeErasedDataflowAnalysisState fork() const {
+ return TypeErasedDataflowAnalysisState(Lattice, Env.fork());
+ }
};
/// Transfers the state of a basic block by evaluating each of its elements in
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h
index 32d10a348948..7d9a7b7d2825 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h
@@ -15,11 +15,11 @@
#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_VALUE_H
#include "clang/AST/Decl.h"
+#include "clang/Analysis/FlowSensitive/Formula.h"
#include "clang/Analysis/FlowSensitive/StorageLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <utility>
@@ -38,14 +38,10 @@ public:
Pointer,
Struct,
- // Synthetic boolean values are either atomic values or logical connectives.
+ // TODO: Top values should not be need to be type-specific.
TopBool,
AtomicBool,
- Conjunction,
- Disjunction,
- Negation,
- Implication,
- Biconditional,
+ FormulaBool,
};
explicit Value(Kind ValKind) : ValKind(ValKind) {}
@@ -63,8 +59,7 @@ public:
/// Returns the value of the synthetic property with the given `Name` or null
/// if the property isn't assigned a value.
Value *getProperty(llvm::StringRef Name) const {
- auto It = Properties.find(Name);
- return It == Properties.end() ? nullptr : It->second;
+ return Properties.lookup(Name);
}
/// Assigns `Val` as the value of the synthetic property with the given
@@ -73,6 +68,11 @@ public:
Properties.insert_or_assign(Name, &Val);
}
+ llvm::iterator_range<llvm::StringMap<Value *>::const_iterator>
+ properties() const {
+ return {Properties.begin(), Properties.end()};
+ }
+
private:
Kind ValKind;
llvm::StringMap<Value *> Properties;
@@ -91,151 +91,68 @@ bool areEquivalentValues(const Value &Val1, const Value &Val2);
/// Models a boolean.
class BoolValue : public Value {
+ const Formula *F;
+
public:
- explicit BoolValue(Kind ValueKind) : Value(ValueKind) {}
+ explicit BoolValue(Kind ValueKind, const Formula &F)
+ : Value(ValueKind), F(&F) {}
static bool classof(const Value *Val) {
return Val->getKind() == Kind::TopBool ||
Val->getKind() == Kind::AtomicBool ||
- Val->getKind() == Kind::Conjunction ||
- Val->getKind() == Kind::Disjunction ||
- Val->getKind() == Kind::Negation ||
- Val->getKind() == Kind::Implication ||
- Val->getKind() == Kind::Biconditional;
+ Val->getKind() == Kind::FormulaBool;
}
+
+ const Formula &formula() const { return *F; }
};
-/// Models the trivially true formula, which is Top in the lattice of boolean
-/// formulas.
+/// A TopBoolValue represents a boolean that is explicitly unconstrained.
+///
+/// This is equivalent to an AtomicBoolValue that does not appear anywhere
+/// else in a system of formula.
+/// Knowing the value is unconstrained is useful when e.g. reasoning about
+/// convergence.
class TopBoolValue final : public BoolValue {
public:
- TopBoolValue() : BoolValue(Kind::TopBool) {}
+ TopBoolValue(const Formula &F) : BoolValue(Kind::TopBool, F) {
+ assert(F.kind() == Formula::AtomRef);
+ }
static bool classof(const Value *Val) {
return Val->getKind() == Kind::TopBool;
}
-};
-/// Models an atomic boolean.
-class AtomicBoolValue : public BoolValue {
-public:
- explicit AtomicBoolValue() : BoolValue(Kind::AtomicBool) {}
-
- static bool classof(const Value *Val) {
- return Val->getKind() == Kind::AtomicBool;
- }
+ Atom getAtom() const { return formula().getAtom(); }
};
-/// Models a boolean conjunction.
-// FIXME: Consider representing binary and unary boolean operations similar
-// to how they are represented in the AST. This might become more pressing
-// when such operations need to be added for other data types.
-class ConjunctionValue : public BoolValue {
+/// Models an atomic boolean.
+///
+/// FIXME: Merge this class into FormulaBoolValue.
+/// When we want to specify atom identity, use Atom.
+class AtomicBoolValue final : public BoolValue {
public:
- explicit ConjunctionValue(BoolValue &LeftSubVal, BoolValue &RightSubVal)
- : BoolValue(Kind::Conjunction), LeftSubVal(LeftSubVal),
- RightSubVal(RightSubVal) {}
-
- static bool classof(const Value *Val) {
- return Val->getKind() == Kind::Conjunction;
+ explicit AtomicBoolValue(const Formula &F) : BoolValue(Kind::AtomicBool, F) {
+ assert(F.kind() == Formula::AtomRef);
}
- /// Returns the left sub-value of the conjunction.
- BoolValue &getLeftSubValue() const { return LeftSubVal; }
-
- /// Returns the right sub-value of the conjunction.
- BoolValue &getRightSubValue() const { return RightSubVal; }
-
-private:
- BoolValue &LeftSubVal;
- BoolValue &RightSubVal;
-};
-
-/// Models a boolean disjunction.
-class DisjunctionValue : public BoolValue {
-public:
- explicit DisjunctionValue(BoolValue &LeftSubVal, BoolValue &RightSubVal)
- : BoolValue(Kind::Disjunction), LeftSubVal(LeftSubVal),
- RightSubVal(RightSubVal) {}
-
static bool classof(const Value *Val) {
- return Val->getKind() == Kind::Disjunction;
- }
-
- /// Returns the left sub-value of the disjunction.
- BoolValue &getLeftSubValue() const { return LeftSubVal; }
-
- /// Returns the right sub-value of the disjunction.
- BoolValue &getRightSubValue() const { return RightSubVal; }
-
-private:
- BoolValue &LeftSubVal;
- BoolValue &RightSubVal;
-};
-
-/// Models a boolean negation.
-class NegationValue : public BoolValue {
-public:
- explicit NegationValue(BoolValue &SubVal)
- : BoolValue(Kind::Negation), SubVal(SubVal) {}
-
- static bool classof(const Value *Val) {
- return Val->getKind() == Kind::Negation;
+ return Val->getKind() == Kind::AtomicBool;
}
- /// Returns the sub-value of the negation.
- BoolValue &getSubVal() const { return SubVal; }
-
-private:
- BoolValue &SubVal;
+ Atom getAtom() const { return formula().getAtom(); }
};
-/// Models a boolean implication.
-///
-/// Equivalent to `!LHS v RHS`.
-class ImplicationValue : public BoolValue {
+/// Models a compound boolean formula.
+class FormulaBoolValue final : public BoolValue {
public:
- explicit ImplicationValue(BoolValue &LeftSubVal, BoolValue &RightSubVal)
- : BoolValue(Kind::Implication), LeftSubVal(LeftSubVal),
- RightSubVal(RightSubVal) {}
-
- static bool classof(const Value *Val) {
- return Val->getKind() == Kind::Implication;
+ explicit FormulaBoolValue(const Formula &F)
+ : BoolValue(Kind::FormulaBool, F) {
+ assert(F.kind() != Formula::AtomRef && "For now, use AtomicBoolValue");
}
- /// Returns the left sub-value of the implication.
- BoolValue &getLeftSubValue() const { return LeftSubVal; }
-
- /// Returns the right sub-value of the implication.
- BoolValue &getRightSubValue() const { return RightSubVal; }
-
-private:
- BoolValue &LeftSubVal;
- BoolValue &RightSubVal;
-};
-
-/// Models a boolean biconditional.
-///
-/// Equivalent to `(LHS ^ RHS) v (!LHS ^ !RHS)`.
-class BiconditionalValue : public BoolValue {
-public:
- explicit BiconditionalValue(BoolValue &LeftSubVal, BoolValue &RightSubVal)
- : BoolValue(Kind::Biconditional), LeftSubVal(LeftSubVal),
- RightSubVal(RightSubVal) {}
-
static bool classof(const Value *Val) {
- return Val->getKind() == Kind::Biconditional;
+ return Val->getKind() == Kind::FormulaBool;
}
-
- /// Returns the left sub-value of the biconditional.
- BoolValue &getLeftSubValue() const { return LeftSubVal; }
-
- /// Returns the right sub-value of the biconditional.
- BoolValue &getRightSubValue() const { return RightSubVal; }
-
-private:
- BoolValue &LeftSubVal;
- BoolValue &RightSubVal;
};
/// Models an integer.
@@ -281,34 +198,59 @@ private:
StorageLocation &PointeeLoc;
};
-/// Models a value of `struct` or `class` type, with a flat map of fields to
-/// child storage locations, containing all accessible members of base struct
-/// and class types.
+/// Models a value of `struct` or `class` type.
+/// In C++, prvalues of class type serve only a limited purpose: They can only
+/// be used to initialize a result object. It is not possible to access member
+/// variables or call member functions on a prvalue of class type.
+/// Correspondingly, `StructValue` also serves only two limited purposes:
+/// - It conveys a prvalue of class type from the place where the object is
+/// constructed to the result object that it initializes.
+///
+/// When creating a prvalue of class type, we already need a storage location
+/// for `this`, even though prvalues are otherwise not associated with storage
+/// locations. `StructValue` is therefore essentially a wrapper for a storage
+/// location, which is then used to set the storage location for the result
+/// object when we process the AST node for that result object.
+///
+/// For example:
+/// MyStruct S = MyStruct(3);
+///
+/// In this example, `MyStruct(3) is a prvalue, which is modeled as a
+/// `StructValue` that wraps an `AbstractStorageLocation`. This
+// `AbstractStorageLocation` is then used as the storage location for `S`.
+///
+/// - It allows properties to be associated with an object of class type.
+/// Note that when doing so, you should avoid mutating the properties of an
+/// existing `StructValue` in place, as these changes would be visible to
+/// other `Environment`s that share the same `StructValue`. Instead, associate
+/// a new `StructValue` with the `AggregateStorageLocation` and set the
+/// properties on this new `StructValue`. (See also `refreshStructValue()` in
+/// DataflowEnvironment.h, which makes this easy.)
+/// Note also that this implies that it is common for the same
+/// `AggregateStorageLocation` to be associated with different `StructValue`s
+/// in different environments.
+/// Over time, we may eliminate `StructValue` entirely. See also the discussion
+/// here: https://reviews.llvm.org/D155204#inline-1503204
class StructValue final : public Value {
public:
- StructValue() : StructValue(llvm::DenseMap<const ValueDecl *, Value *>()) {}
-
- explicit StructValue(llvm::DenseMap<const ValueDecl *, Value *> Children)
- : Value(Kind::Struct), Children(std::move(Children)) {}
+ explicit StructValue(AggregateStorageLocation &Loc)
+ : Value(Kind::Struct), Loc(Loc) {}
static bool classof(const Value *Val) {
return Val->getKind() == Kind::Struct;
}
- /// Returns the child value that is assigned for `D` or null if the child is
- /// not initialized.
- Value *getChild(const ValueDecl &D) const {
- auto It = Children.find(&D);
- if (It == Children.end())
- return nullptr;
- return It->second;
- }
+ /// Returns the storage location that this `StructValue` is associated with.
+ AggregateStorageLocation &getAggregateLoc() const { return Loc; }
- /// Assigns `Val` as the child value for `D`.
- void setChild(const ValueDecl &D, Value &Val) { Children[&D] = &Val; }
+ /// Convenience function that returns the child storage location for `Field`.
+ /// See also the documentation for `AggregateStorageLocation::getChild()`.
+ StorageLocation *getChild(const ValueDecl &Field) const {
+ return Loc.getChild(Field);
+ }
private:
- llvm::DenseMap<const ValueDecl *, Value *> Children;
+ AggregateStorageLocation &Loc;
};
raw_ostream &operator<<(raw_ostream &OS, const Value &Val);
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h
index 702da97349da..5448eecf6d41 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h
@@ -14,9 +14,10 @@
#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_WATCHEDLITERALSSOLVER_H
#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_WATCHEDLITERALSSOLVER_H
+#include "clang/Analysis/FlowSensitive/Formula.h"
#include "clang/Analysis/FlowSensitive/Solver.h"
-#include "clang/Analysis/FlowSensitive/Value.h"
-#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/ArrayRef.h"
+#include <limits>
namespace clang {
namespace dataflow {
@@ -27,8 +28,28 @@ namespace dataflow {
/// single "watched" literal per clause, and uses a set of "active" variables
/// for unit propagation.
class WatchedLiteralsSolver : public Solver {
+ // Count of the iterations of the main loop of the solver. This spans *all*
+ // calls to the underlying solver across the life of this object. It is
+ // reduced with every (non-trivial) call to the solver.
+ //
+ // We give control over the abstract count of iterations instead of concrete
+ // measurements like CPU cycles or time to ensure deterministic results.
+ std::int64_t MaxIterations = std::numeric_limits<std::int64_t>::max();
+
public:
- Result solve(llvm::DenseSet<BoolValue *> Vals) override;
+ WatchedLiteralsSolver() = default;
+
+ // `Work` specifies a computational limit on the solver. Units of "work"
+ // roughly correspond to attempts to assign a value to a single
+ // variable. Since the algorithm is exponential in the number of variables,
+ // this is the most direct (abstract) unit to target.
+ explicit WatchedLiteralsSolver(std::int64_t WorkLimit)
+ : MaxIterations(WorkLimit) {}
+
+ Result solve(llvm::ArrayRef<const Formula *> Vals) override;
+
+ // The solver reached its maximum number of iterations.
+ bool reachedLimit() const { return MaxIterations == 0; }
};
} // namespace dataflow
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h b/contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h
index 6dba0582c8dd..b9339570e1ae 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h
@@ -95,35 +95,33 @@ private:
llvm::PointerIntPair<const ProgramPointTag *, 2, unsigned> Tag;
+ CFGBlock::ConstCFGElementRef ElemRef = {nullptr, 0};
+
protected:
ProgramPoint() = default;
- ProgramPoint(const void *P,
- Kind k,
- const LocationContext *l,
- const ProgramPointTag *tag = nullptr)
- : Data1(P),
- Data2(nullptr, (((unsigned) k) >> 0) & 0x3),
- L(l, (((unsigned) k) >> 2) & 0x3),
- Tag(tag, (((unsigned) k) >> 4) & 0x3) {
- assert(getKind() == k);
- assert(getLocationContext() == l);
- assert(getData1() == P);
- }
-
- ProgramPoint(const void *P1,
- const void *P2,
- Kind k,
- const LocationContext *l,
- const ProgramPointTag *tag = nullptr)
- : Data1(P1),
- Data2(P2, (((unsigned) k) >> 0) & 0x3),
- L(l, (((unsigned) k) >> 2) & 0x3),
- Tag(tag, (((unsigned) k) >> 4) & 0x3) {}
+ ProgramPoint(const void *P, Kind k, const LocationContext *l,
+ const ProgramPointTag *tag = nullptr,
+ CFGBlock::ConstCFGElementRef ElemRef = {nullptr, 0})
+ : Data1(P), Data2(nullptr, (((unsigned)k) >> 0) & 0x3),
+ L(l, (((unsigned)k) >> 2) & 0x3), Tag(tag, (((unsigned)k) >> 4) & 0x3),
+ ElemRef(ElemRef) {
+ assert(getKind() == k);
+ assert(getLocationContext() == l);
+ assert(getData1() == P);
+ }
+
+ ProgramPoint(const void *P1, const void *P2, Kind k, const LocationContext *l,
+ const ProgramPointTag *tag = nullptr,
+ CFGBlock::ConstCFGElementRef ElemRef = {nullptr, 0})
+ : Data1(P1), Data2(P2, (((unsigned)k) >> 0) & 0x3),
+ L(l, (((unsigned)k) >> 2) & 0x3), Tag(tag, (((unsigned)k) >> 4) & 0x3),
+ ElemRef(ElemRef) {}
protected:
const void *getData1() const { return Data1; }
const void *getData2() const { return Data2.getPointer(); }
void setData2(const void *d) { Data2.setPointer(d); }
+ CFGBlock::ConstCFGElementRef getElementRef() const { return ElemRef; }
public:
/// Create a new ProgramPoint object that is the same as the original
@@ -190,17 +188,13 @@ public:
}
bool operator==(const ProgramPoint & RHS) const {
- return Data1 == RHS.Data1 &&
- Data2 == RHS.Data2 &&
- L == RHS.L &&
- Tag == RHS.Tag;
+ return Data1 == RHS.Data1 && Data2 == RHS.Data2 && L == RHS.L &&
+ Tag == RHS.Tag && ElemRef == RHS.ElemRef;
}
bool operator!=(const ProgramPoint &RHS) const {
- return Data1 != RHS.Data1 ||
- Data2 != RHS.Data2 ||
- L != RHS.L ||
- Tag != RHS.Tag;
+ return Data1 != RHS.Data1 || Data2 != RHS.Data2 || L != RHS.L ||
+ Tag != RHS.Tag || ElemRef != RHS.ElemRef;
}
void Profile(llvm::FoldingSetNodeID& ID) const {
@@ -209,6 +203,8 @@ public:
ID.AddPointer(getData2());
ID.AddPointer(getLocationContext());
ID.AddPointer(getTag());
+ ID.AddPointer(ElemRef.getParent());
+ ID.AddInteger(ElemRef.getIndexInBlock());
}
void printJson(llvm::raw_ostream &Out, const char *NL = "\n") const;
@@ -266,6 +262,7 @@ private:
}
};
+// FIXME: Eventually we want to take a CFGElementRef as parameter here too.
class StmtPoint : public ProgramPoint {
public:
StmtPoint(const Stmt *S, const void *p2, Kind k, const LocationContext *L,
@@ -557,8 +554,9 @@ private:
class ImplicitCallPoint : public ProgramPoint {
public:
ImplicitCallPoint(const Decl *D, SourceLocation Loc, Kind K,
- const LocationContext *L, const ProgramPointTag *Tag)
- : ProgramPoint(Loc.getPtrEncoding(), D, K, L, Tag) {}
+ const LocationContext *L, const ProgramPointTag *Tag,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : ProgramPoint(Loc.getPtrEncoding(), D, K, L, Tag, ElemRef) {}
const Decl *getDecl() const { return static_cast<const Decl *>(getData2()); }
SourceLocation getLocation() const {
@@ -581,8 +579,9 @@ private:
class PreImplicitCall : public ImplicitCallPoint {
public:
PreImplicitCall(const Decl *D, SourceLocation Loc, const LocationContext *L,
+ CFGBlock::ConstCFGElementRef ElemRef,
const ProgramPointTag *Tag = nullptr)
- : ImplicitCallPoint(D, Loc, PreImplicitCallKind, L, Tag) {}
+ : ImplicitCallPoint(D, Loc, PreImplicitCallKind, L, Tag, ElemRef) {}
private:
friend class ProgramPoint;
@@ -598,8 +597,9 @@ private:
class PostImplicitCall : public ImplicitCallPoint {
public:
PostImplicitCall(const Decl *D, SourceLocation Loc, const LocationContext *L,
+ CFGBlock::ConstCFGElementRef ElemRef,
const ProgramPointTag *Tag = nullptr)
- : ImplicitCallPoint(D, Loc, PostImplicitCallKind, L, Tag) {}
+ : ImplicitCallPoint(D, Loc, PostImplicitCallKind, L, Tag, ElemRef) {}
private:
friend class ProgramPoint;
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Support/BumpVector.h b/contrib/llvm-project/clang/include/clang/Analysis/Support/BumpVector.h
index 74092dabbfda..6c3f11e99306 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Support/BumpVector.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Support/BumpVector.h
@@ -42,6 +42,15 @@ public:
Other.Alloc.setPointer(nullptr);
}
+ // The move assignment operator is defined as deleted pending further
+ // motivation.
+ BumpVectorContext &operator=(BumpVectorContext &&) = delete;
+
+ // The copy constructor and copy assignment operator are defined as deleted
+ // pending further motivation.
+ BumpVectorContext(const BumpVectorContext &) = delete;
+ BumpVectorContext &operator=(const BumpVectorContext &) = delete;
+
/// Construct a new BumpVectorContext that reuses an existing
/// BumpPtrAllocator. This BumpPtrAllocator is not destroyed when the
/// BumpVectorContext object is destroyed.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def b/contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def
index b98a07436e94..56af270e1d10 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def
@@ -49,6 +49,11 @@
SVE_TYPE(Name, Id, SingletonId)
#endif
+#ifndef SVE_OPAQUE_TYPE
+#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \
+ SVE_TYPE(Name, Id, SingletonId)
+#endif
+
//===- Vector point types -----------------------------------------------===//
@@ -124,7 +129,12 @@ SVE_VECTOR_TYPE("__clang_svfloat64x4_t", "svfloat64x4_t", SveFloat64x4, SveFloat
SVE_VECTOR_TYPE("__clang_svbfloat16x4_t", "svbfloat16x4_t", SveBFloat16x4, SveBFloat16x4Ty, 32, 16, true, false, true)
SVE_PREDICATE_TYPE("__SVBool_t", "__SVBool_t", SveBool, SveBoolTy, 16)
+SVE_PREDICATE_TYPE("__clang_svboolx2_t", "svboolx2_t", SveBoolx2, SveBoolx2Ty, 32)
+SVE_PREDICATE_TYPE("__clang_svboolx4_t", "svboolx4_t", SveBoolx4, SveBoolx4Ty, 64)
+
+SVE_OPAQUE_TYPE("__SVCount_t", "__SVCount_t", SveCount, SveCountTy)
#undef SVE_VECTOR_TYPE
#undef SVE_PREDICATE_TYPE
+#undef SVE_OPAQUE_TYPE
#undef SVE_TYPE
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AddressSpaces.h b/contrib/llvm-project/clang/include/clang/Basic/AddressSpaces.h
index 2f2c5d5826bc..7b723d508fff 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AddressSpaces.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/AddressSpaces.h
@@ -59,6 +59,9 @@ enum class LangAS : unsigned {
// HLSL specific address spaces.
hlsl_groupshared,
+ // Wasm specific address spaces.
+ wasm_funcref,
+
// This denotes the count of language-specific address spaces and also
// the offset added to the target-specific address spaces, which are usually
// specified by address space attributes __attribute__(address_space(n))).
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h b/contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h
index 949e54c8c030..ac26eb4a276d 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h
@@ -15,9 +15,9 @@
#ifndef LLVM_CLANG_BASIC_ALIGNEDALLOCATION_H
#define LLVM_CLANG_BASIC_ALIGNEDALLOCATION_H
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/VersionTuple.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Attr.td b/contrib/llvm-project/clang/include/clang/Basic/Attr.td
index d449a2fe7f8f..d5204b286966 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Attr.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/Attr.td
@@ -168,6 +168,12 @@ def FunctionLike : SubsetSubject<DeclBase,
[{S->getFunctionType(false) != nullptr}],
"functions, function pointers">;
+// Function Pointer is a stricter version of FunctionLike that only allows function
+// pointers.
+def FunctionPointer : SubsetSubject<DeclBase,
+ [{S->isFunctionPointerType()}],
+ "function pointers">;
+
def OpenCLKernelFunction
: SubsetSubject<Function, [{S->hasAttr<OpenCLKernelAttr>()}],
"kernel functions">;
@@ -287,26 +293,38 @@ class VariadicEnumArgument<string name, string type, list<string> values,
}
// This handles one spelling of an attribute.
-class Spelling<string name, string variety> {
+class Spelling<string name, string variety, int version = 1> {
string Name = name;
string Variety = variety;
+ int Version = version;
}
class GNU<string name> : Spelling<name, "GNU">;
class Declspec<string name> : Spelling<name, "Declspec">;
class Microsoft<string name> : Spelling<name, "Microsoft">;
class CXX11<string namespace, string name, int version = 1>
- : Spelling<name, "CXX11"> {
+ : Spelling<name, "CXX11", version> {
string Namespace = namespace;
- int Version = version;
}
class C2x<string namespace, string name, int version = 1>
- : Spelling<name, "C2x"> {
+ : Spelling<name, "C2x", version> {
string Namespace = namespace;
- int Version = version;
}
-class Keyword<string name> : Spelling<name, "Keyword">;
+class Keyword<string name, bit hasOwnParseRules>
+ : Spelling<name, "Keyword"> {
+ bit HasOwnParseRules = hasOwnParseRules;
+}
+
+// A keyword that can appear wherever a standard attribute can appear,
+// and that appertains to whatever a standard attribute would appertain to.
+// This is useful for things that affect semantics but that should otherwise
+// be treated like standard attributes.
+class RegularKeyword<string name> : Keyword<name, 0> {}
+
+// A keyword that has its own individual parsing rules.
+class CustomKeyword<string name> : Keyword<name, 1> {}
+
class Pragma<string namespace, string name> : Spelling<name, "Pragma"> {
string Namespace = namespace;
}
@@ -321,7 +339,8 @@ class GCC<string name, bit allowInC = 1> : Spelling<name, "GCC"> {
// The Clang spelling implies GNU<name>, CXX11<"clang", name>, and optionally,
// C2x<"clang", name>. This spelling should be used for any Clang-specific
// attributes.
-class Clang<string name, bit allowInC = 1> : Spelling<name, "Clang"> {
+class Clang<string name, bit allowInC = 1, int version = 1>
+ : Spelling<name, "Clang", version> {
bit AllowInC = allowInC;
}
@@ -408,6 +427,7 @@ def TargetRISCV : TargetArch<["riscv32", "riscv64"]>;
def TargetX86 : TargetArch<["x86"]>;
def TargetAnyX86 : TargetArch<["x86", "x86_64"]>;
def TargetWebAssembly : TargetArch<["wasm32", "wasm64"]>;
+def TargetNVPTX : TargetArch<["nvptx", "nvptx64"]>;
def TargetWindows : TargetSpec {
let OSes = ["Win32"];
}
@@ -702,13 +722,13 @@ def ArmBuiltinAlias : InheritableAttr, TargetSpecificAttr<TargetAnyArm> {
}
def Aligned : InheritableAttr {
- let Spellings = [GCC<"aligned">, Declspec<"align">, Keyword<"alignas">,
- Keyword<"_Alignas">];
+ let Spellings = [GCC<"aligned">, Declspec<"align">, CustomKeyword<"alignas">,
+ CustomKeyword<"_Alignas">];
let Args = [AlignedArgument<"Alignment", 1>];
let Accessors = [Accessor<"isGNU", [GCC<"aligned">]>,
- Accessor<"isC11", [Keyword<"_Alignas">]>,
- Accessor<"isAlignas", [Keyword<"alignas">,
- Keyword<"_Alignas">]>,
+ Accessor<"isC11", [CustomKeyword<"_Alignas">]>,
+ Accessor<"isAlignas", [CustomKeyword<"alignas">,
+ CustomKeyword<"_Alignas">]>,
Accessor<"isDeclspec",[Declspec<"align">]>];
let Documentation = [Undocumented];
}
@@ -749,7 +769,7 @@ def AlignNatural : InheritableAttr {
def AlwaysInline : DeclOrStmtAttr {
let Spellings = [GCC<"always_inline">, CXX11<"clang", "always_inline">,
- C2x<"clang", "always_inline">, Keyword<"__forceinline">];
+ C2x<"clang", "always_inline">, CustomKeyword<"__forceinline">];
let Accessors = [Accessor<"isClangAlwaysInline", [CXX11<"clang", "always_inline">,
C2x<"clang", "always_inline">]>];
let Subjects = SubjectList<[Function, Stmt], WarnDiag,
@@ -792,7 +812,8 @@ def XRayLogArgs : InheritableAttr {
def PatchableFunctionEntry
: InheritableAttr,
TargetSpecificAttr<TargetArch<
- ["aarch64", "aarch64_be", "riscv32", "riscv64", "x86", "x86_64"]>> {
+ ["aarch64", "aarch64_be", "loongarch32", "loongarch64", "riscv32",
+ "riscv64", "x86", "x86_64"]>> {
let Spellings = [GCC<"patchable_function_entry">];
let Subjects = SubjectList<[Function, ObjCMethod]>;
let Args = [UnsignedArgument<"Count">, DefaultIntArgument<"Offset", 0>];
@@ -826,7 +847,7 @@ def Annotate : InheritableParamAttr {
return AnnotateAttr::Create(Ctx, Annotation, nullptr, 0, CommonInfo);
}
static AnnotateAttr *CreateImplicit(ASTContext &Ctx, llvm::StringRef Annotation, \
- const AttributeCommonInfo &CommonInfo = {SourceRange{}}) {
+ const AttributeCommonInfo &CommonInfo) {
return AnnotateAttr::CreateImplicit(Ctx, Annotation, nullptr, 0, CommonInfo);
}
}];
@@ -871,7 +892,7 @@ def AVRSignal : InheritableAttr, TargetSpecificAttr<TargetAVR> {
}
def AsmLabel : InheritableAttr {
- let Spellings = [Keyword<"asm">, Keyword<"__asm__">];
+ let Spellings = [CustomKeyword<"asm">, CustomKeyword<"__asm__">];
let Args = [
// Label specifies the mangled name for the decl.
StringArgument<"Label">,
@@ -918,6 +939,7 @@ def Availability : InheritableAttr {
.Case("maccatalyst_app_extension", "macCatalyst (App Extension)")
.Case("swift", "Swift")
.Case("shadermodel", "HLSL ShaderModel")
+ .Case("ohos", "OpenHarmony OS")
.Default(llvm::StringRef());
}
static llvm::StringRef getPlatformNameSourceSpelling(llvm::StringRef Platform) {
@@ -958,10 +980,12 @@ static llvm::StringRef canonicalizePlatformName(llvm::StringRef Platform) {
}
def ExternalSourceSymbol : InheritableAttr {
- let Spellings = [Clang<"external_source_symbol">];
+ let Spellings = [Clang<"external_source_symbol", /*allowInC=*/1,
+ /*version=*/20230206>];
let Args = [StringArgument<"language", 1>,
StringArgument<"definedIn", 1>,
- BoolArgument<"generatedDeclaration", 1>];
+ BoolArgument<"generatedDeclaration", 1>,
+ StringArgument<"USR", 1>];
let HasCustomParsing = 1;
let Subjects = SubjectList<[Named]>;
let Documentation = [ExternalSourceSymbolDocs];
@@ -986,7 +1010,7 @@ def CarriesDependency : InheritableParamAttr {
}
def CDecl : DeclOrTypeAttr {
- let Spellings = [GCC<"cdecl">, Keyword<"__cdecl">, Keyword<"_cdecl">];
+ let Spellings = [GCC<"cdecl">, CustomKeyword<"__cdecl">, CustomKeyword<"_cdecl">];
// let Subjects = [Function, ObjCMethod];
let Documentation = [Undocumented];
}
@@ -1073,7 +1097,7 @@ def Cleanup : InheritableAttr {
let Spellings = [GCC<"cleanup">];
let Args = [DeclArgument<Function, "FunctionDecl">];
let Subjects = SubjectList<[LocalVar]>;
- let Documentation = [Undocumented];
+ let Documentation = [CleanupDocs];
}
def CmseNSEntry : InheritableAttr, TargetSpecificAttr<TargetARM> {
@@ -1111,10 +1135,10 @@ def Const : InheritableAttr {
def ConstInit : InheritableAttr {
// This attribute does not have a C [[]] spelling because it requires the
// CPlusPlus language option.
- let Spellings = [Keyword<"constinit">,
+ let Spellings = [CustomKeyword<"constinit">,
Clang<"require_constant_initialization", 0>];
let Subjects = SubjectList<[GlobalVar], ErrorDiag>;
- let Accessors = [Accessor<"isConstinit", [Keyword<"constinit">]>];
+ let Accessors = [Accessor<"isConstinit", [CustomKeyword<"constinit">]>];
let Documentation = [ConstInitDocs];
let LangOpts = [CPlusPlus];
let SimpleHandler = 1;
@@ -1211,6 +1235,12 @@ def CUDAHost : InheritableAttr {
}
def : MutualExclusions<[CUDAGlobal, CUDAHost]>;
+def NVPTXKernel : InheritableAttr, TargetSpecificAttr<TargetNVPTX> {
+ let Spellings = [Clang<"nvptx_kernel">];
+ let Subjects = SubjectList<[Function]>;
+ let Documentation = [Undocumented];
+}
+
def HIPManaged : InheritableAttr {
let Spellings = [GNU<"managed">, Declspec<"__managed__">];
let Subjects = SubjectList<[Var]>;
@@ -1259,7 +1289,7 @@ def SYCLSpecialClass: InheritableAttr {
}
def C11NoReturn : InheritableAttr {
- let Spellings = [Keyword<"_Noreturn">];
+ let Spellings = [CustomKeyword<"_Noreturn">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let SemaHandler = 0;
let Documentation = [C11NoReturnDocs];
@@ -1275,7 +1305,7 @@ def CXX11NoReturn : InheritableAttr {
// Similar to CUDA, OpenCL attributes do not receive a [[]] spelling because
// the specification does not expose them with one currently.
def OpenCLKernel : InheritableAttr {
- let Spellings = [Keyword<"__kernel">, Keyword<"kernel">];
+ let Spellings = [CustomKeyword<"__kernel">, CustomKeyword<"kernel">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [Undocumented];
let SimpleHandler = 1;
@@ -1299,26 +1329,28 @@ def OpenCLIntelReqdSubGroupSize: InheritableAttr {
// This attribute is both a type attribute, and a declaration attribute (for
// parameter variables).
def OpenCLAccess : Attr {
- let Spellings = [Keyword<"__read_only">, Keyword<"read_only">,
- Keyword<"__write_only">, Keyword<"write_only">,
- Keyword<"__read_write">, Keyword<"read_write">];
+ let Spellings = [CustomKeyword<"__read_only">, CustomKeyword<"read_only">,
+ CustomKeyword<"__write_only">, CustomKeyword<"write_only">,
+ CustomKeyword<"__read_write">, CustomKeyword<"read_write">];
let Subjects = SubjectList<[ParmVar, TypedefName], ErrorDiag>;
- let Accessors = [Accessor<"isReadOnly", [Keyword<"__read_only">,
- Keyword<"read_only">]>,
- Accessor<"isReadWrite", [Keyword<"__read_write">,
- Keyword<"read_write">]>,
- Accessor<"isWriteOnly", [Keyword<"__write_only">,
- Keyword<"write_only">]>];
+ let Accessors = [Accessor<"isReadOnly", [CustomKeyword<"__read_only">,
+ CustomKeyword<"read_only">]>,
+ Accessor<"isReadWrite", [CustomKeyword<"__read_write">,
+ CustomKeyword<"read_write">]>,
+ Accessor<"isWriteOnly", [CustomKeyword<"__write_only">,
+ CustomKeyword<"write_only">]>];
let Documentation = [OpenCLAccessDocs];
}
def OpenCLPrivateAddressSpace : TypeAttr {
- let Spellings = [Keyword<"__private">, Keyword<"private">, Clang<"opencl_private">];
+ let Spellings = [CustomKeyword<"__private">, CustomKeyword<"private">,
+ Clang<"opencl_private">];
let Documentation = [OpenCLAddressSpacePrivateDocs];
}
def OpenCLGlobalAddressSpace : TypeAttr {
- let Spellings = [Keyword<"__global">, Keyword<"global">, Clang<"opencl_global">];
+ let Spellings = [CustomKeyword<"__global">, CustomKeyword<"global">,
+ Clang<"opencl_global">];
let Documentation = [OpenCLAddressSpaceGlobalDocs];
}
@@ -1333,17 +1365,20 @@ def OpenCLGlobalHostAddressSpace : TypeAttr {
}
def OpenCLLocalAddressSpace : TypeAttr {
- let Spellings = [Keyword<"__local">, Keyword<"local">, Clang<"opencl_local">];
+ let Spellings = [CustomKeyword<"__local">, CustomKeyword<"local">,
+ Clang<"opencl_local">];
let Documentation = [OpenCLAddressSpaceLocalDocs];
}
def OpenCLConstantAddressSpace : TypeAttr {
- let Spellings = [Keyword<"__constant">, Keyword<"constant">, Clang<"opencl_constant">];
+ let Spellings = [CustomKeyword<"__constant">, CustomKeyword<"constant">,
+ Clang<"opencl_constant">];
let Documentation = [OpenCLAddressSpaceConstantDocs];
}
def OpenCLGenericAddressSpace : TypeAttr {
- let Spellings = [Keyword<"__generic">, Keyword<"generic">, Clang<"opencl_generic">];
+ let Spellings = [CustomKeyword<"__generic">, CustomKeyword<"generic">,
+ Clang<"opencl_generic">];
let Documentation = [OpenCLAddressSpaceGenericDocs];
}
@@ -1447,9 +1482,8 @@ def : MutualExclusions<[Likely, Unlikely]>;
def NoMerge : DeclOrStmtAttr {
let Spellings = [Clang<"nomerge">];
let Documentation = [NoMergeDocs];
- let Subjects = SubjectList<[Function, Stmt], ErrorDiag,
- "functions and statements">;
- let SimpleHandler = 1;
+ let Subjects = SubjectList<[Function, Stmt, Var], ErrorDiag,
+ "functions, statements and variables">;
}
def MustTail : StmtAttr {
@@ -1459,20 +1493,20 @@ def MustTail : StmtAttr {
}
def FastCall : DeclOrTypeAttr {
- let Spellings = [GCC<"fastcall">, Keyword<"__fastcall">,
- Keyword<"_fastcall">];
+ let Spellings = [GCC<"fastcall">, CustomKeyword<"__fastcall">,
+ CustomKeyword<"_fastcall">];
// let Subjects = [Function, ObjCMethod];
let Documentation = [FastCallDocs];
}
def RegCall : DeclOrTypeAttr {
- let Spellings = [GCC<"regcall">, Keyword<"__regcall">];
+ let Spellings = [GCC<"regcall">, CustomKeyword<"__regcall">];
let Documentation = [RegCallDocs];
}
def Final : InheritableAttr {
- let Spellings = [Keyword<"final">, Keyword<"sealed">];
- let Accessors = [Accessor<"isSpelledAsSealed", [Keyword<"sealed">]>];
+ let Spellings = [CustomKeyword<"final">, CustomKeyword<"sealed">];
+ let Accessors = [Accessor<"isSpelledAsSealed", [CustomKeyword<"sealed">]>];
let SemaHandler = 0;
// Omitted from docs, since this is language syntax, not an attribute, as far
// as users are concerned.
@@ -1818,7 +1852,7 @@ def Convergent : InheritableAttr {
}
def NoInline : DeclOrStmtAttr {
- let Spellings = [Keyword<"__noinline__">, GCC<"noinline">,
+ let Spellings = [CustomKeyword<"__noinline__">, GCC<"noinline">,
CXX11<"clang", "noinline">, C2x<"clang", "noinline">,
Declspec<"noinline">];
let Accessors = [Accessor<"isClangNoInline", [CXX11<"clang", "noinline">,
@@ -1847,13 +1881,23 @@ def RISCVInterrupt : InheritableAttr, TargetSpecificAttr<TargetRISCV> {
let Spellings = [GCC<"interrupt">];
let Subjects = SubjectList<[Function]>;
let Args = [EnumArgument<"Interrupt", "InterruptType",
- ["user", "supervisor", "machine"],
- ["user", "supervisor", "machine"],
+ ["supervisor", "machine"],
+ ["supervisor", "machine"],
1>];
let ParseKind = "Interrupt";
let Documentation = [RISCVInterruptDocs];
}
+def RISCVRVVVectorBits : TypeAttr {
+ let Spellings = [GNU<"riscv_rvv_vector_bits">];
+ let Subjects = SubjectList<[TypedefName], ErrorDiag>;
+ let Args = [UnsignedArgument<"NumBits">];
+ let Documentation = [RISCVRVVVectorBitsDocs];
+ let PragmaAttributeSupport = 0;
+ // Represented as VectorType instead.
+ let ASTNode = 0;
+}
+
// This is not a TargetSpecificAttr so that is silently accepted and
// ignored on other targets as encouraged by the OpenCL spec.
//
@@ -2004,22 +2048,22 @@ def PassObjectSize : InheritableParamAttr {
// Nullability type attributes.
def TypeNonNull : TypeAttr {
- let Spellings = [Keyword<"_Nonnull">];
+ let Spellings = [CustomKeyword<"_Nonnull">];
let Documentation = [TypeNonNullDocs];
}
def TypeNullable : TypeAttr {
- let Spellings = [Keyword<"_Nullable">];
+ let Spellings = [CustomKeyword<"_Nullable">];
let Documentation = [TypeNullableDocs];
}
def TypeNullableResult : TypeAttr {
- let Spellings = [Keyword<"_Nullable_result">];
+ let Spellings = [CustomKeyword<"_Nullable_result">];
let Documentation = [TypeNullableResultDocs];
}
def TypeNullUnspecified : TypeAttr {
- let Spellings = [Keyword<"_Null_unspecified">];
+ let Spellings = [CustomKeyword<"_Null_unspecified">];
let Documentation = [TypeNullUnspecifiedDocs];
}
@@ -2027,12 +2071,12 @@ def TypeNullUnspecified : TypeAttr {
// ignored because ARC is not enabled. The usual representation for this
// qualifier is as an ObjCOwnership attribute with Kind == "none".
def ObjCInertUnsafeUnretained : TypeAttr {
- let Spellings = [Keyword<"__unsafe_unretained">];
+ let Spellings = [CustomKeyword<"__unsafe_unretained">];
let Documentation = [InternalOnly];
}
def ObjCKindOf : TypeAttr {
- let Spellings = [Keyword<"__kindof">];
+ let Spellings = [CustomKeyword<"__kindof">];
let Documentation = [Undocumented];
}
@@ -2331,7 +2375,7 @@ def Overloadable : Attr {
}
def Override : InheritableAttr {
- let Spellings = [Keyword<"override">];
+ let Spellings = [CustomKeyword<"override">];
let SemaHandler = 0;
// Omitted from docs, since this is language syntax, not an attribute, as far
// as users are concerned.
@@ -2389,6 +2433,11 @@ def AArch64SVEPcs: DeclOrTypeAttr {
let Documentation = [AArch64SVEPcsDocs];
}
+def ArmStreaming : TypeAttr, TargetSpecificAttr<TargetAArch64> {
+ let Spellings = [RegularKeyword<"__arm_streaming">];
+ let Documentation = [ArmStreamingDocs];
+}
+
def Pure : InheritableAttr {
let Spellings = [GCC<"pure">];
let Documentation = [Undocumented];
@@ -2576,7 +2625,8 @@ def Sentinel : InheritableAttr {
}
def StdCall : DeclOrTypeAttr {
- let Spellings = [GCC<"stdcall">, Keyword<"__stdcall">, Keyword<"_stdcall">];
+ let Spellings = [GCC<"stdcall">, CustomKeyword<"__stdcall">,
+ CustomKeyword<"_stdcall">];
// let Subjects = [Function, ObjCMethod];
let Documentation = [StdCallDocs];
}
@@ -2645,15 +2695,15 @@ def SysVABI : DeclOrTypeAttr {
}
def ThisCall : DeclOrTypeAttr {
- let Spellings = [GCC<"thiscall">, Keyword<"__thiscall">,
- Keyword<"_thiscall">];
+ let Spellings = [GCC<"thiscall">, CustomKeyword<"__thiscall">,
+ CustomKeyword<"_thiscall">];
// let Subjects = [Function, ObjCMethod];
let Documentation = [ThisCallDocs];
}
def VectorCall : DeclOrTypeAttr {
- let Spellings = [Clang<"vectorcall">, Keyword<"__vectorcall">,
- Keyword<"_vectorcall">];
+ let Spellings = [Clang<"vectorcall">, CustomKeyword<"__vectorcall">,
+ CustomKeyword<"_vectorcall">];
// let Subjects = [Function, ObjCMethod];
let Documentation = [VectorCallDocs];
}
@@ -2672,7 +2722,8 @@ def ZeroCallUsedRegs : InheritableAttr {
}
def Pascal : DeclOrTypeAttr {
- let Spellings = [Clang<"pascal">, Keyword<"__pascal">, Keyword<"_pascal">];
+ let Spellings = [Clang<"pascal">, CustomKeyword<"__pascal">,
+ CustomKeyword<"_pascal">];
// let Subjects = [Function, ObjCMethod];
let Documentation = [Undocumented];
}
@@ -2831,6 +2882,7 @@ def Unavailable : InheritableAttr {
"IR_ARCInitReturnsUnrelated",
"IR_ARCFieldWithOwnership"], 1, /*fake*/ 1>];
let Documentation = [Undocumented];
+ let MeaningfulToClassTemplateDefinition = 1;
}
def DiagnoseIf : InheritableAttr {
@@ -3568,37 +3620,37 @@ def Thread : Attr {
}
def Win64 : IgnoredAttr {
- let Spellings = [Keyword<"__w64">];
+ let Spellings = [CustomKeyword<"__w64">];
let LangOpts = [MicrosoftExt];
}
def Ptr32 : TypeAttr {
- let Spellings = [Keyword<"__ptr32">];
+ let Spellings = [CustomKeyword<"__ptr32">];
let Documentation = [Ptr32Docs];
}
def Ptr64 : TypeAttr {
- let Spellings = [Keyword<"__ptr64">];
+ let Spellings = [CustomKeyword<"__ptr64">];
let Documentation = [Ptr64Docs];
}
def SPtr : TypeAttr {
- let Spellings = [Keyword<"__sptr">];
+ let Spellings = [CustomKeyword<"__sptr">];
let Documentation = [SPtrDocs];
}
def UPtr : TypeAttr {
- let Spellings = [Keyword<"__uptr">];
+ let Spellings = [CustomKeyword<"__uptr">];
let Documentation = [UPtrDocs];
}
def MSInheritance : InheritableAttr {
let LangOpts = [MicrosoftExt];
let Args = [DefaultBoolArgument<"BestCase", /*default*/1, /*fake*/1>];
- let Spellings = [Keyword<"__single_inheritance">,
- Keyword<"__multiple_inheritance">,
- Keyword<"__virtual_inheritance">,
- Keyword<"__unspecified_inheritance">];
+ let Spellings = [CustomKeyword<"__single_inheritance">,
+ CustomKeyword<"__multiple_inheritance">,
+ CustomKeyword<"__virtual_inheritance">,
+ CustomKeyword<"__unspecified_inheritance">];
let AdditionalMembers = [{
MSInheritanceModel getInheritanceModel() const {
// The spelling enum should agree with MSInheritanceModel.
@@ -3966,6 +4018,12 @@ def ReleaseHandle : InheritableParamAttr {
let Documentation = [ReleaseHandleDocs];
}
+def UnsafeBufferUsage : InheritableAttr {
+ let Spellings = [Clang<"unsafe_buffer_usage">];
+ let Subjects = SubjectList<[Function]>;
+ let Documentation = [UnsafeBufferUsageDocs];
+}
+
def DiagnoseAsBuiltin : InheritableAttr {
let Spellings = [Clang<"diagnose_as_builtin">];
let Args = [DeclArgument<Function, "Function">,
@@ -4092,7 +4150,7 @@ def HLSLResource : InheritableAttr {
}
def HLSLGroupSharedAddressSpace : TypeAttr {
- let Spellings = [Keyword<"groupshared">];
+ let Spellings = [CustomKeyword<"groupshared">];
let Subjects = SubjectList<[Var]>;
let Documentation = [HLSLGroupSharedAddressSpaceDocs];
}
@@ -4122,8 +4180,22 @@ def FunctionReturnThunks : InheritableAttr,
let Subjects = SubjectList<[Function]>;
let Documentation = [FunctionReturnThunksDocs];
}
+
+def WebAssemblyFuncref : TypeAttr, TargetSpecificAttr<TargetWebAssembly> {
+ let Spellings = [CustomKeyword<"__funcref">];
+ let Documentation = [WebAssemblyExportNameDocs];
+ let Subjects = SubjectList<[FunctionPointer], ErrorDiag>;
+}
+
def ReadOnlyPlacement : InheritableAttr {
let Spellings = [Clang<"enforce_read_only_placement">];
let Subjects = SubjectList<[Record]>;
let Documentation = [ReadOnlyPlacementDocs];
}
+
+def AvailableOnlyInDefaultEvalMethod : InheritableAttr {
+ let Spellings = [Clang<"available_only_in_default_eval_method">];
+ let Subjects = SubjectList<[TypedefName], ErrorDiag>;
+ let Documentation = [Undocumented];
+}
+
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td b/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
index 6d7a3ffd2d52..2c950231255d 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
@@ -539,17 +539,41 @@ def NoMergeDocs : Documentation {
let Category = DocCatStmt;
let Content = [{
If a statement is marked ``nomerge`` and contains call expressions, those call
-expressions inside the statement will not be merged during optimization. This
+expressions inside the statement will not be merged during optimization. This
attribute can be used to prevent the optimizer from obscuring the source
location of certain calls. For example, it will prevent tail merging otherwise
identical code sequences that raise an exception or terminate the program. Tail
merging normally reduces the precision of source location information, making
stack traces less useful for debugging. This attribute gives the user control
-over the tradeoff between code size and debug information precision.
+over the tradeoff between code size and debug information precision.
-``nomerge`` attribute can also be used as function attribute to prevent all
-calls to the specified function from merging. It has no effect on indirect
-calls.
+The ``nomerge`` attribute can also be used as a function attribute to prevent
+all calls to the specified function from merging. It has no effect on indirect
+calls to such functions. For example:
+
+.. code-block:: c++
+
+ [[clang::nomerge]] void foo(int) {}
+
+ void bar(int x) {
+ auto *ptr = foo;
+ if (x) foo(1); else foo(2); // will not be merged
+ if (x) ptr(1); else ptr(2); // indirect call, can be merged
+ }
+
+The ``nomerge`` attribute can also be used on pointers to functions to
+prevent calls made through such a pointer from merging. In that case the
+effect applies only to the specific function pointer. For example:
+
+.. code-block:: c++
+
+ [[clang::nomerge]] void (*foo)(int);
+
+ void bar(int x) {
+ auto *ptr = foo;
+ if (x) foo(1); else foo(2); // will not be merged
+ if (x) ptr(1); else ptr(2); // 'ptr' has no 'nomerge' attribute, can be merged
+ }
}];
}
@@ -603,6 +627,9 @@ Any variables in scope, including all arguments to the function and the
return value must be trivially destructible. The calling convention of the
caller and callee must match, and they must not be variadic functions or have
old style K&R C function declarations.
+
+``clang::musttail`` provides assurances that the tail call can be optimized on
+all targets, not just one.
}];
}
@@ -1581,7 +1608,7 @@ attributes are ignored. Supported platforms are:
``watchos``
Apple's watchOS operating system. The minimum deployment target is specified by
the ``-mwatchos-version-min=*version*`` command-line argument.
-
+
``driverkit``
Apple's DriverKit userspace kernel extensions. The minimum deployment target
is specified as part of the triple.
@@ -1750,6 +1777,19 @@ defined_in=\ *string-literal*
source containers are modules, so ``defined_in`` should specify the Swift
module name.
+USR=\ *string-literal*
+ String that specifies a unified symbol resolution (USR) value for this
+ declaration. USR string uniquely identifies this particular declaration, and
+ is typically used when constructing an index of a codebase.
+ The USR value in this attribute is expected to be generated by an external
+ compiler that compiled the native declaration using its original source
+ language. The exact format of the USR string and its other attributes
+ are determined by the specification of this declaration's source language.
+ When not specified, Clang's indexer will use the Clang USR for this symbol.
+ Users can query whether Clang supports the use of the ``USR`` clause in
+ the ``external_source_symbol`` attribute with
+ ``__has_attribute(external_source_symbol) >= 20230206``.
+
generated_declaration
This declaration was automatically generated by some tool.
@@ -2280,7 +2320,7 @@ as ``-mlong-calls`` and ``-mno-long-calls``.
def RISCVInterruptDocs : Documentation {
let Category = DocCatFunction;
- let Heading = "interrupt (RISCV)";
+ let Heading = "interrupt (RISC-V)";
let Content = [{
Clang supports the GNU style ``__attribute__((interrupt))`` attribute on RISCV
targets. This attribute may be attached to a function definition and instructs
@@ -2301,6 +2341,40 @@ Version 1.10.
}];
}
+def RISCVRVVVectorBitsDocs : Documentation {
+ let Category = DocCatType;
+ let Content = [{
+On RISC-V targets, the ``riscv_rvv_vector_bits(N)`` attribute is used to define
+fixed-length variants of sizeless types.
+
+For example:
+
+.. code-block:: c
+
+ #include <riscv_vector.h>
+
+ #if defined(__riscv_v_fixed_vlen)
+ typedef vint8m1_t fixed_vint8m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
+ #endif
+
+Creates a type ``fixed_vint8m1_t`` that is a fixed-length variant of
+``vint8m1_t`` that contains exactly 512 bits. Unlike ``vint8m1_t``, this type
+can be used in globals, structs, unions, and arrays, all of which are
+unsupported for sizeless types.
+
+The attribute can be attached to a single RVV vector (such as ``vint8m1_t``).
+The attribute will be rejected unless
+``N==(__riscv_v_fixed_vlen*LMUL)``, the implementation defined feature macro that
+is enabled under the ``-mrvv-vector-bits`` flag. ``__riscv_v_fixed_vlen`` can
+only be a power of 2 between 64 and 65536.
+
+For types where LMUL!=1, ``__riscv_v_fixed_vlen`` needs to be scaled by the LMUL
+of the type before passing to the attribute.
+
+``vbool*_t`` types are not supported at this time.
+}];
+}
+
def AVRInterruptDocs : Documentation {
let Category = DocCatFunction;
let Heading = "interrupt (AVR)";
@@ -3883,7 +3957,7 @@ Whether a particular pointer may be "null" is an important concern when working
with pointers in the C family of languages. The various nullability attributes
indicate whether a particular pointer can be null or not, which makes APIs more
expressive and can help static analysis tools identify bugs involving null
-pointers. Clang supports several kinds of nullability attributes: the
+pointers. Clang supports several kinds of nullability attributes: the
``nonnull`` and ``returns_nonnull`` attributes indicate which function or
method parameters and result types can never be null, while nullability type
qualifiers indicate which pointer types can be null (``_Nullable``) or cannot
@@ -4059,7 +4133,7 @@ memory is not available rather than returning a null pointer:
The ``returns_nonnull`` attribute implies that returning a null pointer is
undefined behavior, which the optimizer may take advantage of. The ``_Nonnull``
type qualifier indicates that a pointer cannot be null in a more general manner
-(because it is part of the type system) and does not imply undefined behavior,
+(because it is part of the type system) and does not imply undefined behavior,
making it more widely applicable
}];
}
@@ -5166,6 +5240,9 @@ apply for values returned in callee-saved registers.
R11. R11 can be used as a scratch register. Floating-point registers
(XMMs/YMMs) are not preserved and need to be saved by the caller.
+- On AArch64 the callee preserves all general purpose registers, except X0-X8
+  and X16-X18.
+
The idea behind this convention is to support calls to runtime functions
that have a hot path and a cold path. The hot path is usually a small piece
of code that doesn't use many registers. The cold path might need to call out to
@@ -5206,6 +5283,10 @@ returned in callee-saved registers.
R11. R11 can be used as a scratch register. Furthermore it also preserves
all floating-point registers (XMMs/YMMs).
+- On AArch64 the callee preserves all general purpose registers, except X0-X8
+  and X16-X18. Furthermore it also preserves the lower 128 bits of the V8-V31
+  SIMD/floating-point registers.
+
The idea behind this convention is to support calls to runtime functions
that don't need to call out to any other functions.
@@ -5315,7 +5396,7 @@ takes precedence over the command line option ``-fpatchable-function-entry=N,M``
``M`` defaults to 0 if omitted.
This attribute is only supported on
-aarch64/aarch64-be/riscv32/riscv64/i386/x86-64 targets.
+aarch64/aarch64-be/loongarch32/loongarch64/riscv32/riscv64/i386/x86-64 targets.
}];
}
@@ -6008,15 +6089,15 @@ def CFGuardDocs : Documentation {
let Content = [{
Code can indicate CFG checks are not wanted with the ``__declspec(guard(nocf))``
attribute. This directs the compiler to not insert any CFG checks for the entire
-function. This approach is typically used only sparingly in specific situations
-where the programmer has manually inserted "CFG-equivalent" protection. The
-programmer knows that they are calling through some read-only function table
-whose address is obtained through read-only memory references and for which the
-index is masked to the function table limit. This approach may also be applied
-to small wrapper functions that are not inlined and that do nothing more than
-make a call through a function pointer. Since incorrect usage of this directive
-can compromise the security of CFG, the programmer must be very careful using
-the directive. Typically, this usage is limited to very small functions that
+function. This approach is typically used only sparingly in specific situations
+where the programmer has manually inserted "CFG-equivalent" protection. The
+programmer knows that they are calling through some read-only function table
+whose address is obtained through read-only memory references and for which the
+index is masked to the function table limit. This approach may also be applied
+to small wrapper functions that are not inlined and that do nothing more than
+make a call through a function pointer. Since incorrect usage of this directive
+can compromise the security of CFG, the programmer must be very careful using
+the directive. Typically, this usage is limited to very small functions that
only call one function.
`Control Flow Guard documentation <https://docs.microsoft.com/en-us/windows/win32/secbp/pe-metadata>`
@@ -6274,6 +6355,84 @@ attribute requires a string literal argument to identify the handle being releas
}];
}
+def UnsafeBufferUsageDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+The attribute ``[[clang::unsafe_buffer_usage]]`` should be placed on functions
+that need to be avoided as they are prone to buffer overflows. It is designed to
+work together with the off-by-default compiler warning ``-Wunsafe-buffer-usage``
+to help codebases transition away from raw pointer based buffer management,
+in favor of safer abstractions such as C++20 ``std::span``. The attribute causes
+``-Wunsafe-buffer-usage`` to warn on every use of the function, and it may
+enable ``-Wunsafe-buffer-usage`` to emit automatic fix-it hints
+which would help the user replace such unsafe functions with safe
+alternatives, though the attribute can be used even when the fix can't be automated.
+
+The attribute does not suppress ``-Wunsafe-buffer-usage`` inside the function
+to which it is attached. These warnings still need to be addressed.
+
+The attribute is warranted even if the only way a function can overflow
+the buffer is by violating the function's preconditions. For example, it
+would make sense to put the attribute on function ``foo()`` below because
+passing an incorrect size parameter would cause a buffer overflow:
+
+.. code-block:: c++
+
+ [[clang::unsafe_buffer_usage]]
+ void foo(int *buf, size_t size) {
+ for (size_t i = 0; i < size; ++i) {
+ buf[i] = i;
+ }
+ }
+
+The attribute is NOT warranted when the function uses safe abstractions,
+assuming that these abstractions weren't misused outside the function.
+For example, function ``bar()`` below doesn't need the attribute,
+because assuming that the container ``buf`` is well-formed (has size that
+fits the original buffer it refers to), overflow cannot occur:
+
+.. code-block:: c++
+
+ void bar(std::span<int> buf) {
+ for (size_t i = 0; i < buf.size(); ++i) {
+ buf[i] = i;
+ }
+ }
+
+In this case function ``bar()`` enables the user to keep the buffer
+"containerized" in a span for as long as possible. On the other hand,
+function ``foo()`` in the previous example may have internal
+consistency, but by accepting a raw buffer it requires the user to unwrap
+their span, which is undesirable according to the programming model
+behind ``-Wunsafe-buffer-usage``.
+
+The attribute is warranted when a function accepts a raw buffer only to
+immediately put it into a span:
+
+.. code-block:: c++
+
+ [[clang::unsafe_buffer_usage]]
+ void baz(int *buf, size_t size) {
+ std::span<int> sp{ buf, size };
+ for (size_t i = 0; i < sp.size(); ++i) {
+ sp[i] = i;
+ }
+ }
+
+In this case ``baz()`` does not contain any unsafe operations, but the awkward
+parameter type causes the caller to unwrap the span unnecessarily.
+Note that regardless of the attribute, code inside ``baz()`` isn't flagged
+by ``-Wunsafe-buffer-usage`` as unsafe. It is definitely undesirable,
+but if ``baz()`` is on an API surface, there is no way to improve it
+to make it as safe as ``bar()`` without breaking the source and binary
+compatibility with existing users of the function. In such cases
+the proper solution would be to create a different function (possibly
+an overload of ``baz()``) that accepts a safe container like ``bar()``,
+and then use the attribute on the original ``baz()`` to help the users
+update their code to use the new function.
+ }];
+}
+
def DiagnoseAsBuiltinDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
@@ -6419,6 +6578,41 @@ Requirements on Development Tools - Engineering Specification Documentation
}];
}
+def ArmStreamingDocs : Documentation {
+ let Category = DocCatType;
+ let Content = [{
+.. Note:: This attribute has not been implemented yet, but once it is
+ implemented, it will behave as described below.
+
+The ``__arm_streaming`` keyword is only available on AArch64 targets.
+It applies to function types and specifies that the function has a
+"streaming interface". This means that:
+
+* the function requires the Scalable Matrix Extension (SME)
+
+* the function must be entered in streaming mode (that is, with PSTATE.SM
+ set to 1)
+
+* the function must return in streaming mode
+
+See `Procedure Call Standard for the Arm® 64-bit Architecture (AArch64)
+<https://github.com/ARM-software/abi-aa>`_ for more details about
+streaming-interface functions.
+
+Clang manages PSTATE.SM automatically; it is not the source code's
+responsibility to do this. For example, if a normal non-streaming
+function calls an ``__arm_streaming`` function, Clang generates code
+that switches into streaming mode before calling the function and
+switches back to non-streaming mode on return.
+
+``__arm_streaming`` can appear anywhere that a standard ``[[...]]`` type
+attribute can appear.
+
+See `Arm C Language Extensions <https://github.com/ARM-software/acle>`_
+for more details about this extension, and for other related SME features.
+ }];
+}
+
def AlwaysInlineDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
@@ -6847,3 +7041,39 @@ def ReadOnlyPlacementDocs : Documentation {
``enforce_read_only_placement`` attribute.
}];
}
+
+def WebAssemblyFuncrefDocs : Documentation {
+ let Category = DocCatType;
+ let Content = [{
+Clang supports the ``__funcref`` attribute for the WebAssembly target.
+This attribute may be attached to a function pointer type, where it modifies
+its underlying representation to be a WebAssembly ``funcref``.
+ }];
+}
+
+def CleanupDocs : Documentation {
+ let Category = DocCatType;
+ let Content = [{
+This attribute allows a function to be run when a local variable goes out of
+scope. The attribute takes the identifier of a function with a parameter type
+that is a pointer to the type with the attribute.
+
+.. code-block:: c
+
+ static void foo (int *) { ... }
+ static void bar (int *) { ... }
+ void baz (void) {
+ int x __attribute__((cleanup(foo)));
+ {
+ int y __attribute__((cleanup(bar)));
+ }
+ }
+
+The above example will result in a call to ``bar`` being passed the address of
+``y`` when ``y`` goes out of scope, then a call to ``foo`` being passed the
+address of ``x`` when ``x`` goes out of scope. If two or more variables share
+the same scope, their ``cleanup`` callbacks are invoked in the reverse order
+the variables were declared in. It is not possible to check the return value
+(if any) of these ``cleanup`` callback functions.
+}];
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h b/contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h
index 81a8d2134193..6396c0dc6ef0 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_BASIC_ATTRIBUTECOMMONINFO_H
#define LLVM_CLANG_BASIC_ATTRIBUTECOMMONINFO_H
#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/TokenKinds.h"
namespace clang {
class IdentifierInfo;
@@ -24,7 +25,7 @@ public:
/// The style used to specify an attribute.
enum Syntax {
/// __attribute__((...))
- AS_GNU,
+ AS_GNU = 1,
/// [[...]]
AS_CXX11,
@@ -51,6 +52,10 @@ public:
/// <vardecl> : <semantic>
AS_HLSLSemantic,
+
+ /// The attribute has no source code manifestation and is only created
+ /// implicitly.
+ AS_Implicit
};
enum Kind {
#define PARSED_ATTR(NAME) AT_##NAME,
@@ -71,63 +76,96 @@ private:
/// Corresponds to the Syntax enum.
unsigned SyntaxUsed : 4;
unsigned SpellingIndex : 4;
+ unsigned IsAlignas : 1;
+ unsigned IsRegularKeywordAttribute : 1;
protected:
static constexpr unsigned SpellingNotCalculated = 0xf;
public:
- AttributeCommonInfo(SourceRange AttrRange)
- : AttrRange(AttrRange), ScopeLoc(), AttrKind(0), SyntaxUsed(0),
- SpellingIndex(SpellingNotCalculated) {}
-
- AttributeCommonInfo(SourceLocation AttrLoc)
- : AttrRange(AttrLoc), ScopeLoc(), AttrKind(0), SyntaxUsed(0),
- SpellingIndex(SpellingNotCalculated) {}
+ /// Combines information about the source-code form of an attribute,
+ /// including its syntax and spelling.
+ class Form {
+ public:
+ constexpr Form(Syntax SyntaxUsed, unsigned SpellingIndex, bool IsAlignas,
+ bool IsRegularKeywordAttribute)
+ : SyntaxUsed(SyntaxUsed), SpellingIndex(SpellingIndex),
+ IsAlignas(IsAlignas),
+ IsRegularKeywordAttribute(IsRegularKeywordAttribute) {}
+ constexpr Form(tok::TokenKind Tok)
+ : SyntaxUsed(AS_Keyword), SpellingIndex(SpellingNotCalculated),
+ IsAlignas(Tok == tok::kw_alignas),
+ IsRegularKeywordAttribute(tok::isRegularKeywordAttribute(Tok)) {}
+
+ Syntax getSyntax() const { return Syntax(SyntaxUsed); }
+ unsigned getSpellingIndex() const { return SpellingIndex; }
+ bool isAlignas() const { return IsAlignas; }
+ bool isRegularKeywordAttribute() const { return IsRegularKeywordAttribute; }
+
+ static Form GNU() { return AS_GNU; }
+ static Form CXX11() { return AS_CXX11; }
+ static Form C2x() { return AS_C2x; }
+ static Form Declspec() { return AS_Declspec; }
+ static Form Microsoft() { return AS_Microsoft; }
+ static Form Keyword(bool IsAlignas, bool IsRegularKeywordAttribute) {
+ return Form(AS_Keyword, SpellingNotCalculated, IsAlignas,
+ IsRegularKeywordAttribute);
+ }
+ static Form Pragma() { return AS_Pragma; }
+ static Form ContextSensitiveKeyword() { return AS_ContextSensitiveKeyword; }
+ static Form HLSLSemantic() { return AS_HLSLSemantic; }
+ static Form Implicit() { return AS_Implicit; }
+
+ private:
+ constexpr Form(Syntax SyntaxUsed)
+ : SyntaxUsed(SyntaxUsed), SpellingIndex(SpellingNotCalculated),
+ IsAlignas(0), IsRegularKeywordAttribute(0) {}
+
+ unsigned SyntaxUsed : 4;
+ unsigned SpellingIndex : 4;
+ unsigned IsAlignas : 1;
+ unsigned IsRegularKeywordAttribute : 1;
+ };
AttributeCommonInfo(const IdentifierInfo *AttrName,
const IdentifierInfo *ScopeName, SourceRange AttrRange,
- SourceLocation ScopeLoc, Syntax SyntaxUsed)
+ SourceLocation ScopeLoc, Kind AttrKind, Form FormUsed)
: AttrName(AttrName), ScopeName(ScopeName), AttrRange(AttrRange),
- ScopeLoc(ScopeLoc),
- AttrKind(getParsedKind(AttrName, ScopeName, SyntaxUsed)),
- SyntaxUsed(SyntaxUsed), SpellingIndex(SpellingNotCalculated) {}
+ ScopeLoc(ScopeLoc), AttrKind(AttrKind),
+ SyntaxUsed(FormUsed.getSyntax()),
+ SpellingIndex(FormUsed.getSpellingIndex()),
+ IsAlignas(FormUsed.isAlignas()),
+ IsRegularKeywordAttribute(FormUsed.isRegularKeywordAttribute()) {
+ assert(SyntaxUsed >= AS_GNU && SyntaxUsed <= AS_Implicit &&
+ "Invalid syntax!");
+ }
AttributeCommonInfo(const IdentifierInfo *AttrName,
const IdentifierInfo *ScopeName, SourceRange AttrRange,
- SourceLocation ScopeLoc, Kind AttrKind, Syntax SyntaxUsed)
- : AttrName(AttrName), ScopeName(ScopeName), AttrRange(AttrRange),
- ScopeLoc(ScopeLoc), AttrKind(AttrKind), SyntaxUsed(SyntaxUsed),
- SpellingIndex(SpellingNotCalculated) {}
-
- AttributeCommonInfo(const IdentifierInfo *AttrName,
- const IdentifierInfo *ScopeName, SourceRange AttrRange,
- SourceLocation ScopeLoc, Kind AttrKind, Syntax SyntaxUsed,
- unsigned Spelling)
- : AttrName(AttrName), ScopeName(ScopeName), AttrRange(AttrRange),
- ScopeLoc(ScopeLoc), AttrKind(AttrKind), SyntaxUsed(SyntaxUsed),
- SpellingIndex(Spelling) {}
+ SourceLocation ScopeLoc, Form FormUsed)
+ : AttributeCommonInfo(
+ AttrName, ScopeName, AttrRange, ScopeLoc,
+ getParsedKind(AttrName, ScopeName, FormUsed.getSyntax()),
+ FormUsed) {}
AttributeCommonInfo(const IdentifierInfo *AttrName, SourceRange AttrRange,
- Syntax SyntaxUsed)
- : AttrName(AttrName), ScopeName(nullptr), AttrRange(AttrRange),
- ScopeLoc(), AttrKind(getParsedKind(AttrName, ScopeName, SyntaxUsed)),
- SyntaxUsed(SyntaxUsed), SpellingIndex(SpellingNotCalculated) {}
-
- AttributeCommonInfo(SourceRange AttrRange, Kind K, Syntax SyntaxUsed)
- : AttrName(nullptr), ScopeName(nullptr), AttrRange(AttrRange), ScopeLoc(),
- AttrKind(K), SyntaxUsed(SyntaxUsed),
- SpellingIndex(SpellingNotCalculated) {}
+ Form FormUsed)
+ : AttributeCommonInfo(AttrName, nullptr, AttrRange, SourceLocation(),
+ FormUsed) {}
- AttributeCommonInfo(SourceRange AttrRange, Kind K, Syntax SyntaxUsed,
- unsigned Spelling)
- : AttrName(nullptr), ScopeName(nullptr), AttrRange(AttrRange), ScopeLoc(),
- AttrKind(K), SyntaxUsed(SyntaxUsed), SpellingIndex(Spelling) {}
+ AttributeCommonInfo(SourceRange AttrRange, Kind K, Form FormUsed)
+ : AttributeCommonInfo(nullptr, nullptr, AttrRange, SourceLocation(), K,
+ FormUsed) {}
AttributeCommonInfo(AttributeCommonInfo &&) = default;
AttributeCommonInfo(const AttributeCommonInfo &) = default;
Kind getParsedKind() const { return Kind(AttrKind); }
Syntax getSyntax() const { return Syntax(SyntaxUsed); }
+ Form getForm() const {
+ return Form(getSyntax(), SpellingIndex, IsAlignas,
+ IsRegularKeywordAttribute);
+ }
const IdentifierInfo *getAttrName() const { return AttrName; }
SourceLocation getLoc() const { return AttrRange.getBegin(); }
SourceRange getRange() const { return AttrRange; }
@@ -148,29 +186,7 @@ public:
bool isGNUScope() const;
bool isClangScope() const;
- bool isAlignasAttribute() const {
- // FIXME: Use a better mechanism to determine this.
- // We use this in `isCXX11Attribute` below, so it _should_ only return
- // true for the `alignas` spelling, but it currently also returns true
- // for the `_Alignas` spelling, which only exists in C11. Distinguishing
- // between the two is important because they behave differently:
- // - `alignas` may only appear in the attribute-specifier-seq before
- // the decl-specifier-seq and is therefore associated with the
- // declaration.
- // - `_Alignas` may appear anywhere within the declaration-specifiers
- // and is therefore associated with the `DeclSpec`.
- // It's not clear how best to fix this:
- // - We have the necessary information in the form of the `SpellingIndex`,
- // but we would need to compare against AlignedAttr::Keyword_alignas,
- // and we can't depend on clang/AST/Attr.h here.
- // - We could test `getAttrName()->getName() == "alignas"`, but this is
- // inefficient.
- return getParsedKind() == AT_Aligned && isKeywordAttribute();
- }
-
- bool isCXX11Attribute() const {
- return SyntaxUsed == AS_CXX11 || isAlignasAttribute();
- }
+ bool isCXX11Attribute() const { return SyntaxUsed == AS_CXX11 || IsAlignas; }
bool isC2xAttribute() const { return SyntaxUsed == AS_C2x; }
@@ -186,6 +202,8 @@ public:
return SyntaxUsed == AS_Keyword || SyntaxUsed == AS_ContextSensitiveKeyword;
}
+ bool isRegularKeywordAttribute() const { return IsRegularKeywordAttribute; }
+
bool isContextSensitiveKeywordAttribute() const {
return SyntaxUsed == AS_ContextSensitiveKeyword;
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Builtins.def b/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
index 41f124556446..6dad8b512bd2 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
@@ -39,6 +39,8 @@
// A -> "reference" to __builtin_va_list
// V -> Vector, followed by the number of elements and the base type.
// q -> Scalable vector, followed by the number of elements and the base type.
+// Q -> target builtin type, followed by a character to distinguish the builtin type
+// Qa -> AArch64 svcount_t builtin type.
// E -> ext_vector, followed by the number of elements and the base type.
// X -> _Complex, followed by the base type.
// Y -> ptrdiff_t
@@ -141,6 +143,7 @@ BUILTIN(__builtin_frexp , "ddi*" , "Fn")
BUILTIN(__builtin_frexpf, "ffi*" , "Fn")
BUILTIN(__builtin_frexpl, "LdLdi*", "Fn")
BUILTIN(__builtin_frexpf128, "LLdLLdi*", "Fn")
+BUILTIN(__builtin_frexpf16, "hhi*" , "Fn")
BUILTIN(__builtin_huge_val, "d", "ncE")
BUILTIN(__builtin_huge_valf, "f", "ncE")
BUILTIN(__builtin_huge_vall, "Ld", "ncE")
@@ -157,6 +160,7 @@ BUILTIN(__builtin_ldexp , "ddi" , "Fne")
BUILTIN(__builtin_ldexpf, "ffi" , "Fne")
BUILTIN(__builtin_ldexpl, "LdLdi", "Fne")
BUILTIN(__builtin_ldexpf128, "LLdLLdi", "Fne")
+BUILTIN(__builtin_ldexpf16, "hhi", "Fne")
BUILTIN(__builtin_modf , "ddd*" , "Fn")
BUILTIN(__builtin_modff, "fff*" , "Fn")
BUILTIN(__builtin_modfl, "LdLdLd*", "Fn")
@@ -350,6 +354,11 @@ BUILTIN(__builtin_roundf, "ff" , "Fnc")
BUILTIN(__builtin_roundf16, "hh" , "Fnc")
BUILTIN(__builtin_roundl, "LdLd" , "Fnc")
BUILTIN(__builtin_roundf128, "LLdLLd" , "Fnc")
+BUILTIN(__builtin_roundeven, "dd" , "Fnc")
+BUILTIN(__builtin_roundevenf, "ff" , "Fnc")
+BUILTIN(__builtin_roundevenf16, "hh" , "Fnc")
+BUILTIN(__builtin_roundevenl, "LdLd" , "Fnc")
+BUILTIN(__builtin_roundevenf128, "LLdLLd" , "Fnc")
BUILTIN(__builtin_scalbln , "ddLi", "Fne")
BUILTIN(__builtin_scalblnf, "ffLi", "Fne")
BUILTIN(__builtin_scalblnl, "LdLdLi", "Fne")
@@ -392,6 +401,7 @@ BUILTIN(__builtin_truncf16, "hh", "Fnc")
// Access to floating point environment
BUILTIN(__builtin_flt_rounds, "i", "n")
+BUILTIN(__builtin_set_flt_rounds, "vi", "n")
// C99 complex builtins
BUILTIN(__builtin_cabs, "dXd", "Fne")
@@ -479,6 +489,7 @@ BUILTIN(__builtin_isinf, "i.", "FnctE")
BUILTIN(__builtin_isinf_sign, "i.", "FnctE")
BUILTIN(__builtin_isnan, "i.", "FnctE")
BUILTIN(__builtin_isnormal, "i.", "FnctE")
+BUILTIN(__builtin_isfpclass, "i.", "nctE")
// FP signbit builtins
BUILTIN(__builtin_signbit, "i.", "Fnct")
@@ -551,7 +562,6 @@ BUILTIN(__builtin_assume_aligned, "v*vC*z.", "nctE")
BUILTIN(__builtin_bcmp, "ivC*vC*z", "FnE")
BUILTIN(__builtin_bcopy, "vv*v*z", "n")
BUILTIN(__builtin_bzero, "vv*z", "nF")
-BUILTIN(__builtin_fprintf, "iP*cC*.", "Fp:1:")
BUILTIN(__builtin_free, "vv*", "nF")
BUILTIN(__builtin_malloc, "v*z", "nF")
BUILTIN(__builtin_memchr, "v*vC*iz", "nFE")
@@ -562,7 +572,6 @@ BUILTIN(__builtin_memmove, "v*v*vC*z", "nFE")
BUILTIN(__builtin_mempcpy, "v*v*vC*z", "nF")
BUILTIN(__builtin_memset, "v*v*iz", "nF")
BUILTIN(__builtin_memset_inline, "vv*iIz", "n")
-BUILTIN(__builtin_printf, "icC*.", "Fp:0:")
BUILTIN(__builtin_stpcpy, "c*c*cC*", "nF")
BUILTIN(__builtin_stpncpy, "c*c*cC*z", "nF")
BUILTIN(__builtin_strcasecmp, "icC*cC*", "nF")
@@ -599,10 +608,20 @@ BUILTIN(__builtin_setjmp, "iv**", "j")
BUILTIN(__builtin_longjmp, "vv**i", "r")
BUILTIN(__builtin_unwind_init, "v", "")
BUILTIN(__builtin_eh_return_data_regno, "iIi", "ncE")
-BUILTIN(__builtin_snprintf, "ic*zcC*.", "nFp:2:")
-BUILTIN(__builtin_sprintf, "ic*cC*.", "nFP:1:")
-BUILTIN(__builtin_vsnprintf, "ic*zcC*a", "nFP:2:")
-BUILTIN(__builtin_vsprintf, "ic*cC*a", "nFP:1:")
+BUILTIN(__builtin_fprintf, "iP*RcC*R.", "nFp:1:")
+BUILTIN(__builtin_printf, "icC*R.", "nFp:0:")
+BUILTIN(__builtin_sprintf, "ic*RcC*R.", "nFp:1:")
+BUILTIN(__builtin_snprintf, "ic*RzcC*R.", "nFp:2:")
+BUILTIN(__builtin_vprintf, "icC*Ra", "nFP:0:")
+BUILTIN(__builtin_vfprintf, "iP*RcC*Ra", "nFP:1:")
+BUILTIN(__builtin_vsprintf, "ic*RcC*Ra", "nFP:1:")
+BUILTIN(__builtin_vsnprintf, "ic*RzcC*Ra", "nFP:2:")
+BUILTIN(__builtin_fscanf, "iP*RcC*R.", "Fs:1:")
+BUILTIN(__builtin_scanf, "icC*R.", "Fs:0:")
+BUILTIN(__builtin_sscanf, "icC*RcC*R.", "Fs:1:")
+BUILTIN(__builtin_vfscanf, "iP*RcC*Ra", "FS:1:")
+BUILTIN(__builtin_vscanf, "icC*Ra", "FS:0:")
+BUILTIN(__builtin_vsscanf, "icC*RcC*Ra", "FS:1:")
BUILTIN(__builtin_thread_pointer, "v*", "nc")
BUILTIN(__builtin_launder, "v*v*", "ntE")
LANGBUILTIN(__builtin_is_constant_evaluated, "b", "nE", CXX_LANG)
@@ -631,14 +650,14 @@ BUILTIN(__builtin___strlcpy_chk, "zc*cC*zz", "nF")
BUILTIN(__builtin___strncat_chk, "c*c*cC*zz", "nF")
BUILTIN(__builtin___strncpy_chk, "c*c*cC*zz", "nF")
BUILTIN(__builtin___stpncpy_chk, "c*c*cC*zz", "nF")
-BUILTIN(__builtin___snprintf_chk, "ic*zizcC*.", "Fp:4:")
-BUILTIN(__builtin___sprintf_chk, "ic*izcC*.", "Fp:3:")
-BUILTIN(__builtin___vsnprintf_chk, "ic*zizcC*a", "FP:4:")
-BUILTIN(__builtin___vsprintf_chk, "ic*izcC*a", "FP:3:")
-BUILTIN(__builtin___fprintf_chk, "iP*icC*.", "Fp:2:")
-BUILTIN(__builtin___printf_chk, "iicC*.", "Fp:1:")
-BUILTIN(__builtin___vfprintf_chk, "iP*icC*a", "FP:2:")
-BUILTIN(__builtin___vprintf_chk, "iicC*a", "FP:1:")
+BUILTIN(__builtin___snprintf_chk, "ic*RzizcC*R.", "Fp:4:")
+BUILTIN(__builtin___sprintf_chk, "ic*RizcC*R.", "Fp:3:")
+BUILTIN(__builtin___vsnprintf_chk, "ic*RzizcC*Ra", "FP:4:")
+BUILTIN(__builtin___vsprintf_chk, "ic*RizcC*Ra", "FP:3:")
+BUILTIN(__builtin___fprintf_chk, "iP*RicC*R.", "Fp:2:")
+BUILTIN(__builtin___printf_chk, "iicC*R.", "Fp:1:")
+BUILTIN(__builtin___vfprintf_chk, "iP*RicC*Ra", "FP:2:")
+BUILTIN(__builtin___vprintf_chk, "iicC*Ra", "FP:1:")
BUILTIN(__builtin_unpredictable, "LiLi" , "nc")
BUILTIN(__builtin_expect, "LiLiLi" , "ncE")
@@ -655,18 +674,29 @@ BUILTIN(__builtin_alloca_uninitialized, "v*z", "Fn")
BUILTIN(__builtin_alloca_with_align, "v*zIz", "Fn")
BUILTIN(__builtin_alloca_with_align_uninitialized, "v*zIz", "Fn")
BUILTIN(__builtin_call_with_static_chain, "v.", "nt")
+BUILTIN(__builtin_nondeterministic_value, "v.", "nt")
BUILTIN(__builtin_elementwise_abs, "v.", "nct")
BUILTIN(__builtin_elementwise_max, "v.", "nct")
BUILTIN(__builtin_elementwise_min, "v.", "nct")
BUILTIN(__builtin_elementwise_ceil, "v.", "nct")
BUILTIN(__builtin_elementwise_cos, "v.", "nct")
+BUILTIN(__builtin_elementwise_exp, "v.", "nct")
+BUILTIN(__builtin_elementwise_exp2, "v.", "nct")
BUILTIN(__builtin_elementwise_floor, "v.", "nct")
+BUILTIN(__builtin_elementwise_log, "v.", "nct")
+BUILTIN(__builtin_elementwise_log2, "v.", "nct")
+BUILTIN(__builtin_elementwise_log10, "v.", "nct")
+BUILTIN(__builtin_elementwise_pow, "v.", "nct")
BUILTIN(__builtin_elementwise_roundeven, "v.", "nct")
+BUILTIN(__builtin_elementwise_round, "v.", "nct")
+BUILTIN(__builtin_elementwise_rint, "v.", "nct")
+BUILTIN(__builtin_elementwise_nearbyint, "v.", "nct")
BUILTIN(__builtin_elementwise_sin, "v.", "nct")
BUILTIN(__builtin_elementwise_trunc, "v.", "nct")
BUILTIN(__builtin_elementwise_canonicalize, "v.", "nct")
BUILTIN(__builtin_elementwise_copysign, "v.", "nct")
+BUILTIN(__builtin_elementwise_fma, "v.", "nct")
BUILTIN(__builtin_elementwise_add_sat, "v.", "nct")
BUILTIN(__builtin_elementwise_sub_sat, "v.", "nct")
BUILTIN(__builtin_reduce_max, "v.", "nct")
@@ -889,6 +919,7 @@ ATOMIC_BUILTIN(__hip_atomic_compare_exchange_weak, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_compare_exchange_strong, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_exchange, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_fetch_add, "v.", "t")
+ATOMIC_BUILTIN(__hip_atomic_fetch_sub, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_fetch_and, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_fetch_or, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_fetch_xor, "v.", "t")
@@ -1415,6 +1446,10 @@ LIBBUILTIN(round, "dd", "fnc", MATH_H, ALL_LANGUAGES)
LIBBUILTIN(roundf, "ff", "fnc", MATH_H, ALL_LANGUAGES)
LIBBUILTIN(roundl, "LdLd", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(roundeven, "dd", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(roundevenf, "ff", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(roundevenl, "LdLd", "fnc", MATH_H, ALL_LANGUAGES)
+
LIBBUILTIN(scalbln, "ddLi", "fne", MATH_H, ALL_LANGUAGES)
LIBBUILTIN(scalblnf, "ffLi", "fne", MATH_H, ALL_LANGUAGES)
LIBBUILTIN(scalblnl, "LdLdLi", "fne", MATH_H, ALL_LANGUAGES)
@@ -1565,6 +1600,7 @@ LIBBUILTIN(addressof, "v*v&", "zfncThE", MEMORY, CXX_LANG)
LANGBUILTIN(__addressof, "v*v&", "zfncTE", CXX_LANG)
LIBBUILTIN(as_const, "v&v&", "zfncThE", UTILITY, CXX_LANG)
LIBBUILTIN(forward, "v&v&", "zfncThE", UTILITY, CXX_LANG)
+LIBBUILTIN(forward_like, "v&v&", "zfncThE", UTILITY, CXX_LANG)
LIBBUILTIN(move, "v&v&", "zfncThE", UTILITY, CXX_LANG)
LIBBUILTIN(move_if_noexcept, "v&v&", "zfncThE", UTILITY, CXX_LANG)
@@ -1573,6 +1609,7 @@ BUILTIN(__builtin_annotation, "v.", "tn")
// Invariants
BUILTIN(__builtin_assume, "vb", "nE")
+BUILTIN(__builtin_assume_separate_storage, "vvCD*vCD*", "nE")
// Multiprecision Arithmetic Builtins.
BUILTIN(__builtin_addcb, "UcUcCUcCUcCUc*", "n")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64.def
index e6672a1702e9..eaae6c9ad846 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64.def
@@ -39,6 +39,8 @@ BUILTIN(__builtin_arm_rbit, "UiUi", "nc")
BUILTIN(__builtin_arm_rbit64, "WUiWUi", "nc")
BUILTIN(__builtin_arm_cls, "UiZUi", "nc")
BUILTIN(__builtin_arm_cls64, "UiWUi", "nc")
+BUILTIN(__builtin_arm_clz, "UiZUi", "nc")
+BUILTIN(__builtin_arm_clz64, "UiWUi", "nc")
// HINT
BUILTIN(__builtin_arm_nop, "v", "")
@@ -257,16 +259,18 @@ TARGET_HEADER_BUILTIN(__umulh, "ULLiULLiULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES,
TARGET_HEADER_BUILTIN(__break, "vi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__writex18byte, "vULiUc", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__writex18word, "vULiUs", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__writex18dword, "vULiULi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__writex18qword, "vULiULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__readx18byte, "UcULi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__readx18word, "UsULi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__readx18dword, "ULiULi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__readx18qword, "ULLiULi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__writex18byte, "vUNiUc", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__writex18word, "vUNiUs", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__writex18dword, "vUNiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__writex18qword, "vUNiULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(__readx18byte, "UcUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__readx18word, "UsUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__readx18dword, "UNiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__readx18qword, "ULLiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
#undef BUILTIN
#undef LANGBUILTIN
+#undef TARGET_BUILTIN
#undef TARGET_HEADER_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def
index c14237227cd3..29aa9ca7552e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -100,6 +100,8 @@ BUILTIN(__builtin_amdgcn_rsq_clamp, "dd", "nc")
BUILTIN(__builtin_amdgcn_rsq_clampf, "ff", "nc")
BUILTIN(__builtin_amdgcn_sinf, "ff", "nc")
BUILTIN(__builtin_amdgcn_cosf, "ff", "nc")
+BUILTIN(__builtin_amdgcn_logf, "ff", "nc")
+BUILTIN(__builtin_amdgcn_exp2f, "ff", "nc")
BUILTIN(__builtin_amdgcn_log_clampf, "ff", "nc")
BUILTIN(__builtin_amdgcn_ldexp, "ddi", "nc")
BUILTIN(__builtin_amdgcn_ldexpf, "ffi", "nc")
@@ -214,8 +216,8 @@ TARGET_BUILTIN(__builtin_amdgcn_perm, "UiUiUiUi", "nc", "gfx8-insts")
TARGET_BUILTIN(__builtin_amdgcn_fmed3h, "hhhh", "nc", "gfx9-insts")
TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_f64, "dd*1d", "t", "gfx90a-insts")
-TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_f32, "ff*1f", "t", "gfx90a-insts")
-TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_v2f16, "V2hV2h*1V2h", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_f32, "ff*1f", "t", "atomic-fadd-rtn-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_v2f16, "V2hV2h*1V2h", "t", "atomic-buffer-global-pk-add-f16-insts")
TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fmin_f64, "dd*1d", "t", "gfx90a-insts")
TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fmax_f64, "dd*1d", "t", "gfx90a-insts")
@@ -227,16 +229,17 @@ TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_f64, "dd*3d", "t", "gfx90a-insts"
TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_f32, "ff*3f", "t", "gfx8-insts")
TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_f32, "ff*0f", "t", "gfx940-insts")
-TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_v2f16, "V2hV2h*0V2h", "t", "gfx940-insts")
-TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_v2bf16, "V2sV2s*0V2s", "t", "gfx940-insts")
-TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_v2bf16, "V2sV2s*1V2s", "t", "gfx940-insts")
-TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_v2bf16, "V2sV2s*3V2s", "t", "gfx940-insts")
+TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_v2f16, "V2hV2h*0V2h", "t", "atomic-flat-pk-add-16-insts")
+TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_v2bf16, "V2sV2s*0V2s", "t", "atomic-flat-pk-add-16-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_v2bf16, "V2sV2s*1V2s", "t", "atomic-global-pk-add-bf16-inst")
+TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_v2bf16, "V2sV2s*3V2s", "t", "atomic-ds-pk-add-16-insts")
+TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_v2f16, "V2hV2h*3V2h", "t", "atomic-ds-pk-add-16-insts")
//===----------------------------------------------------------------------===//
// Deep learning builtins.
//===----------------------------------------------------------------------===//
-TARGET_BUILTIN(__builtin_amdgcn_fdot2, "fV2hV2hfIb", "nc", "dot7-insts")
+TARGET_BUILTIN(__builtin_amdgcn_fdot2, "fV2hV2hfIb", "nc", "dot10-insts")
TARGET_BUILTIN(__builtin_amdgcn_fdot2_f16_f16, "hV2hV2hh", "nc", "dot9-insts")
TARGET_BUILTIN(__builtin_amdgcn_fdot2_bf16_bf16, "sV2sV2ss", "nc", "dot9-insts")
TARGET_BUILTIN(__builtin_amdgcn_fdot2_f32_bf16, "fV2sV2sfIb", "nc", "dot9-insts")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def
index eabf830b359c..9ee918cb2147 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def
@@ -119,6 +119,8 @@ BUILTIN(__builtin_arm_smusdx, "iii", "nc")
// Bit manipulation
BUILTIN(__builtin_arm_rbit, "UiUi", "nc")
+BUILTIN(__builtin_arm_clz, "UiZUi", "nc")
+BUILTIN(__builtin_arm_clz64, "UiWUi", "nc")
BUILTIN(__builtin_arm_cls, "UiZUi", "nc")
BUILTIN(__builtin_arm_cls64, "UiWUi", "nc")
@@ -343,4 +345,5 @@ TARGET_HEADER_BUILTIN(_InterlockedDecrement64_rel, "LLiLLiD*", "nh", INTRIN_H, A
#undef BUILTIN
#undef LANGBUILTIN
+#undef TARGET_BUILTIN
#undef TARGET_HEADER_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNEON.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNEON.def
index b8eb5a7b6173..9627005ba982 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNEON.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNEON.def
@@ -19,3 +19,4 @@
#undef GET_NEON_BUILTINS
#undef BUILTIN
+#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def
index ea0cd8c3e843..f645ad25cbd8 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def
@@ -54,7 +54,11 @@
#pragma push_macro("PTX76")
#pragma push_macro("PTX77")
#pragma push_macro("PTX78")
-#define PTX78 "ptx78"
+#pragma push_macro("PTX80")
+#pragma push_macro("PTX81")
+#define PTX81 "ptx81"
+#define PTX80 "ptx80|" PTX81
+#define PTX78 "ptx78|" PTX80
#define PTX77 "ptx77|" PTX78
#define PTX76 "ptx76|" PTX77
#define PTX75 "ptx75|" PTX76
@@ -95,6 +99,31 @@ BUILTIN(__nvvm_read_ptx_sreg_nctaid_y, "i", "nc")
BUILTIN(__nvvm_read_ptx_sreg_nctaid_z, "i", "nc")
BUILTIN(__nvvm_read_ptx_sreg_nctaid_w, "i", "nc")
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_clusterid_x, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_clusterid_y, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_clusterid_z, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_clusterid_w, "i", "nc", AND(SM_90, PTX78))
+
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_nclusterid_x, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_nclusterid_y, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_nclusterid_z, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_nclusterid_w, "i", "nc", AND(SM_90, PTX78))
+
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_ctaid_x, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_ctaid_y, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_ctaid_z, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_ctaid_w, "i", "nc", AND(SM_90, PTX78))
+
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_nctaid_x, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_nctaid_y, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_nctaid_z, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_nctaid_w, "i", "nc", AND(SM_90, PTX78))
+
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_ctarank, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_nctarank, "i", "nc", AND(SM_90, PTX78))
+
+TARGET_BUILTIN(__nvvm_is_explicit_cluster, "b", "nc", AND(SM_90, PTX78))
+
BUILTIN(__nvvm_read_ptx_sreg_laneid, "i", "nc")
BUILTIN(__nvvm_read_ptx_sreg_warpid, "i", "nc")
BUILTIN(__nvvm_read_ptx_sreg_nwarpid, "i", "nc")
@@ -144,16 +173,20 @@ TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_f16x2, "V2hV2hV2h", "",
AND(SM_86, PTX72))
TARGET_BUILTIN(__nvvm_fmin_ftz_nan_xorsign_abs_f16x2, "V2hV2hV2h", "",
AND(SM_86, PTX72))
-TARGET_BUILTIN(__nvvm_fmin_bf16, "UsUsUs", "", AND(SM_80, PTX70))
-TARGET_BUILTIN(__nvvm_fmin_nan_bf16, "UsUsUs", "", AND(SM_80, PTX70))
-TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_bf16, "UsUsUs", "", AND(SM_86, PTX72))
-TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_bf16, "UsUsUs", "",
+TARGET_BUILTIN(__nvvm_fmin_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_ftz_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_nan_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_ftz_nan_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_bf16, "yyy", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_bf16, "yyy", "",
AND(SM_86, PTX72))
-TARGET_BUILTIN(__nvvm_fmin_bf16x2, "ZUiZUiZUi", "", AND(SM_80, PTX70))
-TARGET_BUILTIN(__nvvm_fmin_nan_bf16x2, "ZUiZUiZUi", "", AND(SM_80, PTX70))
-TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_bf16x2, "ZUiZUiZUi", "",
+TARGET_BUILTIN(__nvvm_fmin_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_ftz_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_nan_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_ftz_nan_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_bf16x2, "V2yV2yV2y", "",
AND(SM_86, PTX72))
-TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_bf16x2, "ZUiZUiZUi", "",
+TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_bf16x2, "V2yV2yV2y", "",
AND(SM_86, PTX72))
BUILTIN(__nvvm_fmin_f, "fff", "")
BUILTIN(__nvvm_fmin_ftz_f, "fff", "")
@@ -186,16 +219,20 @@ TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_f16x2, "V2hV2hV2h", "",
AND(SM_86, PTX72))
TARGET_BUILTIN(__nvvm_fmax_ftz_nan_xorsign_abs_f16x2, "V2hV2hV2h", "",
AND(SM_86, PTX72))
-TARGET_BUILTIN(__nvvm_fmax_bf16, "UsUsUs", "", AND(SM_80, PTX70))
-TARGET_BUILTIN(__nvvm_fmax_nan_bf16, "UsUsUs", "", AND(SM_80, PTX70))
-TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_bf16, "UsUsUs", "", AND(SM_86, PTX72))
-TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_bf16, "UsUsUs", "",
+TARGET_BUILTIN(__nvvm_fmax_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_ftz_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_nan_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_ftz_nan_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_bf16, "yyy", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_bf16, "yyy", "",
AND(SM_86, PTX72))
-TARGET_BUILTIN(__nvvm_fmax_bf16x2, "ZUiZUiZUi", "", AND(SM_80, PTX70))
-TARGET_BUILTIN(__nvvm_fmax_nan_bf16x2, "ZUiZUiZUi", "", AND(SM_80, PTX70))
-TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_bf16x2, "ZUiZUiZUi", "",
+TARGET_BUILTIN(__nvvm_fmax_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_ftz_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_nan_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_ftz_nan_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_bf16x2, "V2yV2yV2y", "",
AND(SM_86, PTX72))
-TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_bf16x2, "ZUiZUiZUi", "",
+TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_bf16x2, "V2yV2yV2y", "",
AND(SM_86, PTX72))
BUILTIN(__nvvm_fmax_f, "fff", "")
BUILTIN(__nvvm_fmax_ftz_f, "fff", "")
@@ -323,10 +360,10 @@ TARGET_BUILTIN(__nvvm_fma_rn_sat_f16x2, "V2hV2hV2hV2h", "", AND(SM_53, PTX42))
TARGET_BUILTIN(__nvvm_fma_rn_ftz_sat_f16x2, "V2hV2hV2hV2h", "", AND(SM_53, PTX42))
TARGET_BUILTIN(__nvvm_fma_rn_relu_f16x2, "V2hV2hV2hV2h", "", AND(SM_80, PTX70))
TARGET_BUILTIN(__nvvm_fma_rn_ftz_relu_f16x2, "V2hV2hV2hV2h", "", AND(SM_80, PTX70))
-TARGET_BUILTIN(__nvvm_fma_rn_bf16, "UsUsUsUs", "", AND(SM_80, PTX70))
-TARGET_BUILTIN(__nvvm_fma_rn_relu_bf16, "UsUsUsUs", "", AND(SM_80, PTX70))
-TARGET_BUILTIN(__nvvm_fma_rn_bf16x2, "ZUiZUiZUiZUi", "", AND(SM_80, PTX70))
-TARGET_BUILTIN(__nvvm_fma_rn_relu_bf16x2, "ZUiZUiZUiZUi", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fma_rn_bf16, "yyyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fma_rn_relu_bf16, "yyyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fma_rn_bf16x2, "V2yV2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fma_rn_relu_bf16x2, "V2yV2yV2yV2y", "", AND(SM_80, PTX70))
BUILTIN(__nvvm_fma_rn_ftz_f, "ffff", "")
BUILTIN(__nvvm_fma_rn_f, "ffff", "")
BUILTIN(__nvvm_fma_rz_ftz_f, "ffff", "")
@@ -514,20 +551,20 @@ BUILTIN(__nvvm_ull2d_rp, "dULLi", "")
BUILTIN(__nvvm_f2h_rn_ftz, "Usf", "")
BUILTIN(__nvvm_f2h_rn, "Usf", "")
-TARGET_BUILTIN(__nvvm_ff2bf16x2_rn, "ZUiff", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_ff2bf16x2_rn_relu, "ZUiff", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_ff2bf16x2_rz, "ZUiff", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_ff2bf16x2_rz_relu, "ZUiff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2bf16x2_rn, "V2yff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2bf16x2_rn_relu, "V2yff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2bf16x2_rz, "V2yff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2bf16x2_rz_relu, "V2yff", "", AND(SM_80,PTX70))
TARGET_BUILTIN(__nvvm_ff2f16x2_rn, "V2hff", "", AND(SM_80,PTX70))
TARGET_BUILTIN(__nvvm_ff2f16x2_rn_relu, "V2hff", "", AND(SM_80,PTX70))
TARGET_BUILTIN(__nvvm_ff2f16x2_rz, "V2hff", "", AND(SM_80,PTX70))
TARGET_BUILTIN(__nvvm_ff2f16x2_rz_relu, "V2hff", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_f2bf16_rn, "ZUsf", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_f2bf16_rn_relu, "ZUsf", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_f2bf16_rz, "ZUsf", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_f2bf16_rz_relu, "ZUsf", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_f2bf16_rn, "yf", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_f2bf16_rn_relu, "yf", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_f2bf16_rz, "yf", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_f2bf16_rz_relu, "yf", "", AND(SM_80,PTX70))
TARGET_BUILTIN(__nvvm_f2tf32_rna, "ZUif", "", AND(SM_80,PTX70))
@@ -553,6 +590,11 @@ TARGET_BUILTIN(__nvvm_bar_warp_sync, "vUi", "n", PTX60)
TARGET_BUILTIN(__nvvm_barrier_sync, "vUi", "n", PTX60)
TARGET_BUILTIN(__nvvm_barrier_sync_cnt, "vUiUi", "n", PTX60)
+TARGET_BUILTIN(__nvvm_barrier_cluster_arrive, "v", "n", AND(SM_90,PTX78))
+TARGET_BUILTIN(__nvvm_barrier_cluster_arrive_relaxed, "v", "n", AND(SM_90,PTX80))
+TARGET_BUILTIN(__nvvm_barrier_cluster_wait, "v", "n", AND(SM_90,PTX78))
+TARGET_BUILTIN(__nvvm_fence_sc_cluster, "v", "n", AND(SM_90,PTX78))
+
// Shuffle
BUILTIN(__nvvm_shfl_down_i32, "iiii", "")
@@ -782,8 +824,50 @@ TARGET_BUILTIN(__nvvm_atom_sys_cas_gen_ll, "LLiLLiD*LLiLLi", "n", SM_60)
BUILTIN(__nvvm_compiler_error, "vcC*4", "n")
BUILTIN(__nvvm_compiler_warn, "vcC*4", "n")
-// __ldg. This is not implemented as a builtin by nvcc.
+BUILTIN(__nvvm_ldu_c, "ccC*", "")
+BUILTIN(__nvvm_ldu_sc, "ScScC*", "")
+BUILTIN(__nvvm_ldu_s, "ssC*", "")
+BUILTIN(__nvvm_ldu_i, "iiC*", "")
+BUILTIN(__nvvm_ldu_l, "LiLiC*", "")
+BUILTIN(__nvvm_ldu_ll, "LLiLLiC*", "")
+
+BUILTIN(__nvvm_ldu_uc, "UcUcC*", "")
+BUILTIN(__nvvm_ldu_us, "UsUsC*", "")
+BUILTIN(__nvvm_ldu_ui, "UiUiC*", "")
+BUILTIN(__nvvm_ldu_ul, "ULiULiC*", "")
+BUILTIN(__nvvm_ldu_ull, "ULLiULLiC*", "")
+
+BUILTIN(__nvvm_ldu_h, "hhC*", "")
+BUILTIN(__nvvm_ldu_f, "ffC*", "")
+BUILTIN(__nvvm_ldu_d, "ddC*", "")
+
+BUILTIN(__nvvm_ldu_c2, "E2cE2cC*", "")
+BUILTIN(__nvvm_ldu_sc2, "E2ScE2ScC*", "")
+BUILTIN(__nvvm_ldu_c4, "E4cE4cC*", "")
+BUILTIN(__nvvm_ldu_sc4, "E4ScE4ScC*", "")
+BUILTIN(__nvvm_ldu_s2, "E2sE2sC*", "")
+BUILTIN(__nvvm_ldu_s4, "E4sE4sC*", "")
+BUILTIN(__nvvm_ldu_i2, "E2iE2iC*", "")
+BUILTIN(__nvvm_ldu_i4, "E4iE4iC*", "")
+BUILTIN(__nvvm_ldu_l2, "E2LiE2LiC*", "")
+BUILTIN(__nvvm_ldu_ll2, "E2LLiE2LLiC*", "")
+
+BUILTIN(__nvvm_ldu_uc2, "E2UcE2UcC*", "")
+BUILTIN(__nvvm_ldu_uc4, "E4UcE4UcC*", "")
+BUILTIN(__nvvm_ldu_us2, "E2UsE2UsC*", "")
+BUILTIN(__nvvm_ldu_us4, "E4UsE4UsC*", "")
+BUILTIN(__nvvm_ldu_ui2, "E2UiE2UiC*", "")
+BUILTIN(__nvvm_ldu_ui4, "E4UiE4UiC*", "")
+BUILTIN(__nvvm_ldu_ul2, "E2ULiE2ULiC*", "")
+BUILTIN(__nvvm_ldu_ull2, "E2ULLiE2ULLiC*", "")
+
+BUILTIN(__nvvm_ldu_h2, "E2hE2hC*", "")
+BUILTIN(__nvvm_ldu_f2, "E2fE2fC*", "")
+BUILTIN(__nvvm_ldu_f4, "E4fE4fC*", "")
+BUILTIN(__nvvm_ldu_d2, "E2dE2dC*", "")
+
BUILTIN(__nvvm_ldg_c, "ccC*", "")
+BUILTIN(__nvvm_ldg_sc, "ScScC*", "")
BUILTIN(__nvvm_ldg_s, "ssC*", "")
BUILTIN(__nvvm_ldg_i, "iiC*", "")
BUILTIN(__nvvm_ldg_l, "LiLiC*", "")
@@ -795,15 +879,19 @@ BUILTIN(__nvvm_ldg_ui, "UiUiC*", "")
BUILTIN(__nvvm_ldg_ul, "ULiULiC*", "")
BUILTIN(__nvvm_ldg_ull, "ULLiULLiC*", "")
+BUILTIN(__nvvm_ldg_h, "hhC*", "")
BUILTIN(__nvvm_ldg_f, "ffC*", "")
BUILTIN(__nvvm_ldg_d, "ddC*", "")
BUILTIN(__nvvm_ldg_c2, "E2cE2cC*", "")
+BUILTIN(__nvvm_ldg_sc2, "E2ScE2ScC*", "")
BUILTIN(__nvvm_ldg_c4, "E4cE4cC*", "")
+BUILTIN(__nvvm_ldg_sc4, "E4ScE4ScC*", "")
BUILTIN(__nvvm_ldg_s2, "E2sE2sC*", "")
BUILTIN(__nvvm_ldg_s4, "E4sE4sC*", "")
BUILTIN(__nvvm_ldg_i2, "E2iE2iC*", "")
BUILTIN(__nvvm_ldg_i4, "E4iE4iC*", "")
+BUILTIN(__nvvm_ldg_l2, "E2LiE2LiC*", "")
BUILTIN(__nvvm_ldg_ll2, "E2LLiE2LLiC*", "")
BUILTIN(__nvvm_ldg_uc2, "E2UcE2UcC*", "")
@@ -812,8 +900,10 @@ BUILTIN(__nvvm_ldg_us2, "E2UsE2UsC*", "")
BUILTIN(__nvvm_ldg_us4, "E4UsE4UsC*", "")
BUILTIN(__nvvm_ldg_ui2, "E2UiE2UiC*", "")
BUILTIN(__nvvm_ldg_ui4, "E4UiE4UiC*", "")
+BUILTIN(__nvvm_ldg_ul2, "E2ULiE2ULiC*", "")
BUILTIN(__nvvm_ldg_ull2, "E2ULLiE2ULLiC*", "")
+BUILTIN(__nvvm_ldg_h2, "E2hE2hC*", "")
BUILTIN(__nvvm_ldg_f2, "E2fE2fC*", "")
BUILTIN(__nvvm_ldg_f4, "E4fE4fC*", "")
BUILTIN(__nvvm_ldg_d2, "E2dE2dC*", "")
@@ -823,6 +913,7 @@ BUILTIN(__nvvm_isspacep_const, "bvC*", "nc")
BUILTIN(__nvvm_isspacep_global, "bvC*", "nc")
BUILTIN(__nvvm_isspacep_local, "bvC*", "nc")
BUILTIN(__nvvm_isspacep_shared, "bvC*", "nc")
+TARGET_BUILTIN(__nvvm_isspacep_shared_cluster,"bvC*", "nc", AND(SM_90,PTX78))
// Builtins to support WMMA instructions on sm_70
TARGET_BUILTIN(__hmma_m16n16k16_ld_a, "vi*iC*UiIi", "", AND(SM_70,PTX60))
@@ -930,10 +1021,10 @@ TARGET_BUILTIN(__nvvm_cp_async_mbarrier_arrive_shared, "vWi*3", "", AND(SM_80,PT
TARGET_BUILTIN(__nvvm_cp_async_mbarrier_arrive_noinc, "vWi*", "", AND(SM_80,PTX70))
TARGET_BUILTIN(__nvvm_cp_async_mbarrier_arrive_noinc_shared, "vWi*3", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_4, "vv*3vC*1", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_8, "vv*3vC*1", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_16, "vv*3vC*1", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_cp_async_cg_shared_global_16, "vv*3vC*1", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_4, "vv*3vC*1.", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_8, "vv*3vC*1.", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_16, "vv*3vC*1.", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_cg_shared_global_16, "vv*3vC*1.", "", AND(SM_80,PTX70))
TARGET_BUILTIN(__nvvm_cp_async_commit_group, "v", "", AND(SM_80,PTX70))
TARGET_BUILTIN(__nvvm_cp_async_wait_group, "vIi", "", AND(SM_80,PTX70))
@@ -941,10 +1032,15 @@ TARGET_BUILTIN(__nvvm_cp_async_wait_all, "v", "", AND(SM_80,PTX70))
// bf16, bf16x2 abs, neg
-TARGET_BUILTIN(__nvvm_abs_bf16, "UsUs", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_abs_bf16x2, "ZUiZUi", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_neg_bf16, "UsUs", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_neg_bf16x2, "ZUiZUi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_abs_bf16, "yy", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_abs_bf16x2, "V2yV2y", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_neg_bf16, "yy", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_neg_bf16x2, "V2yV2y", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_mapa, "v*v*i", "", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_mapa_shared_cluster, "v*3v*3i", "", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_getctarank, "iv*", "", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_getctarank_shared_cluster, "iv*3", "", AND(SM_90,PTX78))
#undef BUILTIN
#undef TARGET_BUILTIN
@@ -974,3 +1070,5 @@ TARGET_BUILTIN(__nvvm_neg_bf16x2, "ZUiZUi", "", AND(SM_80,PTX70))
#pragma pop_macro("PTX76")
#pragma pop_macro("PTX77")
#pragma pop_macro("PTX78")
+#pragma pop_macro("PTX80")
+#pragma pop_macro("PTX81")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def
index 7b7625cf11c5..1e52d351780c 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def
@@ -19,15 +19,33 @@
// The format of this database matches clang/Basic/Builtins.def except for the
// MMA builtins that are using their own format documented below.
-#if defined(BUILTIN) && !defined(CUSTOM_BUILTIN)
-# define CUSTOM_BUILTIN(ID, INTR, TYPES, ACCUMULATE) \
- BUILTIN(__builtin_##ID, "i.", "t")
-#elif defined(CUSTOM_BUILTIN) && !defined(BUILTIN)
-# define BUILTIN(ID, TYPES, ATTRS)
+#ifndef BUILTIN
+#define BUILTIN(ID, TYPE, ATTRS)
#endif
-#define UNALIASED_CUSTOM_BUILTIN(ID, TYPES, ACCUMULATE) \
- CUSTOM_BUILTIN(ID, ID, TYPES, ACCUMULATE)
+#if defined(BUILTIN) && !defined(TARGET_BUILTIN)
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
+#ifndef CUSTOM_BUILTIN
+#define CUSTOM_BUILTIN(ID, INTR, TYPES, ACCUMULATE, FEATURE) \
+ TARGET_BUILTIN(__builtin_##ID, "i.", "t", FEATURE)
+#endif
+
+#define UNALIASED_CUSTOM_BUILTIN(ID, TYPES, ACCUMULATE, FEATURE) \
+ CUSTOM_BUILTIN(ID, ID, TYPES, ACCUMULATE, FEATURE)
+
+// GCC predefined macros to rename builtins, undef them to keep original names.
+#if defined(__GNUC__) && !defined(__clang__)
+#undef __builtin_vsx_xvnmaddadp
+#undef __builtin_vsx_xvnmaddasp
+#undef __builtin_vsx_xvmsubasp
+#undef __builtin_vsx_xvmsubadp
+#undef __builtin_vsx_xvmaddadp
+#undef __builtin_vsx_xvnmsubasp
+#undef __builtin_vsx_xvnmsubadp
+#undef __builtin_vsx_xvmaddasp
+#endif
// XL Compatibility built-ins
BUILTIN(__builtin_ppc_popcntb, "ULiULi", "")
@@ -46,7 +64,7 @@ BUILTIN(__builtin_ppc_dcbst, "vvC*", "")
BUILTIN(__builtin_ppc_dcbt, "vv*", "")
BUILTIN(__builtin_ppc_dcbtst, "vv*", "")
BUILTIN(__builtin_ppc_dcbz, "vv*", "")
-BUILTIN(__builtin_ppc_icbt, "vv*", "")
+TARGET_BUILTIN(__builtin_ppc_icbt, "vv*", "", "isa-v207-instructions")
BUILTIN(__builtin_ppc_fric, "dd", "")
BUILTIN(__builtin_ppc_frim, "dd", "")
BUILTIN(__builtin_ppc_frims, "ff", "")
@@ -74,12 +92,12 @@ BUILTIN(__builtin_ppc_fetch_and_swap, "UiUiD*Ui", "")
BUILTIN(__builtin_ppc_fetch_and_swaplp, "ULiULiD*ULi", "")
BUILTIN(__builtin_ppc_ldarx, "LiLiD*", "")
BUILTIN(__builtin_ppc_lwarx, "iiD*", "")
-BUILTIN(__builtin_ppc_lharx, "ssD*", "")
-BUILTIN(__builtin_ppc_lbarx, "ccD*", "")
+TARGET_BUILTIN(__builtin_ppc_lharx, "ssD*", "", "isa-v207-instructions")
+TARGET_BUILTIN(__builtin_ppc_lbarx, "ccD*", "", "isa-v207-instructions")
BUILTIN(__builtin_ppc_stdcx, "iLiD*Li", "")
BUILTIN(__builtin_ppc_stwcx, "iiD*i", "")
-BUILTIN(__builtin_ppc_sthcx, "isD*s", "")
-BUILTIN(__builtin_ppc_stbcx, "icD*i", "")
+TARGET_BUILTIN(__builtin_ppc_sthcx, "isD*s", "", "isa-v207-instructions")
+TARGET_BUILTIN(__builtin_ppc_stbcx, "icD*i", "", "isa-v207-instructions")
BUILTIN(__builtin_ppc_tdw, "vLLiLLiIUi", "")
BUILTIN(__builtin_ppc_tw, "viiIUi", "")
BUILTIN(__builtin_ppc_trap, "vi", "")
@@ -96,26 +114,27 @@ BUILTIN(__builtin_ppc_swdiv_nochk, "ddd", "")
BUILTIN(__builtin_ppc_swdivs_nochk, "fff", "")
BUILTIN(__builtin_ppc_alignx, "vIivC*", "nc")
BUILTIN(__builtin_ppc_rdlam, "UWiUWiUWiUWIi", "nc")
-BUILTIN(__builtin_ppc_compare_exp_uo, "idd", "")
-BUILTIN(__builtin_ppc_compare_exp_lt, "idd", "")
-BUILTIN(__builtin_ppc_compare_exp_gt, "idd", "")
-BUILTIN(__builtin_ppc_compare_exp_eq, "idd", "")
-BUILTIN(__builtin_ppc_test_data_class, "idIi", "t")
+TARGET_BUILTIN(__builtin_ppc_compare_exp_uo, "idd", "", "isa-v30-instructions,vsx")
+TARGET_BUILTIN(__builtin_ppc_compare_exp_lt, "idd", "", "isa-v30-instructions,vsx")
+TARGET_BUILTIN(__builtin_ppc_compare_exp_gt, "idd", "", "isa-v30-instructions,vsx")
+TARGET_BUILTIN(__builtin_ppc_compare_exp_eq, "idd", "", "isa-v30-instructions,vsx")
+TARGET_BUILTIN(__builtin_ppc_test_data_class, "idIi", "t", "isa-v30-instructions,vsx")
BUILTIN(__builtin_ppc_swdiv, "ddd", "")
BUILTIN(__builtin_ppc_swdivs, "fff", "")
// Compare
-BUILTIN(__builtin_ppc_cmpeqb, "LLiLLiLLi", "")
-BUILTIN(__builtin_ppc_cmprb, "iCIiii", "")
-BUILTIN(__builtin_ppc_setb, "LLiLLiLLi", "")
+TARGET_BUILTIN(__builtin_ppc_cmpeqb, "LLiLLiLLi", "", "isa-v30-instructions")
+TARGET_BUILTIN(__builtin_ppc_cmprb, "iCIiii", "", "isa-v30-instructions")
+TARGET_BUILTIN(__builtin_ppc_setb, "LLiLLiLLi", "", "isa-v30-instructions")
BUILTIN(__builtin_ppc_cmpb, "LLiLLiLLi", "")
// Multiply
BUILTIN(__builtin_ppc_mulhd, "LLiLiLi", "")
BUILTIN(__builtin_ppc_mulhdu, "ULLiULiULi", "")
BUILTIN(__builtin_ppc_mulhw, "iii", "")
BUILTIN(__builtin_ppc_mulhwu, "UiUiUi", "")
-BUILTIN(__builtin_ppc_maddhd, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_ppc_maddhdu, "ULLiULLiULLiULLi", "")
-BUILTIN(__builtin_ppc_maddld, "LLiLLiLLiLLi", "")
+TARGET_BUILTIN(__builtin_ppc_maddhd, "LLiLLiLLiLLi", "", "isa-v30-instructions")
+TARGET_BUILTIN(__builtin_ppc_maddhdu, "ULLiULLiULLiULLi", "",
+ "isa-v30-instructions")
+TARGET_BUILTIN(__builtin_ppc_maddld, "LLiLLiLLiLLi", "", "isa-v30-instructions")
// Rotate
BUILTIN(__builtin_ppc_rlwnm, "UiUiUiIUi", "")
BUILTIN(__builtin_ppc_rlwimi, "UiUiUiIUiIUi", "")
@@ -123,18 +142,18 @@ BUILTIN(__builtin_ppc_rldimi, "ULLiULLiULLiIUiIULLi", "")
// load
BUILTIN(__builtin_ppc_load2r, "UsUs*", "")
BUILTIN(__builtin_ppc_load4r, "UiUi*", "")
-BUILTIN(__builtin_ppc_load8r, "ULLiULLi*", "")
+TARGET_BUILTIN(__builtin_ppc_load8r, "ULLiULLi*", "", "isa-v206-instructions")
// store
BUILTIN(__builtin_ppc_store2r, "vUiUs*", "")
BUILTIN(__builtin_ppc_store4r, "vUiUi*", "")
-BUILTIN(__builtin_ppc_store8r, "vULLiULLi*", "")
-BUILTIN(__builtin_ppc_extract_exp, "Uid", "")
-BUILTIN(__builtin_ppc_extract_sig, "ULLid", "")
+TARGET_BUILTIN(__builtin_ppc_store8r, "vULLiULLi*", "", "isa-v206-instructions")
+TARGET_BUILTIN(__builtin_ppc_extract_exp, "Uid", "", "power9-vector")
+TARGET_BUILTIN(__builtin_ppc_extract_sig, "ULLid", "", "power9-vector")
BUILTIN(__builtin_ppc_mtfsb0, "vUIi", "")
BUILTIN(__builtin_ppc_mtfsb1, "vUIi", "")
BUILTIN(__builtin_ppc_mtfsf, "vUIiUi", "")
BUILTIN(__builtin_ppc_mtfsfi, "vUIiUIi", "")
-BUILTIN(__builtin_ppc_insert_exp, "ddULLi", "")
+TARGET_BUILTIN(__builtin_ppc_insert_exp, "ddULLi", "", "power9-vector")
BUILTIN(__builtin_ppc_fmsub, "dddd", "")
BUILTIN(__builtin_ppc_fmsubs, "ffff", "")
BUILTIN(__builtin_ppc_fnmadd, "dddd", "")
@@ -145,13 +164,13 @@ BUILTIN(__builtin_ppc_fre, "dd", "")
BUILTIN(__builtin_ppc_fres, "ff", "")
BUILTIN(__builtin_ppc_dcbtstt, "vv*", "")
BUILTIN(__builtin_ppc_dcbtt, "vv*", "")
-BUILTIN(__builtin_ppc_mftbu, "Ui","")
+BUILTIN(__builtin_ppc_mftbu, "Ui", "")
BUILTIN(__builtin_ppc_mfmsr, "Ui", "")
BUILTIN(__builtin_ppc_mfspr, "ULiIi", "")
BUILTIN(__builtin_ppc_mtmsr, "vUi", "")
BUILTIN(__builtin_ppc_mtspr, "vIiULi", "")
BUILTIN(__builtin_ppc_stfiw, "viC*d", "")
-BUILTIN(__builtin_ppc_addex, "LLiLLiLLiCIi", "")
+TARGET_BUILTIN(__builtin_ppc_addex, "LLiLLiLLiCIi", "", "isa-v30-instructions")
// select
BUILTIN(__builtin_ppc_maxfe, "LdLdLdLd.", "t")
BUILTIN(__builtin_ppc_maxfl, "dddd.", "t")
@@ -166,595 +185,696 @@ BUILTIN(__builtin_ppc_fnabss, "ff", "")
BUILTIN(__builtin_ppc_get_timebase, "ULLi", "n")
// This is just a placeholder, the types and attributes are wrong.
-BUILTIN(__builtin_altivec_vaddcuw, "V4UiV4UiV4Ui", "")
-
-BUILTIN(__builtin_altivec_vaddsbs, "V16ScV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vaddubs, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vaddshs, "V8SsV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vadduhs, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vaddsws, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vadduws, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vaddeuqm, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi","")
-BUILTIN(__builtin_altivec_vaddcuq, "V1ULLLiV1ULLLiV1ULLLi","")
-BUILTIN(__builtin_altivec_vaddecuq, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi","")
-BUILTIN(__builtin_altivec_vadduqm, "V1ULLLiV16UcV16Uc","")
-BUILTIN(__builtin_altivec_vaddeuqm_c, "V16UcV16UcV16UcV16Uc","")
-BUILTIN(__builtin_altivec_vaddcuq_c, "V16UcV16UcV16Uc","")
-BUILTIN(__builtin_altivec_vaddecuq_c, "V16UcV16UcV16UcV16Uc","")
-
-BUILTIN(__builtin_altivec_vsubsbs, "V16ScV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vsububs, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vsubshs, "V8SsV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vsubuhs, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vsubsws, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vsubuws, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vsubeuqm, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi","")
-BUILTIN(__builtin_altivec_vsubcuq, "V1ULLLiV1ULLLiV1ULLLi","")
-BUILTIN(__builtin_altivec_vsubecuq, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi","")
-BUILTIN(__builtin_altivec_vsubuqm, "V1ULLLiV16UcV16Uc","")
-BUILTIN(__builtin_altivec_vsubeuqm_c, "V16UcV16UcV16UcV16Uc","")
-BUILTIN(__builtin_altivec_vsubcuq_c, "V16UcV16UcV16Uc","")
-BUILTIN(__builtin_altivec_vsubecuq_c, "V16UcV16UcV16UcV16Uc","")
-
-BUILTIN(__builtin_altivec_vavgsb, "V16ScV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vavgub, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vavgsh, "V8SsV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vavguh, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vavgsw, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vavguw, "V4UiV4UiV4Ui", "")
-
-BUILTIN(__builtin_altivec_vrfip, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcfsx, "V4fV4SiIi", "")
-BUILTIN(__builtin_altivec_vcfux, "V4fV4UiIi", "")
-BUILTIN(__builtin_altivec_vctsxs, "V4SiV4fIi", "")
-BUILTIN(__builtin_altivec_vctuxs, "V4UiV4fIi", "")
-
-BUILTIN(__builtin_altivec_dss, "vUIi", "")
-BUILTIN(__builtin_altivec_dssall, "v", "")
-BUILTIN(__builtin_altivec_dst, "vvC*iUIi", "")
-BUILTIN(__builtin_altivec_dstt, "vvC*iUIi", "")
-BUILTIN(__builtin_altivec_dstst, "vvC*iUIi", "")
-BUILTIN(__builtin_altivec_dststt, "vvC*iUIi", "")
-
-BUILTIN(__builtin_altivec_vexptefp, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_vrfim, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_lvx, "V4iLivC*", "")
-BUILTIN(__builtin_altivec_lvxl, "V4iLivC*", "")
-BUILTIN(__builtin_altivec_lvebx, "V16cLivC*", "")
-BUILTIN(__builtin_altivec_lvehx, "V8sLivC*", "")
-BUILTIN(__builtin_altivec_lvewx, "V4iLivC*", "")
-
-BUILTIN(__builtin_altivec_vlogefp, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_lvsl, "V16cUcvC*", "")
-BUILTIN(__builtin_altivec_lvsr, "V16cUcvC*", "")
-
-BUILTIN(__builtin_altivec_vmaddfp, "V4fV4fV4fV4f", "")
-BUILTIN(__builtin_altivec_vmhaddshs, "V8sV8sV8sV8s", "")
-BUILTIN(__builtin_altivec_vmhraddshs, "V8sV8sV8sV8s", "")
-
-BUILTIN(__builtin_altivec_vmsumubm, "V4UiV16UcV16UcV4Ui", "")
-BUILTIN(__builtin_altivec_vmsummbm, "V4SiV16ScV16UcV4Si", "")
-BUILTIN(__builtin_altivec_vmsumuhm, "V4UiV8UsV8UsV4Ui", "")
-BUILTIN(__builtin_altivec_vmsumshm, "V4SiV8SsV8SsV4Si", "")
-BUILTIN(__builtin_altivec_vmsumuhs, "V4UiV8UsV8UsV4Ui", "")
-BUILTIN(__builtin_altivec_vmsumshs, "V4SiV8SsV8SsV4Si", "")
-
-BUILTIN(__builtin_altivec_vmuleub, "V8UsV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vmulesb, "V8SsV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vmuleuh, "V4UiV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vmulesh, "V4SiV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vmuleuw, "V2ULLiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vmulesw, "V2SLLiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vmuloub, "V8UsV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vmulosb, "V8SsV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vmulouh, "V4UiV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vmulosh, "V4SiV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vmulouw, "V2ULLiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vmulosw, "V2SLLiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vmuleud, "V1ULLLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vmulesd, "V1SLLLiV2SLLiV2SLLi", "")
-BUILTIN(__builtin_altivec_vmuloud, "V1ULLLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vmulosd, "V1SLLLiV2SLLiV2SLLi", "")
-BUILTIN(__builtin_altivec_vmsumcud, "V1ULLLiV2ULLiV2ULLiV1ULLLi", "")
-
-BUILTIN(__builtin_altivec_vnmsubfp, "V4fV4fV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vpkpx, "V8sV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vpkuhus, "V16UcV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vpkshss, "V16ScV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vpkuwus, "V8UsV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vpkswss, "V8SsV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vpkshus, "V16UcV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vpkswus, "V8UsV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vpksdss, "V4SiV2SLLiV2SLLi", "")
-BUILTIN(__builtin_altivec_vpksdus, "V4UiV2SLLiV2SLLi", "")
-BUILTIN(__builtin_altivec_vpkudus, "V4UiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vpkudum, "V4UiV2ULLiV2ULLi", "")
-
-BUILTIN(__builtin_altivec_vperm_4si, "V4iV4iV4iV16Uc", "")
-
-BUILTIN(__builtin_altivec_stvx, "vV4iLiv*", "")
-BUILTIN(__builtin_altivec_stvxl, "vV4iLiv*", "")
-BUILTIN(__builtin_altivec_stvebx, "vV16cLiv*", "")
-BUILTIN(__builtin_altivec_stvehx, "vV8sLiv*", "")
-BUILTIN(__builtin_altivec_stvewx, "vV4iLiv*", "")
-
-BUILTIN(__builtin_altivec_vcmpbfp, "V4iV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcmpgefp, "V4iV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcmpequb, "V16cV16cV16c", "")
-BUILTIN(__builtin_altivec_vcmpequh, "V8sV8sV8s", "")
-BUILTIN(__builtin_altivec_vcmpequw, "V4iV4iV4i", "")
-BUILTIN(__builtin_altivec_vcmpequd, "V2LLiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vcmpeqfp, "V4iV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcmpneb, "V16cV16cV16c", "")
-BUILTIN(__builtin_altivec_vcmpneh, "V8sV8sV8s", "")
-BUILTIN(__builtin_altivec_vcmpnew, "V4iV4iV4i", "")
-
-BUILTIN(__builtin_altivec_vcmpnezb, "V16cV16cV16c", "")
-BUILTIN(__builtin_altivec_vcmpnezh, "V8sV8sV8s", "")
-BUILTIN(__builtin_altivec_vcmpnezw, "V4iV4iV4i", "")
-
-BUILTIN(__builtin_altivec_vcmpgtsb, "V16cV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vcmpgtub, "V16cV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vcmpgtsh, "V8sV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vcmpgtuh, "V8sV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vcmpgtsw, "V4iV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vcmpgtuw, "V4iV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vcmpgtsd, "V2LLiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vcmpgtud, "V2LLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vcmpgtfp, "V4iV4fV4f", "")
+TARGET_BUILTIN(__builtin_altivec_vaddcuw, "V4UiV4UiV4Ui", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vaddsbs, "V16ScV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vaddubs, "V16UcV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vaddshs, "V8SsV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vadduhs, "V8UsV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vaddsws, "V4SiV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vadduws, "V4UiV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vaddeuqm, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vaddcuq, "V1ULLLiV1ULLLiV1ULLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vaddecuq, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vadduqm, "V1ULLLiV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vaddeuqm_c, "V16UcV16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vaddcuq_c, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vaddecuq_c, "V16UcV16UcV16UcV16Uc", "",
+ "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vsubsbs, "V16ScV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsububs, "V16UcV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsubshs, "V8SsV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsubuhs, "V8UsV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsubsws, "V4SiV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsubuws, "V4UiV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsubeuqm, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vsubcuq, "V1ULLLiV1ULLLiV1ULLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vsubecuq, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vsubuqm, "V1ULLLiV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vsubeuqm_c, "V16UcV16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vsubcuq_c, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vsubecuq_c, "V16UcV16UcV16UcV16Uc", "",
+ "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vavgsb, "V16ScV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vavgub, "V16UcV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vavgsh, "V8SsV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vavguh, "V8UsV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vavgsw, "V4SiV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vavguw, "V4UiV4UiV4Ui", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrfip, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcfsx, "V4fV4SiIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcfux, "V4fV4UiIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vctsxs, "V4SiV4fIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vctuxs, "V4UiV4fIi", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_dss, "vUIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_dssall, "v", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_dst, "vvC*iUIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_dstt, "vvC*iUIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_dstst, "vvC*iUIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_dststt, "vvC*iUIi", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vexptefp, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrfim, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_lvx, "V4iLivC*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_lvxl, "V4iLivC*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_lvebx, "V16cLivC*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_lvehx, "V8sLivC*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_lvewx, "V4iLivC*", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vlogefp, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_lvsl, "V16cUcvC*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_lvsr, "V16cUcvC*", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vmaddfp, "V4fV4fV4fV4f", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmhaddshs, "V8sV8sV8sV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmhraddshs, "V8sV8sV8sV8s", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vmsumubm, "V4UiV16UcV16UcV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmsummbm, "V4SiV16ScV16UcV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmsumuhm, "V4UiV8UsV8UsV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmsumshm, "V4SiV8SsV8SsV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmsumuhs, "V4UiV8UsV8UsV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmsumshs, "V4SiV8SsV8SsV4Si", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vmuleub, "V8UsV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmulesb, "V8SsV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmuleuh, "V4UiV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmulesh, "V4SiV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmuleuw, "V2ULLiV4UiV4Ui", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulesw, "V2SLLiV4SiV4Si", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vmuloub, "V8UsV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmulosb, "V8SsV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmulouh, "V4UiV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmulosh, "V4SiV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmulouw, "V2ULLiV4UiV4Ui", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulosw, "V2SLLiV4SiV4Si", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vmuleud, "V1ULLLiV2ULLiV2ULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulesd, "V1SLLLiV2SLLiV2SLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmuloud, "V1ULLLiV2ULLiV2ULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulosd, "V1SLLLiV2SLLiV2SLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmsumcud, "V1ULLLiV2ULLiV2ULLiV1ULLLi", "",
+ "power10-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vnmsubfp, "V4fV4fV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vpkpx, "V8sV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpkuhus, "V16UcV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpkshss, "V16ScV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpkuwus, "V8UsV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpkswss, "V8SsV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpkshus, "V16UcV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpkswus, "V8UsV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpksdss, "V4SiV2SLLiV2SLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vpksdus, "V4UiV2SLLiV2SLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vpkudus, "V4UiV2ULLiV2ULLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vpkudum, "V4UiV2ULLiV2ULLi", "",
+ "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vperm_4si, "V4iV4iV4iV16Uc", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_stvx, "vV4iLiv*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_stvxl, "vV4iLiv*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_stvebx, "vV16cLiv*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_stvehx, "vV8sLiv*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_stvewx, "vV4iLiv*", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpbfp, "V4iV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpgefp, "V4iV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpequb, "V16cV16cV16c", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpequh, "V8sV8sV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpequw, "V4iV4iV4i", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpequd, "V2LLiV2LLiV2LLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpeqfp, "V4iV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpneb, "V16cV16cV16c", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpneh, "V8sV8sV8s", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpnew, "V4iV4iV4i", "", "power9-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpnezb, "V16cV16cV16c", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpnezh, "V8sV8sV8s", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpnezw, "V4iV4iV4i", "", "power9-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsb, "V16cV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtub, "V16cV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsh, "V8sV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtuh, "V8sV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsw, "V4iV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtuw, "V4iV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsd, "V2LLiV2LLiV2LLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtud, "V2LLiV2ULLiV2ULLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtfp, "V4iV4fV4f", "", "altivec")
// P10 Vector compare builtins.
-BUILTIN(__builtin_altivec_vcmpequq, "V1LLLiV1ULLLiV1ULLLi", "")
-BUILTIN(__builtin_altivec_vcmpgtsq, "V1LLLiV1SLLLiV1SLLLi", "")
-BUILTIN(__builtin_altivec_vcmpgtuq, "V1LLLiV1ULLLiV1ULLLi", "")
-BUILTIN(__builtin_altivec_vcmpequq_p, "iiV1ULLLiV1LLLi", "")
-BUILTIN(__builtin_altivec_vcmpgtsq_p, "iiV1SLLLiV1SLLLi", "")
-BUILTIN(__builtin_altivec_vcmpgtuq_p, "iiV1ULLLiV1ULLLi", "")
-
-BUILTIN(__builtin_altivec_vmaxsb, "V16ScV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vmaxub, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vmaxsh, "V8SsV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vmaxuh, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vmaxsw, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vmaxuw, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vmaxsd, "V2LLiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vmaxud, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vmaxfp, "V4fV4fV4f", "")
-
-BUILTIN(__builtin_altivec_mfvscr, "V8Us", "")
-
-BUILTIN(__builtin_altivec_vminsb, "V16ScV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vminub, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vminsh, "V8SsV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vminuh, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vminsw, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vminuw, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vminsd, "V2LLiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vminud, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vminfp, "V4fV4fV4f", "")
-
-BUILTIN(__builtin_altivec_mtvscr, "vV4i", "")
-
-BUILTIN(__builtin_altivec_vrefp, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_vrlb, "V16cV16cV16Uc", "")
-BUILTIN(__builtin_altivec_vrlh, "V8sV8sV8Us", "")
-BUILTIN(__builtin_altivec_vrlw, "V4iV4iV4Ui", "")
-BUILTIN(__builtin_altivec_vrld, "V2LLiV2LLiV2ULLi", "")
-
-BUILTIN(__builtin_altivec_vsel_4si, "V4iV4iV4iV4Ui", "")
-
-BUILTIN(__builtin_altivec_vsl, "V4iV4iV4i", "")
-BUILTIN(__builtin_altivec_vslo, "V4iV4iV4i", "")
-
-BUILTIN(__builtin_altivec_vsrab, "V16cV16cV16Uc", "")
-BUILTIN(__builtin_altivec_vsrah, "V8sV8sV8Us", "")
-BUILTIN(__builtin_altivec_vsraw, "V4iV4iV4Ui", "")
-
-BUILTIN(__builtin_altivec_vsr, "V4iV4iV4i", "")
-BUILTIN(__builtin_altivec_vsro, "V4iV4iV4i", "")
+TARGET_BUILTIN(__builtin_altivec_vcmpequq, "V1LLLiV1ULLLiV1ULLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsq, "V1LLLiV1SLLLiV1SLLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtuq, "V1LLLiV1ULLLiV1ULLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpequq_p, "iiV1ULLLiV1LLLi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsq_p, "iiV1SLLLiV1SLLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtuq_p, "iiV1ULLLiV1ULLLi", "",
+ "power10-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vmaxsb, "V16ScV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmaxub, "V16UcV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmaxsh, "V8SsV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmaxuh, "V8UsV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmaxsw, "V4SiV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmaxuw, "V4UiV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmaxsd, "V2LLiV2LLiV2LLi", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vmaxud, "V2ULLiV2ULLiV2ULLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vmaxfp, "V4fV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_mfvscr, "V8Us", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vminsb, "V16ScV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vminub, "V16UcV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vminsh, "V8SsV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vminuh, "V8UsV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vminsw, "V4SiV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vminuw, "V4UiV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vminsd, "V2LLiV2LLiV2LLi", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vminud, "V2ULLiV2ULLiV2ULLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vminfp, "V4fV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_mtvscr, "vV4i", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrefp, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrlb, "V16cV16cV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vrlh, "V8sV8sV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vrlw, "V4iV4iV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vrld, "V2LLiV2LLiV2ULLi", "", "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vsel_4si, "V4iV4iV4iV4Ui", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsl, "V4iV4iV4i", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vslo, "V4iV4iV4i", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsrab, "V16cV16cV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsrah, "V8sV8sV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsraw, "V4iV4iV4Ui", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsr, "V4iV4iV4i", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsro, "V4iV4iV4i", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrfin, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrsqrtefp, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsubcuw, "V4UiV4UiV4Ui", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsum4sbs, "V4SiV16ScV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsum4ubs, "V4UiV16UcV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsum4shs, "V4SiV8SsV4Si", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsum2sws, "V4SiV4SiV4Si", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsumsws, "V4SiV4SiV4Si", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrfiz, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vupkhsb, "V8sV16c", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vupkhpx, "V4UiV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vupkhsh, "V4iV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vupkhsw, "V2LLiV4i", "", "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vupklsb, "V8sV16c", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vupklpx, "V4UiV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vupklsh, "V4iV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vupklsw, "V2LLiV4i", "", "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpbfp_p, "iiV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpgefp_p, "iiV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpequb_p, "iiV16cV16c", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpequh_p, "iiV8sV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpequw_p, "iiV4iV4i", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpequd_p, "iiV2LLiV2LLi", "", "vsx")
+TARGET_BUILTIN(__builtin_altivec_vcmpeqfp_p, "iiV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpneb_p, "iiV16cV16c", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpneh_p, "iiV8sV8s", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpnew_p, "iiV4iV4i", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpned_p, "iiV2LLiV2LLi", "", "vsx")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsb_p, "iiV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtub_p, "iiV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsh_p, "iiV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtuh_p, "iiV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsw_p, "iiV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtuw_p, "iiV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsd_p, "iiV2LLiV2LLi", "", "vsx")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtud_p, "iiV2ULLiV2ULLi", "", "vsx")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtfp_p, "iiV4fV4f", "", "altivec")
-BUILTIN(__builtin_altivec_vrfin, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_vrsqrtefp, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_vsubcuw, "V4UiV4UiV4Ui", "")
-
-BUILTIN(__builtin_altivec_vsum4sbs, "V4SiV16ScV4Si", "")
-BUILTIN(__builtin_altivec_vsum4ubs, "V4UiV16UcV4Ui", "")
-BUILTIN(__builtin_altivec_vsum4shs, "V4SiV8SsV4Si", "")
-
-BUILTIN(__builtin_altivec_vsum2sws, "V4SiV4SiV4Si", "")
-
-BUILTIN(__builtin_altivec_vsumsws, "V4SiV4SiV4Si", "")
-
-BUILTIN(__builtin_altivec_vrfiz, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_vupkhsb, "V8sV16c", "")
-BUILTIN(__builtin_altivec_vupkhpx, "V4UiV8s", "")
-BUILTIN(__builtin_altivec_vupkhsh, "V4iV8s", "")
-BUILTIN(__builtin_altivec_vupkhsw, "V2LLiV4i", "")
-
-BUILTIN(__builtin_altivec_vupklsb, "V8sV16c", "")
-BUILTIN(__builtin_altivec_vupklpx, "V4UiV8s", "")
-BUILTIN(__builtin_altivec_vupklsh, "V4iV8s", "")
-BUILTIN(__builtin_altivec_vupklsw, "V2LLiV4i", "")
-
-BUILTIN(__builtin_altivec_vcmpbfp_p, "iiV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcmpgefp_p, "iiV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcmpequb_p, "iiV16cV16c", "")
-BUILTIN(__builtin_altivec_vcmpequh_p, "iiV8sV8s", "")
-BUILTIN(__builtin_altivec_vcmpequw_p, "iiV4iV4i", "")
-BUILTIN(__builtin_altivec_vcmpequd_p, "iiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vcmpeqfp_p, "iiV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcmpneb_p, "iiV16cV16c", "")
-BUILTIN(__builtin_altivec_vcmpneh_p, "iiV8sV8s", "")
-BUILTIN(__builtin_altivec_vcmpnew_p, "iiV4iV4i", "")
-BUILTIN(__builtin_altivec_vcmpned_p, "iiV2LLiV2LLi", "")
-
-BUILTIN(__builtin_altivec_vcmpgtsb_p, "iiV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vcmpgtub_p, "iiV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vcmpgtsh_p, "iiV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vcmpgtuh_p, "iiV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vcmpgtsw_p, "iiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vcmpgtuw_p, "iiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vcmpgtsd_p, "iiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vcmpgtud_p, "iiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vcmpgtfp_p, "iiV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vgbbd, "V16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vbpermq, "V2ULLiV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vbpermd, "V2ULLiV2ULLiV16Uc", "")
+TARGET_BUILTIN(__builtin_altivec_vgbbd, "V16UcV16Uc", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vbpermq, "V2ULLiV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vbpermd, "V2ULLiV2ULLiV16Uc", "",
+ "power9-vector")
// P8 Crypto built-ins.
-BUILTIN(__builtin_altivec_crypto_vsbox, "V16UcV16Uc", "")
-BUILTIN(__builtin_altivec_crypto_vpermxor, "V16UcV16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_crypto_vpermxor_be, "V16UcV16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_crypto_vshasigmaw, "V4UiV4UiIiIi", "")
-BUILTIN(__builtin_altivec_crypto_vshasigmad, "V2ULLiV2ULLiIiIi", "")
-BUILTIN(__builtin_altivec_crypto_vcipher, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_crypto_vcipherlast, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_crypto_vncipher, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_crypto_vncipherlast, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_crypto_vpmsumb, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_crypto_vpmsumh, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_crypto_vpmsumw, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_crypto_vpmsumd, "V2ULLiV2ULLiV2ULLi", "")
-
-BUILTIN(__builtin_altivec_vclzb, "V16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vclzh, "V8UsV8Us", "")
-BUILTIN(__builtin_altivec_vclzw, "V4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vclzd, "V2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vctzb, "V16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vctzh, "V8UsV8Us", "")
-BUILTIN(__builtin_altivec_vctzw, "V4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vctzd, "V2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_altivec_crypto_vsbox, "V16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vpermxor, "V16UcV16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vpermxor_be, "V16UcV16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vshasigmaw, "V4UiV4UiIiIi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vshasigmad, "V2ULLiV2ULLiIiIi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vcipher, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vcipherlast, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vncipher, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vncipherlast, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vpmsumb, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vpmsumh, "V8UsV8UsV8Us", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vpmsumw, "V4UiV4UiV4Ui", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vpmsumd, "V2ULLiV2ULLiV2ULLi", "",
+ "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vclzb, "V16UcV16Uc", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vclzh, "V8UsV8Us", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vclzw, "V4UiV4Ui", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vclzd, "V2ULLiV2ULLi", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vctzb, "V16UcV16Uc", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vctzh, "V8UsV8Us", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vctzw, "V4UiV4Ui", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vctzd, "V2ULLiV2ULLi", "", "power9-vector")
// P8 BCD builtins.
-BUILTIN(__builtin_ppc_bcdadd, "V16UcV16UcV16UcIi", "")
-BUILTIN(__builtin_ppc_bcdsub, "V16UcV16UcV16UcIi", "")
-BUILTIN(__builtin_ppc_bcdadd_p, "iiV16UcV16Uc", "")
-BUILTIN(__builtin_ppc_bcdsub_p, "iiV16UcV16Uc", "")
-
-BUILTIN(__builtin_altivec_vclzlsbb, "SiV16Uc", "")
-BUILTIN(__builtin_altivec_vctzlsbb, "SiV16Uc", "")
-BUILTIN(__builtin_altivec_vprtybw, "V4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vprtybd, "V2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vprtybq, "V1ULLLiV1ULLLi", "")
+TARGET_BUILTIN(__builtin_ppc_bcdadd, "V16UcV16UcV16UcIi", "",
+ "isa-v207-instructions")
+TARGET_BUILTIN(__builtin_ppc_bcdsub, "V16UcV16UcV16UcIi", "",
+ "isa-v207-instructions")
+TARGET_BUILTIN(__builtin_ppc_bcdadd_p, "iiV16UcV16Uc", "",
+ "isa-v207-instructions")
+TARGET_BUILTIN(__builtin_ppc_bcdsub_p, "iiV16UcV16Uc", "",
+ "isa-v207-instructions")
+
+TARGET_BUILTIN(__builtin_altivec_vclzlsbb, "SiV16Uc", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vctzlsbb, "SiV16Uc", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vprtybw, "V4UiV4Ui", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vprtybd, "V2ULLiV2ULLi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vprtybq, "V1ULLLiV1ULLLi", "", "power9-vector")
// Vector population count built-ins
-BUILTIN(__builtin_altivec_vpopcntb, "V16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vpopcnth, "V8UsV8Us", "")
-BUILTIN(__builtin_altivec_vpopcntw, "V4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vpopcntd, "V2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_altivec_vpopcntb, "V16UcV16Uc", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vpopcnth, "V8UsV8Us", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vpopcntw, "V4UiV4Ui", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vpopcntd, "V2ULLiV2ULLi", "", "power8-vector")
// Absolute difference built-ins
-BUILTIN(__builtin_altivec_vabsdub, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vabsduh, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vabsduw, "V4UiV4UiV4Ui", "")
+TARGET_BUILTIN(__builtin_altivec_vabsdub, "V16UcV16UcV16Uc", "",
+ "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vabsduh, "V8UsV8UsV8Us", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vabsduw, "V4UiV4UiV4Ui", "", "power9-vector")
// P9 Shift built-ins.
-BUILTIN(__builtin_altivec_vslv, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vsrv, "V16UcV16UcV16Uc", "")
+TARGET_BUILTIN(__builtin_altivec_vslv, "V16UcV16UcV16Uc", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vsrv, "V16UcV16UcV16Uc", "", "power9-vector")
// P9 Vector rotate built-ins
-BUILTIN(__builtin_altivec_vrlwmi, "V4UiV4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vrldmi, "V2ULLiV2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vrlwnm, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vrldnm, "V2ULLiV2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_altivec_vrlwmi, "V4UiV4UiV4UiV4Ui", "",
+ "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vrldmi, "V2ULLiV2ULLiV2ULLiV2ULLi", "",
+ "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vrlwnm, "V4UiV4UiV4Ui", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vrldnm, "V2ULLiV2ULLiV2ULLi", "",
+ "power9-vector")
// P9 Vector extend sign builtins.
-BUILTIN(__builtin_altivec_vextsb2w, "V4SiV16Sc", "")
-BUILTIN(__builtin_altivec_vextsb2d, "V2SLLiV16Sc", "")
-BUILTIN(__builtin_altivec_vextsh2w, "V4SiV8Ss", "")
-BUILTIN(__builtin_altivec_vextsh2d, "V2SLLiV8Ss", "")
-BUILTIN(__builtin_altivec_vextsw2d, "V2SLLiV4Si", "")
+TARGET_BUILTIN(__builtin_altivec_vextsb2w, "V4SiV16Sc", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vextsb2d, "V2SLLiV16Sc", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vextsh2w, "V4SiV8Ss", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vextsh2d, "V2SLLiV8Ss", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vextsw2d, "V2SLLiV4Si", "", "power9-vector")
// P10 Vector extend sign builtins.
-BUILTIN(__builtin_altivec_vextsd2q, "V1SLLLiV2SLLi", "")
+TARGET_BUILTIN(__builtin_altivec_vextsd2q, "V1SLLLiV2SLLi", "",
+ "power10-vector")
// P10 Vector Extract with Mask built-ins.
-BUILTIN(__builtin_altivec_vextractbm, "UiV16Uc", "")
-BUILTIN(__builtin_altivec_vextracthm, "UiV8Us", "")
-BUILTIN(__builtin_altivec_vextractwm, "UiV4Ui", "")
-BUILTIN(__builtin_altivec_vextractdm, "UiV2ULLi", "")
-BUILTIN(__builtin_altivec_vextractqm, "UiV1ULLLi", "")
+TARGET_BUILTIN(__builtin_altivec_vextractbm, "UiV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextracthm, "UiV8Us", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextractwm, "UiV4Ui", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextractdm, "UiV2ULLi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextractqm, "UiV1ULLLi", "", "power10-vector")
// P10 Vector Divide Extended built-ins.
-BUILTIN(__builtin_altivec_vdivesw, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vdiveuw, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vdivesd, "V2LLiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vdiveud, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vdivesq, "V1SLLLiV1SLLLiV1SLLLi", "")
-BUILTIN(__builtin_altivec_vdiveuq, "V1ULLLiV1ULLLiV1ULLLi", "")
+TARGET_BUILTIN(__builtin_altivec_vdivesw, "V4SiV4SiV4Si", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vdiveuw, "V4UiV4UiV4Ui", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vdivesd, "V2LLiV2LLiV2LLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vdiveud, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vdivesq, "V1SLLLiV1SLLLiV1SLLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vdiveuq, "V1ULLLiV1ULLLiV1ULLLi", "",
+ "power10-vector")
// P10 Vector Multiply High built-ins.
-BUILTIN(__builtin_altivec_vmulhsw, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vmulhuw, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vmulhsd, "V2LLiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vmulhud, "V2ULLiV2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_altivec_vmulhsw, "V4SiV4SiV4Si", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulhuw, "V4UiV4UiV4Ui", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulhsd, "V2LLiV2LLiV2LLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulhud, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
// P10 Vector Expand with Mask built-ins.
-BUILTIN(__builtin_altivec_vexpandbm, "V16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vexpandhm, "V8UsV8Us", "")
-BUILTIN(__builtin_altivec_vexpandwm, "V4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vexpanddm, "V2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vexpandqm, "V1ULLLiV1ULLLi", "")
+TARGET_BUILTIN(__builtin_altivec_vexpandbm, "V16UcV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vexpandhm, "V8UsV8Us", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vexpandwm, "V4UiV4Ui", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vexpanddm, "V2ULLiV2ULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vexpandqm, "V1ULLLiV1ULLLi", "",
+ "power10-vector")
// P10 Vector Count with Mask built-ins.
-BUILTIN(__builtin_altivec_vcntmbb, "ULLiV16UcUi", "")
-BUILTIN(__builtin_altivec_vcntmbh, "ULLiV8UsUi", "")
-BUILTIN(__builtin_altivec_vcntmbw, "ULLiV4UiUi", "")
-BUILTIN(__builtin_altivec_vcntmbd, "ULLiV2ULLiUi", "")
+TARGET_BUILTIN(__builtin_altivec_vcntmbb, "ULLiV16UcUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcntmbh, "ULLiV8UsUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcntmbw, "ULLiV4UiUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcntmbd, "ULLiV2ULLiUi", "", "power10-vector")
// P10 Move to VSR with Mask built-ins.
-BUILTIN(__builtin_altivec_mtvsrbm, "V16UcULLi", "")
-BUILTIN(__builtin_altivec_mtvsrhm, "V8UsULLi", "")
-BUILTIN(__builtin_altivec_mtvsrwm, "V4UiULLi", "")
-BUILTIN(__builtin_altivec_mtvsrdm, "V2ULLiULLi", "")
-BUILTIN(__builtin_altivec_mtvsrqm, "V1ULLLiULLi", "")
+TARGET_BUILTIN(__builtin_altivec_mtvsrbm, "V16UcULLi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_mtvsrhm, "V8UsULLi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_mtvsrwm, "V4UiULLi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_mtvsrdm, "V2ULLiULLi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_mtvsrqm, "V1ULLLiULLi", "", "power10-vector")
// P10 Vector Parallel Bits built-ins.
-BUILTIN(__builtin_altivec_vpdepd, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vpextd, "V2ULLiV2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_altivec_vpdepd, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vpextd, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
// P10 Vector String Isolate Built-ins.
-BUILTIN(__builtin_altivec_vstribr, "V16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vstribl, "V16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vstrihr, "V8sV8s", "")
-BUILTIN(__builtin_altivec_vstrihl, "V8sV8s", "")
-BUILTIN(__builtin_altivec_vstribr_p, "iiV16Uc", "")
-BUILTIN(__builtin_altivec_vstribl_p, "iiV16Uc", "")
-BUILTIN(__builtin_altivec_vstrihr_p, "iiV8s", "")
-BUILTIN(__builtin_altivec_vstrihl_p, "iiV8s", "")
+TARGET_BUILTIN(__builtin_altivec_vstribr, "V16UcV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstribl, "V16UcV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstrihr, "V8sV8s", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstrihl, "V8sV8s", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstribr_p, "iiV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstribl_p, "iiV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstrihr_p, "iiV8s", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstrihl_p, "iiV8s", "", "power10-vector")
// P10 Vector Centrifuge built-in.
-BUILTIN(__builtin_altivec_vcfuged, "V2ULLiV2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_altivec_vcfuged, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
// P10 Vector Gather Every N-th Bit built-in.
-BUILTIN(__builtin_altivec_vgnb, "ULLiV1ULLLiIi", "")
+TARGET_BUILTIN(__builtin_altivec_vgnb, "ULLiV1ULLLiIi", "", "power10-vector")
// P10 Vector Clear Bytes built-ins.
-BUILTIN(__builtin_altivec_vclrlb, "V16UcV16UcUi", "")
-BUILTIN(__builtin_altivec_vclrrb, "V16UcV16UcUi", "")
+TARGET_BUILTIN(__builtin_altivec_vclrlb, "V16UcV16UcUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vclrrb, "V16UcV16UcUi", "", "power10-vector")
// P10 Vector Count Leading / Trailing Zeroes under bit Mask built-ins.
-BUILTIN(__builtin_altivec_vclzdm, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vctzdm, "V2ULLiV2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_altivec_vclzdm, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vctzdm, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
// P10 Vector Shift built-ins.
-BUILTIN(__builtin_altivec_vsldbi, "V16UcV16UcV16UcIi", "")
-BUILTIN(__builtin_altivec_vsrdbi, "V16UcV16UcV16UcIi", "")
+TARGET_BUILTIN(__builtin_altivec_vsldbi, "V16UcV16UcV16UcIi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vsrdbi, "V16UcV16UcV16UcIi", "",
+ "power10-vector")
// P10 Vector Insert built-ins.
-BUILTIN(__builtin_altivec_vinsblx, "V16UcV16UcUiUi", "")
-BUILTIN(__builtin_altivec_vinsbrx, "V16UcV16UcUiUi", "")
-BUILTIN(__builtin_altivec_vinshlx, "V8UsV8UsUiUi", "")
-BUILTIN(__builtin_altivec_vinshrx, "V8UsV8UsUiUi", "")
-BUILTIN(__builtin_altivec_vinswlx, "V4UiV4UiUiUi", "")
-BUILTIN(__builtin_altivec_vinswrx, "V4UiV4UiUiUi", "")
-BUILTIN(__builtin_altivec_vinsdlx, "V2ULLiV2ULLiULLiULLi", "")
-BUILTIN(__builtin_altivec_vinsdrx, "V2ULLiV2ULLiULLiULLi", "")
-BUILTIN(__builtin_altivec_vinsbvlx, "V16UcV16UcUiV16Uc", "")
-BUILTIN(__builtin_altivec_vinsbvrx, "V16UcV16UcUiV16Uc", "")
-BUILTIN(__builtin_altivec_vinshvlx, "V8UsV8UsUiV8Us", "")
-BUILTIN(__builtin_altivec_vinshvrx, "V8UsV8UsUiV8Us", "")
-BUILTIN(__builtin_altivec_vinswvlx, "V4UiV4UiUiV4Ui", "")
-BUILTIN(__builtin_altivec_vinswvrx, "V4UiV4UiUiV4Ui", "")
-BUILTIN(__builtin_altivec_vinsw, "V16UcV16UcUiIi", "")
-BUILTIN(__builtin_altivec_vinsd, "V16UcV16UcULLiIi", "")
-BUILTIN(__builtin_altivec_vinsw_elt, "V16UcV16UcUiiC", "")
-BUILTIN(__builtin_altivec_vinsd_elt, "V16UcV16UcULLiiC", "")
+TARGET_BUILTIN(__builtin_altivec_vinsblx, "V16UcV16UcUiUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsbrx, "V16UcV16UcUiUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinshlx, "V8UsV8UsUiUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinshrx, "V8UsV8UsUiUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinswlx, "V4UiV4UiUiUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinswrx, "V4UiV4UiUiUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsdlx, "V2ULLiV2ULLiULLiULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsdrx, "V2ULLiV2ULLiULLiULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsbvlx, "V16UcV16UcUiV16Uc", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsbvrx, "V16UcV16UcUiV16Uc", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinshvlx, "V8UsV8UsUiV8Us", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinshvrx, "V8UsV8UsUiV8Us", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinswvlx, "V4UiV4UiUiV4Ui", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinswvrx, "V4UiV4UiUiV4Ui", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsw, "V16UcV16UcUiIi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsd, "V16UcV16UcULLiIi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsw_elt, "V16UcV16UcUiiC", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsd_elt, "V16UcV16UcULLiiC", "",
+ "power10-vector")
// P10 Vector Extract built-ins.
-BUILTIN(__builtin_altivec_vextdubvlx, "V2ULLiV16UcV16UcUi", "")
-BUILTIN(__builtin_altivec_vextdubvrx, "V2ULLiV16UcV16UcUi", "")
-BUILTIN(__builtin_altivec_vextduhvlx, "V2ULLiV8UsV8UsUi", "")
-BUILTIN(__builtin_altivec_vextduhvrx, "V2ULLiV8UsV8UsUi", "")
-BUILTIN(__builtin_altivec_vextduwvlx, "V2ULLiV4UiV4UiUi", "")
-BUILTIN(__builtin_altivec_vextduwvrx, "V2ULLiV4UiV4UiUi", "")
-BUILTIN(__builtin_altivec_vextddvlx, "V2ULLiV2ULLiV2ULLiUi", "")
-BUILTIN(__builtin_altivec_vextddvrx, "V2ULLiV2ULLiV2ULLiUi", "")
+TARGET_BUILTIN(__builtin_altivec_vextdubvlx, "V2ULLiV16UcV16UcUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextdubvrx, "V2ULLiV16UcV16UcUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextduhvlx, "V2ULLiV8UsV8UsUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextduhvrx, "V2ULLiV8UsV8UsUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextduwvlx, "V2ULLiV4UiV4UiUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextduwvrx, "V2ULLiV4UiV4UiUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextddvlx, "V2ULLiV2ULLiV2ULLiUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextddvrx, "V2ULLiV2ULLiV2ULLiUi", "",
+ "power10-vector")
// P10 Vector rotate built-ins.
-BUILTIN(__builtin_altivec_vrlqmi, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi", "")
-BUILTIN(__builtin_altivec_vrlqnm, "V1ULLLiV1ULLLiV1ULLLi", "")
+TARGET_BUILTIN(__builtin_altivec_vrlqmi, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vrlqnm, "V1ULLLiV1ULLLiV1ULLLi", "",
+ "power10-vector")
// VSX built-ins.
-BUILTIN(__builtin_vsx_lxvd2x, "V2dLivC*", "")
-BUILTIN(__builtin_vsx_lxvw4x, "V4iLivC*", "")
-BUILTIN(__builtin_vsx_lxvd2x_be, "V2dSLLivC*", "")
-BUILTIN(__builtin_vsx_lxvw4x_be, "V4iSLLivC*", "")
+TARGET_BUILTIN(__builtin_vsx_lxvd2x, "V2dLivC*", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_lxvw4x, "V4iLivC*", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_lxvd2x_be, "V2dSLLivC*", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_lxvw4x_be, "V4iSLLivC*", "", "vsx")
-BUILTIN(__builtin_vsx_stxvd2x, "vV2dLiv*", "")
-BUILTIN(__builtin_vsx_stxvw4x, "vV4iLiv*", "")
-BUILTIN(__builtin_vsx_stxvd2x_be, "vV2dSLLivC*", "")
-BUILTIN(__builtin_vsx_stxvw4x_be, "vV4iSLLivC*", "")
+TARGET_BUILTIN(__builtin_vsx_stxvd2x, "vV2dLiv*", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_stxvw4x, "vV4iLiv*", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_stxvd2x_be, "vV2dSLLivC*", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_stxvw4x_be, "vV4iSLLivC*", "", "vsx")
-BUILTIN(__builtin_vsx_lxvl, "V4ivC*ULLi", "")
-BUILTIN(__builtin_vsx_lxvll, "V4ivC*ULLi", "")
-BUILTIN(__builtin_vsx_stxvl, "vV4iv*ULLi", "")
-BUILTIN(__builtin_vsx_stxvll, "vV4iv*ULLi", "")
-BUILTIN(__builtin_vsx_ldrmb, "V16UcCc*Ii", "")
-BUILTIN(__builtin_vsx_strmb, "vCc*IiV16Uc", "")
+TARGET_BUILTIN(__builtin_vsx_lxvl, "V4ivC*ULLi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_lxvll, "V4ivC*ULLi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_stxvl, "vV4iv*ULLi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_stxvll, "vV4iv*ULLi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_ldrmb, "V16UcCc*Ii", "", "isa-v207-instructions")
+TARGET_BUILTIN(__builtin_vsx_strmb, "vCc*IiV16Uc", "", "isa-v207-instructions")
-BUILTIN(__builtin_vsx_xvmaxdp, "V2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvmaxsp, "V4fV4fV4f", "")
-BUILTIN(__builtin_vsx_xsmaxdp, "ddd", "")
+TARGET_BUILTIN(__builtin_vsx_xvmaxdp, "V2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvmaxsp, "V4fV4fV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xsmaxdp, "ddd", "", "vsx")
-BUILTIN(__builtin_vsx_xvmindp, "V2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvminsp, "V4fV4fV4f", "")
-BUILTIN(__builtin_vsx_xsmindp, "ddd", "")
+TARGET_BUILTIN(__builtin_vsx_xvmindp, "V2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvminsp, "V4fV4fV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xsmindp, "ddd", "", "vsx")
-BUILTIN(__builtin_vsx_xvdivdp, "V2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvdivsp, "V4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvdivdp, "V2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvdivsp, "V4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvrdpip, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvrspip, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvrdpip, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvrspip, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvcmpeqdp, "V2ULLiV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcmpeqsp, "V4UiV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcmpeqdp, "V2ULLiV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcmpeqsp, "V4UiV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvcmpeqdp_p, "iiV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcmpeqsp_p, "iiV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcmpeqdp_p, "iiV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcmpeqsp_p, "iiV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvcmpgedp, "V2ULLiV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcmpgesp, "V4UiV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgedp, "V2ULLiV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgesp, "V4UiV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvcmpgedp_p, "iiV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcmpgesp_p, "iiV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgedp_p, "iiV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgesp_p, "iiV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvcmpgtdp, "V2ULLiV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcmpgtsp, "V4UiV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgtdp, "V2ULLiV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgtsp, "V4UiV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvcmpgtdp_p, "iiV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcmpgtsp_p, "iiV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgtdp_p, "iiV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgtsp_p, "iiV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvrdpim, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvrspim, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvrdpim, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvrspim, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvrdpi, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvrspi, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvrdpi, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvrspi, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvrdpic, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvrspic, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvrdpic, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvrspic, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvrdpiz, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvrspiz, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvrdpiz, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvrspiz, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvmaddadp, "V2dV2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvmaddasp, "V4fV4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvmaddadp, "V2dV2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvmaddasp, "V4fV4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvmsubadp, "V2dV2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvmsubasp, "V4fV4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvmsubadp, "V2dV2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvmsubasp, "V4fV4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvmuldp, "V2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvmulsp, "V4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvmuldp, "V2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvmulsp, "V4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvnmaddadp, "V2dV2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvnmaddasp, "V4fV4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvnmaddadp, "V2dV2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvnmaddasp, "V4fV4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvnmsubadp, "V2dV2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvnmsubasp, "V4fV4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvnmsubadp, "V2dV2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvnmsubasp, "V4fV4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvredp, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvresp, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvredp, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvresp, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvrsqrtedp, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvrsqrtesp, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvrsqrtedp, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvrsqrtesp, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvsqrtdp, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvsqrtsp, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvsqrtdp, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvsqrtsp, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xxleqv, "V4UiV4UiV4Ui", "")
+TARGET_BUILTIN(__builtin_vsx_xxleqv, "V4UiV4UiV4Ui", "", "power8-vector")
-BUILTIN(__builtin_vsx_xvcpsgndp, "V2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcpsgnsp, "V4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcpsgndp, "V2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcpsgnsp, "V4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvabssp, "V4fV4f", "")
-BUILTIN(__builtin_vsx_xvabsdp, "V2dV2d", "")
+TARGET_BUILTIN(__builtin_vsx_xvabssp, "V4fV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvabsdp, "V2dV2d", "", "vsx")
-BUILTIN(__builtin_vsx_xxgenpcvbm, "V16UcV16Uci", "")
-BUILTIN(__builtin_vsx_xxgenpcvhm, "V8UsV8Usi", "")
-BUILTIN(__builtin_vsx_xxgenpcvwm, "V4UiV4Uii", "")
-BUILTIN(__builtin_vsx_xxgenpcvdm, "V2ULLiV2ULLii", "")
+TARGET_BUILTIN(__builtin_vsx_xxgenpcvbm, "V16UcV16Uci", "", "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xxgenpcvhm, "V8UsV8Usi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xxgenpcvwm, "V4UiV4Uii", "", "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xxgenpcvdm, "V2ULLiV2ULLii", "", "power10-vector")
// vector Insert/Extract exponent/significand builtins
-BUILTIN(__builtin_vsx_xviexpdp, "V2dV2ULLiV2ULLi", "")
-BUILTIN(__builtin_vsx_xviexpsp, "V4fV4UiV4Ui", "")
-BUILTIN(__builtin_vsx_xvxexpdp, "V2ULLiV2d", "")
-BUILTIN(__builtin_vsx_xvxexpsp, "V4UiV4f", "")
-BUILTIN(__builtin_vsx_xvxsigdp, "V2ULLiV2d", "")
-BUILTIN(__builtin_vsx_xvxsigsp, "V4UiV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xviexpdp, "V2dV2ULLiV2ULLi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xviexpsp, "V4fV4UiV4Ui", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xvxexpdp, "V2ULLiV2d", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xvxexpsp, "V4UiV4f", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xvxsigdp, "V2ULLiV2d", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xvxsigsp, "V4UiV4f", "", "power9-vector")
// Conversion builtins
-BUILTIN(__builtin_vsx_xvcvdpsxws, "V4SiV2d", "")
-BUILTIN(__builtin_vsx_xvcvdpuxws, "V4UiV2d", "")
-BUILTIN(__builtin_vsx_xvcvspsxds, "V2SLLiV4f", "")
-BUILTIN(__builtin_vsx_xvcvspuxds, "V2ULLiV4f", "")
-BUILTIN(__builtin_vsx_xvcvsxwdp, "V2dV4Si", "")
-BUILTIN(__builtin_vsx_xvcvuxwdp, "V2dV4Ui", "")
-BUILTIN(__builtin_vsx_xvcvspdp, "V2dV4f", "")
-BUILTIN(__builtin_vsx_xvcvsxdsp, "V4fV2SLLi", "")
-BUILTIN(__builtin_vsx_xvcvuxdsp, "V4fV2ULLi", "")
-BUILTIN(__builtin_vsx_xvcvdpsp, "V4fV2d", "")
-
-BUILTIN(__builtin_vsx_xvcvsphp, "V4fV4f", "")
-BUILTIN(__builtin_vsx_xvcvhpsp, "V4fV8Us", "")
-
-BUILTIN(__builtin_vsx_xvcvspbf16, "V16UcV16Uc", "")
-BUILTIN(__builtin_vsx_xvcvbf16spn, "V16UcV16Uc", "")
+TARGET_BUILTIN(__builtin_vsx_xvcvdpsxws, "V4SiV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvdpuxws, "V4UiV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvspsxds, "V2SLLiV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvspuxds, "V2ULLiV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvsxwdp, "V2dV4Si", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvuxwdp, "V2dV4Ui", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvspdp, "V2dV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvsxdsp, "V4fV2SLLi", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvuxdsp, "V4fV2ULLi", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvdpsp, "V4fV2d", "", "vsx")
+
+TARGET_BUILTIN(__builtin_vsx_xvcvsphp, "V4fV4f", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xvcvhpsp, "V4fV8Us", "", "power9-vector")
+
+TARGET_BUILTIN(__builtin_vsx_xvcvspbf16, "V16UcV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xvcvbf16spn, "V16UcV16Uc", "", "power10-vector")
// Vector Test Data Class builtins
-BUILTIN(__builtin_vsx_xvtstdcdp, "V2ULLiV2dIi", "")
-BUILTIN(__builtin_vsx_xvtstdcsp, "V4UiV4fIi", "")
+TARGET_BUILTIN(__builtin_vsx_xvtstdcdp, "V2ULLiV2dIi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xvtstdcsp, "V4UiV4fIi", "", "power9-vector")
-BUILTIN(__builtin_vsx_insertword, "V16UcV4UiV16UcIi", "")
-BUILTIN(__builtin_vsx_extractuword, "V2ULLiV16UcIi", "")
+TARGET_BUILTIN(__builtin_vsx_insertword, "V16UcV4UiV16UcIi", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_extractuword, "V2ULLiV16UcIi", "", "vsx")
-BUILTIN(__builtin_vsx_xxpermdi, "v.", "t")
-BUILTIN(__builtin_vsx_xxsldwi, "v.", "t")
+TARGET_BUILTIN(__builtin_vsx_xxpermdi, "v.", "t", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xxsldwi, "v.", "t", "vsx")
-BUILTIN(__builtin_vsx_xxeval, "V2ULLiV2ULLiV2ULLiV2ULLiIi", "")
+TARGET_BUILTIN(__builtin_vsx_xxeval, "V2ULLiV2ULLiV2ULLiV2ULLiIi", "",
+ "power10-vector")
-BUILTIN(__builtin_vsx_xvtlsbb, "iV16UcUi", "")
+TARGET_BUILTIN(__builtin_vsx_xvtlsbb, "iV16UcUi", "", "power10-vector")
-BUILTIN(__builtin_vsx_xvtdivdp, "iV2dV2d", "")
-BUILTIN(__builtin_vsx_xvtdivsp, "iV4fV4f", "")
-BUILTIN(__builtin_vsx_xvtsqrtdp, "iV2d", "")
-BUILTIN(__builtin_vsx_xvtsqrtsp, "iV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvtdivdp, "iV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvtdivsp, "iV4fV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvtsqrtdp, "iV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvtsqrtsp, "iV4f", "", "vsx")
// P10 Vector Permute Extended built-in.
-BUILTIN(__builtin_vsx_xxpermx, "V16UcV16UcV16UcV16UcIi", "")
+TARGET_BUILTIN(__builtin_vsx_xxpermx, "V16UcV16UcV16UcV16UcIi", "",
+ "power10-vector")
// P10 Vector Blend built-ins.
-BUILTIN(__builtin_vsx_xxblendvb, "V16UcV16UcV16UcV16Uc", "")
-BUILTIN(__builtin_vsx_xxblendvh, "V8UsV8UsV8UsV8Us", "")
-BUILTIN(__builtin_vsx_xxblendvw, "V4UiV4UiV4UiV4Ui", "")
-BUILTIN(__builtin_vsx_xxblendvd, "V2ULLiV2ULLiV2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_vsx_xxblendvb, "V16UcV16UcV16UcV16Uc", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xxblendvh, "V8UsV8UsV8UsV8Us", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xxblendvw, "V4UiV4UiV4UiV4Ui", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xxblendvd, "V2ULLiV2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
// Float 128 built-ins
-BUILTIN(__builtin_sqrtf128_round_to_odd, "LLdLLd", "")
-BUILTIN(__builtin_addf128_round_to_odd, "LLdLLdLLd", "")
-BUILTIN(__builtin_subf128_round_to_odd, "LLdLLdLLd", "")
-BUILTIN(__builtin_mulf128_round_to_odd, "LLdLLdLLd", "")
-BUILTIN(__builtin_divf128_round_to_odd, "LLdLLdLLd", "")
-BUILTIN(__builtin_fmaf128_round_to_odd, "LLdLLdLLdLLd", "")
-BUILTIN(__builtin_truncf128_round_to_odd, "dLLd", "")
-BUILTIN(__builtin_vsx_scalar_extract_expq, "ULLiLLd", "")
-BUILTIN(__builtin_vsx_scalar_insert_exp_qp, "LLdLLdULLi", "")
+TARGET_BUILTIN(__builtin_sqrtf128_round_to_odd, "LLdLLd", "", "float128")
+TARGET_BUILTIN(__builtin_addf128_round_to_odd, "LLdLLdLLd", "", "float128")
+TARGET_BUILTIN(__builtin_subf128_round_to_odd, "LLdLLdLLd", "", "float128")
+TARGET_BUILTIN(__builtin_mulf128_round_to_odd, "LLdLLdLLd", "", "float128")
+TARGET_BUILTIN(__builtin_divf128_round_to_odd, "LLdLLdLLd", "", "float128")
+TARGET_BUILTIN(__builtin_fmaf128_round_to_odd, "LLdLLdLLdLLd", "", "float128")
+TARGET_BUILTIN(__builtin_truncf128_round_to_odd, "dLLd", "", "float128")
+TARGET_BUILTIN(__builtin_vsx_scalar_extract_expq, "ULLiLLd", "", "float128")
+TARGET_BUILTIN(__builtin_vsx_scalar_insert_exp_qp, "LLdLLdULLi", "", "float128")
// Fastmath by default builtins
BUILTIN(__builtin_ppc_rsqrtf, "V4fV4f", "")
@@ -763,60 +883,60 @@ BUILTIN(__builtin_ppc_recipdivf, "V4fV4fV4f", "")
BUILTIN(__builtin_ppc_recipdivd, "V2dV2dV2d", "")
// HTM builtins
-BUILTIN(__builtin_tbegin, "UiUIi", "")
-BUILTIN(__builtin_tend, "UiUIi", "")
+TARGET_BUILTIN(__builtin_tbegin, "UiUIi", "", "htm")
+TARGET_BUILTIN(__builtin_tend, "UiUIi", "", "htm")
-BUILTIN(__builtin_tabort, "UiUi", "")
-BUILTIN(__builtin_tabortdc, "UiUiUiUi", "")
-BUILTIN(__builtin_tabortdci, "UiUiUii", "")
-BUILTIN(__builtin_tabortwc, "UiUiUiUi", "")
-BUILTIN(__builtin_tabortwci, "UiUiUii", "")
+TARGET_BUILTIN(__builtin_tabort, "UiUi", "", "htm")
+TARGET_BUILTIN(__builtin_tabortdc, "UiUiUiUi", "", "htm")
+TARGET_BUILTIN(__builtin_tabortdci, "UiUiUii", "", "htm")
+TARGET_BUILTIN(__builtin_tabortwc, "UiUiUiUi", "", "htm")
+TARGET_BUILTIN(__builtin_tabortwci, "UiUiUii", "", "htm")
-BUILTIN(__builtin_tcheck, "Ui", "")
-BUILTIN(__builtin_treclaim, "UiUi", "")
-BUILTIN(__builtin_trechkpt, "Ui", "")
-BUILTIN(__builtin_tsr, "UiUi", "")
+TARGET_BUILTIN(__builtin_tcheck, "Ui", "", "htm")
+TARGET_BUILTIN(__builtin_treclaim, "UiUi", "", "htm")
+TARGET_BUILTIN(__builtin_trechkpt, "Ui", "", "htm")
+TARGET_BUILTIN(__builtin_tsr, "UiUi", "", "htm")
-BUILTIN(__builtin_tendall, "Ui", "")
-BUILTIN(__builtin_tresume, "Ui", "")
-BUILTIN(__builtin_tsuspend, "Ui", "")
+TARGET_BUILTIN(__builtin_tendall, "Ui", "", "htm")
+TARGET_BUILTIN(__builtin_tresume, "Ui", "", "htm")
+TARGET_BUILTIN(__builtin_tsuspend, "Ui", "", "htm")
-BUILTIN(__builtin_get_texasr, "LUi", "c")
-BUILTIN(__builtin_get_texasru, "LUi", "c")
-BUILTIN(__builtin_get_tfhar, "LUi", "c")
-BUILTIN(__builtin_get_tfiar, "LUi", "c")
+TARGET_BUILTIN(__builtin_get_texasr, "LUi", "c", "htm")
+TARGET_BUILTIN(__builtin_get_texasru, "LUi", "c", "htm")
+TARGET_BUILTIN(__builtin_get_tfhar, "LUi", "c", "htm")
+TARGET_BUILTIN(__builtin_get_tfiar, "LUi", "c", "htm")
-BUILTIN(__builtin_set_texasr, "vLUi", "c")
-BUILTIN(__builtin_set_texasru, "vLUi", "c")
-BUILTIN(__builtin_set_tfhar, "vLUi", "c")
-BUILTIN(__builtin_set_tfiar, "vLUi", "c")
+TARGET_BUILTIN(__builtin_set_texasr, "vLUi", "c", "htm")
+TARGET_BUILTIN(__builtin_set_texasru, "vLUi", "c", "htm")
+TARGET_BUILTIN(__builtin_set_tfhar, "vLUi", "c", "htm")
+TARGET_BUILTIN(__builtin_set_tfiar, "vLUi", "c", "htm")
-BUILTIN(__builtin_ttest, "LUi", "")
+TARGET_BUILTIN(__builtin_ttest, "LUi", "", "htm")
// Scalar built-ins
-BUILTIN(__builtin_divwe, "SiSiSi", "")
-BUILTIN(__builtin_divweu, "UiUiUi", "")
-BUILTIN(__builtin_divde, "SLLiSLLiSLLi", "")
-BUILTIN(__builtin_divdeu, "ULLiULLiULLi", "")
-BUILTIN(__builtin_bpermd, "SLLiSLLiSLLi", "")
-BUILTIN(__builtin_pdepd, "ULLiULLiULLi", "")
-BUILTIN(__builtin_pextd, "ULLiULLiULLi", "")
-BUILTIN(__builtin_cfuged, "ULLiULLiULLi", "")
-BUILTIN(__builtin_cntlzdm, "ULLiULLiULLi", "")
-BUILTIN(__builtin_cnttzdm, "ULLiULLiULLi", "")
+TARGET_BUILTIN(__builtin_divwe, "SiSiSi", "", "extdiv")
+TARGET_BUILTIN(__builtin_divweu, "UiUiUi", "", "extdiv")
+TARGET_BUILTIN(__builtin_divde, "SLLiSLLiSLLi", "", "extdiv")
+TARGET_BUILTIN(__builtin_divdeu, "ULLiULLiULLi", "", "extdiv")
+TARGET_BUILTIN(__builtin_bpermd, "SLLiSLLiSLLi", "", "bpermd")
+TARGET_BUILTIN(__builtin_pdepd, "ULLiULLiULLi", "", "isa-v31-instructions")
+TARGET_BUILTIN(__builtin_pextd, "ULLiULLiULLi", "", "isa-v31-instructions")
+TARGET_BUILTIN(__builtin_cfuged, "ULLiULLiULLi", "", "isa-v31-instructions")
+TARGET_BUILTIN(__builtin_cntlzdm, "ULLiULLiULLi", "", "isa-v31-instructions")
+TARGET_BUILTIN(__builtin_cnttzdm, "ULLiULLiULLi", "", "isa-v31-instructions")
// Double-double (un)pack
BUILTIN(__builtin_unpack_longdouble, "dLdIi", "")
BUILTIN(__builtin_pack_longdouble, "Lddd", "")
// Generate random number
-BUILTIN(__builtin_darn, "LLi", "")
-BUILTIN(__builtin_darn_raw, "LLi", "")
-BUILTIN(__builtin_darn_32, "i", "")
+TARGET_BUILTIN(__builtin_darn, "LLi", "", "isa-v30-instructions")
+TARGET_BUILTIN(__builtin_darn_raw, "LLi", "", "isa-v30-instructions")
+TARGET_BUILTIN(__builtin_darn_32, "i", "", "isa-v30-instructions")
// Vector int128 (un)pack
-BUILTIN(__builtin_unpack_vector_int128, "ULLiV1LLLii", "")
-BUILTIN(__builtin_pack_vector_int128, "V1LLLiULLiULLi", "")
+TARGET_BUILTIN(__builtin_unpack_vector_int128, "ULLiV1LLLii", "", "vsx")
+TARGET_BUILTIN(__builtin_pack_vector_int128, "V1LLLiULLiULLi", "", "vsx")
// Set the floating point rounding mode
BUILTIN(__builtin_setrnd, "di", "")
@@ -850,86 +970,159 @@ BUILTIN(__builtin_dcbf, "vvC*", "")
// its given accumulator.
// Provided builtins with _mma_ prefix for compatibility.
-CUSTOM_BUILTIN(mma_lxvp, vsx_lxvp, "W256SLiW256C*", false)
-CUSTOM_BUILTIN(mma_stxvp, vsx_stxvp, "vW256SLiW256*", false)
-CUSTOM_BUILTIN(mma_assemble_pair, vsx_assemble_pair, "vW256*VV", false)
-CUSTOM_BUILTIN(mma_disassemble_pair, vsx_disassemble_pair, "vv*W256*", false)
-CUSTOM_BUILTIN(vsx_build_pair, vsx_assemble_pair, "vW256*VV", false)
-CUSTOM_BUILTIN(mma_build_acc, mma_assemble_acc, "vW512*VVVV", false)
+CUSTOM_BUILTIN(mma_lxvp, vsx_lxvp, "W256SLiW256C*", false,
+ "paired-vector-memops")
+CUSTOM_BUILTIN(mma_stxvp, vsx_stxvp, "vW256SLiW256*", false,
+ "paired-vector-memops")
+CUSTOM_BUILTIN(mma_assemble_pair, vsx_assemble_pair, "vW256*VV", false,
+ "paired-vector-memops")
+CUSTOM_BUILTIN(mma_disassemble_pair, vsx_disassemble_pair, "vv*W256*", false,
+ "paired-vector-memops")
+CUSTOM_BUILTIN(vsx_build_pair, vsx_assemble_pair, "vW256*VV", false,
+ "paired-vector-memops")
+CUSTOM_BUILTIN(mma_build_acc, mma_assemble_acc, "vW512*VVVV", false, "mma")
// UNALIASED_CUSTOM_BUILTIN macro is used for built-ins that have
// the same name as that of the intrinsic they generate, i.e. the
// ID and INTR are the same.
// This avoids repeating the ID and INTR in the macro expression.
-UNALIASED_CUSTOM_BUILTIN(vsx_lxvp, "W256SLiW256C*", false)
-UNALIASED_CUSTOM_BUILTIN(vsx_stxvp, "vW256SLiW256*", false)
-UNALIASED_CUSTOM_BUILTIN(vsx_assemble_pair, "vW256*VV", false)
-UNALIASED_CUSTOM_BUILTIN(vsx_disassemble_pair, "vv*W256*", false)
-
-UNALIASED_CUSTOM_BUILTIN(mma_assemble_acc, "vW512*VVVV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_disassemble_acc, "vv*W512*", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xxmtacc, "vW512*", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xxmfacc, "vW512*", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xxsetaccz, "vW512*", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi4ger8, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2s, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf32ger, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf64ger, "vW512*W256V", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi4ger8, "vW512*VVi15i15i255", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4, "vW512*VVi15i15i15", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2, "vW512*VVi15i15i3", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2s, "vW512*VVi15i15i3", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2, "vW512*VVi15i15i3", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32ger, "vW512*VVi15i15", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64ger, "vW512*W256Vi15i3", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi4ger8pp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4pp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4spp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2pp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2spp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi4ger8pp, "vW512*VVi15i15i255", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4pp, "vW512*VVi15i15i15", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4spp, "vW512*VVi15i15i15", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2pp, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2spp, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2pp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2pn, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2np, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2nn, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2pp, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2pn, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2np, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2nn, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf32gerpp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf32gerpn, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf32gernp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf32gernn, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gerpp, "vW512*VVi15i15", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gerpn, "vW512*VVi15i15", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gernp, "vW512*VVi15i15", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gernn, "vW512*VVi15i15", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf64gerpp, "vW512*W256V", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf64gerpn, "vW512*W256V", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf64gernp, "vW512*W256V", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf64gernn, "vW512*W256V", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gerpp, "vW512*W256Vi15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gerpn, "vW512*W256Vi15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gernp, "vW512*W256Vi15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gernn, "vW512*W256Vi15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2, "vW512*VVi15i15i3", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2pp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2pn, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2np, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2nn, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2pp, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2pn, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2np, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2nn, "vW512*VVi15i15i3", true)
+UNALIASED_CUSTOM_BUILTIN(vsx_lxvp, "W256SLiW256C*", false,
+ "paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(vsx_stxvp, "vW256SLiW256*", false,
+ "paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(vsx_assemble_pair, "vW256*VV", false,
+ "paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(vsx_disassemble_pair, "vv*W256*", false,
+ "paired-vector-memops")
+
+// TODO: Require only mma after backend supports these without paired memops
+UNALIASED_CUSTOM_BUILTIN(mma_assemble_acc, "vW512*VVVV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_disassemble_acc, "vv*W512*", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xxmtacc, "vW512*", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xxmfacc, "vW512*", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xxsetaccz, "vW512*", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi4ger8, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2s, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32ger, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64ger, "vW512*W256V", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi4ger8, "vW512*VVi15i15i255", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4, "vW512*VVi15i15i15", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2, "vW512*VVi15i15i3", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2s, "vW512*VVi15i15i3", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2, "vW512*VVi15i15i3", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32ger, "vW512*VVi15i15", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64ger, "vW512*W256Vi15i3", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi4ger8pp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4pp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4spp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2pp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2spp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi4ger8pp, "vW512*VVi15i15i255", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4pp, "vW512*VVi15i15i15", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4spp, "vW512*VVi15i15i15", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2pp, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2spp, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2pp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2pn, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2np, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2nn, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2pp, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2pn, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2np, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2nn, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32gerpp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32gerpn, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32gernp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32gernn, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gerpp, "vW512*VVi15i15", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gerpn, "vW512*VVi15i15", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gernp, "vW512*VVi15i15", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gernn, "vW512*VVi15i15", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64gerpp, "vW512*W256V", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64gerpn, "vW512*W256V", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64gernp, "vW512*W256V", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64gernn, "vW512*W256V", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gerpp, "vW512*W256Vi15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gerpn, "vW512*W256Vi15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gernp, "vW512*W256Vi15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gernn, "vW512*W256Vi15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2, "vW512*VVi15i15i3", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2pp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2pn, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2np, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2nn, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2pp, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2pn, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2np, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2nn, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
// FIXME: Obviously incomplete.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def
index c26e3b807370..50e912c2c1c7 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def
@@ -16,68 +16,78 @@
#endif
// Zbb extension
-TARGET_BUILTIN(__builtin_riscv_orc_b_32, "ZiZi", "nc", "zbb")
-TARGET_BUILTIN(__builtin_riscv_orc_b_64, "WiWi", "nc", "zbb,64bit")
-TARGET_BUILTIN(__builtin_riscv_clz_32, "ZiZi", "nc", "zbb")
-TARGET_BUILTIN(__builtin_riscv_clz_64, "WiWi", "nc", "zbb,64bit")
-TARGET_BUILTIN(__builtin_riscv_ctz_32, "ZiZi", "nc", "zbb")
-TARGET_BUILTIN(__builtin_riscv_ctz_64, "WiWi", "nc", "zbb,64bit")
+TARGET_BUILTIN(__builtin_riscv_orc_b_32, "UiUi", "nc", "zbb")
+TARGET_BUILTIN(__builtin_riscv_orc_b_64, "UWiUWi", "nc", "zbb,64bit")
+TARGET_BUILTIN(__builtin_riscv_clz_32, "UiUi", "nc", "zbb|xtheadbb")
+TARGET_BUILTIN(__builtin_riscv_clz_64, "UiUWi", "nc", "zbb|xtheadbb,64bit")
+TARGET_BUILTIN(__builtin_riscv_ctz_32, "UiUi", "nc", "zbb")
+TARGET_BUILTIN(__builtin_riscv_ctz_64, "UiUWi", "nc", "zbb,64bit")
// Zbc or Zbkc extension
-TARGET_BUILTIN(__builtin_riscv_clmul, "LiLiLi", "nc", "zbc|zbkc")
-TARGET_BUILTIN(__builtin_riscv_clmulh, "LiLiLi", "nc", "zbc|zbkc")
-TARGET_BUILTIN(__builtin_riscv_clmulr, "LiLiLi", "nc", "zbc")
+TARGET_BUILTIN(__builtin_riscv_clmul_32, "UiUiUi", "nc", "zbc|zbkc")
+TARGET_BUILTIN(__builtin_riscv_clmul_64, "UWiUWiUWi", "nc", "zbc|zbkc,64bit")
+TARGET_BUILTIN(__builtin_riscv_clmulh_32, "UiUiUi", "nc", "zbc|zbkc,32bit")
+TARGET_BUILTIN(__builtin_riscv_clmulh_64, "UWiUWiUWi", "nc", "zbc|zbkc,64bit")
+TARGET_BUILTIN(__builtin_riscv_clmulr_32, "UiUiUi", "nc", "zbc,32bit")
+TARGET_BUILTIN(__builtin_riscv_clmulr_64, "UWiUWiUWi", "nc", "zbc,64bit")
// Zbkx
-TARGET_BUILTIN(__builtin_riscv_xperm4, "LiLiLi", "nc", "zbkx")
-TARGET_BUILTIN(__builtin_riscv_xperm8, "LiLiLi", "nc", "zbkx")
+TARGET_BUILTIN(__builtin_riscv_xperm4_32, "UiUiUi", "nc", "zbkx,32bit")
+TARGET_BUILTIN(__builtin_riscv_xperm4_64, "UWiUWiUWi", "nc", "zbkx,64bit")
+TARGET_BUILTIN(__builtin_riscv_xperm8_32, "UiUiUi", "nc", "zbkx,32bit")
+TARGET_BUILTIN(__builtin_riscv_xperm8_64, "UWiUWiUWi", "nc", "zbkx,64bit")
// Zbkb extension
-TARGET_BUILTIN(__builtin_riscv_brev8, "LiLi", "nc", "zbkb")
-TARGET_BUILTIN(__builtin_riscv_zip_32, "ZiZi", "nc", "zbkb,32bit")
-TARGET_BUILTIN(__builtin_riscv_unzip_32, "ZiZi", "nc", "zbkb,32bit")
+TARGET_BUILTIN(__builtin_riscv_brev8_32, "UiUi", "nc", "zbkb")
+TARGET_BUILTIN(__builtin_riscv_brev8_64, "UWiUWi", "nc", "zbkb,64bit")
+TARGET_BUILTIN(__builtin_riscv_zip_32, "UiUi", "nc", "zbkb,32bit")
+TARGET_BUILTIN(__builtin_riscv_unzip_32, "UiUi", "nc", "zbkb,32bit")
// Zknd extension
-TARGET_BUILTIN(__builtin_riscv_aes32dsi_32, "ZiZiZiIUc", "nc", "zknd,32bit")
-TARGET_BUILTIN(__builtin_riscv_aes32dsmi_32, "ZiZiZiIUc", "nc", "zknd,32bit")
-TARGET_BUILTIN(__builtin_riscv_aes64ds_64, "WiWiWi", "nc", "zknd,64bit")
-TARGET_BUILTIN(__builtin_riscv_aes64dsm_64, "WiWiWi", "nc", "zknd,64bit")
-TARGET_BUILTIN(__builtin_riscv_aes64im_64, "WiWi", "nc", "zknd,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes32dsi, "UiUiUiIUi", "nc", "zknd,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes32dsmi, "UiUiUiIUi", "nc", "zknd,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes64ds, "UWiUWiUWi", "nc", "zknd,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes64dsm, "UWiUWiUWi", "nc", "zknd,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes64im, "UWiUWi", "nc", "zknd,64bit")
-// Zknd & zkne
-TARGET_BUILTIN(__builtin_riscv_aes64ks1i_64, "WiWiIUi", "nc", "zknd|zkne,64bit")
-TARGET_BUILTIN(__builtin_riscv_aes64ks2_64, "WiWiWi", "nc", "zknd|zkne,64bit")
+// Zknd & Zkne
+TARGET_BUILTIN(__builtin_riscv_aes64ks1i, "UWiUWiIUi", "nc", "zknd|zkne,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes64ks2, "UWiUWiUWi", "nc", "zknd|zkne,64bit")
// Zkne extension
-TARGET_BUILTIN(__builtin_riscv_aes32esi_32, "ZiZiZiIUc", "nc", "zkne,32bit")
-TARGET_BUILTIN(__builtin_riscv_aes32esmi_32, "ZiZiZiIUc", "nc", "zkne,32bit")
-TARGET_BUILTIN(__builtin_riscv_aes64es_64, "WiWiWi", "nc", "zkne,64bit")
-TARGET_BUILTIN(__builtin_riscv_aes64esm_64, "WiWiWi", "nc", "zkne,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes32esi, "UiUiUiIUi", "nc", "zkne,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes32esmi, "UiUiUiIUi", "nc", "zkne,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes64es, "UWiUWiUWi", "nc", "zkne,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes64esm, "UWiUWiUWi", "nc", "zkne,64bit")
// Zknh extension
-TARGET_BUILTIN(__builtin_riscv_sha256sig0, "LiLi", "nc", "zknh")
-TARGET_BUILTIN(__builtin_riscv_sha256sig1, "LiLi", "nc", "zknh")
-TARGET_BUILTIN(__builtin_riscv_sha256sum0, "LiLi", "nc", "zknh")
-TARGET_BUILTIN(__builtin_riscv_sha256sum1, "LiLi", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sig0, "UiUi", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sig1, "UiUi", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sum0, "UiUi", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sum1, "UiUi", "nc", "zknh")
-TARGET_BUILTIN(__builtin_riscv_sha512sig0h_32, "ZiZiZi", "nc", "zknh,32bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sig0l_32, "ZiZiZi", "nc", "zknh,32bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sig1h_32, "ZiZiZi", "nc", "zknh,32bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sig1l_32, "ZiZiZi", "nc", "zknh,32bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sum0r_32, "ZiZiZi", "nc", "zknh,32bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sum1r_32, "ZiZiZi", "nc", "zknh,32bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sig0_64, "WiWi", "nc", "zknh,64bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sig1_64, "WiWi", "nc", "zknh,64bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sum0_64, "WiWi", "nc", "zknh,64bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sum1_64, "WiWi", "nc", "zknh,64bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig0h, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig0l, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig1h, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig1l, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sum0r, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sum1r, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig0, "UWiUWi", "nc", "zknh,64bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig1, "UWiUWi", "nc", "zknh,64bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sum0, "UWiUWi", "nc", "zknh,64bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sum1, "UWiUWi", "nc", "zknh,64bit")
// Zksed extension
-TARGET_BUILTIN(__builtin_riscv_sm4ed, "LiLiLiIUc", "nc", "zksed")
-TARGET_BUILTIN(__builtin_riscv_sm4ks, "LiLiLiIUc", "nc", "zksed")
+TARGET_BUILTIN(__builtin_riscv_sm4ed, "UiUiUiIUi", "nc", "zksed")
+TARGET_BUILTIN(__builtin_riscv_sm4ks, "UiUiUiIUi", "nc", "zksed")
// Zksh extension
-TARGET_BUILTIN(__builtin_riscv_sm3p0, "LiLi", "nc", "zksh")
-TARGET_BUILTIN(__builtin_riscv_sm3p1, "LiLi", "nc", "zksh")
+TARGET_BUILTIN(__builtin_riscv_sm3p0, "UiUi", "nc", "zksh")
+TARGET_BUILTIN(__builtin_riscv_sm3p1, "UiUi", "nc", "zksh")
+
+// Zihintntl extension
+TARGET_BUILTIN(__builtin_riscv_ntl_load, "v.", "t", "experimental-zihintntl")
+TARGET_BUILTIN(__builtin_riscv_ntl_store, "v.", "t", "experimental-zihintntl")
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCVVector.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCVVector.def
index 008cb939a30b..6dfa87a1a1d3 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCVVector.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCVVector.def
@@ -16,6 +16,7 @@
#endif
#include "clang/Basic/riscv_vector_builtins.inc"
+#include "clang/Basic/riscv_sifive_vector_builtins.inc"
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSME.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSME.def
new file mode 100644
index 000000000000..180ee20295cc
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSME.def
@@ -0,0 +1,21 @@
+//===--- BuiltinsSME.def - SME Builtin function database --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SME-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+// The format of this database matches clang/Basic/Builtins.def.
+
+#define GET_SME_BUILTINS
+#include "clang/Basic/arm_sme_builtins.inc"
+#undef GET_SME_BUILTINS
+
+#undef BUILTIN
+#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def
index 9064ded12fd1..7e950914ad94 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def
@@ -161,7 +161,7 @@ TARGET_BUILTIN(__builtin_wasm_narrow_u_i16x8_i32x4, "V8UsV4iV4i", "nc", "simd128
TARGET_BUILTIN(__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4, "V4iV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4, "V4UiV2d", "nc", "simd128")
-// Relaxed SIMD builtins (experimental)
+// Relaxed SIMD builtins
TARGET_BUILTIN(__builtin_wasm_relaxed_madd_f32x4, "V4fV4fV4fV4f", "nc", "relaxed-simd")
TARGET_BUILTIN(__builtin_wasm_relaxed_nmadd_f32x4, "V4fV4fV4fV4f", "nc", "relaxed-simd")
TARGET_BUILTIN(__builtin_wasm_relaxed_madd_f64x2, "V2dV2dV2dV2d", "nc", "relaxed-simd")
@@ -190,5 +190,24 @@ TARGET_BUILTIN(__builtin_wasm_relaxed_dot_i8x16_i7x16_s_i16x8, "V8sV16ScV16Sc",
TARGET_BUILTIN(__builtin_wasm_relaxed_dot_i8x16_i7x16_add_s_i32x4, "V4iV16ScV16ScV4i", "nc", "relaxed-simd")
TARGET_BUILTIN(__builtin_wasm_relaxed_dot_bf16x8_add_f32_f32x4, "V4fV8UsV8UsV4f", "nc", "relaxed-simd")
+// Reference Types builtins
+// Some builtins are custom type-checked - see 't' as part of the third argument,
+// in which case the argument spec (second argument) is unused.
+
+TARGET_BUILTIN(__builtin_wasm_ref_null_extern, "i", "nct", "reference-types")
+
+// A funcref represented as a function pointer with the funcref attribute
+// attached to the type, therefore SemaChecking will check for the right
+// return type.
+TARGET_BUILTIN(__builtin_wasm_ref_null_func, "i", "nct", "reference-types")
+
+// Table builtins
+TARGET_BUILTIN(__builtin_wasm_table_set, "viii", "t", "reference-types")
+TARGET_BUILTIN(__builtin_wasm_table_get, "iii", "t", "reference-types")
+TARGET_BUILTIN(__builtin_wasm_table_size, "zi", "nt", "reference-types")
+TARGET_BUILTIN(__builtin_wasm_table_grow, "iiii", "nt", "reference-types")
+TARGET_BUILTIN(__builtin_wasm_table_fill, "viiii", "t", "reference-types")
+TARGET_BUILTIN(__builtin_wasm_table_copy, "viiiii", "t", "reference-types")
+
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def
index 122896b417c8..10ac3b3c34ef 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def
@@ -2116,6 +2116,20 @@ TARGET_HEADER_BUILTIN(__readgsword, "UsUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES,
TARGET_HEADER_BUILTIN(__readgsdword, "UNiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(__readgsqword, "ULLiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+// AVX-VNNI-INT16
+TARGET_BUILTIN(__builtin_ia32_vpdpwsud128, "V4iV4iV4iV4i", "nV:128:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwsud256, "V8iV8iV8iV8i", "nV:256:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwsuds128, "V4iV4iV4iV4i", "nV:128:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwsuds256, "V8iV8iV8iV8i", "nV:256:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwusd128, "V4iV4iV4iV4i", "nV:128:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwusd256, "V8iV8iV8iV8i", "nV:256:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwusds128, "V4iV4iV4iV4i", "nV:128:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwusds256, "V8iV8iV8iV8i", "nV:256:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwuud128, "V4iV4iV4iV4i", "nV:128:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwuud256, "V8iV8iV8iV8i", "nV:256:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwuuds128, "V4iV4iV4iV4i", "nV:128:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwuuds256, "V8iV8iV8iV8i", "nV:256:", "avxvnniint16")
+
// AVX-NE-CONVERT
TARGET_BUILTIN(__builtin_ia32_vbcstnebf162ps128, "V4fyC*", "nV:128:", "avxneconvert")
TARGET_BUILTIN(__builtin_ia32_vbcstnebf162ps256, "V8fyC*", "nV:256:", "avxneconvert")
@@ -2132,6 +2146,11 @@ TARGET_BUILTIN(__builtin_ia32_vcvtneoph2ps256, "V8fV16xC*", "nV:256:", "avxnecon
TARGET_BUILTIN(__builtin_ia32_vcvtneps2bf16128, "V8yV4f", "nV:128:", "avx512bf16,avx512vl|avxneconvert")
TARGET_BUILTIN(__builtin_ia32_vcvtneps2bf16256, "V8yV8f", "nV:256:", "avx512bf16,avx512vl|avxneconvert")
+// SHA512
+TARGET_BUILTIN(__builtin_ia32_vsha512msg1, "V4ULLiV4ULLiV2ULLi", "nV:256:", "sha512")
+TARGET_BUILTIN(__builtin_ia32_vsha512msg2, "V4ULLiV4ULLiV4ULLi", "nV:256:", "sha512")
+TARGET_BUILTIN(__builtin_ia32_vsha512rnds2, "V4ULLiV4ULLiV4ULLiV2ULLi", "nV:256:", "sha512")
+
TARGET_HEADER_BUILTIN(_InterlockedAnd64, "WiWiD*Wi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_InterlockedDecrement64, "WiWiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_InterlockedExchange64, "WiWiD*Wi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
@@ -2141,6 +2160,17 @@ TARGET_HEADER_BUILTIN(_InterlockedIncrement64, "WiWiD*", "nh", INTRIN_H, ALL
TARGET_HEADER_BUILTIN(_InterlockedOr64, "WiWiD*Wi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_InterlockedXor64, "WiWiD*Wi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+// SM3
+TARGET_BUILTIN(__builtin_ia32_vsm3msg1, "V4UiV4UiV4UiV4Ui", "nV:128:", "sm3")
+TARGET_BUILTIN(__builtin_ia32_vsm3msg2, "V4UiV4UiV4UiV4Ui", "nV:128:", "sm3")
+TARGET_BUILTIN(__builtin_ia32_vsm3rnds2, "V4UiV4UiV4UiV4UiIUi", "nV:128:", "sm3")
+
+// SM4
+TARGET_BUILTIN(__builtin_ia32_vsm4key4128, "V4UiV4UiV4Ui", "nV:128:", "sm4")
+TARGET_BUILTIN(__builtin_ia32_vsm4key4256, "V8UiV8UiV8Ui", "nV:256:", "sm4")
+TARGET_BUILTIN(__builtin_ia32_vsm4rnds4128, "V4UiV4UiV4Ui", "nV:128:", "sm4")
+TARGET_BUILTIN(__builtin_ia32_vsm4rnds4256, "V8UiV8UiV8Ui", "nV:256:", "sm4")
+
#undef BUILTIN
#undef TARGET_BUILTIN
#undef TARGET_HEADER_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def
index 4b9e7d29d651..e5c1fe8b3192 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def
@@ -117,6 +117,8 @@ TARGET_BUILTIN(__builtin_ia32_tilestored64_internal, "vUsUsv*zV256i", "n", "amx-
TARGET_BUILTIN(__builtin_ia32_tilezero_internal, "V256iUsUs", "n", "amx-tile")
TARGET_BUILTIN(__builtin_ia32_tdpbf16ps_internal, "V256iUsUsUsV256iV256iV256i", "n", "amx-bf16")
TARGET_BUILTIN(__builtin_ia32_tdpfp16ps_internal, "V256iUsUsUsV256iV256iV256i", "n", "amx-fp16")
+TARGET_BUILTIN(__builtin_ia32_tcmmimfp16ps_internal, "V256iUsUsUsV256iV256iV256i", "n", "amx-complex")
+TARGET_BUILTIN(__builtin_ia32_tcmmrlfp16ps_internal, "V256iUsUsUsV256iV256iV256i", "n", "amx-complex")
// AMX
TARGET_BUILTIN(__builtin_ia32_tile_loadconfig, "vvC*", "n", "amx-tile")
TARGET_BUILTIN(__builtin_ia32_tile_storeconfig, "vvC*", "n", "amx-tile")
@@ -134,6 +136,9 @@ TARGET_BUILTIN(__builtin_ia32_tdpbuud, "vIUcIUcIUc", "n", "amx-int8")
TARGET_BUILTIN(__builtin_ia32_tdpbf16ps, "vIUcIUcIUc", "n", "amx-bf16")
TARGET_BUILTIN(__builtin_ia32_ptwrite64, "vUOi", "n", "ptwrite")
+TARGET_BUILTIN(__builtin_ia32_tcmmimfp16ps, "vIUcIUcIUc", "n", "amx-complex")
+TARGET_BUILTIN(__builtin_ia32_tcmmrlfp16ps, "vIUcIUcIUc", "n", "amx-complex")
+
TARGET_BUILTIN(__builtin_ia32_prefetchi, "vvC*Ui", "nc", "prefetchi")
TARGET_BUILTIN(__builtin_ia32_cmpccxadd32, "Siv*SiSiIi", "n", "cmpccxadd")
TARGET_BUILTIN(__builtin_ia32_cmpccxadd64, "SLLiv*SLLiSLLiIi", "n", "cmpccxadd")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def
index 436226c6f178..11aec88c5335 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def
@@ -36,6 +36,7 @@ CODEGENOPT(Dwarf64 , 1, 0) ///< -gdwarf64.
CODEGENOPT(Dwarf32 , 1, 1) ///< -gdwarf32.
CODEGENOPT(PreserveAsmComments, 1, 1) ///< -dA, -fno-preserve-as-comments.
CODEGENOPT(AssumeSaneOperatorNew , 1, 1) ///< implicit __attribute__((malloc)) operator new
+CODEGENOPT(AssumeUniqueVTables , 1, 1) ///< Assume a class has only one vtable.
CODEGENOPT(Autolink , 1, 1) ///< -fno-autolink
CODEGENOPT(ObjCAutoRefCountExceptions , 1, 0) ///< Whether ARC should be EH-safe.
CODEGENOPT(Backchain , 1, 0) ///< -mbackchain
@@ -52,6 +53,7 @@ CODEGENOPT(UniqueBasicBlockSectionNames, 1, 1) ///< Set for -funique-basic-block
///< Produce unique section names with
///< basic block sections.
CODEGENOPT(EnableAIXExtendedAltivecABI, 1, 0) ///< Set for -mabi=vec-extabi. Enables the extended Altivec ABI on AIX.
+CODEGENOPT(XCOFFReadOnlyPointers, 1, 0) ///< Set for -mxcoff-roptr.
ENUM_CODEGENOPT(FramePointer, FramePointerKind, 2, FramePointerKind::None) /// frame-pointer: all,non-leaf,none
CODEGENOPT(ClearASTBeforeBackend , 1, 0) ///< Free the AST before running backend code generation. Only works with -disable-free.
@@ -84,8 +86,6 @@ CODEGENOPT(EmitDeclMetadata , 1, 0) ///< Emit special metadata indicating what
///< Only useful when running CodeGen as a
///< subroutine.
CODEGENOPT(EmitVersionIdentMetadata , 1, 1) ///< Emit compiler version metadata.
-CODEGENOPT(EmitGcovArcs , 1, 0) ///< Emit coverage data files, aka. GCDA.
-CODEGENOPT(EmitGcovNotes , 1, 0) ///< Emit coverage "notes" files, aka GCNO.
CODEGENOPT(EmitOpenCLArgMetadata , 1, 0) ///< Emit OpenCL kernel arg metadata.
CODEGENOPT(EmulatedTLS , 1, 0) ///< Set by default or -f[no-]emulated-tls.
/// Embed Bitcode mode (off/all/bitcode/marker).
@@ -115,6 +115,9 @@ CODEGENOPT(StackSizeSection , 1, 0) ///< Set when -fstack-size-section is enabl
CODEGENOPT(ForceDwarfFrameSection , 1, 0) ///< Set when -fforce-dwarf-frame is
///< enabled.
+///< Set when -femit-compact-unwind-non-canonical is enabled.
+CODEGENOPT(EmitCompactUnwindNonCanonical, 1, 0)
+
///< Set when -femit-dwarf-unwind is passed.
ENUM_CODEGENOPT(EmitDwarfUnwind, llvm::EmitDwarfUnwindType, 2,
llvm::EmitDwarfUnwindType::Default)
@@ -128,8 +131,8 @@ CODEGENOPT(XRayAlwaysEmitTypedEvents , 1, 0)
///< Set when -fxray-ignore-loops is enabled.
CODEGENOPT(XRayIgnoreLoops , 1, 0)
-///< Set with -fno-xray-function-index to omit the index section.
-CODEGENOPT(XRayOmitFunctionIndex , 1, 0)
+///< Emit the XRay function index section.
+CODEGENOPT(XRayFunctionIndex , 1, 1)
///< Set the minimum number of instructions in a function to determine selective
@@ -162,10 +165,12 @@ CODEGENOPT(PrepareForThinLTO , 1, 0) ///< Set when -flto=thin is enabled on the
///< compile step.
CODEGENOPT(LTOUnit, 1, 0) ///< Emit IR to support LTO unit features (CFI, whole
///< program vtable opt).
+CODEGENOPT(FatLTO, 1, 0) ///< Set when -ffat-lto-objects is enabled.
CODEGENOPT(EnableSplitLTOUnit, 1, 0) ///< Enable LTO unit splitting to support
/// CFI and traditional whole program
/// devirtualization that require whole
/// program IR support.
+CODEGENOPT(UnifiedLTO, 1, 0) ///< Use the unified LTO pipeline.
CODEGENOPT(IncrementalLinkerCompatible, 1, 0) ///< Emit an object file which can
///< be used with an incremental
///< linker.
@@ -257,6 +262,8 @@ CODEGENOPT(SanitizeMinimalRuntime, 1, 0) ///< Use "_minimal" sanitizer runtime f
///< diagnostics.
CODEGENOPT(SanitizeCfiICallGeneralizePointers, 1, 0) ///< Generalize pointer types in
///< CFI icall function signatures
+CODEGENOPT(SanitizeCfiICallNormalizeIntegers, 1, 0) ///< Normalize integer types in
+ ///< CFI icall function signatures
CODEGENOPT(SanitizeCfiCanonicalJumpTables, 1, 0) ///< Make jump table symbols canonical
///< instead of creating a local jump table.
CODEGENOPT(SanitizeCoverageType, 2, 0) ///< Type of sanitizer coverage
@@ -318,6 +325,8 @@ CODEGENOPT(UseRegisterSizedBitfieldAccess , 1, 0)
CODEGENOPT(VerifyModule , 1, 1) ///< Control whether the module should be run
///< through the LLVM Verifier.
+CODEGENOPT(VerifyEach , 1, 1) ///< Control whether the LLVM verifier
+ ///< should run after every pass.
CODEGENOPT(StackRealignment , 1, 0) ///< Control whether to force stack
///< realignment.
@@ -333,8 +342,8 @@ VALUE_CODEGENOPT(WarnStackSize , 32, UINT_MAX) ///< Set via -fwarn-stack-siz
CODEGENOPT(NoStackArgProbe, 1, 0) ///< Set when -mno-stack-arg-probe is used
CODEGENOPT(DebugStrictDwarf, 1, 1) ///< Whether or not to use strict DWARF info.
-CODEGENOPT(EnableAssignmentTracking, 1,0) ///< Enable the Assignment Tracking
- ///< debug info feature.
+/// Control the Assignment Tracking debug info feature.
+ENUM_CODEGENOPT(AssignmentTrackingMode, AssignmentTrackingOpts, 2, AssignmentTrackingOpts::Disabled)
CODEGENOPT(DebugColumnInfo, 1, 0) ///< Whether or not to use column information
///< in debug info.
@@ -356,7 +365,7 @@ CODEGENOPT(DebugFwdTemplateParams, 1, 0) ///< Whether to emit complete
///< template parameter descriptions in
///< forward declarations (versus just
///< including them in the name).
-ENUM_CODEGENOPT(DebugSimpleTemplateNames, codegenoptions::DebugTemplateNamesKind, 2, codegenoptions::DebugTemplateNamesKind::Full) ///< Whether to emit template parameters
+ENUM_CODEGENOPT(DebugSimpleTemplateNames, llvm::codegenoptions::DebugTemplateNamesKind, 2, llvm::codegenoptions::DebugTemplateNamesKind::Full) ///< Whether to emit template parameters
///< in the textual names of template
///< specializations.
///< Implies DebugFwdTemplateNames to
@@ -386,7 +395,7 @@ VALUE_CODEGENOPT(SmallDataLimit, 32, 0)
VALUE_CODEGENOPT(SSPBufferSize, 32, 0)
/// The kind of generated debug info.
-ENUM_CODEGENOPT(DebugInfo, codegenoptions::DebugInfoKind, 4, codegenoptions::NoDebugInfo)
+ENUM_CODEGENOPT(DebugInfo, llvm::codegenoptions::DebugInfoKind, 4, llvm::codegenoptions::NoDebugInfo)
/// Whether to generate macro debug info.
CODEGENOPT(MacroDebugInfo, 1, 0)
@@ -470,6 +479,10 @@ CODEGENOPT(Addrsig, 1, 0)
/// Whether to emit unused static constants.
CODEGENOPT(KeepStaticConsts, 1, 0)
+/// Whether to emit all variables that have a persistent storage duration,
+/// including global, static and thread local variables.
+CODEGENOPT(KeepPersistentStorageVariables, 1, 0)
+
/// Whether to follow the AAPCS enforcing at least one read before storing to a volatile bitfield
CODEGENOPT(ForceAAPCSBitfieldLoad, 1, 0)
@@ -497,9 +510,6 @@ CODEGENOPT(SkipRaxSetup, 1, 0)
ENUM_CODEGENOPT(ZeroCallUsedRegs, llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind,
5, llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip)
-/// Whether to use opaque pointers.
-CODEGENOPT(OpaquePointers, 1, 0)
-
/// Modify C++ ABI to returning `this` pointer from constructors and
/// non-deleting destructors. (No effect on Microsoft ABI.)
CODEGENOPT(CtorDtorReturnThis, 1, 0)
diff --git a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h
index 4175fe3072ab..14fc94fe27f9 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h
@@ -13,10 +13,10 @@
#ifndef LLVM_CLANG_BASIC_CODEGENOPTIONS_H
#define LLVM_CLANG_BASIC_CODEGENOPTIONS_H
-#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Basic/Sanitizers.h"
#include "clang/Basic/XRayInstr.h"
#include "llvm/ADT/FloatingPointMode.h"
+#include "llvm/Frontend/Debug/Options.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Regex.h"
#include "llvm/Target/TargetOptions.h"
@@ -55,13 +55,14 @@ public:
};
enum VectorLibrary {
- NoLibrary, // Don't use any vector library.
- Accelerate, // Use the Accelerate framework.
- LIBMVEC, // GLIBC vector math library.
- MASSV, // IBM MASS vector library.
- SVML, // Intel short vector math library.
- SLEEF, // SLEEF SIMD Library for Evaluating Elementary Functions.
- Darwin_libsystem_m // Use Darwin's libsytem_m vector functions.
+ NoLibrary, // Don't use any vector library.
+ Accelerate, // Use the Accelerate framework.
+ LIBMVEC, // GLIBC vector math library.
+ MASSV, // IBM MASS vector library.
+ SVML, // Intel short vector math library.
+ SLEEF, // SLEEF SIMD Library for Evaluating Elementary Functions.
+ Darwin_libsystem_m, // Use Darwin's libsytem_m vector functions.
+ ArmPL // Arm Performance Libraries.
};
enum ObjCDispatchMethodKind {
@@ -163,6 +164,12 @@ public:
Never, // No loop is assumed to be finite.
};
+ enum AssignmentTrackingOpts {
+ Disabled,
+ Enabled,
+ Forced,
+ };
+
/// The code model to use (-mcmodel).
std::string CodeModel;
@@ -200,8 +207,11 @@ public:
/// if non-empty.
std::string RecordCommandLine;
- std::map<std::string, std::string> DebugPrefixMap;
- std::map<std::string, std::string> CoveragePrefixMap;
+ llvm::SmallVector<std::pair<std::string, std::string>, 0> DebugPrefixMap;
+
+ /// Prefix replacement map for source-based code coverage to remap source
+ /// file paths in coverage mapping.
+ llvm::SmallVector<std::pair<std::string, std::string>, 0> CoveragePrefixMap;
/// The ABI to use for passing floating point arguments.
std::string FloatABI;
@@ -273,6 +283,9 @@ public:
/// Name of the profile file to use as output for with -fmemory-profile.
std::string MemoryProfileOutput;
+ /// Name of the profile file to use as input for -fmemory-profile-use.
+ std::string MemoryProfileUsePath;
+
/// Name of the profile file to use as input for -fprofile-instr-use
std::string ProfileInstrumentUsePath;
@@ -326,12 +339,12 @@ public:
/// Optimization remark with an optional regular expression pattern.
struct OptRemark {
- RemarkKind Kind;
+ RemarkKind Kind = RK_Missing;
std::string Pattern;
std::shared_ptr<llvm::Regex> Regex;
/// By default, optimization remark is missing.
- OptRemark() : Kind(RK_Missing), Regex(nullptr) {}
+ OptRemark() = default;
/// Returns true iff the optimization remark holds a valid regular
/// expression.
@@ -362,9 +375,6 @@ public:
/// transformation.
OptRemark OptimizationRemarkAnalysis;
- /// Set of files defining the rules for the symbol rewriting.
- std::vector<std::string> RewriteMapFiles;
-
/// Set of sanitizer checks that are non-fatal (i.e. execution should be
/// continued when possible).
SanitizerSet SanitizeRecover;
@@ -417,6 +427,11 @@ public:
/// coverage pass should actually not be instrumented.
std::vector<std::string> SanitizeCoverageIgnorelistFiles;
+ /// Path to ignorelist file specifying which objects
+ /// (files, functions) listed for instrumentation by sanitizer
+ /// binary metadata pass should not be instrumented.
+ std::vector<std::string> SanitizeMetadataIgnorelistFiles;
+
/// Name of the stack usage file (i.e., .su file) if user passes
/// -fstack-usage. If empty, it can be implied that -fstack-usage is not
/// passed on the command line.
@@ -494,12 +509,12 @@ public:
/// Check if type and variable info should be emitted.
bool hasReducedDebugInfo() const {
- return getDebugInfo() >= codegenoptions::DebugInfoConstructor;
+ return getDebugInfo() >= llvm::codegenoptions::DebugInfoConstructor;
}
/// Check if maybe unused type info should be emitted.
bool hasMaybeUnusedDebugInfo() const {
- return getDebugInfo() >= codegenoptions::UnusedTypeInfo;
+ return getDebugInfo() >= llvm::codegenoptions::UnusedTypeInfo;
}
// Check if any one of SanitizeCoverage* is enabled.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Cuda.h b/contrib/llvm-project/clang/include/clang/Basic/Cuda.h
index 8ff28944f23d..7f65d711bb32 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Cuda.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Cuda.h
@@ -37,9 +37,11 @@ enum class CudaVersion {
CUDA_116,
CUDA_117,
CUDA_118,
- FULLY_SUPPORTED = CUDA_115,
+ CUDA_120,
+ CUDA_121,
+ FULLY_SUPPORTED = CUDA_118,
PARTIALLY_SUPPORTED =
- CUDA_118, // Partially supported. Proceed with a warning.
+ CUDA_121, // Partially supported. Proceed with a warning.
NEW = 10000, // Too new. Issue a warning, but allow using it.
};
const char *CudaVersionToString(CudaVersion V);
@@ -92,6 +94,8 @@ enum class CudaArch {
GFX90a,
GFX90c,
GFX940,
+ GFX941,
+ GFX942,
GFX1010,
GFX1011,
GFX1012,
@@ -107,6 +111,8 @@ enum class CudaArch {
GFX1101,
GFX1102,
GFX1103,
+ GFX1150,
+ GFX1151,
Generic, // A processor model named 'generic' if the target backend defines a
// public one.
LAST,
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h b/contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h
index 62e23682872a..dedfbd934a7b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h
@@ -11,10 +11,10 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/Triple.h"
#include <optional>
namespace llvm {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DebugInfoOptions.h b/contrib/llvm-project/clang/include/clang/Basic/DebugInfoOptions.h
deleted file mode 100644
index a99a2b5903d7..000000000000
--- a/contrib/llvm-project/clang/include/clang/Basic/DebugInfoOptions.h
+++ /dev/null
@@ -1,66 +0,0 @@
-//===--- DebugInfoOptions.h - Debug Info Emission Types ---------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_BASIC_DEBUGINFOOPTIONS_H
-#define LLVM_CLANG_BASIC_DEBUGINFOOPTIONS_H
-
-namespace clang {
-namespace codegenoptions {
-
-enum DebugInfoFormat {
- DIF_DWARF,
- DIF_CodeView,
-};
-
-enum DebugInfoKind {
- /// Don't generate debug info.
- NoDebugInfo,
-
- /// Emit location information but do not generate debug info in the output.
- /// This is useful in cases where the backend wants to track source
- /// locations for instructions without actually emitting debug info for them
- /// (e.g., when -Rpass is used).
- LocTrackingOnly,
-
- /// Emit only debug directives with the line numbers data
- DebugDirectivesOnly,
-
- /// Emit only debug info necessary for generating line number tables
- /// (-gline-tables-only).
- DebugLineTablesOnly,
-
- /// Limit generated debug info for classes to reduce size. This emits class
- /// type info only where the constructor is emitted, if it is a class that
- /// has a constructor.
- /// FIXME: Consider combining this with LimitedDebugInfo.
- DebugInfoConstructor,
-
- /// Limit generated debug info to reduce size (-fno-standalone-debug). This
- /// emits forward decls for types that could be replaced with forward decls in
- /// the source code. For dynamic C++ classes type info is only emitted into
- /// the module that contains the classe's vtable.
- LimitedDebugInfo,
-
- /// Generate complete debug info.
- FullDebugInfo,
-
- /// Generate debug info for types that may be unused in the source
- /// (-fno-eliminate-unused-debug-types).
- UnusedTypeInfo,
-};
-
-enum class DebugTemplateNamesKind {
- Full,
- Simple,
- Mangled
-};
-
-} // end namespace codegenoptions
-} // end namespace clang
-
-#endif
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h b/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h
index b9ba459d1358..5606a22fe9d6 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h
@@ -1565,7 +1565,7 @@ inline DiagnosticBuilder DiagnosticsEngine::Report(unsigned DiagID) {
/// currently in-flight diagnostic.
class Diagnostic {
const DiagnosticsEngine *DiagObj;
- StringRef StoredDiagMessage;
+ std::optional<StringRef> StoredDiagMessage;
public:
explicit Diagnostic(const DiagnosticsEngine *DO) : DiagObj(DO) {}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td b/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td
index 21de05b707a5..8d66e265fbae 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td
@@ -55,11 +55,11 @@ class DiagCategory<string Name> {
}
// Diagnostic Groups.
-class DiagGroup<string Name, list<DiagGroup> subgroups = []> {
+class DiagGroup<string Name, list<DiagGroup> subgroups = [], code docs = [{}]> {
string GroupName = Name;
list<DiagGroup> SubGroups = subgroups;
string CategoryName = "";
- code Documentation = [{}];
+ code Documentation = docs;
}
class InGroup<DiagGroup G> { DiagGroup Group = G; }
//class IsGroup<string Name> { DiagGroup Group = DiagGroup<Name>; }
@@ -157,7 +157,6 @@ class DefaultRemark { Severity DefaultSeverity = SEV_Remark; }
// Definitions for Diagnostics.
include "DiagnosticASTKinds.td"
-include "DiagnosticAnalysisKinds.td"
include "DiagnosticCommentKinds.td"
include "DiagnosticCommonKinds.td"
include "DiagnosticCrossTUKinds.td"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td
index 4e2e0bd3079c..566cdc340605 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td
@@ -15,6 +15,10 @@ def note_constexpr_invalid_cast : Note<
" performs the conversions of a reinterpret_cast}1|cast from %1}0"
" is not allowed in a constant expression"
"%select{| in C++ standards before C++20||}0">;
+def note_constexpr_invalid_void_star_cast : Note<
+ "cast from %0 is not allowed in a constant expression "
+ "%select{in C++ standards before C++2c|because the pointed object "
+ "type %2 is not similar to the target type %3}1">;
def note_constexpr_invalid_downcast : Note<
"cannot cast object of dynamic type %0 to type %1">;
def note_constexpr_overflow : Note<
@@ -65,7 +69,7 @@ def note_consteval_address_accessible : Note<
"%select{pointer|reference}0 to a consteval declaration "
"is not a constant expression">;
def note_constexpr_uninitialized : Note<
- "%select{|sub}0object of type %1 is not initialized">;
+ "subobject %0 is not initialized">;
def note_constexpr_static_local : Note<
"control flows through the definition of a %select{static|thread_local}0 variable">;
def note_constexpr_subobject_declared_here : Note<
@@ -127,6 +131,8 @@ def note_constexpr_null_subobject : Note<
"access array element of|perform pointer arithmetic on|"
"access real component of|"
"access imaginary component of}0 null pointer">;
+def note_constexpr_null_callee : Note<
+ "'%0' evaluates to a null function pointer">;
def note_constexpr_function_param_value_unknown : Note<
"function parameter %0 with unknown value cannot be used in a constant "
"expression">;
@@ -775,7 +781,7 @@ def err_module_odr_violation_field : Error<
"%select{non-|}5bitfield %4|"
"bitfield %4 with one width expression|"
"%select{non-|}5mutable field %4|"
- "field %4 with %select{no|an}5 initalizer|"
+ "field %4 with %select{no|an}5 initializer|"
"field %4 with an initializer"
"}3">;
def note_module_odr_violation_field : Note<
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td
index c59adcc72a68..ee994e765e09 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td
@@ -55,7 +55,7 @@ def err_expected_colon_after_setter_name : Error<
def err_expected_string_literal : Error<"expected string literal "
"%select{in %1|for diagnostic message in static_assert|"
"for optional message in 'availability' attribute|"
- "for %select{language|source container}1 name in "
+ "for %select{language name|source container name|USR}1 in "
"'external_source_symbol' attribute}0">;
def err_invalid_string_udl : Error<
"string literal with user-defined suffix cannot be used here">;
@@ -63,6 +63,10 @@ def err_invalid_character_udl : Error<
"character literal with user-defined suffix cannot be used here">;
def err_invalid_numeric_udl : Error<
"numeric literal with user-defined suffix cannot be used here">;
+def warn_pragma_debug_missing_argument : Warning<
+ "missing argument to debug command '%0'">, InGroup<IgnoredPragmas>;
+def warn_pragma_debug_unexpected_argument : Warning<
+ "unexpected argument to debug command">, InGroup<IgnoredPragmas>;
}
@@ -117,7 +121,7 @@ def note_pragma_entered_here : Note<"#pragma entered here">;
def note_decl_hiding_tag_type : Note<
"%1 %0 is hidden by a non-type declaration of %0 here">;
def err_attribute_not_type_attr : Error<
- "%0 attribute cannot be applied to types">;
+ "%0%select{ attribute|}1 cannot be applied to types">;
def err_enum_template : Error<"enumeration cannot be a template">;
def warn_cxx20_compat_consteval : Warning<
@@ -126,6 +130,12 @@ def warn_cxx20_compat_consteval : Warning<
def warn_missing_type_specifier : Warning<
"type specifier missing, defaults to 'int'">,
InGroup<ImplicitInt>, DefaultIgnore;
+
+def ext_c_empty_initializer : Extension<
+ "use of an empty initializer is a C2x extension">, InGroup<C2x>;
+def warn_c2x_compat_empty_initializer : Warning<
+ "use of an empty initializer is incompatible with C standards before C2x">,
+ InGroup<CPre2xCompat>, DefaultIgnore;
}
let CategoryName = "Nullability Issue" in {
@@ -165,6 +175,8 @@ def warn_unknown_attribute_ignored : Warning<
"unknown attribute %0 ignored">, InGroup<UnknownAttributes>;
def warn_attribute_ignored : Warning<"%0 attribute ignored">,
InGroup<IgnoredAttributes>;
+def err_keyword_not_supported_on_target : Error<
+ "%0 is not supported on this target">;
def err_use_of_tag_name_without_tag : Error<
"must use '%1' tag to refer to type %0%select{| in this scope}2">;
@@ -200,14 +212,14 @@ def ext_cxx11_longlong : Extension<
def warn_cxx98_compat_longlong : Warning<
"'long long' is incompatible with C++98">,
InGroup<CXX98CompatPedantic>, DefaultIgnore;
-def ext_cxx2b_size_t_suffix : ExtWarn<
- "'size_t' suffix for literals is a C++2b extension">,
- InGroup<CXX2b>;
+def ext_cxx23_size_t_suffix : ExtWarn<
+ "'size_t' suffix for literals is a C++23 extension">,
+ InGroup<CXX23>;
def warn_cxx20_compat_size_t_suffix : Warning<
"'size_t' suffix for literals is incompatible with C++ standards before "
- "C++2b">, InGroup<CXXPre2bCompat>, DefaultIgnore;
-def err_cxx2b_size_t_suffix: Error<
- "'size_t' suffix for literals is a C++2b feature">;
+ "C++23">, InGroup<CXXPre23Compat>, DefaultIgnore;
+def err_cxx23_size_t_suffix: Error<
+ "'size_t' suffix for literals is a C++23 feature">;
def err_size_t_literal_too_large: Error<
"%select{signed |}0'size_t' literal is out of range of possible "
"%select{signed |}0'size_t' values">;
@@ -293,12 +305,13 @@ def warn_slh_does_not_support_asm_goto : Warning<
// Sema && Serialization
def warn_dup_category_def : Warning<
- "duplicate definition of category %1 on interface %0">;
+ "duplicate definition of category %1 on interface %0">,
+ InGroup<DiagGroup<"objc-duplicate-category-definition">>;
// Targets
def err_target_unknown_triple : Error<
- "unknown target triple '%0', please use -triple or -arch">;
+ "unknown target triple '%0'">;
def err_target_unknown_cpu : Error<"unknown target CPU '%0'">;
def note_valid_options : Note<"valid target CPU values are: %0">;
def err_target_unsupported_cpu_for_micromips : Error<
@@ -329,6 +342,9 @@ def err_opt_not_valid_on_target : Error<
"option '%0' cannot be specified on this target">;
def err_invalid_feature_combination : Error<
"invalid feature combination: %0">;
+def warn_target_unrecognized_env : Warning<
+ "mismatch between architecture and environment in target triple '%0'; did you mean '%1'?">,
+ InGroup<InvalidCommandLineArgument>;
// Source manager
def err_cannot_open_file : Error<"cannot open file '%0': %1">, DefaultFatal;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDocs.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDocs.td
index bf88d5d04567..e9862422b499 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDocs.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDocs.td
@@ -81,3 +81,9 @@ Diagnostic flags
}];
}
+defvar GCCWriteStringsDocs = [{
+**Note:** enabling this warning in C will change the semantic behavior of the
+program by treating all string literals as having type ``const char *``
+instead of ``char *``. This can cause unexpected behaviors with type-sensitive
+constructs like ``_Generic``.
+}];
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
index f3d43b2e0667..1b69324d073a 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -29,6 +29,8 @@ def err_drv_invalid_arch_name : Error<
"invalid arch name '%0'">;
def err_drv_invalid_riscv_arch_name : Error<
"invalid arch name '%0', %1">;
+def err_drv_invalid_riscv_cpu_name_for_target : Error<
+ "cpu '%0' does not support rv%select{32|64}1">;
def warn_drv_invalid_arch_name_with_suggestion : Warning<
"ignoring invalid /arch: argument '%0'; for %select{64|32}1-bit expected one of %2">,
InGroup<UnusedCommandLineArgument>;
@@ -80,6 +82,9 @@ def err_drv_hipspv_no_hip_path : Error<
def err_drv_undetermined_gpu_arch : Error<
"cannot determine %0 architecture: %1; consider passing it via "
"'%2'">;
+def warn_drv_multi_gpu_arch : Warning<
+ "multiple %0 architectures are detected: %1; only the first one is used for "
+ "'%2'">, InGroup<MultiGPU>;
def err_drv_cuda_version_unsupported : Error<
"GPU arch %0 is supported by CUDA versions between %1 and %2 (inclusive), "
"but installation at %3 is %4; use '--cuda-path' to specify a different CUDA "
@@ -113,6 +118,12 @@ def warn_drv_unsupported_option_for_target : Warning<
def warn_drv_unsupported_option_for_flang : Warning<
"the argument '%0' is not supported for option '%1'. Mapping to '%1%2'">,
InGroup<OptionIgnored>;
+def warn_drv_unsupported_diag_option_for_flang : Warning<
+ "The warning option '-%0' is not supported">,
+ InGroup<OptionIgnored>;
+def warn_drv_unsupported_option_for_processor : Warning<
+ "ignoring '%0' option as it is not currently supported for processor '%1'">,
+ InGroup<OptionIgnored>;
def err_drv_invalid_thread_model_for_target : Error<
"invalid thread model '%0' in '%1' for this target">;
@@ -127,7 +138,7 @@ def err_drv_invalid_unwindlib_name : Error<
def err_drv_incompatible_unwindlib : Error<
"--rtlib=libgcc requires --unwindlib=libgcc">;
def err_drv_incompatible_options : Error<
- "The combination of '%0' and '%1' is incompatible">;
+ "the combination of '%0' and '%1' is incompatible">;
def err_drv_invalid_stdlib_name : Error<
"invalid library name in argument '%0'">;
def err_drv_invalid_output_with_multiple_archs : Error<
@@ -215,8 +226,10 @@ def err_drv_malformed_sanitizer_coverage_allowlist : Error<
"malformed sanitizer coverage allowlist: '%0'">;
def err_drv_malformed_sanitizer_coverage_ignorelist : Error<
"malformed sanitizer coverage ignorelist: '%0'">;
-def err_drv_unsupported_static_ubsan_darwin : Error<
- "static UndefinedBehaviorSanitizer runtime is not supported on darwin">;
+def err_drv_malformed_sanitizer_metadata_ignorelist : Error<
+ "malformed sanitizer metadata ignorelist: '%0'">;
+def err_drv_unsupported_static_sanitizer_darwin : Error<
+ "static %0 runtime is not supported on darwin">;
def err_drv_duplicate_config : Error<
"no more than one option '--config' is allowed">;
def err_drv_cannot_open_config_file : Error<
@@ -248,6 +261,7 @@ def warn_drv_unknown_argument_clang_cl : Warning<
def warn_drv_unknown_argument_clang_cl_with_suggestion : Warning<
"unknown argument ignored in clang-cl '%0'; did you mean '%1'?">,
InGroup<UnknownArgument>;
+def err_drv_unknown_target_triple : Error<"unknown target triple '%0'">;
def warn_drv_ycyu_different_arg_clang_cl : Warning<
"support for '/Yc' and '/Yu' with different filenames not implemented yet; flags ignored">,
@@ -405,7 +419,10 @@ def warn_drv_assuming_mfloat_abi_is : Warning<
"unknown platform, assuming -mfloat-abi=%0">;
def warn_drv_unsupported_float_abi_by_lib : Warning<
"float ABI '%0' is not supported by current library">,
- InGroup<DiagGroup<"unsupported-abi">>;
+ InGroup<UnsupportedABI>;
+def warn_drv_no_floating_point_registers: Warning<
+ "'%0': selected processor lacks floating point registers">,
+ InGroup<UnsupportedABI>;
def warn_ignoring_ftabstop_value : Warning<
"ignoring invalid -ftabstop value '%0', using default value %1">;
def warn_drv_overriding_flag_option : Warning<
@@ -558,17 +575,13 @@ def err_drv_unsupported_fpatchable_function_entry_argument : Error<
"the second argument of '-fpatchable-function-entry' must be smaller than the first argument">;
def warn_drv_unable_to_find_directory_expected : Warning<
- "unable to find %0 directory, expected to be in '%1'">,
+ "unable to find %0 directory, expected to be in '%1' found via %2">,
InGroup<InvalidOrNonExistentDirectory>, DefaultIgnore;
def warn_drv_ps_force_pic : Warning<
"option '%0' was ignored by the %1 toolchain, using '-fPIC'">,
InGroup<OptionIgnored>;
-def warn_drv_ps_sdk_dir : Warning<
- "environment variable '%0' is set, but points to invalid or nonexistent directory '%1'">,
- InGroup<InvalidOrNonExistentDirectory>;
-
def err_drv_defsym_invalid_format : Error<"defsym must be of the form: sym=value: %0">;
def err_drv_defsym_invalid_symval : Error<"value is not an integer: %0">;
def warn_drv_msvc_not_found : Warning<
@@ -608,6 +621,9 @@ def warn_drv_darwin_sdk_invalid_settings : Warning<
"SDK settings were ignored as 'SDKSettings.json' could not be parsed">,
InGroup<DiagGroup<"darwin-sdk-settings">>;
+def err_drv_darwin_sdk_missing_arclite : Error<
+ "SDK does not contain 'libarclite' at the path '%0'; try increasing the minimum deployment target">;
+
def err_drv_trivial_auto_var_init_stop_after_missing_dependency : Error<
"'-ftrivial-auto-var-init-stop-after=*' is used without "
"'-ftrivial-auto-var-init=zero' or '-ftrivial-auto-var-init=pattern'">;
@@ -632,21 +648,14 @@ def warn_drv_libstdcxx_not_found : Warning<
"command line to use the libc++ standard library instead">,
InGroup<DiagGroup<"stdlibcxx-not-found">>;
-def warn_deperecated_fcoroutines_ts_flag : Warning<
- "the '-fcoroutines-ts' flag is deprecated and it will be removed in Clang 17; "
- "use '-std=c++20' or higher to use standard C++ coroutines instead">,
- InGroup<DeprecatedExperimentalCoroutine>;
-def warn_deprecated_fmodules_ts_flag : Warning<
- "the '-fmodules-ts' flag is deprecated and it will be removed in Clang 17; "
- "use '-std=c++20' or higher to use standard C++ modules instead">,
- InGroup<DiagGroup<"deprecated-module-ts">>;
-
def err_drv_cannot_mix_options : Error<"cannot specify '%1' along with '%0'">;
def err_drv_invalid_object_mode : Error<
"OBJECT_MODE setting %0 is not recognized and is not a valid setting">;
def err_aix_unsupported_tls_model : Error<"TLS model '%0' is not yet supported on AIX">;
+def err_roptr_requires_data_sections: Error<"-mxcoff-roptr is supported only with -fdata-sections">;
+def err_roptr_cannot_build_shared: Error<"-mxcoff-roptr is not supported with -shared">;
def err_invalid_cxx_abi : Error<"invalid C++ ABI name '%0'">;
def err_unsupported_cxx_abi : Error<"C++ ABI '%0' is not supported on target triple '%1'">;
@@ -697,6 +706,9 @@ def err_drv_dxc_missing_target_profile : Error<
"target profile option (-T) is missing">;
def err_drv_hlsl_unsupported_target : Error<
"HLSL code generation is unsupported for target '%0'">;
+def warn_drv_dxc_missing_dxv : Warning<"dxv not found. "
+ "Resulting DXIL will not be validated or signed for use in release environments.">,
+ InGroup<DXILValidation>;
def err_drv_invalid_range_dxil_validator_version : Error<
"invalid validator version : %0\n"
@@ -715,6 +727,18 @@ def warn_drv_sarif_format_unstable : Warning<
def err_drv_riscv_unsupported_with_linker_relaxation : Error<
"%0 is unsupported with RISC-V linker relaxation (-mrelax)">;
+def warn_drv_loongarch_conflicting_implied_val : Warning<
+ "ignoring '%0' as it conflicts with that implied by '%1' (%2)">,
+ InGroup<OptionIgnored>;
def err_drv_loongarch_invalid_mfpu_EQ : Error<
"invalid argument '%0' to -mfpu=; must be one of: 64, 32, none, 0 (alias for none)">;
+
+def err_drv_expand_response_file : Error<
+ "failed to expand response file: %0">;
+
+def warn_drv_missing_multilib : Warning<
+ "no multilib found matching flags: %0">,
+ InGroup<DiagGroup<"missing-multilib">>;
+def note_drv_available_multilibs : Note<
+ "available multilibs are:%0">;
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h
index 3660bd1b3b3d..744f7fe19db7 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h
@@ -35,8 +35,8 @@ public:
}
/// Extracts and returns the diagnostic payload from the given \c Error if
- /// the error is a \c DiagnosticError. Returns none if the given error is not
- /// a \c DiagnosticError.
+ /// the error is a \c DiagnosticError. Returns std::nullopt if the given error
+ /// is not a \c DiagnosticError.
static std::optional<PartialDiagnosticAt> take(llvm::Error &Err) {
std::optional<PartialDiagnosticAt> Result;
Err = llvm::handleErrors(std::move(Err), [&](DiagnosticError &E) {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td
index d0f672ae5a1b..9ed9a88fa3d6 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td
@@ -11,7 +11,7 @@ class BackendInfo : CatBackend, ShowInSystemHeader;
let Component = "Frontend" in {
def err_fe_error_opening : Error<"error opening '%0': %1">;
-def err_fe_error_reading : Error<"error reading '%0'">;
+def err_fe_error_reading : Error<"error reading '%0': %1">;
def err_fe_error_reading_stdin : Error<"error reading stdin: %0">;
def err_fe_error_backend : Error<"error in backend: %0">, DefaultFatal;
@@ -55,6 +55,9 @@ def warn_fe_backend_unsupported_fp_exceptions : Warning<
def warn_fe_backend_invalid_feature_flag : Warning<
"feature flag '%0' must start with either '+' to enable the feature or '-'"
" to disable it; flag ignored">, InGroup<InvalidCommandLineArgument>;
+def warn_fe_backend_readonly_feature_flag : Warning<
+ "feature flag '%0' is ignored since the feature is read only">,
+ InGroup<InvalidCommandLineArgument>;
def err_incompatible_fp_eval_method_options : Error<
"option 'ffp-eval-method' cannot be used with option "
@@ -209,7 +212,7 @@ def note_incompatible_analyzer_plugin_api : Note<
def err_module_build_requires_fmodules : Error<
"module compilation requires '-fmodules'">;
def err_module_interface_requires_cpp_modules : Error<
- "module interface compilation requires '-std=c++20' or '-fmodules-ts'">;
+ "module interface compilation requires '-std=c++20'">;
def warn_module_config_mismatch : Warning<
"module file %0 cannot be loaded due to a configuration mismatch with the current "
"compilation">, InGroup<DiagGroup<"module-file-config-mismatch">>, DefaultError;
@@ -241,8 +244,10 @@ def warn_module_config_macro_undef : Warning<
def note_module_def_undef_here : Note<
"macro was %select{defined|#undef'd}0 here">;
def remark_module_build : Remark<"building module '%0' as '%1'">,
+ ShowInSystemHeader,
InGroup<ModuleBuild>;
def remark_module_build_done : Remark<"finished building module '%0'">,
+ ShowInSystemHeader,
InGroup<ModuleBuild>;
def remark_module_lock : Remark<"locking '%0' to build module '%1'">,
InGroup<ModuleLock>;
@@ -278,6 +283,10 @@ def err_avx_calling_convention : Error<warn_avx_calling_convention.Summary>;
def err_alias_to_undefined : Error<
"%select{alias|ifunc}0 must point to a defined "
"%select{variable or |}1function">;
+def note_alias_requires_mangled_name : Note<
+ "the %select{function or variable|function}0 specified in an %select{alias|ifunc}1 must refer to its mangled name">;
+def note_alias_mangled_name_alternative: Note<
+ "function by that name is mangled as \"%0\"">;
def warn_alias_to_weak_alias : Warning<
"%select{alias|ifunc}2 will always resolve to %0 even if weak definition of "
"%1 is overridden">,
@@ -305,6 +314,10 @@ def warn_atomic_op_oversized : Warning<
"; the access size (%0 bytes) exceeds the max lock-free size (%1 bytes)">,
InGroup<AtomicAlignment>;
+def warn_sync_op_misaligned : Warning<
+ "__sync builtin operation MUST have natural alignment (consider using __atomic).">,
+ InGroup<SyncAlignment>;
+
def warn_alias_with_section : Warning<
"%select{alias|ifunc}1 will not be in section '%0' but in the same section "
"as the %select{aliasee|resolver}2">,
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td
index 6c997c37cc5c..7b4d415bf064 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td
@@ -60,14 +60,12 @@ def CompoundTokenSplit : DiagGroup<"compound-token-split",
CompoundTokenSplitBySpace]>;
def CoroutineMissingUnhandledException :
DiagGroup<"coroutine-missing-unhandled-exception">;
-def DeprecatedExperimentalCoroutine :
- DiagGroup<"deprecated-experimental-coroutine">;
def DeprecatedCoroutine :
- DiagGroup<"deprecated-coroutine", [DeprecatedExperimentalCoroutine]>;
+ DiagGroup<"deprecated-coroutine">;
def AlwaysInlineCoroutine :
DiagGroup<"always-inline-coroutine">;
def CoroNonAlignedAllocationFunction :
- DiagGroup<"coro-non-aligned-allocation-funciton">;
+ DiagGroup<"coro-non-aligned-allocation-function">;
def Coroutine : DiagGroup<"coroutine", [CoroutineMissingUnhandledException, DeprecatedCoroutine,
AlwaysInlineCoroutine, CoroNonAlignedAllocationFunction]>;
def ObjCBoolConstantConversion : DiagGroup<"objc-bool-constant-conversion">;
@@ -185,9 +183,11 @@ def DeprecatedCopyWithUserProvidedCopy : DiagGroup<"deprecated-copy-with-user-pr
def DeprecatedCopyWithUserProvidedDtor : DiagGroup<"deprecated-copy-with-user-provided-dtor">;
def DeprecatedCopy : DiagGroup<"deprecated-copy", [DeprecatedCopyWithUserProvidedCopy]>;
def DeprecatedCopyWithDtor : DiagGroup<"deprecated-copy-with-dtor", [DeprecatedCopyWithUserProvidedDtor]>;
+def DeprecatedLiteralOperator : DiagGroup<"deprecated-literal-operator">;
// For compatibility with GCC.
def : DiagGroup<"deprecated-copy-dtor", [DeprecatedCopyWithDtor]>;
def DeprecatedDeclarations : DiagGroup<"deprecated-declarations">;
+def DeprecatedRedundantConstexprStaticDef : DiagGroup<"deprecated-redundant-constexpr-static-def">;
def UnavailableDeclarations : DiagGroup<"unavailable-declarations">;
def UnguardedAvailabilityNew : DiagGroup<"unguarded-availability-new">;
def UnguardedAvailability : DiagGroup<"unguarded-availability",
@@ -221,12 +221,15 @@ def Deprecated : DiagGroup<"deprecated", [DeprecatedAnonEnumEnumConversion,
DeprecatedEnumFloatConversion,
DeprecatedBuiltins,
DeprecatedIncrementBool,
+ DeprecatedLiteralOperator,
DeprecatedPragma,
DeprecatedRegister,
DeprecatedThisCapture,
DeprecatedType,
DeprecatedVolatile,
- DeprecatedWritableStr]>,
+ DeprecatedWritableStr,
+ DeprecatedRedundantConstexprStaticDef,
+ ]>,
DiagCategory<"Deprecations">;
def CXX20Designator : DiagGroup<"c++20-designator">;
@@ -258,7 +261,6 @@ def EmptyBody : DiagGroup<"empty-body">;
def Exceptions : DiagGroup<"exceptions">;
def DeclarationAfterStatement : DiagGroup<"declaration-after-statement">;
-def GNUEmptyInitializer : DiagGroup<"gnu-empty-initializer">;
def GNUEmptyStruct : DiagGroup<"gnu-empty-struct">;
def ExtraTokens : DiagGroup<"extra-tokens">;
def CXX98CompatExtraSemi : DiagGroup<"c++98-compat-extra-semi">;
@@ -307,9 +309,14 @@ def CXXPre20CompatPedantic : DiagGroup<"pre-c++20-compat-pedantic",
[CXXPre20Compat]>;
def : DiagGroup<"c++98-c++11-c++14-c++17-compat-pedantic",
[CXXPre20CompatPedantic]>;
-def CXXPre2bCompat : DiagGroup<"pre-c++2b-compat">;
-def CXXPre2bCompatPedantic :
- DiagGroup<"pre-c++2b-compat-pedantic", [CXXPre2bCompat]>;
+def CXXPre23Compat : DiagGroup<"pre-c++23-compat">;
+def CXXPre23CompatPedantic :
+ DiagGroup<"pre-c++23-compat-pedantic", [CXXPre23Compat]>;
+def CXXPre26Compat : DiagGroup<"pre-c++26-compat">;
+def CXXPre26CompatPedantic :
+ DiagGroup<"pre-c++26-compat-pedantic", [CXXPre26Compat]>;
+def : DiagGroup<"pre-c++2c-compat", [CXXPre26Compat]>;
+def : DiagGroup<"pre-c++2c-compat-pedantic", [CXXPre26CompatPedantic]>;
def CXX98CompatBindToTemporaryCopy :
DiagGroup<"c++98-compat-bind-to-temporary-copy">;
@@ -324,7 +331,7 @@ def CXX98Compat : DiagGroup<"c++98-compat",
CXXPre14Compat,
CXXPre17Compat,
CXXPre20Compat,
- CXXPre2bCompat]>;
+ CXXPre23Compat]>;
// Warnings for C++11 features which are Extensions in C++98 mode.
def CXX98CompatPedantic : DiagGroup<"c++98-compat-pedantic",
[CXX98Compat,
@@ -333,7 +340,7 @@ def CXX98CompatPedantic : DiagGroup<"c++98-compat-pedantic",
CXXPre14CompatPedantic,
CXXPre17CompatPedantic,
CXXPre20CompatPedantic,
- CXXPre2bCompatPedantic]>;
+ CXXPre23CompatPedantic]>;
def CXX11Narrowing : DiagGroup<"c++11-narrowing">;
@@ -363,39 +370,39 @@ def CXX11Compat : DiagGroup<"c++11-compat",
CXXPre14Compat,
CXXPre17Compat,
CXXPre20Compat,
- CXXPre2bCompat]>;
+ CXXPre23Compat]>;
def : DiagGroup<"c++0x-compat", [CXX11Compat]>;
def CXX11CompatPedantic : DiagGroup<"c++11-compat-pedantic",
[CXX11Compat,
CXXPre14CompatPedantic,
CXXPre17CompatPedantic,
CXXPre20CompatPedantic,
- CXXPre2bCompatPedantic]>;
+ CXXPre23CompatPedantic]>;
def CXX14Compat : DiagGroup<"c++14-compat", [CXXPre17Compat,
CXXPre20Compat,
- CXXPre2bCompat]>;
+ CXXPre23Compat]>;
def CXX14CompatPedantic : DiagGroup<"c++14-compat-pedantic",
[CXX14Compat,
CXXPre17CompatPedantic,
CXXPre20CompatPedantic,
- CXXPre2bCompatPedantic]>;
+ CXXPre23CompatPedantic]>;
def CXX17Compat : DiagGroup<"c++17-compat", [DeprecatedRegister,
DeprecatedIncrementBool,
CXX17CompatMangling,
CXXPre20Compat,
- CXXPre2bCompat]>;
+ CXXPre23Compat]>;
def CXX17CompatPedantic : DiagGroup<"c++17-compat-pedantic",
[CXX17Compat,
CXXPre20CompatPedantic,
- CXXPre2bCompatPedantic]>;
+ CXXPre23CompatPedantic]>;
def : DiagGroup<"c++1z-compat", [CXX17Compat]>;
-def CXX20Compat : DiagGroup<"c++20-compat", [CXXPre2bCompat]>;
+def CXX20Compat : DiagGroup<"c++20-compat", [CXXPre23Compat]>;
def CXX20CompatPedantic : DiagGroup<"c++20-compat-pedantic",
[CXX20Compat,
- CXXPre2bCompatPedantic]>;
+ CXXPre23CompatPedantic]>;
def : DiagGroup<"c++2a-compat", [CXX20Compat]>;
def : DiagGroup<"c++2a-compat-pedantic", [CXX20CompatPedantic]>;
@@ -803,6 +810,7 @@ def AtomicAlignment : DiagGroup<"atomic-alignment">;
def CustomAtomic : DiagGroup<"custom-atomic-properties">;
def AtomicProperties : DiagGroup<"atomic-properties",
[ImplicitAtomic, CustomAtomic]>;
+def SyncAlignment : DiagGroup<"sync-alignment">;
def ARCUnsafeRetainedAssign : DiagGroup<"arc-unsafe-retained-assign">;
def ARCRetainCycles : DiagGroup<"arc-retain-cycles">;
def ARCNonPodMemAccess : DiagGroup<"arc-non-pod-memaccess">;
@@ -849,15 +857,17 @@ def WritableStrings : DiagGroup<"writable-strings", [DeprecatedWritableStr]>;
//
// FIXME: Should this affect C++11 (where this is an error,
// not just deprecated) or not?
-def GCCWriteStrings : DiagGroup<"write-strings" , [WritableStrings]>;
+def GCCWriteStrings : DiagGroup<"write-strings" , [WritableStrings],
+ GCCWriteStringsDocs>;
def CharSubscript : DiagGroup<"char-subscripts">;
def LargeByValueCopy : DiagGroup<"large-by-value-copy">;
def DuplicateArgDecl : DiagGroup<"duplicate-method-arg">;
def SignedEnumBitfield : DiagGroup<"signed-enum-bitfield">;
+def ReservedModuleIdentifier : DiagGroup<"reserved-module-identifier">;
def ReservedIdentifier : DiagGroup<"reserved-identifier",
- [ReservedIdAsMacro]>;
+ [ReservedIdAsMacro, ReservedModuleIdentifier, UserDefinedLiterals]>;
// Unreachable code warning groups.
//
@@ -1055,7 +1065,7 @@ def Consumed : DiagGroup<"consumed">;
// warning should be active _only_ when -Wall is passed in, mark it as
// DefaultIgnore in addition to putting it here.
def All : DiagGroup<"all", [Most, Parentheses, Switch, SwitchBool,
- MisleadingIndentation]>;
+ MisleadingIndentation, PackedNonPod]>;
// Warnings that should be in clang-cl /w4.
def : DiagGroup<"CL4", [All, Extra]>;
@@ -1108,14 +1118,20 @@ def CXX17 : DiagGroup<"c++17-extensions", [CXX17Attrs]>;
// earlier C++ versions.
def CXX20 : DiagGroup<"c++20-extensions", [CXX20Designator, CXX20Attrs]>;
-// A warning group for warnings about using C++2b features as extensions in
+// A warning group for warnings about using C++23 features as extensions in
// earlier C++ versions.
-def CXX2b : DiagGroup<"c++2b-extensions">;
+def CXX23 : DiagGroup<"c++23-extensions">;
+
+// A warning group for warnings about using C++26 features as extensions in
+// earlier C++ versions.
+def CXX26 : DiagGroup<"c++26-extensions">;
def : DiagGroup<"c++0x-extensions", [CXX11]>;
def : DiagGroup<"c++1y-extensions", [CXX14]>;
def : DiagGroup<"c++1z-extensions", [CXX17]>;
def : DiagGroup<"c++2a-extensions", [CXX20]>;
+def : DiagGroup<"c++2b-extensions", [CXX23]>;
+def : DiagGroup<"c++2c-extensions", [CXX26]>;
def DelegatingCtorCycles :
DiagGroup<"delegating-ctor-cycles">;
@@ -1135,7 +1151,7 @@ def GNU : DiagGroup<"gnu", [GNUAlignofExpression, GNUAnonymousStruct,
GNUBinaryLiteral, GNUCaseRange,
GNUComplexInteger, GNUCompoundLiteralInitializer,
GNUConditionalOmittedOperand, GNUDesignator,
- GNUEmptyInitializer, GNUEmptyStruct,
+ GNUEmptyStruct,
VLAExtension, GNUFlexibleArrayInitializer,
GNUFlexibleArrayUnionMember, GNUFoldingConstant,
GNUImaginaryConstant, GNUIncludeNext,
@@ -1190,6 +1206,7 @@ def MicrosoftCommentPaste : DiagGroup<"microsoft-comment-paste">;
def MicrosoftEndOfFile : DiagGroup<"microsoft-end-of-file">;
def MicrosoftInaccessibleBase : DiagGroup<"microsoft-inaccessible-base">;
def MicrosoftStaticAssert : DiagGroup<"microsoft-static-assert">;
+def MicrosoftInitFromPredefined : DiagGroup<"microsoft-init-from-predefined">;
// Aliases.
def : DiagGroup<"msvc-include", [MicrosoftInclude]>;
@@ -1207,7 +1224,7 @@ def Microsoft : DiagGroup<"microsoft",
MicrosoftFlexibleArray, MicrosoftExtraQualification, MicrosoftCast,
MicrosoftConstInit, MicrosoftVoidPseudoDtor, MicrosoftAnonTag,
MicrosoftCommentPaste, MicrosoftEndOfFile, MicrosoftStaticAssert,
- MicrosoftInconsistentDllImport]>;
+ MicrosoftInitFromPredefined, MicrosoftInconsistentDllImport]>;
def ClangClPch : DiagGroup<"clang-cl-pch">;
@@ -1278,7 +1295,7 @@ invocation.
The diagnostic information can be saved to a file in a machine readable format,
like YAML by adding the `-foptimization-record-file=<file>` command-line flag.
-Results can be filtered by function name by passing
+Results can be filtered by function name by passing
`-mllvm -filter-print-funcs=foo`, where `foo` is the target function's name.
.. code-block: console
@@ -1321,6 +1338,12 @@ def CudaUnknownVersion: DiagGroup<"unknown-cuda-version">;
// ignored by CUDA.
def HIPOnly : DiagGroup<"hip-only">;
+// Warning about mixed HIP and OpenMP compilation / target offloading.
+def HIPOpenMPOffloading: DiagGroup<"hip-omp-target-directives">;
+
+// Warning about multiple GPUs are detected.
+def MultiGPU: DiagGroup<"multi-gpu">;
+
// Warnings which cause linking of the runtime libraries like
// libc and the CRT to be skipped.
def AVRRtlibLinkingQuirks : DiagGroup<"avr-rtlib-linking-quirks">;
@@ -1334,6 +1357,8 @@ def OptionIgnored : DiagGroup<"option-ignored">;
def UnknownArgument : DiagGroup<"unknown-argument">;
+def UnsupportedABI : DiagGroup<"unsupported-abi">;
+
// A warning group for warnings about code that clang accepts when
// compiling OpenCL C/C++ but which is not compatible with the SPIR(-V) spec.
def SpirCompat : DiagGroup<"spir-compat">;
@@ -1412,6 +1437,9 @@ def BranchProtection : DiagGroup<"branch-protection">;
// Warnings for HLSL Clang extensions
def HLSLExtension : DiagGroup<"hlsl-extensions">;
+// Warnings for DXIL validation
+def DXILValidation : DiagGroup<"dxil-validation">;
+
// Warnings and notes related to const_var_decl_type attribute checks
def ReadOnlyPlacementChecks : DiagGroup<"read-only-types">;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
index 6dd78bf93eaa..bf4995175ef1 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
@@ -36,7 +36,7 @@ namespace clang {
DIAG_SIZE_SERIALIZATION = 120,
DIAG_SIZE_LEX = 400,
DIAG_SIZE_PARSE = 700,
- DIAG_SIZE_AST = 250,
+ DIAG_SIZE_AST = 300,
DIAG_SIZE_COMMENT = 100,
DIAG_SIZE_CROSSTU = 100,
DIAG_SIZE_SEMA = 4500,
@@ -159,6 +159,10 @@ public:
Result.Severity = Bits & 0x7;
return Result;
}
+
+ bool operator==(DiagnosticMapping Other) const {
+ return serialize() == Other.serialize();
+ }
};
/// Used for handling and querying diagnostic IDs.
@@ -208,6 +212,9 @@ public:
/// default.
static bool isDefaultMappingAsError(unsigned DiagID);
+ /// Get the default mapping for this diagnostic.
+ static DiagnosticMapping getDefaultMapping(unsigned DiagID);
+
/// Determine whether the given built-in diagnostic ID is a Note.
static bool isBuiltinNote(unsigned DiagID);
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td
index 3b1b466e7602..0eb270aeea0e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -138,13 +138,13 @@ def ext_mathematical_notation : ExtWarn<
def ext_delimited_escape_sequence : Extension<
"%select{delimited|named}0 escape sequences are a "
- "%select{Clang|C++2b}1 extension">,
+ "%select{Clang|C++23}1 extension">,
InGroup<DiagGroup<"delimited-escape-sequence-extension">>;
-def warn_cxx2b_delimited_escape_sequence : Warning<
+def warn_cxx23_delimited_escape_sequence : Warning<
"%select{delimited|named}0 escape sequences are "
- "incompatible with C++ standards before C++2b">,
- InGroup<CXXPre2bCompat>, DefaultIgnore;
+ "incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
def err_delimited_escape_empty : Error<
"delimited escape sequence cannot be empty">;
@@ -197,6 +197,14 @@ def warn_cxx98_compat_literal_ucn_escape_basic_scs : Warning<
def warn_cxx98_compat_literal_ucn_control_character : Warning<
"universal character name referring to a control character "
"is incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
+def warn_c2x_compat_literal_ucn_escape_basic_scs : Warning<
+ "specifying character '%0' with a universal character name is "
+ "incompatible with C standards before C2x">,
+ InGroup<CPre2xCompat>, DefaultIgnore;
+def warn_c2x_compat_literal_ucn_control_character : Warning<
+ "universal character name referring to a control character "
+ "is incompatible with C standards before C2x">,
+ InGroup<CPre2xCompat>, DefaultIgnore;
def warn_ucn_not_valid_in_c89 : Warning<
"universal character names are only valid in C99 or C++; "
"treating as '\\' followed by identifier">, InGroup<Unicode>;
@@ -276,6 +284,13 @@ def ext_ms_reserved_user_defined_literal : ExtWarn<
"identifier">, InGroup<ReservedUserDefinedLiteral>;
def err_unsupported_string_concat : Error<
"unsupported non-standard concatenation of string literals">;
+
+def err_unevaluated_string_prefix : Error<
+ "an unevaluated string literal cannot have an encoding prefix">;
+def err_unevaluated_string_udl : Error<
+ "an unevaluated string literal cannot be a user-defined literal">;
+def err_unevaluated_string_invalid_escape_sequence : Error<
+ "invalid escape sequence '%0' in an unevaluated string literal">;
def err_string_concat_mixed_suffix : Error<
"differing user-defined suffixes ('%0' and '%1') in string literal "
"concatenation">;
@@ -396,10 +411,10 @@ def ext_pp_include_next_directive : Extension<
"#include_next is a language extension">, InGroup<GNUIncludeNext>;
def ext_pp_warning_directive : Extension<
- "#warning is a %select{C2x|C++2b}0 extension">;
-def warn_cxx2b_compat_warning_directive : Warning<
- "#warning is incompatible with C++ standards before C++2b">,
- InGroup<CXXPre2bCompat>, DefaultIgnore;
+ "#warning is a %select{C2x|C++23}0 extension">;
+def warn_cxx23_compat_warning_directive : Warning<
+ "#warning is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
def warn_c2x_compat_warning_directive : Warning<
"#warning is incompatible with C standards before C2x">,
InGroup<CPre2xCompat>, DefaultIgnore;
@@ -655,10 +670,6 @@ def warn_pragma_debug_missing_command : Warning<
"missing debug command">, InGroup<IgnoredPragmas>;
def warn_pragma_debug_unexpected_command : Warning<
"unexpected debug command '%0'">, InGroup<IgnoredPragmas>;
-def warn_pragma_debug_missing_argument : Warning<
- "missing argument to debug command '%0'">, InGroup<IgnoredPragmas>;
-def warn_pragma_debug_unexpected_argument : Warning<
- "unexpected argument to debug command">, InGroup<IgnoredPragmas>;
def warn_pragma_debug_unknown_module : Warning<
"unknown module '%0'">, InGroup<IgnoredPragmas>;
// #pragma module
@@ -743,14 +754,14 @@ def ext_c2x_pp_directive : ExtWarn<
"use of a '#%select{<BUG IF SEEN>|elifdef|elifndef}0' directive "
"is a C2x extension">,
InGroup<C2x>;
-def warn_cxx2b_compat_pp_directive : Warning<
+def warn_cxx23_compat_pp_directive : Warning<
"use of a '#%select{<BUG IF SEEN>|elifdef|elifndef}0' directive "
- "is incompatible with C++ standards before C++2b">,
- InGroup<CXXPre2bCompat>, DefaultIgnore;
-def ext_cxx2b_pp_directive : ExtWarn<
+ "is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
+def ext_cxx23_pp_directive : ExtWarn<
"use of a '#%select{<BUG IF SEEN>|elifdef|elifndef}0' directive "
- "is a C++2b extension">,
- InGroup<CXX2b>;
+ "is a C++23 extension">,
+ InGroup<CXX23>;
def err_pp_visibility_non_macro : Error<"no macro named %0">;
@@ -888,6 +899,8 @@ def warn_use_of_private_header_outside_module : Warning<
InGroup<DiagGroup<"private-header">>, DefaultError;
def err_undeclared_use_of_module : Error<
"module %0 does not depend on a module exporting '%1'">;
+def err_undeclared_use_of_module_indirect : Error<
+ "module %0 does not directly depend on a module exporting '%1', which is part of indirectly-used module %2">;
def warn_non_modular_include_in_framework_module : Warning<
"include of non-modular header inside framework module '%0': '%1'">,
InGroup<NonModularIncludeInFrameworkModule>, DefaultIgnore;
@@ -943,4 +956,15 @@ def err_dep_source_scanner_unexpected_tokens_at_import : Error<
}
+def err_pp_double_begin_pragma_unsafe_buffer_usage :
+Error<"already inside '#pragma unsafe_buffer_usage'">;
+
+def err_pp_unmatched_end_begin_pragma_unsafe_buffer_usage :
+Error<"not currently inside '#pragma unsafe_buffer_usage'">;
+
+def err_pp_unclosed_pragma_unsafe_buffer_usage :
+Error<"'#pragma unsafe_buffer_usage' was not ended">;
+
+def err_pp_pragma_unsafe_buffer_usage_syntax :
+Error<"expected 'begin' or 'end'">;
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def
index 7be81f6b6a95..6d0c1b14acc1 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def
@@ -90,11 +90,15 @@ VALUE_DIAGOPT(ConstexprBacktraceLimit, 32, DefaultConstexprBacktraceLimit)
VALUE_DIAGOPT(SpellCheckingLimit, 32, DefaultSpellCheckingLimit)
/// Limit number of lines shown in a snippet.
VALUE_DIAGOPT(SnippetLineLimit, 32, DefaultSnippetLineLimit)
+/// Show line number column on the left of snippets.
+VALUE_DIAGOPT(ShowLineNumbers, 1, DefaultShowLineNumbers)
VALUE_DIAGOPT(TabStop, 32, DefaultTabStop) /// The distance between tab stops.
/// Column limit for formatting message diagnostics, or 0 if unused.
VALUE_DIAGOPT(MessageLength, 32, 0)
+DIAGOPT(ShowSafeBufferUsageSuggestions, 1, 0)
+
#undef DIAGOPT
#undef ENUM_DIAGOPT
#undef VALUE_DIAGOPT
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h
index 4b0d45a3ff7c..7e218b9c71e6 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h
@@ -84,7 +84,8 @@ public:
DefaultTemplateBacktraceLimit = 10,
DefaultConstexprBacktraceLimit = 10,
DefaultSpellCheckingLimit = 50,
- DefaultSnippetLineLimit = 1,
+ DefaultSnippetLineLimit = 16,
+ DefaultShowLineNumbers = 1,
};
// Define simple diagnostic options (with no accessors).
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
index e99beb3a7636..8d729c31641e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -157,11 +157,17 @@ def err_duplicate_default_assoc : Error<
"duplicate default generic association">;
def note_previous_default_assoc : Note<
"previous default generic association is here">;
+def ext_generic_with_type_arg : Extension<
+ "passing a type argument as the first operand to '_Generic' is a Clang "
+ "extension">, InGroup<DiagGroup<"generic-type-extension">>;
def ext_c99_feature : Extension<
"'%0' is a C99 extension">, InGroup<C99>;
def ext_c11_feature : Extension<
"'%0' is a C11 extension">, InGroup<C11>;
+def warn_c2x_compat_keyword : Warning<
+ "'%0' is incompatible with C standards before C2x">,
+ InGroup<CPre2xCompat>, DefaultIgnore;
def err_c11_noreturn_misplaced : Error<
"'_Noreturn' keyword must precede function declarator">;
@@ -179,8 +185,6 @@ def ext_gnu_statement_expr_macro : Extension<
InGroup<GNUStatementExpressionFromMacroExpansion>;
def ext_gnu_conditional_expr : Extension<
"use of GNU ?: conditional expression extension, omitting middle operand">, InGroup<GNUConditionalOmittedOperand>;
-def ext_gnu_empty_initializer : Extension<
- "use of GNU empty initializer extension">, InGroup<GNUEmptyInitializer>;
def ext_gnu_array_range : Extension<"use of GNU array range extension">,
InGroup<GNUDesignator>;
def ext_gnu_missing_equal_designator : ExtWarn<
@@ -299,14 +303,14 @@ def ext_c_label_end_of_compound_statement : ExtWarn<
"label at end of compound statement is a C2x extension">,
InGroup<C2x>;
def ext_cxx_label_end_of_compound_statement : ExtWarn<
- "label at end of compound statement is a C++2b extension">,
- InGroup<CXX2b>;
+ "label at end of compound statement is a C++23 extension">,
+ InGroup<CXX23>;
def warn_c2x_compat_label_end_of_compound_statement : Warning<
"label at end of compound statement is incompatible with C standards before C2x">,
InGroup<CPre2xCompat>, DefaultIgnore;
def warn_cxx20_compat_label_end_of_compound_statement : Warning<
- "label at end of compound statement is incompatible with C++ standards before C++2b">,
- InGroup<CXXPre2bCompat>, DefaultIgnore;
+ "label at end of compound statement is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
def err_address_of_label_outside_fn : Error<
"use of address-of-label extension outside of a function body">;
def err_asm_operand_wide_string_literal : Error<
@@ -371,9 +375,6 @@ def warn_cxx11_compat_decltype_auto_type_specifier : Warning<
def ext_auto_type : Extension<
"'__auto_type' is a GNU extension">,
InGroup<GNUAutoType>;
-def warn_c2x_compat_typeof_type_specifier : Warning<
- "'%select{typeof|typeof_unqual}0' is incompatible with C standards before "
- "C2x">, InGroup<CPre2xCompat>, DefaultIgnore;
def ext_for_range : ExtWarn<
"range-based for loop is a C++11 extension">, InGroup<CXX11>;
def warn_cxx98_compat_for_range : Warning<
@@ -552,7 +553,7 @@ def err_invalid_operator_on_type : Error<
def err_expected_unqualified_id : Error<
"expected %select{identifier|unqualified-id}0">;
def err_while_loop_outside_of_a_function : Error<
- "while loop outside of a function">;
+ "while loop outside of a function">;
def err_brackets_go_after_unqualified_id : Error<
"brackets are not allowed here; to declare an array, "
"place the brackets after the %select{identifier|name}0">;
@@ -568,11 +569,11 @@ def err_expected_init_in_condition_lparen : Error<
def err_extraneous_rparen_in_condition : Error<
"extraneous ')' after condition, expected a statement">;
def ext_alias_in_init_statement : ExtWarn<
- "alias declaration in this context is a C++2b extension">,
- InGroup<CXX2b>;
+ "alias declaration in this context is a C++23 extension">,
+ InGroup<CXX23>;
def warn_cxx20_alias_in_init_statement : Warning<
- "alias declaration in this context is incompatible with C++ standards before C++2b">,
- DefaultIgnore, InGroup<CXXPre2bCompat>;
+ "alias declaration in this context is incompatible with C++ standards before C++23">,
+ DefaultIgnore, InGroup<CXXPre23Compat>;
def warn_dangling_else : Warning<
"add explicit braces to avoid dangling else">,
InGroup<DanglingElse>;
@@ -654,11 +655,11 @@ def warn_cxx14_compat_constexpr_if : Warning<
"constexpr if is incompatible with C++ standards before C++17">,
DefaultIgnore, InGroup<CXXPre17Compat>;
def ext_consteval_if : ExtWarn<
- "consteval if is a C++2b extension">,
- InGroup<CXX2b>;
+ "consteval if is a C++23 extension">,
+ InGroup<CXX23>;
def warn_cxx20_compat_consteval_if : Warning<
- "consteval if is incompatible with C++ standards before C++2b">,
- InGroup<CXXPre2bCompat>, DefaultIgnore;
+ "consteval if is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
def ext_init_statement : ExtWarn<
"'%select{if|switch}0' initialization statements are a C++17 extension">,
@@ -704,9 +705,6 @@ def warn_cxx98_compat_nullptr : Warning<
"'nullptr' is incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
def ext_c_nullptr : Extension<
"'nullptr' is a C2x extension">, InGroup<C2x>;
-def warn_c17_compat_nullptr : Warning<
- "'nullptr' is incompatible with C standards before C2x">,
- InGroup<CPre2xCompat>, DefaultIgnore;
def warn_wrong_clang_attr_namespace : Warning<
"'__clang__' is a predefined macro name, not an attribute scope specifier; "
@@ -721,8 +719,17 @@ def warn_cxx14_compat_ns_enum_attribute : Warning<
def warn_cxx98_compat_alignas : Warning<"'alignas' is incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
def warn_cxx98_compat_attribute : Warning<
- "C++11 attribute syntax is incompatible with C++98">,
+ "[[]] attributes are incompatible with C++ standards before C++11">,
InGroup<CXX98Compat>, DefaultIgnore;
+def warn_ext_cxx11_attributes : Extension<
+ "[[]] attributes are a C++11 extension">,
+ InGroup<CXX11>;
+def warn_pre_c2x_compat_attributes : Warning<
+ "[[]] attributes are incompatible with C standards before C2x">,
+ DefaultIgnore, InGroup<CPre2xCompat>;
+def warn_ext_c2x_attributes : Extension<
+ "[[]] attributes are a C2x extension">,
+ InGroup<C2x>;
def err_cxx11_attribute_forbids_arguments : Error<
"attribute %0 cannot have an argument list">;
def err_attribute_requires_arguments : Error<
@@ -738,10 +745,12 @@ def ext_using_attribute_ns : ExtWarn<
def err_using_attribute_ns_conflict : Error<
"attribute with scope specifier cannot follow default scope specifier">;
def err_attributes_not_allowed : Error<"an attribute list cannot appear here">;
+def err_keyword_not_allowed : Error<"%0 cannot appear here">;
def ext_cxx11_attr_placement : ExtWarn<
- "ISO C++ does not allow an attribute list to appear here">,
+ "ISO C++ does not allow %select{an attribute list|%0}1 to appear here">,
InGroup<DiagGroup<"cxx-attribute-extension">>;
def err_attributes_misplaced : Error<"misplaced attributes; expected attributes here">;
+def err_keyword_misplaced : Error<"misplaced %0; expected %0 here">;
def err_l_square_l_square_not_attribute : Error<
"C++11 only allows consecutive left square brackets when "
"introducing an attribute">;
@@ -1019,14 +1028,15 @@ def err_lambda_capture_multiple_ellipses : Error<
def err_capture_default_first : Error<
"capture default must be first">;
def ext_decl_attrs_on_lambda : ExtWarn<
- "an attribute specifier sequence in this position is a C++2b extension">,
- InGroup<CXX2b>;
+ "%select{an attribute specifier sequence|%0}1 in this position "
+ "is a C++23 extension">, InGroup<CXX23>;
def ext_lambda_missing_parens : ExtWarn<
- "lambda without a parameter clause is a C++2b extension">,
- InGroup<CXX2b>;
+ "lambda without a parameter clause is a C++23 extension">,
+ InGroup<CXX23>;
def warn_cxx20_compat_decl_attrs_on_lambda : Warning<
- "an attribute specifier sequence in this position is incompatible with C++ "
- "standards before C++2b">, InGroup<CXXPre2bCompat>, DefaultIgnore;
+ "%select{an attribute specifier sequence|%1}0 in this position "
+ "is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
// C++17 lambda expressions
def err_expected_star_this_capture : Error<
@@ -1049,12 +1059,12 @@ def warn_cxx17_compat_lambda_template_parameter_list: Warning<
def err_lambda_template_parameter_list_empty : Error<
"lambda template parameter list cannot be empty">;
-// C++2b static lambdas
+// C++23 static lambdas
def err_static_lambda: ExtWarn<
- "static lambdas are a C++2b extension">, InGroup<CXX2b>;
+ "static lambdas are a C++23 extension">, InGroup<CXX23>;
def warn_cxx20_compat_static_lambda : Warning<
- "static lambdas are incompatible with C++ standards before C++2b">,
- InGroup<CXXPre2bCompat>, DefaultIgnore;
+ "static lambdas are incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
def err_static_mutable_lambda : Error<
"lambda cannot be both mutable and static">;
def err_static_lambda_captures : Error<
@@ -1104,7 +1114,7 @@ def err_availability_query_repeated_star : Error<
// External source symbol attribute
def err_external_source_symbol_expected_keyword : Error<
- "expected 'language', 'defined_in', or 'generated_declaration'">;
+ "expected 'language', 'defined_in', 'generated_declaration', or 'USR'">;
def err_external_source_symbol_duplicate_clause : Error<
"duplicate %0 clause in an 'external_source_symbol' attribute">;
@@ -1303,6 +1313,10 @@ def note_pragma_attribute_namespace_on_attribute : Note<
def warn_no_support_for_eval_method_source_on_m32 : Warning<
"Setting the floating point evaluation method to `source` on a target"
" without SSE is not supported.">, InGroup<Pragmas>;
+// - #pragma __debug
+def warn_pragma_debug_dependent_argument : Warning<
+ "%select{value|type}0-dependent expression passed as an argument to debug "
+ "command">, InGroup<IgnoredPragmas>;
// OpenCL EXTENSION pragma (OpenCL 1.1 [9.1])
def warn_pragma_expected_colon : Warning<
@@ -1358,6 +1372,7 @@ def err_omp_expected_punc_after_iterator : Error<
"expected ',' or ')' after iterator specifier">;
def err_omp_decl_in_declare_simd_variant : Error<
"function declaration is expected after 'declare %select{simd|variant}0' directive">;
+def err_omp_sink_and_source_iteration_not_allowd: Error<" '%0 %select{sink:|source:}1' must be with '%select{omp_cur_iteration - 1|omp_cur_iteration}1'">;
def err_omp_unknown_map_type : Error<
"incorrect map type, expected one of 'to', 'from', 'tofrom', 'alloc', 'release', or 'delete'">;
def err_omp_unknown_map_type_modifier : Error<
@@ -1369,6 +1384,9 @@ def err_omp_map_type_modifier_missing : Error<
"missing map type modifier">;
def err_omp_declare_simd_inbranch_notinbranch : Error<
"unexpected '%0' clause, '%1' is specified already">;
+def err_omp_expected_clause_argument
+ : Error<"expected '%0' clause with an argument on '#pragma omp %1' "
+ "construct">;
def err_expected_end_declare_target_or_variant : Error<
"expected '#pragma omp end declare %select{target|variant}0'">;
def err_expected_begin_declare_variant
@@ -1520,6 +1538,8 @@ def err_omp_requires_out_inout_depend_type : Error<
def warn_omp_more_one_omp_all_memory : Warning<
"reserved locator 'omp_all_memory' cannot be specified more than once">,
InGroup<OpenMPClauses>;
+def warn_omp_depend_in_ordered_deprecated : Warning<"'depend' clause for"
+ " 'ordered' is deprecated; use 'doacross' instead">, InGroup<Deprecated>;
// Pragma loop support.
def err_pragma_loop_missing_argument : Error<
@@ -1571,9 +1591,6 @@ def warn_ext_int_deprecated : Warning<
def ext_bit_int : Extension<
"'_BitInt' in %select{C17 and earlier|C++}0 is a Clang extension">,
InGroup<DiagGroup<"bit-int-extension">>;
-def warn_c17_compat_bit_int : Warning<
- "'_BitInt' is incompatible with C standards before C2x">,
- InGroup<CPre2xCompat>, DefaultIgnore;
} // end of Parse Issue category.
let CategoryName = "Modules Issue" in {
@@ -1583,8 +1600,12 @@ def err_module_expected_ident : Error<
"expected a module name after '%select{module|import}0'">;
def err_attribute_not_module_attr : Error<
"%0 attribute cannot be applied to a module">;
+def err_keyword_not_module_attr : Error<
+ "%0 cannot be applied to a module">;
def err_attribute_not_import_attr : Error<
"%0 attribute cannot be applied to a module import">;
+def err_keyword_not_import_attr : Error<
+ "%0 cannot be applied to a module import">;
def err_module_expected_semi : Error<
"expected ';' after module name">;
def err_global_module_introducer_not_at_start : Error<
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
index e4651678603d..0c0530f8a11d 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -83,8 +83,8 @@ def err_typecheck_converted_constant_expression_indirect : Error<
"bind reference to a temporary">;
def err_expr_not_cce : Error<
"%select{case value|enumerator value|non-type template argument|"
- "array size|explicit specifier argument|noexcept specifier argument}0 "
- "is not a constant expression">;
+ "array size|explicit specifier argument|noexcept specifier argument|"
+ "call to 'size()'|call to 'data()'}0 is not a constant expression">;
def ext_cce_narrowing : ExtWarn<
"%select{case value|enumerator value|non-type template argument|"
"array size|explicit specifier argument|noexcept specifier argument}0 "
@@ -112,6 +112,9 @@ def err_expr_not_string_literal : Error<"expression is not a string literal">;
def ext_predef_outside_function : Warning<
"predefined identifier is only valid inside function">,
InGroup<DiagGroup<"predefined-identifier-outside-function">>;
+def ext_init_from_predefined : ExtWarn<
+ "initializing an array from a '%0' predefined identifier is a Microsoft extension">,
+ InGroup<MicrosoftInitFromPredefined>;
def warn_float_overflow : Warning<
"magnitude of floating-point constant too large for type %0; maximum is %1">,
InGroup<LiteralRange>;
@@ -186,6 +189,8 @@ def warn_initializer_overrides : Warning<
"this subobject">, InGroup<InitializerOverrides>;
def ext_initializer_overrides : ExtWarn<warn_initializer_overrides.Summary>,
InGroup<InitializerOverrides>, SFINAEFailure;
+def ext_initializer_union_overrides : ExtWarn<warn_initializer_overrides.Summary>,
+ InGroup<InitializerOverrides>, DefaultError, SFINAEFailure;
def err_initializer_overrides_destructed : Error<
"initializer would partially override prior initialization of object of "
"type %1 with non-trivial destruction">;
@@ -401,6 +406,11 @@ def warn_reserved_extern_symbol: Warning<
"it starts with '_' followed by a capital letter|"
"it contains '__'}1">,
InGroup<ReservedIdentifier>, DefaultIgnore;
+def warn_deprecated_literal_operator_id: Warning<
+ "identifier %0 preceded by whitespace in a literal operator declaration "
+ "is deprecated">, InGroup<DeprecatedLiteralOperator>, DefaultIgnore;
+def warn_reserved_module_name : Warning<
+ "%0 is a reserved name for a module">, InGroup<ReservedModuleIdentifier>;
def warn_parameter_size: Warning<
"%0 is a large (%1 bytes) pass-by-value argument; "
@@ -426,9 +436,6 @@ def err_ellipsis_first_param : Error<
"ISO C requires a named parameter before '...'">;
def err_declarator_need_ident : Error<"declarator requires an identifier">;
def err_language_linkage_spec_unknown : Error<"unknown linkage language">;
-def err_language_linkage_spec_not_ascii : Error<
- "string literal in language linkage specifier cannot have an "
- "encoding-prefix">;
def ext_use_out_of_scope_declaration : ExtWarn<
"use of out-of-scope declaration of %0%select{| whose type is not "
"compatible with that of an implicit declaration}1">,
@@ -443,7 +450,7 @@ def warn_qual_return_type : Warning<
def warn_deprecated_redundant_constexpr_static_def : Warning<
"out-of-line definition of constexpr static data member is redundant "
"in C++17 and is deprecated">,
- InGroup<Deprecated>, DefaultIgnore;
+ InGroup<DeprecatedRedundantConstexprStaticDef>, DefaultIgnore;
def warn_decl_shadow :
Warning<"declaration shadows a %select{"
@@ -1538,6 +1545,24 @@ def err_static_assert_requirement_failed : Error<
"static assertion failed due to requirement '%0'%select{: %2|}1">;
def note_expr_evaluates_to : Note<
"expression evaluates to '%0 %1 %2'">;
+def err_static_assert_invalid_message : Error<
+ "the message in a static assertion must be a string literal or an "
+ "object with 'data()' and 'size()' member functions">;
+def err_static_assert_missing_member_function : Error<
+ "the message object in this static assertion is missing %select{"
+ "a 'size()' member function|"
+ "a 'data()' member function|"
+ "'data()' and 'size()' member functions}0">;
+def err_static_assert_invalid_mem_fn_ret_ty : Error<
+ "the message in a static assertion must have a '%select{size|data}0()' member "
+ "function returning an object convertible to '%select{std::size_t|const char *}0'">;
+def warn_static_assert_message_constexpr : Warning<
+ "the message in this static assertion is not a "
+ "constant expression">,
+ DefaultError, InGroup<DiagGroup<"invalid-static-assert-message">>;
+def err_static_assert_message_constexpr : Error<
+ "the message in a static assertion must be produced by a "
+ "constant expression">;
def warn_consteval_if_always_true : Warning<
"consteval if is always true in an %select{unevaluated|immediate}0 context">,
@@ -1554,6 +1579,8 @@ def warn_inline_namespace_reopened_noninline : Warning<
InGroup<InlineNamespaceReopenedNoninline>;
def err_inline_namespace_mismatch : Error<
"non-inline namespace cannot be reopened as inline">;
+def err_inline_namespace_std : Error<
+ "cannot declare the namespace 'std' to be inline">;
def err_unexpected_friend : Error<
"friends can only be classes or functions">;
@@ -2189,6 +2216,8 @@ def err_reference_has_multiple_inits : Error<
"reference cannot be initialized with multiple values">;
def err_init_non_aggr_init_list : Error<
"initialization of non-aggregate type %0 with an initializer list">;
+def err_designated_init_for_non_aggregate : Error<
+ "initialization of non-aggregate type %0 with a designated initializer list">;
def err_init_reference_member_uninitialized : Error<
"reference member of type %0 uninitialized">;
def note_uninit_reference_member : Note<
@@ -2311,8 +2340,6 @@ def err_auto_variable_cannot_appear_in_own_initializer : Error<
def err_binding_cannot_appear_in_own_initializer : Error<
"binding %0 cannot appear in the initializer of its own "
"decomposition declaration">;
-def err_illegal_decl_array_of_auto : Error<
- "'%0' declared as array of %1">;
def err_new_array_of_auto : Error<
"cannot allocate array of 'auto'">;
def err_auto_not_allowed : Error<
@@ -2368,7 +2395,7 @@ def err_auto_expr_init_paren_braces : Error<
"%select{parenthesized|nested}0 initializer list">;
def warn_cxx20_compat_auto_expr : Warning<
"'auto' as a functional-style cast is incompatible with C++ standards "
- "before C++2b">, InGroup<CXXPre2bCompat>, DefaultIgnore;
+ "before C++23">, InGroup<CXXPre23Compat>, DefaultIgnore;
def err_auto_missing_trailing_return : Error<
"'auto' return without trailing return type; deduced return types are a "
"C++14 extension">;
@@ -2649,12 +2676,26 @@ def warn_cxx14_compat_constexpr_not_const : Warning<
"in C++14; add 'const' to avoid a change in behavior">,
InGroup<DiagGroup<"constexpr-not-const">>;
def err_invalid_consteval_take_address : Error<
- "cannot take address of consteval function %0 outside"
+ "cannot take address of %select{immediate|consteval}2 "
+ "%select{function|call operator of}1 %0 outside"
" of an immediate invocation">;
def err_invalid_consteval_call : Error<
- "call to consteval function %q0 is not a constant expression">;
+ "call to %select{immediate|consteval}1 function "
+ "%q0 is not a constant expression">;
+
+def err_immediate_function_used_before_definition : Error<
+ "immediate function %0 used before it is defined">;
+
+def note_immediate_function_reason : Note<
+ "%0 is an immediate %select{function|constructor}5 because "
+ "%select{its body|the%select{| default}7 initializer of %8}6 "
+ "%select{evaluates the address of %select{an immediate|a consteval}2 "
+ "function %1|contains a call to %select{an immediate|a consteval}2 "
+ "%select{function|constructor}4 %1 and that call is not a constant "
+ "expression}3">;
+
def note_invalid_consteval_initializer : Note<
- "in the default initalizer of %0">;
+ "in the default initializer of %0">;
def note_invalid_consteval_initializer_here : Note<
"initialized here %0">;
def err_invalid_consteval_decl_kind : Error<
@@ -2665,7 +2706,7 @@ def err_invalid_constexpr : Error<
def err_invalid_constexpr_member : Error<"non-static data member cannot be "
"constexpr%select{; did you intend to make it %select{const|static}0?|}1">;
def err_constexpr_tag : Error<
- "%select{class|struct|interface|union|enum}0 "
+ "%select{class|struct|interface|union|enum|enum class|enum struct}0 "
"cannot be marked %sub{select_constexpr_spec_kind}1">;
def err_constexpr_dtor : Error<
"destructor cannot be declared %sub{select_constexpr_spec_kind}0">;
@@ -2725,13 +2766,13 @@ def warn_cxx17_compat_constexpr_body_invalid_stmt : Warning<
"use of this statement in a constexpr %select{function|constructor}0 "
"is incompatible with C++ standards before C++20">,
InGroup<CXXPre20Compat>, DefaultIgnore;
-def ext_constexpr_body_invalid_stmt_cxx2b : ExtWarn<
+def ext_constexpr_body_invalid_stmt_cxx23 : ExtWarn<
"use of this statement in a constexpr %select{function|constructor}0 "
- "is a C++2b extension">, InGroup<CXX2b>;
+ "is a C++23 extension">, InGroup<CXX23>;
def warn_cxx20_compat_constexpr_body_invalid_stmt : Warning<
"use of this statement in a constexpr %select{function|constructor}0 "
- "is incompatible with C++ standards before C++2b">,
- InGroup<CXXPre2bCompat>, DefaultIgnore;
+ "is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
def ext_constexpr_type_definition : ExtWarn<
"type definition in a constexpr %select{function|constructor}0 "
"is a C++14 extension">, InGroup<CXX14>;
@@ -2752,15 +2793,15 @@ def warn_cxx11_compat_constexpr_local_var : Warning<
def ext_constexpr_static_var : ExtWarn<
"definition of a %select{static|thread_local}1 variable "
"in a constexpr %select{function|constructor}0 "
- "is a C++2b extension">, InGroup<CXX2b>;
+ "is a C++23 extension">, InGroup<CXX23>;
def warn_cxx20_compat_constexpr_var : Warning<
"definition of a %select{static variable|thread_local variable|variable "
"of non-literal type}1 in a constexpr %select{function|constructor}0 "
- "is incompatible with C++ standards before C++2b">,
- InGroup<CXXPre2bCompat>, DefaultIgnore;
+ "is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
def err_constexpr_local_var_non_literal_type : Error<
"variable of non-literal type %1 cannot be defined in a constexpr "
- "%select{function|constructor}0 before C++2b">;
+ "%select{function|constructor}0 before C++23">;
def ext_constexpr_local_var_no_init : ExtWarn<
"uninitialized variable in a constexpr %select{function|constructor}0 "
"is a C++20 extension">, InGroup<CXX20>;
@@ -2961,6 +3002,10 @@ def warn_attribute_ignored_no_calls_in_stmt: Warning<
"statement">,
InGroup<IgnoredAttributes>;
+def warn_attribute_ignored_non_function_pointer: Warning<
+ "%0 attribute is ignored because %1 is not a function pointer">,
+ InGroup<IgnoredAttributes>;
+
def warn_function_attribute_ignored_in_stmt : Warning<
"attribute is ignored on this statement as it only applies to functions; "
"use '%0' on statements">,
@@ -3015,7 +3060,7 @@ def err_musttail_no_variadic : Error<
def err_nsobject_attribute : Error<
"'NSObject' attribute is for pointer types only">;
def err_attributes_are_not_compatible : Error<
- "%0 and %1 attributes are not compatible">;
+ "%0 and %1%select{ attributes|}2 are not compatible">;
def err_attribute_invalid_argument : Error<
"%select{a reference type|an array type|a non-vector or "
"non-vectorizable scalar type}0 is an invalid argument to attribute %1">;
@@ -3054,6 +3099,14 @@ def err_attribute_arm_feature_sve_bits_unsupported : Error<
"value of 128, 256, 512, 1024 or 2048.">;
def err_sve_vector_in_non_sve_target : Error<
"SVE vector type %0 cannot be used in a target without sve">;
+def err_attribute_riscv_rvv_bits_unsupported : Error<
+ "%0 is only supported when '-mrvv-vector-bits=<bits>' is specified with a "
+ "value of \"zvl\" or a power 2 in the range [64,65536]">;
+def err_attribute_bad_rvv_vector_size : Error<
+ "invalid RVV vector size '%0', expected size is '%1' based on LMUL of type "
+ "and '-mrvv-vector-bits'">;
+def err_attribute_invalid_rvv_type : Error<
+ "%0 attribute applied to non-RVV type %1">;
def err_attribute_requires_positive_integer : Error<
"%0 attribute requires a %select{positive|non-negative}1 "
"integral compile time constant expression">;
@@ -3164,10 +3217,11 @@ def err_attribute_invalid_size : Error<
"vector size not an integral multiple of component size">;
def err_attribute_zero_size : Error<"zero %0 size">;
def err_attribute_size_too_large : Error<"%0 size too large">;
-def err_typecheck_sve_ambiguous : Error<
- "cannot combine fixed-length and sizeless SVE vectors in expression, result is ambiguous (%0 and %1)">;
-def err_typecheck_sve_gnu_ambiguous : Error<
- "cannot combine GNU and SVE vectors in expression, result is ambiguous (%0 and %1)">;
+def err_typecheck_sve_rvv_ambiguous : Error<
+ "cannot combine fixed-length and sizeless %select{SVE|RVV}0 vectors "
+ "in expression, result is ambiguous (%1 and %2)">;
+def err_typecheck_sve_rvv_gnu_ambiguous : Error<
+ "cannot combine GNU and %select{SVE|RVV}0 vectors in expression, result is ambiguous (%1 and %2)">;
def err_typecheck_vector_not_convertable_implict_truncation : Error<
"cannot convert between %select{scalar|vector}0 type %1 and vector type"
" %2 as implicit conversion would cause truncation">;
@@ -3414,16 +3468,20 @@ def warn_attribute_has_no_effect_on_compile_time_if : Warning<
def note_attribute_has_no_effect_on_compile_time_if_here : Note<
"annotating the 'if %select{constexpr|consteval}0' statement here">;
def err_decl_attribute_invalid_on_stmt : Error<
- "%0 attribute cannot be applied to a statement">;
+ "%0%select{ attribute|}1 cannot be applied to a statement">;
def err_attribute_invalid_on_decl : Error<
- "%0 attribute cannot be applied to a declaration">;
+ "%0%select{ attribute|}1 cannot be applied to a declaration">;
def warn_type_attribute_deprecated_on_decl : Warning<
"applying attribute %0 to a declaration is deprecated; apply it to the type instead">,
InGroup<DeprecatedAttributes>;
def warn_declspec_attribute_ignored : Warning<
"attribute %0 is ignored, place it after "
- "\"%select{class|struct|interface|union|enum}1\" to apply attribute to "
+ "\"%select{class|struct|interface|union|enum|enum class|enum struct}1\" to apply attribute to "
"type declaration">, InGroup<IgnoredAttributes>;
+def err_declspec_keyword_has_no_effect : Error<
+ "%0 cannot appear here, place it after "
+ "\"%select{class|struct|interface|union|enum}1\" to apply it to the "
+ "type declaration">;
def warn_attribute_precede_definition : Warning<
"attribute declaration must precede definition">,
InGroup<IgnoredAttributes>;
@@ -3522,11 +3580,11 @@ def err_attribute_weakref_without_alias : Error<
def err_alias_not_supported_on_darwin : Error <
"aliases are not supported on darwin">;
def warn_attribute_wrong_decl_type_str : Warning<
- "%0 attribute only applies to %1">, InGroup<IgnoredAttributes>;
+ "%0%select{ attribute|}1 only applies to %2">, InGroup<IgnoredAttributes>;
def err_attribute_wrong_decl_type_str : Error<
warn_attribute_wrong_decl_type_str.Summary>;
def warn_attribute_wrong_decl_type : Warning<
- "%0 attribute only applies to %select{"
+ "%0%select{ attribute|}1 only applies to %select{"
"functions"
"|unions"
"|variables and functions"
@@ -3539,13 +3597,15 @@ def warn_attribute_wrong_decl_type : Warning<
"|types and namespaces"
"|variables, functions and classes"
"|kernel functions"
- "|non-K&R-style functions}1">,
+ "|non-K&R-style functions}2">,
InGroup<IgnoredAttributes>;
def err_attribute_wrong_decl_type : Error<warn_attribute_wrong_decl_type.Summary>;
def warn_type_attribute_wrong_type : Warning<
"'%0' only applies to %select{function|pointer|"
"Objective-C object or block pointer}1 types; type here is %2">,
InGroup<IgnoredAttributes>;
+def err_type_attribute_wrong_type : Error<
+ warn_type_attribute_wrong_type.Summary>;
def warn_incomplete_encoded_type : Warning<
"encoding of %0 type is incomplete because %1 component has unknown encoding">,
InGroup<DiagGroup<"encode-type">>;
@@ -3596,7 +3656,7 @@ def err_invalid_pcs : Error<"invalid PCS type">;
def warn_attribute_not_on_decl : Warning<
"%0 attribute ignored when parsing type">, InGroup<IgnoredAttributes>;
def err_base_specifier_attribute : Error<
- "%0 attribute cannot be applied to a base specifier">;
+ "%0%select{ attribute|}1 cannot be applied to a base specifier">;
def warn_declspec_allocator_nonpointer : Warning<
"ignoring __declspec(allocator) because the function return type %0 is not "
"a pointer or reference type">, InGroup<IgnoredAttributes>;
@@ -4661,9 +4721,6 @@ def note_ovl_candidate_bad_cvr : Note<
"%select{const|restrict|const and restrict|volatile|const and volatile|"
"volatile and restrict|const, volatile, and restrict}4 qualifier"
"%select{||s||s|s|s}4">;
-def note_ovl_candidate_bad_unaligned : Note<
- "candidate %sub{select_ovl_candidate_kind}0,1,2 not viable: "
- "%ordinal5 argument (%3) would lose __unaligned qualifier">;
def note_ovl_candidate_bad_base_to_derived_conv : Note<
"candidate %sub{select_ovl_candidate_kind}0,1,2 not viable: "
"cannot %select{convert from|convert from|bind}3 "
@@ -4678,6 +4735,8 @@ def note_ovl_candidate_bad_target : Note<
def note_ovl_candidate_constraints_not_satisfied : Note<
"candidate %sub{select_ovl_candidate_kind}0,1,2 not viable: constraints "
"not satisfied">;
+def note_ovl_surrogate_constraints_not_satisfied : Note<
+ "conversion candidate %0 not viable: constraints not satisfied">;
def note_implicit_member_target_infer_collision : Note<
"implicit %sub{select_special_member_kind}0 inferred target collision: call to both "
"%select{__device__|__global__|__host__|__host__ __device__}1 and "
@@ -5314,6 +5373,8 @@ def note_constraint_normalization_here : Note<
def note_parameter_mapping_substitution_here : Note<
"while substituting into concept arguments here; substitution failures not "
"allowed in concept arguments">;
+def note_lambda_substitution_here : Note<
+ "while substituting into a lambda expression here">;
def note_instantiation_contexts_suppressed : Note<
"(skipping %0 context%s0 in backtrace; use -ftemplate-backtrace-limit=0 to "
"see all)">;
@@ -5472,10 +5533,10 @@ def note_template_kw_refers_to_non_template : Note<
def err_template_kw_refers_to_dependent_non_template : Error<
"%0%select{| following the 'template' keyword}1 "
"cannot refer to a dependent template">;
-def err_template_kw_refers_to_class_template : Error<
- "'%0%1' instantiated to a class template, not a function template">;
-def note_referenced_class_template : Note<
- "class template declared here">;
+def err_template_kw_refers_to_type_template : Error<
+ "'%0%1' is expected to be a non-type template, but instantiated to a %select{class|type alias}2 template">;
+def note_referenced_type_template : Note<
+ "%select{class|type alias}0 template declared here">;
def err_template_kw_missing : Error<
"missing 'template' keyword prior to dependent template name '%0%1'">;
def ext_template_outside_of_template : ExtWarn<
@@ -6161,6 +6222,8 @@ def note_enters_block_captures_non_trivial_c_struct : Note<
"to destroy">;
def note_enters_compound_literal_scope : Note<
"jump enters lifetime of a compound literal that is non-trivial to destruct">;
+def note_enters_statement_expression : Note<
+ "jump enters a statement expression">;
def note_exits_cleanup : Note<
"jump exits scope of variable with __attribute__((cleanup))">;
@@ -6733,7 +6796,7 @@ def err_arithmetic_nonfragile_interface : Error<
def warn_deprecated_comma_subscript : Warning<
"top-level comma expression in array subscript is deprecated "
- "in C++20 and unsupported in C++2b">,
+ "in C++20 and unsupported in C++23">,
InGroup<DeprecatedCommaSubscript>;
def ext_subscript_non_lvalue : Extension<
@@ -6927,7 +6990,7 @@ def warn_standalone_specifier : Warning<"'%0' ignored on this declaration">,
def ext_standalone_specifier : ExtWarn<"'%0' is not permitted on a declaration "
"of a type">, InGroup<MissingDeclarations>;
def err_standalone_class_nested_name_specifier : Error<
- "forward declaration of %select{class|struct|interface|union|enum}0 cannot "
+ "forward declaration of %select{class|struct|interface|union|enum|enum class|enum struct}0 cannot "
"have a nested name specifier">;
def err_typecheck_sclass_func : Error<"illegal storage class on function">;
def err_static_block_func : Error<
@@ -6963,9 +7026,8 @@ def err_typecheck_indirection_requires_pointer : Error<
def ext_typecheck_indirection_through_void_pointer : ExtWarn<
"ISO C does not allow indirection on operand of type %0">,
InGroup<VoidPointerDeref>;
-def ext_typecheck_indirection_through_void_pointer_cpp
- : ExtWarn<"ISO C++ does not allow indirection on operand of type %0">,
- InGroup<VoidPointerDeref>, DefaultError, SFINAEFailure;
+def err_typecheck_indirection_through_void_pointer_cpp
+ : Error<"indirection not permitted on operand of type %0">;
def warn_indirection_through_null : Warning<
"indirection of non-volatile null pointer will be deleted, not trap">,
InGroup<NullDereference>;
@@ -7386,6 +7448,8 @@ def err_attribute_arm_builtin_alias : Error<
"'__clang_arm_builtin_alias' attribute can only be applied to an ARM builtin">;
def err_attribute_arm_mve_polymorphism : Error<
"'__clang_arm_mve_strict_polymorphism' attribute can only be applied to an MVE/NEON vector type">;
+def err_attribute_webassembly_funcref : Error<
+ "'__funcref' attribute can only be applied to a function pointer type">;
def warn_setter_getter_impl_required : Warning<
"property %0 requires method %1 to be defined - "
@@ -7636,7 +7700,7 @@ def warn_increment_bool : Warning<
"incompatible with C++17">, InGroup<DeprecatedIncrementBool>;
def ext_increment_bool : ExtWarn<
"ISO C++17 does not allow incrementing expression of type bool">,
- DefaultError, InGroup<IncrementBool>;
+ DefaultError, SFINAEFailure, InGroup<IncrementBool>;
def err_increment_decrement_enum : Error<
"cannot %select{decrement|increment}0 expression of enum type %1">;
@@ -7907,7 +7971,7 @@ let CategoryName = "Lambda Issue" in {
"is a C++20 extension">, InGroup<CXX20>;
def warn_deprecated_this_capture : Warning<
"implicit capture of 'this' with a capture default of '=' is deprecated">,
- InGroup<DeprecatedThisCapture>, DefaultIgnore;
+ InGroup<DeprecatedThisCapture>;
def note_deprecated_this_capture : Note<
"add an explicit capture of 'this' to capture '*this' by reference">;
@@ -8504,6 +8568,9 @@ def err_atomic_op_needs_atomic_int_ptr_or_fp : Error<
def err_atomic_op_needs_atomic_int_or_ptr : Error<
"address argument to atomic operation must be a pointer to %select{|atomic }0"
"integer or pointer (%1 invalid)">;
+def err_atomic_op_needs_atomic_int_or_fp : Error<
+ "address argument to atomic operation must be a pointer to %select{|atomic }0"
+ "integer or supported floating point type (%1 invalid)">;
def err_atomic_op_needs_atomic_int : Error<
"address argument to atomic operation must be a pointer to "
"%select{|atomic }0integer (%1 invalid)">;
@@ -8631,6 +8698,10 @@ def note_cuda_device_builtin_surftex_should_be_template_class : Note<
def err_hip_invalid_args_builtin_mangled_name : Error<
"invalid argument: symbol must be a device-side function or global variable">;
+def warn_hip_omp_target_directives : Warning<
+ "HIP does not support OpenMP target directives; directive has been ignored">,
+ InGroup<HIPOpenMPOffloading>, DefaultError;
+
def warn_non_pod_vararg_with_format_string : Warning<
"cannot pass %select{non-POD|non-trivial}0 object of type %1 to variadic "
"%select{function|block|method|constructor}2; expected type from format "
@@ -8724,8 +8795,6 @@ def err_cast_pointer_to_non_pointer_int : Error<
def err_nullptr_cast : Error<
"cannot cast an object of type %select{'nullptr_t' to %1|%1 to 'nullptr_t'}0"
>;
-def err_cast_to_bfloat16 : Error<"cannot type-cast to __bf16">;
-def err_cast_from_bfloat16 : Error<"cannot type-cast from __bf16">;
def err_typecheck_expect_scalar_operand : Error<
"operand of type %0 where arithmetic or pointer type is required">;
def err_typecheck_cond_incompatible_operands : Error<
@@ -9131,9 +9200,9 @@ def err_operator_overload_needs_class_or_enum : Error<
def err_operator_overload_variadic : Error<"overloaded %0 cannot be variadic">;
def warn_cxx20_compat_operator_overload_static : Warning<
"declaring overloaded %0 as 'static' is incompatible with C++ standards "
- "before C++2b">, InGroup<CXXPre2bCompat>, DefaultIgnore;
+ "before C++23">, InGroup<CXXPre23Compat>, DefaultIgnore;
def ext_operator_overload_static : ExtWarn<
- "declaring overloaded %0 as 'static' is a C++2b extension">, InGroup<CXX2b>;
+ "declaring overloaded %0 as 'static' is a C++23 extension">, InGroup<CXX23>;
def err_operator_overload_static : Error<
"overloaded %0 cannot be a static member function">;
def err_operator_overload_default_arg : Error<
@@ -9141,9 +9210,9 @@ def err_operator_overload_default_arg : Error<
def ext_subscript_overload : Warning<
"overloaded %0 with %select{no|a defaulted|more than one}1 parameter is a "
- "C++2b extension">, InGroup<CXXPre2bCompat>, DefaultIgnore;
+ "C++23 extension">, InGroup<CXXPre23Compat>, DefaultIgnore;
def error_subscript_overload : Error<
- "overloaded %0 cannot have %select{no|a defaulted|more than one}1 parameter before C++2b">;
+ "overloaded %0 cannot have %select{no|a defaulted|more than one}1 parameter before C++23">;
def err_operator_overload_must_be : Error<
"overloaded %0 must be a %select{unary|binary|unary or binary}2 operator "
@@ -9220,8 +9289,8 @@ def ext_string_literal_operator_template : ExtWarn<
"string literal operator templates are a GNU extension">,
InGroup<GNUStringLiteralOperatorTemplate>;
def warn_user_literal_reserved : Warning<
- "user-defined literal suffixes not starting with '_' are reserved"
- "%select{; no literal will invoke this operator|}0">,
+ "user-defined literal suffixes %select{<ERROR>|not starting with '_'|containing '__'}0 are reserved"
+ "%select{; no literal will invoke this operator|}1">,
InGroup<UserDefinedLiterals>;
// C++ conversion functions
@@ -9409,12 +9478,21 @@ def note_defaulted_comparison_cannot_deduce_undeduced_auto : Note<
"%select{|member|base class}0 %1 declared here">;
def note_defaulted_comparison_cannot_deduce_callee : Note<
"selected 'operator<=>' for %select{|member|base class}0 %1 declared here">;
-def err_incorrect_defaulted_comparison_constexpr : Error<
+def ext_defaulted_comparison_constexpr_mismatch : Extension<
+ "defaulted definition of %select{%sub{select_defaulted_comparison_kind}1|"
+ "three-way comparison operator}0 that is "
+ "declared %select{constexpr|consteval}2 but"
+ "%select{|for which the corresponding implicit 'operator==' }0 "
+ "invokes a non-constexpr comparison function is a C++23 extension">,
+ InGroup<DiagGroup<"c++23-default-comp-relaxed-constexpr">>;
+def warn_cxx23_compat_defaulted_comparison_constexpr_mismatch : Warning<
"defaulted definition of %select{%sub{select_defaulted_comparison_kind}1|"
- "three-way comparison operator}0 "
- "cannot be declared %select{constexpr|consteval}2 because "
- "%select{it|the corresponding implicit 'operator=='}0 "
- "invokes a non-constexpr comparison function">;
+ "three-way comparison operator}0 that is "
+ "declared %select{constexpr|consteval}2 but"
+ "%select{|for which the corresponding implicit 'operator==' }0 "
+ "invokes a non-constexpr comparison function is incompatible with C++ "
+ "standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
def note_defaulted_comparison_not_constexpr : Note<
"non-constexpr comparison function would be used to compare "
"%select{|member %1|base class %1}0">;
@@ -9423,6 +9501,10 @@ def note_defaulted_comparison_not_constexpr_here : Note<
def note_in_declaration_of_implicit_equality_comparison : Note<
"while declaring the corresponding implicit 'operator==' "
"for this defaulted 'operator<=>'">;
+def err_volatile_comparison_operator : Error<
+ "defaulted comparison function must not be volatile">;
+def err_ref_qualifier_comparison_operator : Error<
+ "ref-qualifier '&&' is not allowed on a defaulted comparison operator">;
def ext_implicit_exception_spec_mismatch : ExtWarn<
"function previously declared with an %select{explicit|implicit}0 exception "
@@ -9606,7 +9688,7 @@ def note_lambda_capture_initializer : Note<
"%select{implicitly |}2captured%select{| by reference}3"
"%select{%select{ due to use|}2 here|"
" via initialization of lambda capture %0}1">;
-def note_init_with_default_member_initalizer : Note<
+def note_init_with_default_member_initializer : Note<
"initializing field %0 with default member initializer">;
// Check for initializing a member variable with the address or a reference to
@@ -10022,12 +10104,6 @@ def err_mips_builtin_requires_dspr2 : Error<
"this builtin requires 'dsp r2' ASE, please use -mdspr2">;
def err_mips_builtin_requires_msa : Error<
"this builtin requires 'msa' ASE, please use -mmsa">;
-def err_ppc_builtin_only_on_arch : Error<
- "this builtin is only valid on POWER%0 or later CPUs">;
-def err_ppc_builtin_requires_vsx : Error<
- "this builtin requires VSX to be enabled">;
-def err_ppc_builtin_requires_htm : Error<
- "this builtin requires HTM to be enabled">;
def err_ppc_builtin_requires_abi : Error<
"this builtin requires ABI -mabi=%0">;
def err_ppc_invalid_use_mma_type : Error<
@@ -10464,6 +10540,9 @@ def err_omp_lastprivate_incomplete_type : Error<
"a lastprivate variable with incomplete type %0">;
def err_omp_reduction_incomplete_type : Error<
"a reduction list item with incomplete type %0">;
+def warn_omp_minus_in_reduction_deprecated : Warning<
+ "minus(-) operator for reductions is deprecated; use + or user defined reduction instead">,
+ InGroup<Deprecated>;
def err_omp_unexpected_clause_value : Error<
"expected %0 in OpenMP clause '%1'">;
def err_omp_unexpected_call_to_omp_runtime_api
@@ -10628,9 +10707,12 @@ def err_omp_simd_region_cannot_use_stmt : Error<
def warn_omp_loop_64_bit_var : Warning<
"OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed">,
InGroup<OpenMPLoopForm>;
-def err_omp_unknown_reduction_identifier : Error<
+def err_omp_unknown_reduction_identifier_prior_omp_6_0 : Error<
"incorrect reduction identifier, expected one of '+', '-', '*', '&', '|', '^', "
"'&&', '||', 'min' or 'max' or declare reduction for type %0">;
+def err_omp_unknown_reduction_identifier_since_omp_6_0 : Error<
+ "incorrect reduction identifier, expected one of '+', '*', '&', '|', '^', "
+ "'&&', '||', 'min' or 'max' or declare reduction for type %0">;
def err_omp_not_resolved_reduction_identifier : Error<
"unable to resolve declare reduction construct for type %0">;
def err_omp_reduction_ref_type_arg : Error<
@@ -10814,7 +10896,7 @@ def note_omp_previous_named_if_clause : Note<
def err_omp_ordered_directive_with_param : Error<
"'ordered' directive %select{without any clauses|with 'threads' clause}0 cannot be closely nested inside ordered region with specified parameter">;
def err_omp_ordered_directive_without_param : Error<
- "'ordered' directive with 'depend' clause cannot be closely nested inside ordered region without specified parameter">;
+ "'ordered' directive with '%0' clause cannot be closely nested inside ordered region without specified parameter">;
def note_omp_ordered_param : Note<
"'ordered' clause%select{| with specified parameter}0">;
def err_omp_expected_base_var_name : Error<
@@ -10846,7 +10928,7 @@ def note_omp_critical_hint_here : Note<
def note_omp_critical_no_hint : Note<
"%select{|previous }0directive with no 'hint' clause specified">;
def err_omp_depend_clause_thread_simd : Error<
- "'depend' clauses cannot be mixed with '%0' clause">;
+ "'%0' clauses cannot be mixed with '%1' clause">;
def err_omp_depend_sink_expected_loop_iteration : Error<
"expected%select{| %1}0 loop iteration variable">;
def err_omp_depend_sink_unexpected_expr : Error<
@@ -10855,8 +10937,8 @@ def err_omp_depend_sink_expected_plus_minus : Error<
"expected '+' or '-' operation">;
def err_omp_taskwait_depend_mutexinoutset_not_allowed : Error<
"'mutexinoutset' modifier not allowed in 'depend' clause on 'taskwait' directive">;
-def err_omp_depend_sink_source_not_allowed : Error<
- "'depend(%select{source|sink:vec}0)' clause%select{|s}0 cannot be mixed with 'depend(%select{sink:vec|source}0)' clause%select{s|}0">;
+def err_omp_sink_and_source_not_allowed : Error<
+ "'%0(%select{source|sink:vec}1)' clause%select{|s}1 cannot be mixed with '%0(%select{sink:vec|source}1)' clause%select{s|}1">;
def err_omp_depend_zero_length_array_section_not_allowed : Error<
"zero-length array section is not allowed in 'depend' clause">;
def err_omp_depend_sink_source_with_modifier : Error<
@@ -11209,30 +11291,18 @@ def note_global_module_introducer_missing : Note<
def err_export_within_anonymous_namespace : Error<
"export declaration appears within anonymous namespace">;
def note_anonymous_namespace : Note<"anonymous namespace begins here">;
-def ext_export_no_name_block : ExtWarn<
- "ISO C++20 does not permit %select{an empty|a static_assert}0 declaration "
- "to appear in an export block">, InGroup<ExportUnnamed>;
-def ext_export_no_names : ExtWarn<
- "ISO C++20 does not permit a declaration that does not introduce any names "
- "to be exported">, InGroup<ExportUnnamed>;
-def introduces_no_names : Error<
- "declaration does not introduce any names to be exported">;
def note_export : Note<"export block begins here">;
-def err_export_no_name : Error<
- "%select{empty|static_assert|asm}0 declaration cannot be exported">;
-def ext_export_using_directive : ExtWarn<
- "ISO C++20 does not permit using directive to be exported">,
- InGroup<DiagGroup<"export-using-directive">>;
def err_export_within_export : Error<
"export declaration appears within another export declaration">;
+def err_export_anon_ns_internal : Error<
+ "anonymous namespaces cannot be exported">;
def err_export_internal : Error<
"declaration of %0 with internal linkage cannot be exported">;
def err_export_using_internal : Error<
"using declaration referring to %1 with %select{internal|module|unknown}0 "
"linkage cannot be exported">;
def err_export_not_in_module_interface : Error<
- "export declaration can only be used within a module interface unit"
- "%select{ after the module declaration|}0">;
+ "export declaration can only be used within a module purview">;
def err_export_inline_not_defined : Error<
"inline function not defined%select{| before the private module fragment}0">;
def err_export_partition_impl : Error<
@@ -11249,11 +11319,14 @@ def err_private_module_fragment_not_module_interface : Error<
"private module fragment in module implementation unit">;
def note_not_module_interface_add_export : Note<
"add 'export' here if this is intended to be a module interface unit">;
-def err_invalid_module_name : Error<
- "%0 is %select{an invalid|a reserved}1 name for a module">;
+def err_invalid_module_name : Error<"%0 is an invalid name for a module">;
def err_extern_def_in_header_unit : Error<
"non-inline external definitions are not permitted in C++ header units">;
+def warn_experimental_header_unit : Warning<
+ "the implementation of header units is in an experimental phase">,
+ InGroup<DiagGroup<"experimental-header-units">>;
+
def ext_equivalent_internal_linkage_decl_in_modules : ExtWarn<
"ambiguous use of internal linkage declaration %0 defined in multiple modules">,
InGroup<DiagGroup<"modules-ambiguous-internal-linkage">>;
@@ -11286,15 +11359,7 @@ def err_coroutine_invalid_func_context : Error<
"|a consteval function}0">;
def err_implied_coroutine_type_not_found : Error<
"%0 type was not found; include <coroutine> before defining "
- "a coroutine; include <experimental/coroutine> if your version "
- "of libcxx is less than 14.0">;
-def warn_deprecated_coroutine_namespace : Warning<
- "support for 'std::experimental::%0' will be removed in Clang 17; "
- "use 'std::%0' instead">,
- InGroup<DeprecatedExperimentalCoroutine>;
-def err_mixed_use_std_and_experimental_namespace_for_coroutine : Error<
- "conflicting mixed use of std and std::experimental namespaces for "
- "coroutine components">;
+ "a coroutine">;
def err_implicit_coroutine_std_nothrow_type_not_found : Error<
"std::nothrow was not found; include <new> before defining a coroutine which "
"uses get_return_object_on_allocation_failure()">;
@@ -11378,6 +11443,8 @@ def err_coro_invalid_addr_of_label : Error<
let CategoryName = "Documentation Issue" in {
def warn_not_a_doxygen_trailing_member_comment : Warning<
"not a Doxygen trailing comment">, InGroup<Documentation>, DefaultIgnore;
+def warn_splice_in_doxygen_comment : Warning<
+ "line splicing in Doxygen comments are not supported">, InGroup<Documentation>, DefaultIgnore;
} // end of documentation issue category
let CategoryName = "Nullability Issue" in {
@@ -11509,6 +11576,10 @@ def err_objc_type_args_wrong_arity : Error<
"too %select{many|few}0 type arguments for class %1 (have %2, expected %3)">;
}
+def err_type_available_only_in_default_eval_method : Error<
+ "cannot use type '%0' within '#pragma clang fp eval_method'; type is set "
+ "according to the default eval method for the translation unit">;
+
def err_objc_type_arg_not_id_compatible : Error<
"type argument %0 is neither an Objective-C object nor a block type">;
@@ -11740,6 +11811,9 @@ def err_riscv_builtin_requires_extension : Error<
"builtin requires%select{| at least one of the following extensions to be enabled}0: %1">;
def err_riscv_builtin_invalid_lmul : Error<
"LMUL argument must be in the range [0,3] or [5,7]">;
+def err_riscv_type_requires_extension : Error<
+ "RISC-V type %0 requires the '%1' extension"
+>;
def err_std_source_location_impl_not_found : Error<
"'std::source_location::__impl' was not found; it must be defined before '__builtin_source_location' is called">;
@@ -11752,6 +11826,8 @@ def err_hlsl_attr_invalid_type : Error<
"attribute %0 only applies to a field or parameter of type '%1'">;
def err_hlsl_attr_invalid_ast_node : Error<
"attribute %0 only applies to %1">;
+def err_hlsl_entry_shader_attr_mismatch : Error<
+ "%0 attribute on entry function does not match the pipeline stage">;
def err_hlsl_numthreads_argument_oor : Error<"argument '%select{X|Y|Z}0' to numthreads attribute cannot exceed %1">;
def err_hlsl_numthreads_invalid : Error<"total number of threads cannot exceed %0">;
def err_hlsl_missing_numthreads : Error<"missing numthreads attribute for %0 shader entry">;
@@ -11788,10 +11864,56 @@ def warn_unsafe_buffer_variable : Warning<
InGroup<UnsafeBufferUsage>, DefaultIgnore;
def warn_unsafe_buffer_operation : Warning<
"%select{unsafe pointer operation|unsafe pointer arithmetic|"
- "unsafe buffer access}0">,
+ "unsafe buffer access|function introduces unsafe buffer manipulation}0">,
InGroup<UnsafeBufferUsage>, DefaultIgnore;
def note_unsafe_buffer_operation : Note<
"used%select{| in pointer arithmetic| in buffer access}0 here">;
+def note_unsafe_buffer_variable_fixit_group : Note<
+ "change type of %0 to '%select{std::span|std::array|std::span::iterator}1' to preserve bounds information%select{|, and change %2 to '%select{std::span|std::array|std::span::iterator}1' to propagate bounds information between them}3">;
+def note_safe_buffer_usage_suggestions_disabled : Note<
+ "pass -fsafe-buffer-usage-suggestions to receive code hardening suggestions">;
def err_loongarch_builtin_requires_la32 : Error<
"this builtin requires target: loongarch32">;
+
+def err_builtin_pass_in_regs_non_class : Error<
+ "argument %0 is not an unqualified class type">;
+
+
+// WebAssembly reference type and table diagnostics.
+def err_wasm_reference_pr : Error<
+ "%select{pointer|reference}0 to WebAssembly reference type is not allowed">;
+def err_wasm_ca_reference : Error<
+ "cannot %select{capture|take address of}0 WebAssembly reference">;
+def err_wasm_funcref_not_wasm : Error<
+ "invalid use of '__funcref' keyword outside the WebAssembly triple">;
+def err_wasm_table_pr : Error<
+ "cannot form a %select{pointer|reference}0 to a WebAssembly table">;
+def err_typecheck_wasm_table_must_have_zero_length : Error<
+ "only zero-length WebAssembly tables are currently supported">;
+def err_wasm_table_in_function : Error<
+ "WebAssembly table cannot be declared within a function">;
+def err_wasm_table_as_function_parameter : Error<
+ "cannot use WebAssembly table as a function parameter">;
+def err_wasm_table_invalid_uett_operand : Error<
+ "invalid application of '%0' to WebAssembly table">;
+def err_wasm_cast_table : Error<
+ "cannot cast %select{to|from}0 a WebAssembly table">;
+def err_wasm_table_conditional_expression : Error<
+ "cannot use a WebAssembly table within a branch of a conditional expression">;
+def err_wasm_table_art : Error<
+ "cannot %select{assign|return|throw|subscript}0 a WebAssembly table">;
+def err_wasm_reftype_tc : Error<
+ "cannot %select{throw|catch}0 a WebAssembly reference type">;
+def err_wasm_reftype_exception_spec : Error<
+ "WebAssembly reference type not allowed in exception specification">;
+def err_wasm_table_must_be_static : Error<
+ "WebAssembly table must be static">;
+def err_wasm_reftype_multidimensional_array : Error<
+ "multi-dimensional arrays of WebAssembly references are not allowed">;
+def err_wasm_builtin_arg_must_be_table_type : Error <
+ "%ordinal0 argument must be a WebAssembly table">;
+def err_wasm_builtin_arg_must_match_table_element_type : Error <
+ "%ordinal0 argument must match the element type of the WebAssembly table in the %ordinal1 argument">;
+def err_wasm_builtin_arg_must_be_integer_type : Error <
+ "%ordinal0 argument must be an integer">;
} // end of sema component.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerializationKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerializationKinds.td
index f515ea0d9f6d..5197aa16c4ae 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerializationKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerializationKinds.td
@@ -75,6 +75,7 @@ def note_module_file_conflict : Note<
def remark_module_import : Remark<
"importing module '%0'%select{| into '%3'}2 from '%1'">,
+ ShowInSystemHeader,
InGroup<ModuleImport>;
def err_imported_module_not_found : Error<
@@ -127,6 +128,16 @@ def warn_module_system_bit_conflict : Warning<
"module file '%0' was validated as a system module and is now being imported "
"as a non-system module; any difference in diagnostic options will be ignored">,
InGroup<ModuleConflict>;
+
+def warn_eagerly_load_for_standard_cplusplus_modules : Warning<
+ "the form '-fmodule-file=<BMI-path>' is deprecated for standard C++ named modules;"
+ "consider to use '-fmodule-file=<module-name>=<BMI-path>' instead">,
+ InGroup<DiagGroup<"eager-load-cxx-named-modules">>;
+
+def warn_reading_std_cxx_module_by_implicit_paths : Warning<
+ "it is deprecated to read module '%0' implicitly; it is going to be removed in clang 18; "
+ "consider to specify the dependencies explicitly">,
+ InGroup<DiagGroup<"read-modules-implicitly">>;
} // let CategoryName
let CategoryName = "AST Serialization Issue" in {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h b/contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h
index 6c8da1213655..5d083e68facd 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h
@@ -46,6 +46,7 @@ class DirectoryEntry {
StringRef Name; // Name of the directory.
public:
+ LLVM_DEPRECATED("Use DirectoryEntryRef::getName() instead.", "")
StringRef getName() const { return Name; }
};
@@ -71,7 +72,7 @@ public:
bool isSameRef(DirectoryEntryRef RHS) const { return ME == RHS.ME; }
DirectoryEntryRef() = delete;
- DirectoryEntryRef(const MapEntry &ME) : ME(&ME) {}
+ explicit DirectoryEntryRef(const MapEntry &ME) : ME(&ME) {}
/// Allow DirectoryEntryRef to degrade into 'const DirectoryEntry*' to
/// facilitate incremental adoption.
@@ -196,6 +197,21 @@ static_assert(std::is_trivially_copyable<OptionalDirectoryEntryRef>::value,
} // namespace clang
namespace llvm {
+
+template <> struct PointerLikeTypeTraits<clang::DirectoryEntryRef> {
+ static inline void *getAsVoidPointer(clang::DirectoryEntryRef Dir) {
+ return const_cast<clang::DirectoryEntryRef::MapEntry *>(&Dir.getMapEntry());
+ }
+
+ static inline clang::DirectoryEntryRef getFromVoidPointer(void *Ptr) {
+ return clang::DirectoryEntryRef(
+ *reinterpret_cast<const clang::DirectoryEntryRef::MapEntry *>(Ptr));
+ }
+
+ static constexpr int NumLowBitsAvailable = PointerLikeTypeTraits<
+ const clang::DirectoryEntryRef::MapEntry *>::NumLowBitsAvailable;
+};
+
/// Specialisation of DenseMapInfo for DirectoryEntryRef.
template <> struct DenseMapInfo<clang::DirectoryEntryRef> {
static inline clang::DirectoryEntryRef getEmptyKey() {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/ExceptionSpecificationType.h b/contrib/llvm-project/clang/include/clang/Basic/ExceptionSpecificationType.h
index 5616860555c8..d3c9e9cd063b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/ExceptionSpecificationType.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/ExceptionSpecificationType.h
@@ -50,6 +50,11 @@ inline bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType) {
return ESpecType == EST_Unevaluated || ESpecType == EST_Uninstantiated;
}
+inline bool isExplicitThrowExceptionSpec(ExceptionSpecificationType ESpecType) {
+ return ESpecType == EST_Dynamic || ESpecType == EST_MSAny ||
+ ESpecType == EST_NoexceptFalse;
+}
+
/// Possible results from evaluation of a noexcept expression.
enum CanThrowResult {
CT_Cannot,
diff --git a/contrib/llvm-project/clang/include/clang/Basic/FPOptions.def b/contrib/llvm-project/clang/include/clang/Basic/FPOptions.def
index 0c687e3c3fa0..4517be6f178d 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/FPOptions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/FPOptions.def
@@ -26,4 +26,5 @@ OPTION(AllowReciprocal, bool, 1, NoSignedZero)
OPTION(AllowApproxFunc, bool, 1, AllowReciprocal)
OPTION(FPEvalMethod, LangOptions::FPEvalMethodKind, 2, AllowApproxFunc)
OPTION(Float16ExcessPrecision, LangOptions::ExcessPrecisionKind, 2, FPEvalMethod)
+OPTION(BFloat16ExcessPrecision, LangOptions::ExcessPrecisionKind, 2, FPEvalMethod)
#undef OPTION
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Features.def b/contrib/llvm-project/clang/include/clang/Basic/Features.def
index 0581c61dcba3..e05ac4625827 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Features.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/Features.def
@@ -242,10 +242,15 @@ EXTENSION(c_alignas, true)
EXTENSION(c_alignof, true)
EXTENSION(c_atomic, true)
EXTENSION(c_generic_selections, true)
+EXTENSION(c_generic_selection_with_controlling_type, true)
EXTENSION(c_static_assert, true)
EXTENSION(c_thread_local, PP.getTargetInfo().isTLSSupported())
+// C2x features supported by other languages as extensions
+EXTENSION(c_attributes, true)
// C++11 features supported by other languages as extensions.
EXTENSION(cxx_atomic, LangOpts.CPlusPlus)
+EXTENSION(cxx_default_function_template_args, LangOpts.CPlusPlus)
+EXTENSION(cxx_defaulted_functions, LangOpts.CPlusPlus)
EXTENSION(cxx_deleted_functions, LangOpts.CPlusPlus)
EXTENSION(cxx_explicit_conversions, LangOpts.CPlusPlus)
EXTENSION(cxx_inline_namespaces, LangOpts.CPlusPlus)
@@ -268,6 +273,7 @@ EXTENSION(pragma_clang_attribute_external_declaration, true)
EXTENSION(statement_attributes_with_gnu_syntax, true)
EXTENSION(gnu_asm, LangOpts.GNUAsm)
EXTENSION(gnu_asm_goto_with_outputs, LangOpts.GNUAsm)
+EXTENSION(gnu_asm_goto_with_outputs_full, LangOpts.GNUAsm)
EXTENSION(matrix_types, LangOpts.MatrixTypes)
EXTENSION(matrix_types_scalar_division, true)
EXTENSION(cxx_attributes_on_using_declarations, LangOpts.CPlusPlus11)
diff --git a/contrib/llvm-project/clang/include/clang/Basic/FileEntry.h b/contrib/llvm-project/clang/include/clang/Basic/FileEntry.h
index 93e84cf233a3..50110b8572ef 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/FileEntry.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/FileEntry.h
@@ -70,7 +70,7 @@ public:
const FileEntry &getFileEntry() const {
return *getBaseMapEntry().second->V.get<FileEntry *>();
}
- DirectoryEntryRef getDir() const { return *getBaseMapEntry().second->Dir; }
+ DirectoryEntryRef getDir() const { return ME->second->Dir; }
inline off_t getSize() const;
inline unsigned getUID() const;
@@ -118,17 +118,14 @@ public:
/// VFSs that use external names. In that case, the \c FileEntryRef
/// returned by the \c FileManager will have the external name, and not the
/// name that was used to lookup the file.
- ///
- /// The second type is really a `const MapEntry *`, but that confuses
- /// gcc5.3. Once that's no longer supported, change this back.
- llvm::PointerUnion<FileEntry *, const void *> V;
+ llvm::PointerUnion<FileEntry *, const MapEntry *> V;
- /// Directory the file was found in. Set if and only if V is a FileEntry.
- OptionalDirectoryEntryRef Dir;
+ /// Directory the file was found in.
+ DirectoryEntryRef Dir;
MapValue() = delete;
MapValue(FileEntry &FE, DirectoryEntryRef Dir) : V(&FE), Dir(Dir) {}
- MapValue(MapEntry &ME) : V(&ME) {}
+ MapValue(MapEntry &ME, DirectoryEntryRef Dir) : V(&ME), Dir(Dir) {}
};
/// Check if RHS referenced the file in exactly the same way.
@@ -165,10 +162,10 @@ public:
/// Retrieve the base MapEntry after redirects.
const MapEntry &getBaseMapEntry() const {
- const MapEntry *ME = this->ME;
- while (const void *Next = ME->second->V.dyn_cast<const void *>())
- ME = static_cast<const MapEntry *>(Next);
- return *ME;
+ const MapEntry *Base = ME;
+ while (const auto *Next = Base->second->V.dyn_cast<const MapEntry *>())
+ Base = Next;
+ return *Base;
}
private:
@@ -237,6 +234,21 @@ static_assert(std::is_trivially_copyable<OptionalFileEntryRef>::value,
} // namespace clang
namespace llvm {
+
+template <> struct PointerLikeTypeTraits<clang::FileEntryRef> {
+ static inline void *getAsVoidPointer(clang::FileEntryRef File) {
+ return const_cast<clang::FileEntryRef::MapEntry *>(&File.getMapEntry());
+ }
+
+ static inline clang::FileEntryRef getFromVoidPointer(void *Ptr) {
+ return clang::FileEntryRef(
+ *reinterpret_cast<const clang::FileEntryRef::MapEntry *>(Ptr));
+ }
+
+ static constexpr int NumLowBitsAvailable = PointerLikeTypeTraits<
+ const clang::FileEntryRef::MapEntry *>::NumLowBitsAvailable;
+};
+
/// Specialisation of DenseMapInfo for FileEntryRef.
template <> struct DenseMapInfo<clang::FileEntryRef> {
static inline clang::FileEntryRef getEmptyKey() {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/FileManager.h b/contrib/llvm-project/clang/include/clang/Basic/FileManager.h
index 84d569363a4a..502b69c3b41b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/FileManager.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/FileManager.h
@@ -320,7 +320,7 @@ public:
/// This is a very expensive operation, despite its results being cached,
/// and should only be used when the physical layout of the file system is
/// required, which is (almost) never.
- StringRef getCanonicalName(const DirectoryEntry *Dir);
+ StringRef getCanonicalName(DirectoryEntryRef Dir);
/// Retrieve the canonical name for a given file.
///
diff --git a/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h b/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h
index 1886b1d7ba62..f87f76714815 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h
@@ -50,6 +50,12 @@ enum class ReservedIdentifierStatus {
ContainsDoubleUnderscore,
};
+enum class ReservedLiteralSuffixIdStatus {
+ NotReserved = 0,
+ NotStartsWithUnderscore,
+ ContainsDoubleUnderscore,
+};
+
/// Determine whether an identifier is reserved for use as a name at global
/// scope. Such identifiers might be implementation-specific global functions
/// or variables.
@@ -76,6 +82,21 @@ enum { IdentifierInfoAlignment = 8 };
static constexpr int ObjCOrBuiltinIDBits = 16;
+/// The "layout" of ObjCOrBuiltinID is:
+/// - The first value (0) represents "not a special identifier".
+/// - The next (NUM_OBJC_KEYWORDS - 1) values represent ObjCKeywordKinds (not
+/// including objc_not_keyword).
+/// - The next (NUM_INTERESTING_IDENTIFIERS - 1) values represent
+/// InterestingIdentifierKinds (not including not_interesting).
+/// - The rest of the values represent builtin IDs (not including NotBuiltin).
+static constexpr int FirstObjCKeywordID = 1;
+static constexpr int LastObjCKeywordID =
+ FirstObjCKeywordID + tok::NUM_OBJC_KEYWORDS - 2;
+static constexpr int FirstInterestingIdentifierID = LastObjCKeywordID + 1;
+static constexpr int LastInterestingIdentifierID =
+ FirstInterestingIdentifierID + tok::NUM_INTERESTING_IDENTIFIERS - 2;
+static constexpr int FirstBuiltinID = LastInterestingIdentifierID + 1;
+
/// One of these records is kept for each identifier that
/// is lexed. This contains information about whether the token was \#define'd,
/// is a language keyword, or if it is a front-end token of some sort (e.g. a
@@ -290,7 +311,9 @@ public:
///
/// For example, 'class' will return tok::objc_class if ObjC is enabled.
tok::ObjCKeywordKind getObjCKeywordID() const {
- if (ObjCOrBuiltinID < tok::NUM_OBJC_KEYWORDS)
+ static_assert(FirstObjCKeywordID == 1,
+ "hard-coding this assumption to simplify code");
+ if (ObjCOrBuiltinID <= LastObjCKeywordID)
return tok::ObjCKeywordKind(ObjCOrBuiltinID);
else
return tok::objc_not_keyword;
@@ -301,15 +324,30 @@ public:
///
/// 0 is not-built-in. 1+ are specific builtin functions.
unsigned getBuiltinID() const {
- if (ObjCOrBuiltinID >= tok::NUM_OBJC_KEYWORDS)
- return ObjCOrBuiltinID - tok::NUM_OBJC_KEYWORDS;
+ if (ObjCOrBuiltinID >= FirstBuiltinID)
+ return 1 + (ObjCOrBuiltinID - FirstBuiltinID);
else
return 0;
}
void setBuiltinID(unsigned ID) {
- ObjCOrBuiltinID = ID + tok::NUM_OBJC_KEYWORDS;
- assert(ObjCOrBuiltinID - unsigned(tok::NUM_OBJC_KEYWORDS) == ID
- && "ID too large for field!");
+ assert(ID != 0);
+ ObjCOrBuiltinID = FirstBuiltinID + (ID - 1);
+ assert(getBuiltinID() == ID && "ID too large for field!");
+ }
+ void clearBuiltinID() { ObjCOrBuiltinID = 0; }
+
+ tok::InterestingIdentifierKind getInterestingIdentifierID() const {
+ if (ObjCOrBuiltinID >= FirstInterestingIdentifierID &&
+ ObjCOrBuiltinID <= LastInterestingIdentifierID)
+ return tok::InterestingIdentifierKind(
+ 1 + (ObjCOrBuiltinID - FirstInterestingIdentifierID));
+ else
+ return tok::not_interesting;
+ }
+ void setInterestingIdentifierID(unsigned ID) {
+ assert(ID != tok::not_interesting);
+ ObjCOrBuiltinID = FirstInterestingIdentifierID + (ID - 1);
+ assert(getInterestingIdentifierID() == ID && "ID too large for field!");
}
unsigned getObjCOrBuiltinID() const { return ObjCOrBuiltinID; }
@@ -459,6 +497,10 @@ public:
/// 7.1.3, C++ [lib.global.names]).
ReservedIdentifierStatus isReserved(const LangOptions &LangOpts) const;
+ /// Determine whether \p this is a name reserved for future standardization or
+ /// the implementation (C++ [usrlit.suffix]).
+ ReservedLiteralSuffixIdStatus isReservedLiteralSuffixId() const;
+
/// If the identifier is an "uglified" reserved name, return a cleaned form.
/// e.g. _Foo => Foo. Otherwise, just returns the name.
StringRef deuglifiedName() const;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LLVM.h b/contrib/llvm-project/clang/include/clang/Basic/LLVM.h
index 7ffc4c403473..f4956cd16cbc 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LLVM.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/LLVM.h
@@ -19,9 +19,6 @@
// dependencies.
// Casting.h has complex templates that cannot be easily forward declared.
#include "llvm/Support/Casting.h"
-// None.h includes an enumerator that is desired & cannot be forward declared
-// without a definition of NoneType.
-#include "llvm/ADT/None.h"
// Add this header as a workaround to prevent `too few template arguments for
// class template 'SmallVector'` building error with build compilers like XL.
#include "llvm/ADT/SmallVector.h"
@@ -37,7 +34,6 @@ namespace llvm {
template<unsigned InternalLen> class SmallString;
template<typename T, unsigned N> class SmallVector;
template<typename T> class SmallVectorImpl;
- template <typename T> using Optional = std::optional<T>;
template <class T> class Expected;
template<typename T>
@@ -69,7 +65,6 @@ namespace clang {
// ADT's.
using llvm::ArrayRef;
using llvm::MutableArrayRef;
- using llvm::Optional;
using llvm::OwningArrayRef;
using llvm::SaveAndRestore;
using llvm::SmallString;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def
index d1cbe4306439..f7ec0406f33e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def
@@ -97,7 +97,8 @@ LANGOPT(CPlusPlus11 , 1, 0, "C++11")
LANGOPT(CPlusPlus14 , 1, 0, "C++14")
LANGOPT(CPlusPlus17 , 1, 0, "C++17")
LANGOPT(CPlusPlus20 , 1, 0, "C++20")
-LANGOPT(CPlusPlus2b , 1, 0, "C++2b")
+LANGOPT(CPlusPlus23 , 1, 0, "C++23")
+LANGOPT(CPlusPlus26 , 1, 0, "C++26")
LANGOPT(ObjC , 1, 0, "Objective-C")
BENIGN_LANGOPT(ObjCDefaultSynthProperties , 1, 0,
"Objective-C auto-synthesized properties")
@@ -171,7 +172,6 @@ BENIGN_LANGOPT(EmitAllDecls , 1, 0, "emitting all declarations")
LANGOPT(MathErrno , 1, 1, "errno in math functions")
BENIGN_LANGOPT(HeinousExtensions , 1, 0, "extensions that we really don't like and may be ripped out at any time")
LANGOPT(Modules , 1, 0, "modules semantics")
-COMPATIBLE_LANGOPT(ModulesTS , 1, 0, "C++ Modules TS syntax")
COMPATIBLE_LANGOPT(CPlusPlusModules, 1, 0, "C++ modules syntax")
BENIGN_ENUM_LANGOPT(CompilingModule, CompilingModuleKind, 3, CMK_None,
"compiling a module interface")
@@ -243,7 +243,7 @@ LANGOPT(OpenMP , 32, 0, "OpenMP support and version of OpenMP (31, 40
LANGOPT(OpenMPExtensions , 1, 1, "Enable all Clang extensions for OpenMP directives and clauses")
LANGOPT(OpenMPSimd , 1, 0, "Use SIMD only OpenMP support.")
LANGOPT(OpenMPUseTLS , 1, 0, "Use TLS for threadprivates or runtime calls")
-LANGOPT(OpenMPIsDevice , 1, 0, "Generate code only for OpenMP target device")
+LANGOPT(OpenMPIsTargetDevice , 1, 0, "Generate code only for OpenMP target device")
LANGOPT(OpenMPCUDAMode , 1, 0, "Generate code for OpenMP pragmas in SIMT/SPMD mode")
LANGOPT(OpenMPIRBuilder , 1, 0, "Use the experimental OpenMP-IR-Builder codegen path.")
LANGOPT(OpenMPCUDANumSMs , 32, 0, "Number of SMs for CUDA devices.")
@@ -317,7 +317,8 @@ COMPATIBLE_LANGOPT(ExpStrictFP, 1, false, "Enable experimental strict floating p
BENIGN_LANGOPT(RoundingMath, 1, false, "Do not assume default floating-point rounding behavior")
BENIGN_ENUM_LANGOPT(FPExceptionMode, FPExceptionModeKind, 2, FPE_Default, "FP Exception Behavior Mode type")
BENIGN_ENUM_LANGOPT(FPEvalMethod, FPEvalMethodKind, 2, FEM_UnsetOnCommandLine, "FP type used for floating point arithmetic")
-ENUM_LANGOPT(Float16ExcessPrecision, ExcessPrecisionKind, 2, FPP_Standard, "Intermediate truncation behavior for floating point arithmetic")
+ENUM_LANGOPT(Float16ExcessPrecision, ExcessPrecisionKind, 2, FPP_Standard, "Intermediate truncation behavior for Float16 arithmetic")
+ENUM_LANGOPT(BFloat16ExcessPrecision, ExcessPrecisionKind, 2, FPP_Standard, "Intermediate truncation behavior for BFloat16 arithmetic")
LANGOPT(NoBitFieldTypeAlign , 1, 0, "bit-field type alignment")
LANGOPT(HexagonQdsp6Compat , 1, 0, "hexagon-qdsp6 backward compatibility")
LANGOPT(ObjCAutoRefCount , 1, 0, "Objective-C automated reference counting")
@@ -465,6 +466,8 @@ LANGOPT(IncrementalExtensions, 1, 0, " True if we want to process statements"
"avoid tearing the Lexer and etc. down). Controlled by "
"-fincremental-extensions.")
+BENIGN_LANGOPT(CheckNew, 1, 0, "Do not assume C++ operator new may not return NULL")
+
#undef LANGOPT
#undef COMPATIBLE_LANGOPT
#undef BENIGN_LANGOPT
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h
index 38261c4200e4..3ef68ca8af66 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h
@@ -23,7 +23,7 @@
#include "clang/Basic/Visibility.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/TargetParser/Triple.h"
#include <optional>
#include <string>
#include <vector>
@@ -479,14 +479,18 @@ public:
/// The seed used by the randomize structure layout feature.
std::string RandstructSeed;
- /// Indicates whether the __FILE__ macro should use the target's
- /// platform-specific file separator or whether it should use the build
- /// environment's platform-specific file separator.
+ /// Indicates whether to use target's platform-specific file separator when
+ /// __FILE__ macro is used and when concatenating filename with directory or
+/// to use the build environment's platform-specific file separator.
///
/// The plaform-specific path separator is the backslash(\) for Windows and
/// forward slash (/) elsewhere.
bool UseTargetPathSeparator = false;
+ // Indicates whether we should keep all nullptr checks for pointers
+ // received as a result of a standard operator new (-fcheck-new)
+ bool CheckNew = false;
+
LangOptions();
/// Set language defaults for the given input language and
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangStandard.h b/contrib/llvm-project/clang/include/clang/Basic/LangStandard.h
index 4e78570ff733..fd949bcd6855 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangStandard.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangStandard.h
@@ -55,12 +55,13 @@ enum LangFeatures {
CPlusPlus14 = (1 << 7),
CPlusPlus17 = (1 << 8),
CPlusPlus20 = (1 << 9),
- CPlusPlus2b = (1 << 10),
- Digraphs = (1 << 11),
- GNUMode = (1 << 12),
- HexFloat = (1 << 13),
- OpenCL = (1 << 14),
- HLSL = (1 << 15)
+ CPlusPlus23 = (1 << 10),
+ CPlusPlus26 = (1 << 11),
+ Digraphs = (1 << 12),
+ GNUMode = (1 << 13),
+ HexFloat = (1 << 14),
+ OpenCL = (1 << 15),
+ HLSL = (1 << 16)
};
/// LangStandard - Information about the properties of a particular language
@@ -118,8 +119,11 @@ public:
/// isCPlusPlus20 - Language is a C++20 variant (or later).
bool isCPlusPlus20() const { return Flags & CPlusPlus20; }
- /// isCPlusPlus2b - Language is a post-C++20 variant (or later).
- bool isCPlusPlus2b() const { return Flags & CPlusPlus2b; }
+ /// isCPlusPlus23 - Language is a C++23 variant (or later).
+ bool isCPlusPlus23() const { return Flags & CPlusPlus23; }
+
+ /// isCPlusPlus26 - Language is a C++26 variant (or later).
+ bool isCPlusPlus26() const { return Flags & CPlusPlus26; }
/// hasDigraphs - Language supports digraphs.
bool hasDigraphs() const { return Flags & Digraphs; }
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangStandards.def b/contrib/llvm-project/clang/include/clang/Basic/LangStandards.def
index c5d4da1cb2f9..5c28bdd28ef2 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangStandards.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangStandards.def
@@ -151,15 +151,29 @@ LANGSTANDARD(gnucxx20, "gnu++20",
CPlusPlus20 | Digraphs | HexFloat | GNUMode)
LANGSTANDARD_ALIAS_DEPR(gnucxx20, "gnu++2a")
-LANGSTANDARD(cxx2b, "c++2b",
- CXX, "Working draft for ISO C++ 2023 DIS",
+LANGSTANDARD(cxx23, "c++23",
+ CXX, "ISO C++ 2023 DIS",
LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | CPlusPlus17 |
- CPlusPlus20 | CPlusPlus2b | Digraphs | HexFloat)
+ CPlusPlus20 | CPlusPlus23 | Digraphs | HexFloat)
+LANGSTANDARD_ALIAS_DEPR(cxx23, "c++2b")
-LANGSTANDARD(gnucxx2b, "gnu++2b",
- CXX, "Working draft for ISO C++ 2023 DIS with GNU extensions",
+LANGSTANDARD(gnucxx23, "gnu++23",
+ CXX, "ISO C++ 2023 DIS with GNU extensions",
LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | CPlusPlus17 |
- CPlusPlus20 | CPlusPlus2b | Digraphs | HexFloat | GNUMode)
+ CPlusPlus20 | CPlusPlus23 | Digraphs | HexFloat | GNUMode)
+LANGSTANDARD_ALIAS_DEPR(gnucxx23, "gnu++2b")
+
+LANGSTANDARD(cxx26, "c++2c",
+ CXX, "Working draft for C++2c",
+ LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | CPlusPlus17 |
+ CPlusPlus20 | CPlusPlus23 | CPlusPlus26 | Digraphs | HexFloat)
+LANGSTANDARD_ALIAS(cxx26, "c++26")
+
+LANGSTANDARD(gnucxx26, "gnu++2c",
+ CXX, "Working draft for C++2c with GNU extensions",
+ LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | CPlusPlus17 |
+ CPlusPlus20 | CPlusPlus23 | CPlusPlus26 | Digraphs | HexFloat | GNUMode)
+LANGSTANDARD_ALIAS(gnucxx26, "gnu++26")
// OpenCL
LANGSTANDARD(opencl10, "cl1.0",
@@ -200,14 +214,6 @@ LANGSTANDARD_ALIAS_DEPR(openclcpp10, "CLC++")
LANGSTANDARD_ALIAS_DEPR(openclcpp10, "CLC++1.0")
LANGSTANDARD_ALIAS_DEPR(openclcpp2021, "CLC++2021")
-// CUDA
-LANGSTANDARD(cuda, "cuda", CUDA, "NVIDIA CUDA(tm)",
- LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | Digraphs)
-
-// HIP
-LANGSTANDARD(hip, "hip", HIP, "HIP",
- LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | Digraphs)
-
// HLSL
LANGSTANDARD(hlsl, "hlsl",
HLSL, "High Level Shader Language",
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Linkage.h b/contrib/llvm-project/clang/include/clang/Basic/Linkage.h
index f4d442c084cf..0b7b61954a07 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Linkage.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Linkage.h
@@ -43,11 +43,6 @@ enum Linkage : unsigned char {
/// translation units because of types defined in a inline function.
VisibleNoLinkage,
- /// Internal linkage according to the Modules TS, but can be referred
- /// to from other translation units indirectly through inline functions and
- /// templates in the module interface.
- ModuleInternalLinkage,
-
/// Module linkage, which indicates that the entity can be referred
/// to from other translation units within the same module, and indirectly
/// from arbitrary other translation units through inline functions and
@@ -98,8 +93,6 @@ inline Linkage getFormalLinkage(Linkage L) {
return ExternalLinkage;
case VisibleNoLinkage:
return NoLinkage;
- case ModuleInternalLinkage:
- return InternalLinkage;
default:
return L;
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Module.h b/contrib/llvm-project/clang/include/clang/Basic/Module.h
index c042cf15d19b..a4ad8ad2f768 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Module.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Module.h
@@ -103,16 +103,22 @@ public:
/// The location of the module definition.
SourceLocation DefinitionLoc;
+ // FIXME: Consider if reducing the size of this enum (having Partition and
+ // Named modules only) then representing interface/implementation separately
+ // is more efficient.
enum ModuleKind {
/// This is a module that was defined by a module map and built out
/// of header files.
ModuleMapModule,
+ /// This is a C++ 20 header unit.
+ ModuleHeaderUnit,
+
/// This is a C++20 module interface unit.
ModuleInterfaceUnit,
- /// This is a C++ 20 header unit.
- ModuleHeaderUnit,
+ /// This is a C++20 module implementation unit.
+ ModuleImplementationUnit,
/// This is a C++ 20 module partition interface.
ModulePartitionInterface,
@@ -120,11 +126,17 @@ public:
/// This is a C++ 20 module partition implementation.
ModulePartitionImplementation,
- /// This is a fragment of the global module within some C++ module.
- GlobalModuleFragment,
+ /// This is the explicit Global Module Fragment of a modular TU.
+ /// As per C++ [module.global.frag].
+ ExplicitGlobalModuleFragment,
/// This is the private module fragment within some C++ module.
PrivateModuleFragment,
+
+ /// This is an implicit fragment of the global module which contains
+ /// only language linkage declarations (made in the purview of the
+ /// named module).
+ ImplicitGlobalModuleFragment,
};
/// The kind of this module.
@@ -137,15 +149,14 @@ public:
/// The build directory of this module. This is the directory in
/// which the module is notionally built, and relative to which its headers
/// are found.
- const DirectoryEntry *Directory = nullptr;
+ OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr Directory;
/// The presumed file name for the module map defining this module.
/// Only non-empty when building from preprocessed source.
std::string PresumedModuleMapFile;
/// The umbrella header or directory.
- llvm::PointerUnion<const FileEntryRef::MapEntry *, const DirectoryEntry *>
- Umbrella;
+ llvm::PointerUnion<FileEntryRef, DirectoryEntryRef> Umbrella;
/// The module signature.
ASTFileSignature Signature;
@@ -163,14 +174,29 @@ public:
/// Does this Module scope describe part of the purview of a standard named
/// C++ module?
bool isModulePurview() const {
- return Kind == ModuleInterfaceUnit || Kind == ModulePartitionInterface ||
- Kind == ModulePartitionImplementation ||
- Kind == PrivateModuleFragment;
+ switch (Kind) {
+ case ModuleInterfaceUnit:
+ case ModuleImplementationUnit:
+ case ModulePartitionInterface:
+ case ModulePartitionImplementation:
+ case PrivateModuleFragment:
+ return true;
+ default:
+ return false;
+ }
}
/// Does this Module scope describe a fragment of the global module within
/// some C++ module.
- bool isGlobalModule() const { return Kind == GlobalModuleFragment; }
+ bool isGlobalModule() const {
+ return isExplicitGlobalModule() || isImplicitGlobalModule();
+ }
+ bool isExplicitGlobalModule() const {
+ return Kind == ExplicitGlobalModuleFragment;
+ }
+ bool isImplicitGlobalModule() const {
+ return Kind == ImplicitGlobalModuleFragment;
+ }
bool isPrivateModule() const { return Kind == PrivateModuleFragment; }
@@ -189,7 +215,7 @@ private:
OptionalFileEntryRef ASTFile;
/// The top-level headers associated with this module.
- llvm::SmallSetVector<const FileEntry *, 2> TopHeaders;
+ llvm::SmallSetVector<FileEntryRef, 2> TopHeaders;
/// top-level header filenames that aren't resolved to FileEntries yet.
std::vector<std::string> TopHeaderNames;
@@ -215,9 +241,7 @@ public:
struct Header {
std::string NameAsWritten;
std::string PathRelativeToRootModuleDirectory;
- OptionalFileEntryRefDegradesToFileEntryPtr Entry;
-
- explicit operator bool() { return Entry.has_value(); }
+ FileEntryRef Entry;
};
/// Information about a directory name as found in the module map
@@ -225,9 +249,7 @@ public:
struct DirectoryName {
std::string NameAsWritten;
std::string PathRelativeToRootModuleDirectory;
- const DirectoryEntry *Entry;
-
- explicit operator bool() { return Entry; }
+ DirectoryEntryRef Entry;
};
/// The headers that are part of this module.
@@ -547,6 +569,11 @@ public:
Kind == ModulePartitionImplementation;
}
+ /// Is this a module implementation.
+ bool isModuleImplementation() const {
+ return Kind == ModuleImplementationUnit;
+ }
+
/// Is this module a header unit.
bool isHeaderUnit() const { return Kind == ModuleHeaderUnit; }
// Is this a C++20 module interface or a partition.
@@ -619,27 +646,30 @@ public:
getTopLevelModule()->ASTFile = File;
}
- /// Retrieve the directory for which this module serves as the
- /// umbrella.
- DirectoryName getUmbrellaDir() const;
+ /// Retrieve the umbrella directory as written.
+ std::optional<DirectoryName> getUmbrellaDirAsWritten() const {
+ if (Umbrella && Umbrella.is<DirectoryEntryRef>())
+ return DirectoryName{UmbrellaAsWritten,
+ UmbrellaRelativeToRootModuleDirectory,
+ Umbrella.get<DirectoryEntryRef>()};
+ return std::nullopt;
+ }
- /// Retrieve the header that serves as the umbrella header for this
- /// module.
- Header getUmbrellaHeader() const {
- if (auto *ME = Umbrella.dyn_cast<const FileEntryRef::MapEntry *>())
+ /// Retrieve the umbrella header as written.
+ std::optional<Header> getUmbrellaHeaderAsWritten() const {
+ if (Umbrella && Umbrella.is<FileEntryRef>())
return Header{UmbrellaAsWritten, UmbrellaRelativeToRootModuleDirectory,
- FileEntryRef(*ME)};
- return Header{};
+ Umbrella.get<FileEntryRef>()};
+ return std::nullopt;
}
- /// Determine whether this module has an umbrella directory that is
- /// not based on an umbrella header.
- bool hasUmbrellaDir() const {
- return Umbrella && Umbrella.is<const DirectoryEntry *>();
- }
+ /// Get the effective umbrella directory for this module: either the one
+ /// explicitly written in the module map file, or the parent of the umbrella
+ /// header.
+ OptionalDirectoryEntryRef getEffectiveUmbrellaDir() const;
/// Add a top-level header associated with this module.
- void addTopHeader(const FileEntry *File);
+ void addTopHeader(FileEntryRef File);
/// Add a top-level header filename associated with this module.
void addTopHeaderFilename(StringRef Filename) {
@@ -647,7 +677,7 @@ public:
}
/// The top-level headers associated with this module.
- ArrayRef<const FileEntry *> getTopHeaders(FileManager &FileMgr);
+ ArrayRef<FileEntryRef> getTopHeaders(FileManager &FileMgr);
/// Determine whether this module has declared its intention to
/// directly use another module.
@@ -709,16 +739,11 @@ public:
using submodule_iterator = std::vector<Module *>::iterator;
using submodule_const_iterator = std::vector<Module *>::const_iterator;
- submodule_iterator submodule_begin() { return SubModules.begin(); }
- submodule_const_iterator submodule_begin() const {return SubModules.begin();}
- submodule_iterator submodule_end() { return SubModules.end(); }
- submodule_const_iterator submodule_end() const { return SubModules.end(); }
-
llvm::iterator_range<submodule_iterator> submodules() {
- return llvm::make_range(submodule_begin(), submodule_end());
+ return llvm::make_range(SubModules.begin(), SubModules.end());
}
llvm::iterator_range<submodule_const_iterator> submodules() const {
- return llvm::make_range(submodule_begin(), submodule_end());
+ return llvm::make_range(SubModules.begin(), SubModules.end());
}
/// Appends this module's list of exported modules to \p Exported.
@@ -794,6 +819,11 @@ public:
ConflictCallback Cb = [](ArrayRef<Module *>, Module *,
StringRef) {});
+ /// Make transitive imports visible for [module.import]/7.
+ void makeTransitiveImportsVisible(
+ Module *M, SourceLocation Loc, VisibleCallback Vis = [](Module *) {},
+ ConflictCallback Cb = [](ArrayRef<Module *>, Module *, StringRef) {});
+
private:
/// Import locations for each visible module. Indexed by the module's
/// VisibilityID.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h b/contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h
index 30a5fde40754..0f714ed3ad60 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h
@@ -16,10 +16,10 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/HashBuilder.h"
#include "llvm/Support/VersionTuple.h"
+#include "llvm/TargetParser/Triple.h"
#include <string>
namespace clang {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensionTypes.def b/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensionTypes.def
index 84ffbe936b77..17c72d69a020 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensionTypes.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensionTypes.def
@@ -28,10 +28,10 @@ INTEL_SUBGROUP_AVC_TYPE(mce_result_t, MceResult)
INTEL_SUBGROUP_AVC_TYPE(ime_result_t, ImeResult)
INTEL_SUBGROUP_AVC_TYPE(ref_result_t, RefResult)
INTEL_SUBGROUP_AVC_TYPE(sic_result_t, SicResult)
-INTEL_SUBGROUP_AVC_TYPE(ime_result_single_reference_streamout_t, ImeResultSingleRefStreamout)
-INTEL_SUBGROUP_AVC_TYPE(ime_result_dual_reference_streamout_t, ImeResultDualRefStreamout)
-INTEL_SUBGROUP_AVC_TYPE(ime_single_reference_streamin_t, ImeSingleRefStreamin)
-INTEL_SUBGROUP_AVC_TYPE(ime_dual_reference_streamin_t, ImeDualRefStreamin)
+INTEL_SUBGROUP_AVC_TYPE(ime_result_single_reference_streamout_t, ImeResultSingleReferenceStreamout)
+INTEL_SUBGROUP_AVC_TYPE(ime_result_dual_reference_streamout_t, ImeResultDualReferenceStreamout)
+INTEL_SUBGROUP_AVC_TYPE(ime_single_reference_streamin_t, ImeSingleReferenceStreamin)
+INTEL_SUBGROUP_AVC_TYPE(ime_dual_reference_streamin_t, ImeDualReferenceStreamin)
#undef INTEL_SUBGROUP_AVC_TYPE
#endif // INTEL_SUBGROUP_AVC_TYPE
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def b/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def
index 70b4f15a95a7..6f73b2613750 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def
@@ -31,7 +31,7 @@
// If extensions are to be enumerated without any information,
// define OPENCLEXTNAME(ext) where ext is the name of the extension.
//
-// Difference between optional core feature and core feature is that the
+// Difference between optional core feature and core feature is that the
// later is unconditionally supported in specific OpenCL version.
//
// As per The OpenCL Extension Specification, Section 1.2, in this file, an
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def b/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def
index 64c488caa6a9..c999b8b9c4ff 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def
@@ -80,6 +80,9 @@
#ifndef OPENMP_NUMTASKS_MODIFIER
#define OPENMP_NUMTASKS_MODIFIER(Name)
#endif
+#ifndef OPENMP_DOACROSS_MODIFIER
+#define OPENMP_DOACROSS_MODIFIER(Name)
+#endif
// Static attributes for 'schedule' clause.
OPENMP_SCHEDULE_KIND(static)
@@ -201,6 +204,12 @@ OPENMP_GRAINSIZE_MODIFIER(strict)
// Modifiers for the 'num_tasks' clause.
OPENMP_NUMTASKS_MODIFIER(strict)
+// Modifiers for the 'doacross' clause.
+OPENMP_DOACROSS_MODIFIER(source)
+OPENMP_DOACROSS_MODIFIER(sink)
+OPENMP_DOACROSS_MODIFIER(sink_omp_cur_iteration)
+OPENMP_DOACROSS_MODIFIER(source_omp_cur_iteration)
+
#undef OPENMP_NUMTASKS_MODIFIER
#undef OPENMP_GRAINSIZE_MODIFIER
#undef OPENMP_BIND_KIND
@@ -224,4 +233,5 @@ OPENMP_NUMTASKS_MODIFIER(strict)
#undef OPENMP_DIST_SCHEDULE_KIND
#undef OPENMP_DEFAULTMAP_KIND
#undef OPENMP_DEFAULTMAP_MODIFIER
+#undef OPENMP_DOACROSS_MODIFIER
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h b/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h
index 6491ee27782c..f5fc7a8ce5bb 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h
@@ -215,6 +215,13 @@ enum OpenMPNumTasksClauseModifier {
OMPC_NUMTASKS_unknown
};
+/// OpenMP dependence types for 'doacross' clause.
+enum OpenMPDoacrossClauseModifier {
+#define OPENMP_DOACROSS_MODIFIER(Name) OMPC_DOACROSS_##Name,
+#include "clang/Basic/OpenMPKinds.def"
+ OMPC_DOACROSS_unknown
+};
+
/// Contains 'interop' data for 'append_args' and 'init' clauses.
class Expr;
struct OMPInteropInfo final {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/ParsedAttrInfo.h b/contrib/llvm-project/clang/include/clang/Basic/ParsedAttrInfo.h
new file mode 100644
index 000000000000..4444f12de9ce
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/ParsedAttrInfo.h
@@ -0,0 +1,152 @@
+//===- ParsedAttrInfo.h - Info needed to parse an attribute -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ParsedAttrInfo class, which dictates how to
+// parse an attribute. This class is the one that plugins derive to
+// define a new attribute.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_PARSEDATTRINFO_H
+#define LLVM_CLANG_BASIC_PARSEDATTRINFO_H
+
+#include "clang/Basic/AttrSubjectMatchRules.h"
+#include "clang/Basic/AttributeCommonInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Registry.h"
+#include <climits>
+#include <list>
+
+namespace clang {
+
+class Decl;
+class LangOptions;
+class ParsedAttr;
+class Sema;
+class Stmt;
+class TargetInfo;
+
+struct ParsedAttrInfo {
+ /// Corresponds to the Kind enum.
+ unsigned AttrKind : 16;
+ /// The number of required arguments of this attribute.
+ unsigned NumArgs : 4;
+ /// The number of optional arguments of this attributes.
+ unsigned OptArgs : 4;
+ /// The number of non-fake arguments specified in the attribute definition.
+ unsigned NumArgMembers : 4;
+ /// True if the parsing does not match the semantic content.
+ unsigned HasCustomParsing : 1;
+ // True if this attribute accepts expression parameter pack expansions.
+ unsigned AcceptsExprPack : 1;
+ /// True if this attribute is only available for certain targets.
+ unsigned IsTargetSpecific : 1;
+ /// True if this attribute applies to types.
+ unsigned IsType : 1;
+ /// True if this attribute applies to statements.
+ unsigned IsStmt : 1;
+ /// True if this attribute has any spellings that are known to gcc.
+ unsigned IsKnownToGCC : 1;
+ /// True if this attribute is supported by #pragma clang attribute.
+ unsigned IsSupportedByPragmaAttribute : 1;
+ /// The syntaxes supported by this attribute and how they're spelled.
+ struct Spelling {
+ AttributeCommonInfo::Syntax Syntax;
+ const char *NormalizedFullName;
+ };
+ ArrayRef<Spelling> Spellings;
+ // The names of the known arguments of this attribute.
+ ArrayRef<const char *> ArgNames;
+
+protected:
+ constexpr ParsedAttrInfo(AttributeCommonInfo::Kind AttrKind =
+ AttributeCommonInfo::NoSemaHandlerAttribute)
+ : AttrKind(AttrKind), NumArgs(0), OptArgs(0), NumArgMembers(0),
+ HasCustomParsing(0), AcceptsExprPack(0), IsTargetSpecific(0), IsType(0),
+ IsStmt(0), IsKnownToGCC(0), IsSupportedByPragmaAttribute(0) {}
+
+ constexpr ParsedAttrInfo(AttributeCommonInfo::Kind AttrKind, unsigned NumArgs,
+ unsigned OptArgs, unsigned NumArgMembers,
+ unsigned HasCustomParsing, unsigned AcceptsExprPack,
+ unsigned IsTargetSpecific, unsigned IsType,
+ unsigned IsStmt, unsigned IsKnownToGCC,
+ unsigned IsSupportedByPragmaAttribute,
+ ArrayRef<Spelling> Spellings,
+ ArrayRef<const char *> ArgNames)
+ : AttrKind(AttrKind), NumArgs(NumArgs), OptArgs(OptArgs),
+ NumArgMembers(NumArgMembers), HasCustomParsing(HasCustomParsing),
+ AcceptsExprPack(AcceptsExprPack), IsTargetSpecific(IsTargetSpecific),
+ IsType(IsType), IsStmt(IsStmt), IsKnownToGCC(IsKnownToGCC),
+ IsSupportedByPragmaAttribute(IsSupportedByPragmaAttribute),
+ Spellings(Spellings), ArgNames(ArgNames) {}
+
+public:
+ virtual ~ParsedAttrInfo() = default;
+
+ /// Check if this attribute has specified spelling.
+ bool hasSpelling(AttributeCommonInfo::Syntax Syntax, StringRef Name) const {
+ return llvm::any_of(Spellings, [&](const Spelling &S) {
+ return (S.Syntax == Syntax && S.NormalizedFullName == Name);
+ });
+ }
+
+ /// Check if this attribute appertains to D, and issue a diagnostic if not.
+ virtual bool diagAppertainsToDecl(Sema &S, const ParsedAttr &Attr,
+ const Decl *D) const {
+ return true;
+ }
+ /// Check if this attribute appertains to St, and issue a diagnostic if not.
+ virtual bool diagAppertainsToStmt(Sema &S, const ParsedAttr &Attr,
+ const Stmt *St) const {
+ return true;
+ }
+ /// Check if the given attribute is mutually exclusive with other attributes
+ /// already applied to the given declaration.
+ virtual bool diagMutualExclusion(Sema &S, const ParsedAttr &A,
+ const Decl *D) const {
+ return true;
+ }
+ /// Check if this attribute is allowed by the language we are compiling.
+ virtual bool acceptsLangOpts(const LangOptions &LO) const { return true; }
+
+ /// Check if this attribute is allowed when compiling for the given target.
+ virtual bool existsInTarget(const TargetInfo &Target) const { return true; }
+ /// Convert the spelling index of Attr to a semantic spelling enum value.
+ virtual unsigned
+ spellingIndexToSemanticSpelling(const ParsedAttr &Attr) const {
+ return UINT_MAX;
+ }
+ /// Returns true if the specified parameter index for this attribute in
+ /// Attr.td is an ExprArgument or VariadicExprArgument, or a subclass thereof;
+ /// returns false otherwise.
+ virtual bool isParamExpr(size_t N) const { return false; }
+ /// Populate Rules with the match rules of this attribute.
+ virtual void getPragmaAttributeMatchRules(
+ llvm::SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>> &Rules,
+ const LangOptions &LangOpts) const {}
+
+ enum AttrHandling { NotHandled, AttributeApplied, AttributeNotApplied };
+ /// If this ParsedAttrInfo knows how to handle this ParsedAttr applied to this
+ /// Decl then do so and return either AttributeApplied if it was applied or
+ /// AttributeNotApplied if it wasn't. Otherwise return NotHandled.
+ virtual AttrHandling handleDeclAttribute(Sema &S, Decl *D,
+ const ParsedAttr &Attr) const {
+ return NotHandled;
+ }
+
+ static const ParsedAttrInfo &get(const AttributeCommonInfo &A);
+ static ArrayRef<const ParsedAttrInfo *> getAllBuiltin();
+};
+
+typedef llvm::Registry<ParsedAttrInfo> ParsedAttrInfoRegistry;
+
+const std::list<std::unique_ptr<ParsedAttrInfo>> &getAttributePluginInstances();
+
+} // namespace clang
+
+#endif // LLVM_CLANG_BASIC_PARSEDATTRINFO_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def b/contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def
index 1d4024dfb20d..575bca58b51e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def
@@ -40,6 +40,10 @@
//
//===----------------------------------------------------------------------===//
+#ifndef RVV_TYPE
+#define RVV_TYPE(Name, Id, SingletonId)
+#endif
+
#ifndef RVV_VECTOR_TYPE
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, IsFP)\
RVV_TYPE(Name, Id, SingletonId)
@@ -140,6 +144,292 @@ RVV_PREDICATE_TYPE("__rvv_bool16_t", RvvBool16, RvvBool16Ty, 4)
RVV_PREDICATE_TYPE("__rvv_bool32_t", RvvBool32, RvvBool32Ty, 2)
RVV_PREDICATE_TYPE("__rvv_bool64_t", RvvBool64, RvvBool64Ty, 1)
+//===- Tuple vector types -------------------------------------------------===//
+//===- Int8 tuple types --------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x2_t", RvvInt8mf8x2, RvvInt8mf8x2Ty, 1, 8, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x3_t", RvvInt8mf8x3, RvvInt8mf8x3Ty, 1, 8, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x4_t", RvvInt8mf8x4, RvvInt8mf8x4Ty, 1, 8, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x5_t", RvvInt8mf8x5, RvvInt8mf8x5Ty, 1, 8, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x6_t", RvvInt8mf8x6, RvvInt8mf8x6Ty, 1, 8, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x7_t", RvvInt8mf8x7, RvvInt8mf8x7Ty, 1, 8, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x8_t", RvvInt8mf8x8, RvvInt8mf8x8Ty, 1, 8, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x2_t", RvvInt8mf4x2, RvvInt8mf4x2Ty, 2, 8, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x3_t", RvvInt8mf4x3, RvvInt8mf4x3Ty, 2, 8, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x4_t", RvvInt8mf4x4, RvvInt8mf4x4Ty, 2, 8, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x5_t", RvvInt8mf4x5, RvvInt8mf4x5Ty, 2, 8, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x6_t", RvvInt8mf4x6, RvvInt8mf4x6Ty, 2, 8, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x7_t", RvvInt8mf4x7, RvvInt8mf4x7Ty, 2, 8, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x8_t", RvvInt8mf4x8, RvvInt8mf4x8Ty, 2, 8, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x2_t", RvvInt8mf2x2, RvvInt8mf2x2Ty, 4, 8, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x3_t", RvvInt8mf2x3, RvvInt8mf2x3Ty, 4, 8, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x4_t", RvvInt8mf2x4, RvvInt8mf2x4Ty, 4, 8, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x5_t", RvvInt8mf2x5, RvvInt8mf2x5Ty, 4, 8, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x6_t", RvvInt8mf2x6, RvvInt8mf2x6Ty, 4, 8, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x7_t", RvvInt8mf2x7, RvvInt8mf2x7Ty, 4, 8, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x8_t", RvvInt8mf2x8, RvvInt8mf2x8Ty, 4, 8, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x2_t", RvvInt8m1x2, RvvInt8m1x2Ty, 8, 8, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x3_t", RvvInt8m1x3, RvvInt8m1x3Ty, 8, 8, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x4_t", RvvInt8m1x4, RvvInt8m1x4Ty, 8, 8, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x5_t", RvvInt8m1x5, RvvInt8m1x5Ty, 8, 8, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x6_t", RvvInt8m1x6, RvvInt8m1x6Ty, 8, 8, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x7_t", RvvInt8m1x7, RvvInt8m1x7Ty, 8, 8, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x8_t", RvvInt8m1x8, RvvInt8m1x8Ty, 8, 8, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int8m2x2_t", RvvInt8m2x2, RvvInt8m2x2Ty, 16, 8, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m2x3_t", RvvInt8m2x3, RvvInt8m2x3Ty, 16, 8, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m2x4_t", RvvInt8m2x4, RvvInt8m2x4Ty, 16, 8, 4, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int8m4x2_t", RvvInt8m4x2, RvvInt8m4x2Ty, 32, 8, 2, true)
+
+//===- Uint8 tuple types -------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x2_t", RvvUint8mf8x2, RvvUint8mf8x2Ty, 1, 8, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x3_t", RvvUint8mf8x3, RvvUint8mf8x3Ty, 1, 8, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x4_t", RvvUint8mf8x4, RvvUint8mf8x4Ty, 1, 8, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x5_t", RvvUint8mf8x5, RvvUint8mf8x5Ty, 1, 8, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x6_t", RvvUint8mf8x6, RvvUint8mf8x6Ty, 1, 8, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x7_t", RvvUint8mf8x7, RvvUint8mf8x7Ty, 1, 8, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x8_t", RvvUint8mf8x8, RvvUint8mf8x8Ty, 1, 8, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x2_t", RvvUint8mf4x2, RvvUint8mf4x2Ty, 2, 8, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x3_t", RvvUint8mf4x3, RvvUint8mf4x3Ty, 2, 8, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x4_t", RvvUint8mf4x4, RvvUint8mf4x4Ty, 2, 8, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x5_t", RvvUint8mf4x5, RvvUint8mf4x5Ty, 2, 8, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x6_t", RvvUint8mf4x6, RvvUint8mf4x6Ty, 2, 8, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x7_t", RvvUint8mf4x7, RvvUint8mf4x7Ty, 2, 8, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x8_t", RvvUint8mf4x8, RvvUint8mf4x8Ty, 2, 8, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x2_t", RvvUint8mf2x2, RvvUint8mf2x2Ty, 4, 8, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x3_t", RvvUint8mf2x3, RvvUint8mf2x3Ty, 4, 8, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x4_t", RvvUint8mf2x4, RvvUint8mf2x4Ty, 4, 8, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x5_t", RvvUint8mf2x5, RvvUint8mf2x5Ty, 4, 8, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x6_t", RvvUint8mf2x6, RvvUint8mf2x6Ty, 4, 8, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x7_t", RvvUint8mf2x7, RvvUint8mf2x7Ty, 4, 8, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x8_t", RvvUint8mf2x8, RvvUint8mf2x8Ty, 4, 8, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x2_t", RvvUint8m1x2, RvvUint8m1x2Ty, 8, 8, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x3_t", RvvUint8m1x3, RvvUint8m1x3Ty, 8, 8, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x4_t", RvvUint8m1x4, RvvUint8m1x4Ty, 8, 8, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x5_t", RvvUint8m1x5, RvvUint8m1x5Ty, 8, 8, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x6_t", RvvUint8m1x6, RvvUint8m1x6Ty, 8, 8, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x7_t", RvvUint8m1x7, RvvUint8m1x7Ty, 8, 8, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x8_t", RvvUint8m1x8, RvvUint8m1x8Ty, 8, 8, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint8m2x2_t", RvvUint8m2x2, RvvUint8m2x2Ty, 16, 8, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m2x3_t", RvvUint8m2x3, RvvUint8m2x3Ty, 16, 8, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m2x4_t", RvvUint8m2x4, RvvUint8m2x4Ty, 16, 8, 4, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint8m4x2_t", RvvUint8m4x2, RvvUint8m4x2Ty, 32, 8, 2, false)
+
+//===- Int16 tuple types --------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x2_t", RvvInt16mf4x2, RvvInt16mf4x2Ty, 1, 16, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x3_t", RvvInt16mf4x3, RvvInt16mf4x3Ty, 1, 16, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x4_t", RvvInt16mf4x4, RvvInt16mf4x4Ty, 1, 16, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x5_t", RvvInt16mf4x5, RvvInt16mf4x5Ty, 1, 16, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x6_t", RvvInt16mf4x6, RvvInt16mf4x6Ty, 1, 16, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x7_t", RvvInt16mf4x7, RvvInt16mf4x7Ty, 1, 16, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x8_t", RvvInt16mf4x8, RvvInt16mf4x8Ty, 1, 16, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x2_t", RvvInt16mf2x2, RvvInt16mf2x2Ty, 2, 16, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x3_t", RvvInt16mf2x3, RvvInt16mf2x3Ty, 2, 16, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x4_t", RvvInt16mf2x4, RvvInt16mf2x4Ty, 2, 16, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x5_t", RvvInt16mf2x5, RvvInt16mf2x5Ty, 2, 16, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x6_t", RvvInt16mf2x6, RvvInt16mf2x6Ty, 2, 16, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x7_t", RvvInt16mf2x7, RvvInt16mf2x7Ty, 2, 16, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x8_t", RvvInt16mf2x8, RvvInt16mf2x8Ty, 2, 16, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x2_t", RvvInt16m1x2, RvvInt16m1x2Ty, 4, 16, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x3_t", RvvInt16m1x3, RvvInt16m1x3Ty, 4, 16, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x4_t", RvvInt16m1x4, RvvInt16m1x4Ty, 4, 16, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x5_t", RvvInt16m1x5, RvvInt16m1x5Ty, 4, 16, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x6_t", RvvInt16m1x6, RvvInt16m1x6Ty, 4, 16, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x7_t", RvvInt16m1x7, RvvInt16m1x7Ty, 4, 16, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x8_t", RvvInt16m1x8, RvvInt16m1x8Ty, 4, 16, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int16m2x2_t", RvvInt16m2x2, RvvInt16m2x2Ty, 8, 16, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m2x3_t", RvvInt16m2x3, RvvInt16m2x3Ty, 8, 16, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m2x4_t", RvvInt16m2x4, RvvInt16m2x4Ty, 8, 16, 4, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int16m4x2_t", RvvInt16m4x2, RvvInt16m4x2Ty, 16, 16, 2, true)
+
+//===- Uint16 tuple types -------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x2_t", RvvUint16mf4x2, RvvUint16mf4x2Ty, 1, 16, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x3_t", RvvUint16mf4x3, RvvUint16mf4x3Ty, 1, 16, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x4_t", RvvUint16mf4x4, RvvUint16mf4x4Ty, 1, 16, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x5_t", RvvUint16mf4x5, RvvUint16mf4x5Ty, 1, 16, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x6_t", RvvUint16mf4x6, RvvUint16mf4x6Ty, 1, 16, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x7_t", RvvUint16mf4x7, RvvUint16mf4x7Ty, 1, 16, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x8_t", RvvUint16mf4x8, RvvUint16mf4x8Ty, 1, 16, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x2_t", RvvUint16mf2x2, RvvUint16mf2x2Ty, 2, 16, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x3_t", RvvUint16mf2x3, RvvUint16mf2x3Ty, 2, 16, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x4_t", RvvUint16mf2x4, RvvUint16mf2x4Ty, 2, 16, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x5_t", RvvUint16mf2x5, RvvUint16mf2x5Ty, 2, 16, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x6_t", RvvUint16mf2x6, RvvUint16mf2x6Ty, 2, 16, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x7_t", RvvUint16mf2x7, RvvUint16mf2x7Ty, 2, 16, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x8_t", RvvUint16mf2x8, RvvUint16mf2x8Ty, 2, 16, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x2_t", RvvUint16m1x2, RvvUint16m1x2Ty, 4, 16, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x3_t", RvvUint16m1x3, RvvUint16m1x3Ty, 4, 16, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x4_t", RvvUint16m1x4, RvvUint16m1x4Ty, 4, 16, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x5_t", RvvUint16m1x5, RvvUint16m1x5Ty, 4, 16, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x6_t", RvvUint16m1x6, RvvUint16m1x6Ty, 4, 16, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x7_t", RvvUint16m1x7, RvvUint16m1x7Ty, 4, 16, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x8_t", RvvUint16m1x8, RvvUint16m1x8Ty, 4, 16, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint16m2x2_t", RvvUint16m2x2, RvvUint16m2x2Ty, 8, 16, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m2x3_t", RvvUint16m2x3, RvvUint16m2x3Ty, 8, 16, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m2x4_t", RvvUint16m2x4, RvvUint16m2x4Ty, 8, 16, 4, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint16m4x2_t", RvvUint16m4x2, RvvUint16m4x2Ty, 16, 16, 2, false)
+
+//===- Int32 tuple types --------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x2_t", RvvInt32mf2x2, RvvInt32mf2x2Ty, 1, 32, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x3_t", RvvInt32mf2x3, RvvInt32mf2x3Ty, 1, 32, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x4_t", RvvInt32mf2x4, RvvInt32mf2x4Ty, 1, 32, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x5_t", RvvInt32mf2x5, RvvInt32mf2x5Ty, 1, 32, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x6_t", RvvInt32mf2x6, RvvInt32mf2x6Ty, 1, 32, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x7_t", RvvInt32mf2x7, RvvInt32mf2x7Ty, 1, 32, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x8_t", RvvInt32mf2x8, RvvInt32mf2x8Ty, 1, 32, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x2_t", RvvInt32m1x2, RvvInt32m1x2Ty, 2, 32, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x3_t", RvvInt32m1x3, RvvInt32m1x3Ty, 2, 32, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x4_t", RvvInt32m1x4, RvvInt32m1x4Ty, 2, 32, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x5_t", RvvInt32m1x5, RvvInt32m1x5Ty, 2, 32, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x6_t", RvvInt32m1x6, RvvInt32m1x6Ty, 2, 32, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x7_t", RvvInt32m1x7, RvvInt32m1x7Ty, 2, 32, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x8_t", RvvInt32m1x8, RvvInt32m1x8Ty, 2, 32, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int32m2x2_t", RvvInt32m2x2, RvvInt32m2x2Ty, 4, 32, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m2x3_t", RvvInt32m2x3, RvvInt32m2x3Ty, 4, 32, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m2x4_t", RvvInt32m2x4, RvvInt32m2x4Ty, 4, 32, 4, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int32m4x2_t", RvvInt32m4x2, RvvInt32m4x2Ty, 8, 32, 2, true)
+
+//===- Uint32 tuple types -------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x2_t", RvvUint32mf2x2, RvvUint32mf2x2Ty, 1, 32, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x3_t", RvvUint32mf2x3, RvvUint32mf2x3Ty, 1, 32, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x4_t", RvvUint32mf2x4, RvvUint32mf2x4Ty, 1, 32, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x5_t", RvvUint32mf2x5, RvvUint32mf2x5Ty, 1, 32, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x6_t", RvvUint32mf2x6, RvvUint32mf2x6Ty, 1, 32, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x7_t", RvvUint32mf2x7, RvvUint32mf2x7Ty, 1, 32, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x8_t", RvvUint32mf2x8, RvvUint32mf2x8Ty, 1, 32, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x2_t", RvvUint32m1x2, RvvUint32m1x2Ty, 2, 32, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x3_t", RvvUint32m1x3, RvvUint32m1x3Ty, 2, 32, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x4_t", RvvUint32m1x4, RvvUint32m1x4Ty, 2, 32, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x5_t", RvvUint32m1x5, RvvUint32m1x5Ty, 2, 32, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x6_t", RvvUint32m1x6, RvvUint32m1x6Ty, 2, 32, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x7_t", RvvUint32m1x7, RvvUint32m1x7Ty, 2, 32, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x8_t", RvvUint32m1x8, RvvUint32m1x8Ty, 2, 32, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint32m2x2_t", RvvUint32m2x2, RvvUint32m2x2Ty, 4, 32, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m2x3_t", RvvUint32m2x3, RvvUint32m2x3Ty, 4, 32, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m2x4_t", RvvUint32m2x4, RvvUint32m2x4Ty, 4, 32, 4, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint32m4x2_t", RvvUint32m4x2, RvvUint32m4x2Ty, 8, 32, 2, false)
+
+//===- Int64 tuple types -------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x2_t", RvvInt64m1x2, RvvInt64m1x2Ty, 1, 64, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x3_t", RvvInt64m1x3, RvvInt64m1x3Ty, 1, 64, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x4_t", RvvInt64m1x4, RvvInt64m1x4Ty, 1, 64, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x5_t", RvvInt64m1x5, RvvInt64m1x5Ty, 1, 64, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x6_t", RvvInt64m1x6, RvvInt64m1x6Ty, 1, 64, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x7_t", RvvInt64m1x7, RvvInt64m1x7Ty, 1, 64, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x8_t", RvvInt64m1x8, RvvInt64m1x8Ty, 1, 64, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int64m2x2_t", RvvInt64m2x2, RvvInt64m2x2Ty, 2, 64, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m2x3_t", RvvInt64m2x3, RvvInt64m2x3Ty, 2, 64, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m2x4_t", RvvInt64m2x4, RvvInt64m2x4Ty, 2, 64, 4, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int64m4x2_t", RvvInt64m4x2, RvvInt64m4x2Ty, 4, 64, 2, true)
+
+//===- Uint64 tuple types -------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x2_t", RvvUint64m1x2, RvvUint64m1x2Ty, 1, 64, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x3_t", RvvUint64m1x3, RvvUint64m1x3Ty, 1, 64, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x4_t", RvvUint64m1x4, RvvUint64m1x4Ty, 1, 64, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x5_t", RvvUint64m1x5, RvvUint64m1x5Ty, 1, 64, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x6_t", RvvUint64m1x6, RvvUint64m1x6Ty, 1, 64, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x7_t", RvvUint64m1x7, RvvUint64m1x7Ty, 1, 64, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x8_t", RvvUint64m1x8, RvvUint64m1x8Ty, 1, 64, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint64m2x2_t", RvvUint64m2x2, RvvUint64m2x2Ty, 2, 64, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m2x3_t", RvvUint64m2x3, RvvUint64m2x3Ty, 2, 64, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m2x4_t", RvvUint64m2x4, RvvUint64m2x4Ty, 2, 64, 4, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint64m4x2_t", RvvUint64m4x2, RvvUint64m4x2Ty, 4, 64, 2, false)
+
+//===- Float16 tuple types --------------------------------------------------===//
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x2_t", RvvFloat16mf4x2, RvvFloat16mf4x2Ty, 1, 16, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x3_t", RvvFloat16mf4x3, RvvFloat16mf4x3Ty, 1, 16, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x4_t", RvvFloat16mf4x4, RvvFloat16mf4x4Ty, 1, 16, 4)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x5_t", RvvFloat16mf4x5, RvvFloat16mf4x5Ty, 1, 16, 5)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x6_t", RvvFloat16mf4x6, RvvFloat16mf4x6Ty, 1, 16, 6)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x7_t", RvvFloat16mf4x7, RvvFloat16mf4x7Ty, 1, 16, 7)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x8_t", RvvFloat16mf4x8, RvvFloat16mf4x8Ty, 1, 16, 8)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x2_t", RvvFloat16mf2x2, RvvFloat16mf2x2Ty, 2, 16, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x3_t", RvvFloat16mf2x3, RvvFloat16mf2x3Ty, 2, 16, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x4_t", RvvFloat16mf2x4, RvvFloat16mf2x4Ty, 2, 16, 4)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x5_t", RvvFloat16mf2x5, RvvFloat16mf2x5Ty, 2, 16, 5)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x6_t", RvvFloat16mf2x6, RvvFloat16mf2x6Ty, 2, 16, 6)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x7_t", RvvFloat16mf2x7, RvvFloat16mf2x7Ty, 2, 16, 7)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x8_t", RvvFloat16mf2x8, RvvFloat16mf2x8Ty, 2, 16, 8)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x2_t", RvvFloat16m1x2, RvvFloat16m1x2Ty, 4, 16, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x3_t", RvvFloat16m1x3, RvvFloat16m1x3Ty, 4, 16, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x4_t", RvvFloat16m1x4, RvvFloat16m1x4Ty, 4, 16, 4)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x5_t", RvvFloat16m1x5, RvvFloat16m1x5Ty, 4, 16, 5)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x6_t", RvvFloat16m1x6, RvvFloat16m1x6Ty, 4, 16, 6)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x7_t", RvvFloat16m1x7, RvvFloat16m1x7Ty, 4, 16, 7)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x8_t", RvvFloat16m1x8, RvvFloat16m1x8Ty, 4, 16, 8)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m2x2_t", RvvFloat16m2x2, RvvFloat16m2x2Ty, 8, 16, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m2x3_t", RvvFloat16m2x3, RvvFloat16m2x3Ty, 8, 16, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m2x4_t", RvvFloat16m2x4, RvvFloat16m2x4Ty, 8, 16, 4)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m4x2_t", RvvFloat16m4x2, RvvFloat16m4x2Ty, 16, 16, 2)
+
+//===- Float32 tuple types --------------------------------------------------===//
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x2_t", RvvFloat32mf2x2, RvvFloat32mf2x2Ty, 1, 32, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x3_t", RvvFloat32mf2x3, RvvFloat32mf2x3Ty, 1, 32, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x4_t", RvvFloat32mf2x4, RvvFloat32mf2x4Ty, 1, 32, 4)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x5_t", RvvFloat32mf2x5, RvvFloat32mf2x5Ty, 1, 32, 5)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x6_t", RvvFloat32mf2x6, RvvFloat32mf2x6Ty, 1, 32, 6)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x7_t", RvvFloat32mf2x7, RvvFloat32mf2x7Ty, 1, 32, 7)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x8_t", RvvFloat32mf2x8, RvvFloat32mf2x8Ty, 1, 32, 8)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x2_t", RvvFloat32m1x2, RvvFloat32m1x2Ty, 2, 32, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x3_t", RvvFloat32m1x3, RvvFloat32m1x3Ty, 2, 32, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x4_t", RvvFloat32m1x4, RvvFloat32m1x4Ty, 2, 32, 4)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x5_t", RvvFloat32m1x5, RvvFloat32m1x5Ty, 2, 32, 5)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x6_t", RvvFloat32m1x6, RvvFloat32m1x6Ty, 2, 32, 6)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x7_t", RvvFloat32m1x7, RvvFloat32m1x7Ty, 2, 32, 7)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x8_t", RvvFloat32m1x8, RvvFloat32m1x8Ty, 2, 32, 8)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m2x2_t", RvvFloat32m2x2, RvvFloat32m2x2Ty, 4, 32, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m2x3_t", RvvFloat32m2x3, RvvFloat32m2x3Ty, 4, 32, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m2x4_t", RvvFloat32m2x4, RvvFloat32m2x4Ty, 4, 32, 4)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m4x2_t", RvvFloat32m4x2, RvvFloat32m4x2Ty, 8, 32, 2)
+
+//===- Float64 tuple types -------------------------------------------------===//
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x2_t", RvvFloat64m1x2, RvvFloat64m1x2Ty, 1, 64, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x3_t", RvvFloat64m1x3, RvvFloat64m1x3Ty, 1, 64, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x4_t", RvvFloat64m1x4, RvvFloat64m1x4Ty, 1, 64, 4)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x5_t", RvvFloat64m1x5, RvvFloat64m1x5Ty, 1, 64, 5)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x6_t", RvvFloat64m1x6, RvvFloat64m1x6Ty, 1, 64, 6)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x7_t", RvvFloat64m1x7, RvvFloat64m1x7Ty, 1, 64, 7)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x8_t", RvvFloat64m1x8, RvvFloat64m1x8Ty, 1, 64, 8)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m2x2_t", RvvFloat64m2x2, RvvFloat64m2x2Ty, 2, 64, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m2x3_t", RvvFloat64m2x3, RvvFloat64m2x3Ty, 2, 64, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m2x4_t", RvvFloat64m2x4, RvvFloat64m2x4Ty, 2, 64, 4)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m4x2_t", RvvFloat64m4x2, RvvFloat64m4x2Ty, 4, 64, 2)
+
#undef RVV_VECTOR_TYPE_FLOAT
#undef RVV_VECTOR_TYPE_INT
#undef RVV_VECTOR_TYPE
diff --git a/contrib/llvm-project/clang/include/clang/Basic/SourceManager.h b/contrib/llvm-project/clang/include/clang/Basic/SourceManager.h
index a877308f0412..48c6b6611219 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/SourceManager.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/SourceManager.h
@@ -992,8 +992,7 @@ public:
if (OverriddenFilesInfo) {
if (OverriddenFilesInfo->OverriddenFilesWithBuffer.count(File))
return true;
- if (OverriddenFilesInfo->OverriddenFiles.find(File) !=
- OverriddenFilesInfo->OverriddenFiles.end())
+ if (OverriddenFilesInfo->OverriddenFiles.contains(File))
return true;
}
return false;
@@ -1686,7 +1685,7 @@ public:
fileinfo_iterator fileinfo_begin() const { return FileInfos.begin(); }
fileinfo_iterator fileinfo_end() const { return FileInfos.end(); }
bool hasFileInfo(const FileEntry *File) const {
- return FileInfos.find(File) != FileInfos.end();
+ return FileInfos.contains(File);
}
/// Print statistics to stderr.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Specifiers.h b/contrib/llvm-project/clang/include/clang/Basic/Specifiers.h
index a8c35fed9997..06279a016a50 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Specifiers.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Specifiers.h
@@ -19,6 +19,9 @@
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
+namespace llvm {
+class raw_ostream;
+} // namespace llvm
namespace clang {
/// Define the meaning of possible values of the kind in ExplicitSpecifier.
@@ -333,6 +336,8 @@ namespace clang {
// parameters are assumed to only get null on error.
NullableResult,
};
+ /// Prints human-readable debug representation.
+ llvm::raw_ostream &operator<<(llvm::raw_ostream&, NullabilityKind);
/// Return true if \p L has a weaker nullability annotation than \p R. The
/// ordering is: Unspecified < Nullable < NonNull.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td b/contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td
index eeec01dd8c84..4b31e06eb2cd 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td
@@ -50,7 +50,7 @@ def CXXCatchStmt : StmtNode<Stmt>;
def CXXTryStmt : StmtNode<Stmt>;
def CXXForRangeStmt : StmtNode<Stmt>;
-// C++ Coroutines TS statements
+// C++ Coroutines statements
def CoroutineBodyStmt : StmtNode<Stmt>;
def CoreturnStmt : StmtNode<Stmt>;
@@ -162,7 +162,7 @@ def LambdaExpr : StmtNode<Expr>;
def CXXFoldExpr : StmtNode<Expr>;
def CXXParenListInitExpr: StmtNode<Expr>;
-// C++ Coroutines TS expressions
+// C++ Coroutines expressions
def CoroutineSuspendExpr : StmtNode<Expr, 1>;
def CoawaitExpr : StmtNode<CoroutineSuspendExpr>;
def DependentCoawaitExpr : StmtNode<Expr>;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h b/contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h
index 2f94e839768c..8f7881abf26f 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h
@@ -48,11 +48,22 @@ namespace clang {
enum {
LastNEONBuiltin = NEON::FirstTSBuiltin - 1,
#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BI##ID,
#include "clang/Basic/BuiltinsSVE.def"
FirstTSBuiltin,
};
}
+ namespace SME {
+ enum {
+ LastSVEBuiltin = SVE::FirstTSBuiltin - 1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BI##ID,
+#include "clang/Basic/BuiltinsSME.def"
+ FirstTSBuiltin,
+ };
+ }
+
/// AArch64 builtins
namespace AArch64 {
enum {
@@ -60,6 +71,8 @@ namespace clang {
LastNEONBuiltin = NEON::FirstTSBuiltin - 1,
FirstSVEBuiltin = NEON::FirstTSBuiltin,
LastSVEBuiltin = SVE::FirstTSBuiltin - 1,
+ FirstSMEBuiltin = SVE::FirstTSBuiltin,
+ LastSMEBuiltin = SME::FirstTSBuiltin - 1,
#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
#include "clang/Basic/BuiltinsAArch64.def"
LastTSBuiltin
@@ -243,10 +256,10 @@ namespace clang {
};
SVETypeFlags(uint64_t F) : Flags(F) {
- EltTypeShift = llvm::countTrailingZeros(EltTypeMask);
- MemEltTypeShift = llvm::countTrailingZeros(MemEltTypeMask);
- MergeTypeShift = llvm::countTrailingZeros(MergeTypeMask);
- SplatOperandMaskShift = llvm::countTrailingZeros(SplatOperandMask);
+ EltTypeShift = llvm::countr_zero(EltTypeMask);
+ MemEltTypeShift = llvm::countr_zero(MemEltTypeMask);
+ MergeTypeShift = llvm::countr_zero(MergeTypeMask);
+ SplatOperandMaskShift = llvm::countr_zero(SplatOperandMask);
}
EltType getEltType() const {
@@ -288,10 +301,14 @@ namespace clang {
bool isInsertOp1SVALL() const { return Flags & IsInsertOp1SVALL; }
bool isGatherPrefetch() const { return Flags & IsGatherPrefetch; }
bool isReverseUSDOT() const { return Flags & ReverseUSDOT; }
+ bool isReverseMergeAnyBinOp() const { return Flags & ReverseMergeAnyBinOp; }
+ bool isReverseMergeAnyAccOp() const { return Flags & ReverseMergeAnyAccOp; }
bool isUndef() const { return Flags & IsUndef; }
bool isTupleCreate() const { return Flags & IsTupleCreate; }
bool isTupleGet() const { return Flags & IsTupleGet; }
bool isTupleSet() const { return Flags & IsTupleSet; }
+ bool isReadZA() const { return Flags & IsReadZA; }
+ bool isWriteZA() const { return Flags & IsWriteZA; }
uint64_t getBits() const { return Flags; }
bool isFlagSet(uint64_t Flag) const { return Flags & Flag; }
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h b/contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h
index e727f85edad7..c113a6a048ad 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h
@@ -19,8 +19,8 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
@@ -60,9 +60,7 @@ public:
static const auto &getSpelling(Kind ABIKind) {
return getSpellingMap().find(ABIKind)->second;
}
- static bool isABI(StringRef Name) {
- return getABIMap().find(Name) != getABIMap().end();
- }
+ static bool isABI(StringRef Name) { return getABIMap().contains(Name); }
// Return true if this target should use the relative vtables C++ ABI by
// default.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetID.h b/contrib/llvm-project/clang/include/clang/Basic/TargetID.h
index f1922942804e..cef9cb5f0fb2 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetID.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetID.h
@@ -11,7 +11,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/TargetParser/Triple.h"
#include <optional>
#include <set>
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h b/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h
index bb2a453f4657..41ef47eb565b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h
@@ -29,12 +29,13 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/VersionTuple.h"
+#include "llvm/TargetParser/Triple.h"
#include <cassert>
#include <optional>
#include <string>
@@ -202,7 +203,7 @@ enum OpenCLTypeKind : uint8_t {
/// Exposes information about the current target.
///
-class TargetInfo : public virtual TransferrableTargetInfo,
+class TargetInfo : public TransferrableTargetInfo,
public RefCountedBase<TargetInfo> {
std::shared_ptr<TargetOptions> TargetOpts;
llvm::Triple Triple;
@@ -219,13 +220,15 @@ protected:
bool HasFloat128;
bool HasFloat16;
bool HasBFloat16;
+ bool HasFullBFloat16; // True if the backend supports native bfloat16
+ // arithmetic. Used to determine excess precision
+ // support in the frontend.
bool HasIbm128;
bool HasLongDouble;
bool HasFPReturn;
bool HasStrictFP;
unsigned char MaxAtomicPromoteWidth, MaxAtomicInlineWidth;
- unsigned short SimdDefaultAlign;
std::string DataLayoutString;
const char *UserLabelPrefix;
const char *MCountName;
@@ -265,6 +268,12 @@ protected:
// as a DataLayout object.
void resetDataLayout(StringRef DL, const char *UserLabelPrefix = "");
+ // Target features that are read-only and should not be disabled/enabled
+ // by command line options. Such features are for emitting predefined
+ // macros or checking availability of builtin functions and can be omitted
+ // in function attributes in IR.
+ llvm::StringSet<> ReadOnlyFeatures;
+
public:
/// Construct a target for the given options.
///
@@ -649,7 +658,13 @@ public:
virtual bool hasFloat16Type() const { return HasFloat16; }
/// Determine whether the _BFloat16 type is supported on this target.
- virtual bool hasBFloat16Type() const { return HasBFloat16; }
+ virtual bool hasBFloat16Type() const {
+ return HasBFloat16 || HasFullBFloat16;
+ }
+
+  /// Determine whether the BFloat type is fully supported on this target, i.e.
+  /// arithmetic operations.
+ virtual bool hasFullBFloat16Type() const { return HasFullBFloat16; }
/// Determine whether the __ibm128 type is supported on this target.
virtual bool hasIbm128Type() const { return HasIbm128; }
@@ -757,9 +772,7 @@ public:
}
/// Return the mangled code of bfloat.
- virtual const char *getBFloat16Mangling() const {
- llvm_unreachable("bfloat not implemented on this target");
- }
+ virtual const char *getBFloat16Mangling() const { return "DF16b"; }
/// Return the value for the C99 FLT_EVAL_METHOD macro.
virtual LangOptions::FPEvalMethodKind getFPEvalMethod() const {
@@ -794,10 +807,6 @@ public:
/// Return the maximum vector alignment supported for the given target.
unsigned getMaxVectorAlign() const { return MaxVectorAlign; }
- /// Return default simd alignment for the given target. Generally, this
- /// value is type-specific, but this alignment can be used for most of the
- /// types for the given target.
- unsigned getSimdDefaultAlign() const { return SimdDefaultAlign; }
unsigned getMaxOpenCLWorkGroupSize() const { return MaxOpenCLWorkGroupSize; }
@@ -1183,7 +1192,7 @@ public:
}
/// Returns a string of target-specific clobbers, in LLVM format.
- virtual const char *getClobbers() const = 0;
+ virtual std::string_view getClobbers() const = 0;
/// Returns true if NaN encoding is IEEE 754-2008.
/// Only MIPS allows a different encoding.
@@ -1338,12 +1347,16 @@ public:
}
/// Returns true if feature has an impact on target code
- /// generation and get its dependent options in second argument.
- virtual bool getFeatureDepOptions(StringRef Feature,
- std::string &Options) const {
+ /// generation.
+ virtual bool doesFeatureAffectCodeGen(StringRef Feature) const {
return true;
}
+ /// For given feature return dependent ones.
+ virtual StringRef getFeatureDependencies(StringRef Feature) const {
+ return StringRef();
+ }
+
struct BranchProtectionInfo {
LangOptions::SignReturnAddressScopeKind SignReturnAddr =
LangOptions::SignReturnAddressScopeKind::None;
@@ -1388,6 +1401,11 @@ public:
return false;
}
+ /// Determine whether the given target feature is read only.
+ bool isReadOnlyFeature(StringRef Feature) const {
+ return ReadOnlyFeatures.count(Feature);
+ }
+
/// Identify whether this target supports multiversioning of functions,
/// which requires support for cpu_supports and cpu_is functionality.
bool supportsMultiVersioning() const {
@@ -1705,6 +1723,9 @@ public:
: std::optional<VersionTuple>();
}
+ /// Whether to support HIP image/texture API's.
+ virtual bool hasHIPImageSupport() const { return true; }
+
protected:
/// Copy type and layout related info.
void copyAuxTarget(const TargetInfo *Aux);
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetOptions.h b/contrib/llvm-project/clang/include/clang/Basic/TargetOptions.h
index f9e5cedbafcd..b192c856384b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetOptions.h
@@ -45,7 +45,7 @@ public:
std::string ABI;
/// The EABI version to use
- llvm::EABI EABIVersion;
+ llvm::EABI EABIVersion = llvm::EABI::Default;
/// If given, the version string of the linker in use.
std::string LinkerVersion;
@@ -88,7 +88,20 @@ public:
COV_5 = 500,
};
/// \brief Code object version for AMDGPU.
- CodeObjectVersionKind CodeObjectVersion;
+ CodeObjectVersionKind CodeObjectVersion = CodeObjectVersionKind::COV_None;
+
+ /// \brief Enumeration values for AMDGPU printf lowering scheme
+ enum class AMDGPUPrintfKind {
+ /// printf lowering scheme involving hostcalls, currently used by HIP
+ /// programs by default
+ Hostcall = 0,
+
+ /// printf lowering scheme involving implicit printf buffers,
+ Buffered = 1,
+ };
+
+ /// \brief AMDGPU Printf lowering scheme
+ AMDGPUPrintfKind AMDGPUPrintfKindVal = AMDGPUPrintfKind::Hostcall;
// The code model to be used as specified by the user. Corresponds to
// CodeModel::Model enum defined in include/llvm/Support/CodeGen.h, plus
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Thunk.h b/contrib/llvm-project/clang/include/clang/Basic/Thunk.h
index 91088be6ae73..0247e279408f 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Thunk.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Thunk.h
@@ -26,7 +26,7 @@ class CXXMethodDecl;
struct ReturnAdjustment {
/// The non-virtual adjustment from the derived object to its
/// nearest virtual base.
- int64_t NonVirtual;
+ int64_t NonVirtual = 0;
/// Holds the ABI-specific information about the virtual return
/// adjustment, if needed.
@@ -64,7 +64,7 @@ struct ReturnAdjustment {
}
} Virtual;
- ReturnAdjustment() : NonVirtual(0) {}
+ ReturnAdjustment() = default;
bool isEmpty() const { return !NonVirtual && Virtual.isEmpty(); }
@@ -91,7 +91,7 @@ struct ReturnAdjustment {
struct ThisAdjustment {
/// The non-virtual adjustment from the derived object to its
/// nearest virtual base.
- int64_t NonVirtual;
+ int64_t NonVirtual = 0;
/// Holds the ABI-specific information about the virtual this
/// adjustment, if needed.
@@ -131,7 +131,7 @@ struct ThisAdjustment {
}
} Virtual;
- ThisAdjustment() : NonVirtual(0) {}
+ ThisAdjustment() = default;
bool isEmpty() const { return !NonVirtual && Virtual.isEmpty(); }
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def
index 96feae991ccb..ef0dad0f2dcd 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def
@@ -85,6 +85,9 @@
#ifndef PRAGMA_ANNOTATION
#define PRAGMA_ANNOTATION(X) ANNOTATION(X)
#endif
+#ifndef INTERESTING_IDENTIFIER
+#define INTERESTING_IDENTIFIER(X)
+#endif
//===----------------------------------------------------------------------===//
// Preprocessor keywords.
@@ -394,12 +397,12 @@ CXX11_KEYWORD(nullptr , KEYC2X)
CXX11_KEYWORD(static_assert , KEYMSCOMPAT|KEYC2X)
CXX11_KEYWORD(thread_local , KEYC2X)
-// C++20 / coroutines TS keywords
+// C++20 / coroutines keywords
COROUTINES_KEYWORD(co_await)
COROUTINES_KEYWORD(co_return)
COROUTINES_KEYWORD(co_yield)
-// C++ modules TS keywords
+// C++20 keywords
MODULES_KEYWORD(module)
MODULES_KEYWORD(import)
@@ -436,7 +439,9 @@ KEYWORD(__attribute , KEYALL)
KEYWORD(__builtin_choose_expr , KEYALL)
KEYWORD(__builtin_offsetof , KEYALL)
KEYWORD(__builtin_FILE , KEYALL)
+KEYWORD(__builtin_FILE_NAME , KEYALL)
KEYWORD(__builtin_FUNCTION , KEYALL)
+KEYWORD(__builtin_FUNCSIG , KEYMS)
KEYWORD(__builtin_LINE , KEYALL)
KEYWORD(__builtin_COLUMN , KEYALL)
KEYWORD(__builtin_source_location , KEYCXX)
@@ -518,11 +523,13 @@ TYPE_TRAIT_1(__has_unique_object_representations,
// Clang-only C++ Type Traits
TYPE_TRAIT_1(__is_trivially_relocatable, IsTriviallyRelocatable, KEYCXX)
+TYPE_TRAIT_1(__is_trivially_equality_comparable, IsTriviallyEqualityComparable, KEYCXX)
TYPE_TRAIT_1(__is_bounded_array, IsBoundedArray, KEYCXX)
TYPE_TRAIT_1(__is_unbounded_array, IsUnboundedArray, KEYCXX)
TYPE_TRAIT_1(__is_nullptr, IsNullPointer, KEYCXX)
TYPE_TRAIT_1(__is_scoped_enum, IsScopedEnum, KEYCXX)
TYPE_TRAIT_1(__is_referenceable, IsReferenceable, KEYCXX)
+TYPE_TRAIT_1(__can_pass_in_regs, CanPassInRegs, KEYCXX)
TYPE_TRAIT_2(__reference_binds_to_temporary, ReferenceBindsToTemporary, KEYCXX)
// Embarcadero Expression Traits
@@ -678,6 +685,9 @@ KEYWORD(_Nullable , KEYALL)
KEYWORD(_Nullable_result , KEYALL)
KEYWORD(_Null_unspecified , KEYALL)
+// WebAssembly Type Extension
+KEYWORD(__funcref , KEYALL)
+
// Microsoft extensions which should be disabled in strict conformance mode
KEYWORD(__ptr64 , KEYMS)
KEYWORD(__ptr32 , KEYMS)
@@ -741,6 +751,12 @@ KEYWORD(__builtin_bit_cast , KEYALL)
KEYWORD(__builtin_available , KEYALL)
KEYWORD(__builtin_sycl_unique_stable_name, KEYSYCL)
+// Keywords defined by Attr.td.
+#ifndef KEYWORD_ATTRIBUTE
+#define KEYWORD_ATTRIBUTE(X) KEYWORD(X, KEYALL)
+#endif
+#include "clang/Basic/AttrTokenKinds.inc"
+
// Clang-specific keywords enabled only in testing.
TESTING_KEYWORD(__unknown_anytype , KEYALL)
@@ -781,6 +797,17 @@ OBJC_AT_KEYWORD(dynamic)
OBJC_AT_KEYWORD(import)
OBJC_AT_KEYWORD(available)
+//===----------------------------------------------------------------------===//
+// Interesting identifiers.
+//===----------------------------------------------------------------------===//
+INTERESTING_IDENTIFIER(not_interesting)
+INTERESTING_IDENTIFIER(FILE)
+INTERESTING_IDENTIFIER(jmp_buf)
+INTERESTING_IDENTIFIER(sigjmp_buf)
+INTERESTING_IDENTIFIER(ucontext_t)
+INTERESTING_IDENTIFIER(float_t)
+INTERESTING_IDENTIFIER(double_t)
+
// TODO: What to do about context-sensitive keywords like:
// bycopy/byref/in/inout/oneway/out?
@@ -936,6 +963,9 @@ ANNOTATION(module_end)
// into the name of a header unit.
ANNOTATION(header_unit)
+// Annotation for end of input in clang-repl.
+ANNOTATION(repl_input_end)
+
#undef PRAGMA_ANNOTATION
#undef ANNOTATION
#undef TESTING_KEYWORD
@@ -958,3 +988,4 @@ ANNOTATION(header_unit)
#undef TOK
#undef C99_KEYWORD
#undef C2X_KEYWORD
+#undef INTERESTING_IDENTIFIER
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h
index 6b7006651f4e..e4857405bc7f 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h
@@ -44,6 +44,14 @@ enum ObjCKeywordKind {
NUM_OBJC_KEYWORDS
};
+/// Provides a namespace for interesting identifiers such as float_t and
+/// double_t.
+enum InterestingIdentifierKind {
+#define INTERESTING_IDENTIFIER(X) X,
+#include "clang/Basic/TokenKinds.def"
+ NUM_INTERESTING_IDENTIFIERS
+};
+
/// Defines the possible values of an on-off-switch (C99 6.10.6p2).
enum OnOffSwitch {
OOS_ON, OOS_OFF, OOS_DEFAULT
@@ -99,6 +107,13 @@ bool isAnnotation(TokenKind K);
/// Return true if this is an annotation token representing a pragma.
bool isPragmaAnnotation(TokenKind K);
+inline constexpr bool isRegularKeywordAttribute(TokenKind K) {
+ return (false
+#define KEYWORD_ATTRIBUTE(X) || (K == tok::kw_##X)
+#include "clang/Basic/AttrTokenKinds.inc"
+ );
+}
+
} // end namespace tok
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td b/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td
index f8557d02e5bd..649b071cebb9 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td
@@ -3,7 +3,7 @@ include "clang/Basic/ASTNode.td"
class TypeNode<TypeNode base, bit abstract = 0> : ASTNode {
TypeNode Base = base;
bit Abstract = abstract;
-}
+}
/// A type node that is only used to represent dependent types in C++. For
/// example, DependentTemplateSpecializationType is used to represent types
diff --git a/contrib/llvm-project/clang/include/clang/Basic/WebAssemblyReferenceTypes.def b/contrib/llvm-project/clang/include/clang/Basic/WebAssemblyReferenceTypes.def
new file mode 100644
index 000000000000..7c83da15150c
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/WebAssemblyReferenceTypes.def
@@ -0,0 +1,40 @@
+//===-- WebAssemblyReferenceTypes.def - Wasm reference types ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines externref_t. The macros are:
+//
+// WASM_TYPE(Name, Id, SingletonId)
+// WASM_REF_TYPE(Name, MangledNameBase, Id, SingletonId, AS)
+//
+// where:
+//
+// - Name is the name of the builtin type.
+//
+// - MangledNameBase is the base used for name mangling.
+//
+// - BuiltinType::Id is the enumerator defining the type.
+//
+// - Context.SingletonId is the global singleton of this type.
+//
+// - AS indicates the address space for values of this type.
+//
+// To include this file, define either WASM_REF_TYPE or WASM_TYPE, depending on
+// how much information you want. The macros will be undefined after inclusion.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef WASM_REF_TYPE
+#define WASM_REF_TYPE(Name, MangledNameBase, Id, SingletonId, AS) \
+ WASM_TYPE(Name, Id, SingletonId)
+#endif
+
+WASM_REF_TYPE("__externref_t", "externref_t", WasmExternRef, WasmExternRefTy, 10)
+
+#undef WASM_TYPE
+#undef WASM_REF_TYPE
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td b/contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td
index d837a7666d40..f70c7221f8d6 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td
@@ -1,4 +1,4 @@
-//===--- arm_fp16.td - ARM BF16 compiler interface ------------------------===//
+//===--- arm_bf16.td - ARM BF16 compiler interface ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td b/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td
index 94dfe80acc35..ba3764d2f778 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td
@@ -2086,3 +2086,9 @@ let ArchGuard = "defined(__aarch64__)", TargetGuard = "bf16" in {
"csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk", "bQb">;
}
}
+
+// v8.9a/v9.4a LRCPC3 intrinsics
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "rcpc3" in {
+ def VLDAP1_LANE : WInst<"vldap1_lane", ".(c*!).I", "QUlQlUlldQdPlQPl">;
+ def VSTL1_LANE : WInst<"vstl1_lane", "v*(.!)I", "QUlQlUlldQdPlQPl">;
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_sme.td b/contrib/llvm-project/clang/include/clang/Basic/arm_sme.td
new file mode 100644
index 000000000000..b950f5cb8acc
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_sme.td
@@ -0,0 +1,259 @@
+//===--- arm_sme.td - ARM SME compiler interface ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TableGen definitions from which the ARM SME header
+// file will be generated. See:
+//
+// https://developer.arm.com/architectures/system-architectures/software-standards/acle
+//
+//===----------------------------------------------------------------------===//
+
+include "arm_sve_sme_incl.td"
+
+////////////////////////////////////////////////////////////////////////////////
+// Loads
+
+multiclass ZALoad<string n_suffix, string t, string i_prefix, list<ImmCheck> ch> {
+ let TargetGuard = "sme" in {
+ def NAME # _H : MInst<"svld1_hor_" # n_suffix, "vimiPQ", t,
+ [IsLoad, IsOverloadNone, IsStreaming, IsSharedZA],
+ MemEltTyDefault, i_prefix # "_horiz", ch>;
+
+ def NAME # _H_VNUM : MInst<"svld1_hor_vnum_" # n_suffix, "vimiPQl", t,
+ [IsLoad, IsOverloadNone, IsStreaming, IsSharedZA],
+ MemEltTyDefault, i_prefix # "_horiz", ch>;
+
+ def NAME # _V : MInst<"svld1_ver_" # n_suffix, "vimiPQ", t,
+ [IsLoad, IsOverloadNone, IsStreaming, IsSharedZA],
+ MemEltTyDefault, i_prefix # "_vert", ch>;
+
+ def NAME # _V_VNUM : MInst<"svld1_ver_vnum_" # n_suffix, "vimiPQl", t,
+ [IsLoad, IsOverloadNone, IsStreaming, IsSharedZA],
+ MemEltTyDefault, i_prefix # "_vert", ch>;
+ }
+}
+
+defm SVLD1_ZA8 : ZALoad<"za8", "c", "aarch64_sme_ld1b", [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_15>]>;
+defm SVLD1_ZA16 : ZALoad<"za16", "s", "aarch64_sme_ld1h", [ImmCheck<0, ImmCheck0_1>, ImmCheck<2, ImmCheck0_7>]>;
+defm SVLD1_ZA32 : ZALoad<"za32", "i", "aarch64_sme_ld1w", [ImmCheck<0, ImmCheck0_3>, ImmCheck<2, ImmCheck0_3>]>;
+defm SVLD1_ZA64 : ZALoad<"za64", "l", "aarch64_sme_ld1d", [ImmCheck<0, ImmCheck0_7>, ImmCheck<2, ImmCheck0_1>]>;
+defm SVLD1_ZA128 : ZALoad<"za128", "q", "aarch64_sme_ld1q", [ImmCheck<0, ImmCheck0_15>, ImmCheck<2, ImmCheck0_0>]>;
+
+def SVLDR_VNUM_ZA : MInst<"svldr_vnum_za", "vmiQ", "",
+ [IsOverloadNone, IsStreamingCompatible, IsSharedZA],
+ MemEltTyDefault, "aarch64_sme_ldr",
+ [ImmCheck<1, ImmCheck0_15>]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// Stores
+
+multiclass ZAStore<string n_suffix, string t, string i_prefix, list<ImmCheck> ch> {
+ let TargetGuard = "sme" in {
+ def NAME # _H : MInst<"svst1_hor_" # n_suffix, "vimiP%", t,
+ [IsStore, IsOverloadNone, IsStreaming, IsSharedZA, IsPreservesZA],
+ MemEltTyDefault, i_prefix # "_horiz", ch>;
+
+ def NAME # _H_VNUM : MInst<"svst1_hor_vnum_" # n_suffix, "vimiP%l", t,
+ [IsStore, IsOverloadNone, IsStreaming, IsSharedZA, IsPreservesZA],
+ MemEltTyDefault, i_prefix # "_horiz", ch>;
+
+ def NAME # _V : MInst<"svst1_ver_" # n_suffix, "vimiP%", t,
+ [IsStore, IsOverloadNone, IsStreaming, IsSharedZA, IsPreservesZA],
+ MemEltTyDefault, i_prefix # "_vert", ch>;
+
+ def NAME # _V_VNUM : MInst<"svst1_ver_vnum_" # n_suffix, "vimiP%l", t,
+ [IsStore, IsOverloadNone, IsStreaming, IsSharedZA, IsPreservesZA],
+ MemEltTyDefault, i_prefix # "_vert", ch>;
+ }
+}
+
+defm SVST1_ZA8 : ZAStore<"za8", "c", "aarch64_sme_st1b", [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_15>]>;
+defm SVST1_ZA16 : ZAStore<"za16", "s", "aarch64_sme_st1h", [ImmCheck<0, ImmCheck0_1>, ImmCheck<2, ImmCheck0_7>]>;
+defm SVST1_ZA32 : ZAStore<"za32", "i", "aarch64_sme_st1w", [ImmCheck<0, ImmCheck0_3>, ImmCheck<2, ImmCheck0_3>]>;
+defm SVST1_ZA64 : ZAStore<"za64", "l", "aarch64_sme_st1d", [ImmCheck<0, ImmCheck0_7>, ImmCheck<2, ImmCheck0_1>]>;
+defm SVST1_ZA128 : ZAStore<"za128", "q", "aarch64_sme_st1q", [ImmCheck<0, ImmCheck0_15>, ImmCheck<2, ImmCheck0_0>]>;
+
+def SVSTR_VNUM_ZA : MInst<"svstr_vnum_za", "vmi%", "",
+ [IsOverloadNone, IsStreamingCompatible, IsSharedZA, IsPreservesZA],
+ MemEltTyDefault, "aarch64_sme_str",
+ [ImmCheck<1, ImmCheck0_15>]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// Read horizontal/vertical ZA slices
+
+multiclass ZARead<string n_suffix, string t, string i_prefix, list<ImmCheck> ch> {
+ let TargetGuard = "sme" in {
+ def NAME # _H : SInst<"svread_hor_" # n_suffix # "[_{d}]", "ddPimi", t,
+ MergeOp1, i_prefix # "_horiz",
+ [IsReadZA, IsStreaming, IsSharedZA, IsPreservesZA], ch>;
+
+ def NAME # _V : SInst<"svread_ver_" # n_suffix # "[_{d}]", "ddPimi", t,
+ MergeOp1, i_prefix # "_vert",
+ [IsReadZA, IsStreaming, IsSharedZA, IsPreservesZA], ch>;
+ }
+}
+
+defm SVREAD_ZA8 : ZARead<"za8", "cUc", "aarch64_sme_read", [ImmCheck<2, ImmCheck0_0>, ImmCheck<4, ImmCheck0_15>]>;
+defm SVREAD_ZA16 : ZARead<"za16", "sUshb", "aarch64_sme_read", [ImmCheck<2, ImmCheck0_1>, ImmCheck<4, ImmCheck0_7>]>;
+defm SVREAD_ZA32 : ZARead<"za32", "iUif", "aarch64_sme_read", [ImmCheck<2, ImmCheck0_3>, ImmCheck<4, ImmCheck0_3>]>;
+defm SVREAD_ZA64 : ZARead<"za64", "lUld", "aarch64_sme_read", [ImmCheck<2, ImmCheck0_7>, ImmCheck<4, ImmCheck0_1>]>;
+defm SVREAD_ZA128 : ZARead<"za128", "csilUcUsUiUlhbfd", "aarch64_sme_readq", [ImmCheck<2, ImmCheck0_15>, ImmCheck<4, ImmCheck0_0>]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// Write horizontal/vertical ZA slices
+
+multiclass ZAWrite<string n_suffix, string t, string i_prefix, list<ImmCheck> ch> {
+ let TargetGuard = "sme" in {
+ def NAME # _H : SInst<"svwrite_hor_" # n_suffix # "[_{d}]", "vimiPd", t,
+ MergeOp1, i_prefix # "_horiz",
+ [IsWriteZA, IsStreaming, IsSharedZA], ch>;
+
+ def NAME # _V : SInst<"svwrite_ver_" # n_suffix # "[_{d}]", "vimiPd", t,
+ MergeOp1, i_prefix # "_vert",
+ [IsWriteZA, IsStreaming, IsSharedZA], ch>;
+ }
+}
+
+defm SVWRITE_ZA8 : ZAWrite<"za8", "cUc", "aarch64_sme_write", [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_15>]>;
+defm SVWRITE_ZA16 : ZAWrite<"za16", "sUshb", "aarch64_sme_write", [ImmCheck<0, ImmCheck0_1>, ImmCheck<2, ImmCheck0_7>]>;
+defm SVWRITE_ZA32 : ZAWrite<"za32", "iUif", "aarch64_sme_write", [ImmCheck<0, ImmCheck0_3>, ImmCheck<2, ImmCheck0_3>]>;
+defm SVWRITE_ZA64 : ZAWrite<"za64", "lUld", "aarch64_sme_write", [ImmCheck<0, ImmCheck0_7>, ImmCheck<2, ImmCheck0_1>]>;
+defm SVWRITE_ZA128 : ZAWrite<"za128", "csilUcUsUiUlhbfd", "aarch64_sme_writeq", [ImmCheck<0, ImmCheck0_15>, ImmCheck<2, ImmCheck0_0>]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// SME - Zero
+
+let TargetGuard = "sme" in {
+ def SVZERO_MASK_ZA : SInst<"svzero_mask_za", "vi", "", MergeNone, "aarch64_sme_zero",
+ [IsOverloadNone, IsStreamingCompatible, IsSharedZA],
+ [ImmCheck<0, ImmCheck0_255>]>;
+ def SVZERO_ZA : SInst<"svzero_za", "v", "", MergeNone, "aarch64_sme_zero",
+ [IsOverloadNone, IsStreamingCompatible, IsSharedZA]>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SME - Counting elements in a streaming vector
+
+multiclass ZACount<string n_suffix> {
+ let TargetGuard = "sme" in {
+ def NAME : SInst<"sv" # n_suffix, "nv", "", MergeNone,
+ "aarch64_sme_" # n_suffix,
+ [IsOverloadNone, IsStreamingCompatible, IsPreservesZA]>;
+ }
+}
+
+defm SVCNTSB : ZACount<"cntsb">;
+defm SVCNTSH : ZACount<"cntsh">;
+defm SVCNTSW : ZACount<"cntsw">;
+defm SVCNTSD : ZACount<"cntsd">;
+
+////////////////////////////////////////////////////////////////////////////////
+// SME - ADDHA/ADDVA
+
+multiclass ZAAdd<string n_suffix> {
+ let TargetGuard = "sme" in {
+ def NAME # _ZA32: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPd", "iUi", MergeOp1,
+ "aarch64_sme_" # n_suffix, [IsStreaming, IsSharedZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
+ }
+
+ let TargetGuard = "sme-i16i64" in {
+ def NAME # _ZA64: SInst<"sv" # n_suffix # "_za64[_{d}]", "viPPd", "lUl", MergeOp1,
+ "aarch64_sme_" # n_suffix, [IsStreaming, IsSharedZA],
+ [ImmCheck<0, ImmCheck0_7>]>;
+ }
+}
+
+defm SVADDHA : ZAAdd<"addha">;
+defm SVADDVA : ZAAdd<"addva">;
+
+////////////////////////////////////////////////////////////////////////////////
+// SME - SMOPA, SMOPS, UMOPA, UMOPS
+
+multiclass ZAIntOuterProd<string n_suffix1, string n_suffix2> {
+ let TargetGuard = "sme" in {
+ def NAME # _ZA32_B: SInst<"sv" # n_suffix2 # "_za32[_{d}]",
+ "viPPdd", !cond(!eq(n_suffix1, "s") : "", true: "U") # "c",
+ MergeOp1, "aarch64_sme_" # n_suffix1 # n_suffix2 # "_wide",
+ [IsStreaming, IsSharedZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
+ }
+
+ let TargetGuard = "sme-i16i64" in {
+ def NAME # _ZA64_H: SInst<"sv" # n_suffix2 # "_za64[_{d}]",
+ "viPPdd", !cond(!eq(n_suffix1, "s") : "", true: "U") # "s",
+ MergeOp1, "aarch64_sme_" # n_suffix1 # n_suffix2 # "_wide",
+ [IsStreaming, IsSharedZA],
+ [ImmCheck<0, ImmCheck0_7>]>;
+ }
+}
+
+defm SVSMOPA : ZAIntOuterProd<"s", "mopa">;
+defm SVSMOPS : ZAIntOuterProd<"s", "mops">;
+defm SVUMOPA : ZAIntOuterProd<"u", "mopa">;
+defm SVUMOPS : ZAIntOuterProd<"u", "mops">;
+
+////////////////////////////////////////////////////////////////////////////////
+// SME - SUMOPA, SUMOPS, USMOPA, USMOPS
+
+multiclass ZAIntOuterProdMixedSigns<string n_suffix1, string n_suffix2> {
+ let TargetGuard = "sme" in {
+ def NAME # _ZA32_B: SInst<"sv" # n_suffix1 # n_suffix2 # "_za32[_{d}]",
+ "viPPd" # !cond(!eq(n_suffix1, "su") : "u", true: "x"),
+ !cond(!eq(n_suffix1, "su") : "", true: "U") # "c",
+ MergeOp1, "aarch64_sme_" # n_suffix1 # n_suffix2 # "_wide",
+ [IsStreaming, IsSharedZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
+ }
+
+ let TargetGuard = "sme-i16i64" in {
+ def NAME # _ZA64_H: SInst<"sv" # n_suffix1 # n_suffix2 # "_za64[_{d}]",
+ "viPPd" # !cond(!eq(n_suffix1, "su") : "u", true: "x"),
+ !cond(!eq(n_suffix1, "su") : "", true: "U") # "s",
+ MergeOp1, "aarch64_sme_" # n_suffix1 # n_suffix2 # "_wide",
+ [IsStreaming, IsSharedZA],
+ [ImmCheck<0, ImmCheck0_7>]>;
+ }
+}
+
+defm SVSUMOPA : ZAIntOuterProdMixedSigns<"su", "mopa">;
+defm SVSUMOPS : ZAIntOuterProdMixedSigns<"su", "mops">;
+defm SVUSMOPA : ZAIntOuterProdMixedSigns<"us", "mopa">;
+defm SVUSMOPS : ZAIntOuterProdMixedSigns<"us", "mops">;
+
+////////////////////////////////////////////////////////////////////////////////
+// SME - FMOPA, FMOPS
+
+multiclass ZAFPOuterProd<string n_suffix> {
+ let TargetGuard = "sme" in {
+ def NAME # _ZA32_B: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPdd", "h",
+ MergeOp1, "aarch64_sme_" # n_suffix # "_wide",
+ [IsStreaming, IsSharedZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
+
+ def NAME # _ZA32_H: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPdd", "b",
+ MergeOp1, "aarch64_sme_" # n_suffix # "_wide",
+ [IsStreaming, IsSharedZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
+
+ def NAME # _ZA32_S: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPdd", "f",
+ MergeOp1, "aarch64_sme_" # n_suffix,
+ [IsStreaming, IsSharedZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
+ }
+
+ let TargetGuard = "sme-f64f64" in {
+ def NAME # _ZA64_D: SInst<"sv" # n_suffix # "_za64[_{d}]", "viPPdd", "d",
+ MergeOp1, "aarch64_sme_" # n_suffix,
+ [IsStreaming, IsSharedZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
+ }
+}
+
+defm SVMOPA : ZAFPOuterProd<"mopa">;
+defm SVMOPS : ZAFPOuterProd<"mops">;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td b/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td
index e547bbd34b5e..894a0a1296b0 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td
@@ -13,251 +13,7 @@
//
//===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// Instruction definitions
-//===----------------------------------------------------------------------===//
-// Every intrinsic subclasses "Inst". An intrinsic has a name, a prototype and
-// a sequence of typespecs.
-//
-// The name is the base name of the intrinsic, for example "svld1". This is
-// then mangled by the tblgen backend to add type information ("svld1_s16").
-//
-// A typespec is a sequence of uppercase characters (modifiers) followed by one
-// lowercase character. A typespec encodes a particular "base type" of the
-// intrinsic.
-//
-// An example typespec is "Us" - unsigned short - svuint16_t. The available
-// typespec codes are given below.
-//
-// The string given to an Inst class is a sequence of typespecs. The intrinsic
-// is instantiated for every typespec in the sequence. For example "sdUsUd".
-//
-// The prototype is a string that defines the return type of the intrinsic
-// and the type of each argument. The return type and every argument gets a
-// "modifier" that can change in some way the "base type" of the intrinsic.
-//
-// The modifier 'd' means "default" and does not modify the base type in any
-// way. The available modifiers are given below.
-//
-// Typespecs
-// ---------
-// c: char
-// s: short
-// i: int
-// l: long
-// f: float
-// h: half-float
-// d: double
-// b: bfloat
-
-// Typespec modifiers
-// ------------------
-// P: boolean
-// U: unsigned
-
-// Prototype modifiers
-// -------------------
-// prototype: return (arg, arg, ...)
-//
-// 2,3,4: array of default vectors
-// v: void
-// x: vector of signed integers
-// u: vector of unsigned integers
-// d: default
-// c: const pointer type
-// P: predicate type
-// s: scalar of element type
-// a: scalar of element type (splat to vector type)
-// R: scalar of 1/2 width element type (splat to vector type)
-// r: scalar of 1/4 width element type (splat to vector type)
-// @: unsigned scalar of 1/4 width element type (splat to vector type)
-// e: 1/2 width unsigned elements, 2x element count
-// b: 1/4 width unsigned elements, 4x element count
-// h: 1/2 width elements, 2x element count
-// q: 1/4 width elements, 4x element count
-// o: 4x width elements, 1/4 element count
-//
-// w: vector of element type promoted to 64bits, vector maintains
-// signedness of its element type.
-// f: element type promoted to uint64_t (splat to vector type)
-// j: element type promoted to 64bits (splat to vector type)
-// K: element type bitcast to a signed integer (splat to vector type)
-// L: element type bitcast to an unsigned integer (splat to vector type)
-//
-// i: constant uint64_t
-// k: int32_t
-// l: int64_t
-// m: uint32_t
-// n: uint64_t
-
-// t: svint32_t
-// z: svuint32_t
-// g: svuint64_t
-// O: svfloat16_t
-// M: svfloat32_t
-// N: svfloat64_t
-
-// J: Prefetch type (sv_prfop)
-// A: pointer to int8_t
-// B: pointer to int16_t
-// C: pointer to int32_t
-// D: pointer to int64_t
-
-// E: pointer to uint8_t
-// F: pointer to uint16_t
-// G: pointer to uint32_t
-// H: pointer to uint64_t
-
-// Q: const pointer to void
-
-// S: const pointer to int8_t
-// T: const pointer to int16_t
-// U: const pointer to int32_t
-// V: const pointer to int64_t
-//
-// W: const pointer to uint8_t
-// X: const pointer to uint16_t
-// Y: const pointer to uint32_t
-// Z: const pointer to uint64_t
-
-class MergeType<int val, string suffix=""> {
- int Value = val;
- string Suffix = suffix;
-}
-def MergeNone : MergeType<0>;
-def MergeAny : MergeType<1, "_x">;
-def MergeOp1 : MergeType<2, "_m">;
-def MergeZero : MergeType<3, "_z">;
-def MergeAnyExp : MergeType<4, "_x">; // Use merged builtin with explicit
-def MergeZeroExp : MergeType<5, "_z">; // generation of its inactive argument.
-
-class EltType<int val> {
- int Value = val;
-}
-def EltTyInvalid : EltType<0>;
-def EltTyInt8 : EltType<1>;
-def EltTyInt16 : EltType<2>;
-def EltTyInt32 : EltType<3>;
-def EltTyInt64 : EltType<4>;
-def EltTyFloat16 : EltType<5>;
-def EltTyFloat32 : EltType<6>;
-def EltTyFloat64 : EltType<7>;
-def EltTyBool8 : EltType<8>;
-def EltTyBool16 : EltType<9>;
-def EltTyBool32 : EltType<10>;
-def EltTyBool64 : EltType<11>;
-def EltTyBFloat16 : EltType<12>;
-
-class MemEltType<int val> {
- int Value = val;
-}
-def MemEltTyDefault : MemEltType<0>;
-def MemEltTyInt8 : MemEltType<1>;
-def MemEltTyInt16 : MemEltType<2>;
-def MemEltTyInt32 : MemEltType<3>;
-def MemEltTyInt64 : MemEltType<4>;
-
-class FlagType<int val> {
- int Value = val;
-}
-
-// These must be kept in sync with the flags in utils/TableGen/SveEmitter.h
-// and include/clang/Basic/TargetBuiltins.h
-def NoFlags : FlagType<0x00000000>;
-def FirstEltType : FlagType<0x00000001>;
-// : :
-// : :
-def EltTypeMask : FlagType<0x0000000f>;
-def FirstMemEltType : FlagType<0x00000010>;
-// : :
-// : :
-def MemEltTypeMask : FlagType<0x00000070>;
-def FirstMergeTypeMask : FlagType<0x00000080>;
-// : :
-// : :
-def MergeTypeMask : FlagType<0x00000380>;
-def FirstSplatOperand : FlagType<0x00000400>;
-// : :
-// These flags are used to specify which scalar operand
-// needs to be duplicated/splatted into a vector.
-// : :
-def SplatOperandMask : FlagType<0x00001C00>;
-def IsLoad : FlagType<0x00002000>;
-def IsStore : FlagType<0x00004000>;
-def IsGatherLoad : FlagType<0x00008000>;
-def IsScatterStore : FlagType<0x00010000>;
-def IsStructLoad : FlagType<0x00020000>;
-def IsStructStore : FlagType<0x00040000>;
-def IsZExtReturn : FlagType<0x00080000>; // Return value is sign-extend by default
-def IsOverloadNone : FlagType<0x00100000>; // Intrinsic does not take any overloaded types.
-def IsOverloadWhile : FlagType<0x00200000>; // Use {default type, typeof(operand1)} as overloaded types.
-def IsOverloadWhileRW : FlagType<0x00400000>; // Use {pred(default type), typeof(operand0)} as overloaded types.
-def IsOverloadCvt : FlagType<0x00800000>; // Use {typeof(operand0), typeof(last operand)} as overloaded types.
-def OverloadKindMask : FlagType<0x00E00000>; // When the masked values are all '0', the default type is used as overload type.
-def IsByteIndexed : FlagType<0x01000000>;
-def IsAppendSVALL : FlagType<0x02000000>; // Appends SV_ALL as the last operand.
-def IsInsertOp1SVALL : FlagType<0x04000000>; // Inserts SV_ALL as the second operand.
-def IsPrefetch : FlagType<0x08000000>; // Contiguous prefetches.
-def IsGatherPrefetch : FlagType<0x10000000>;
-def ReverseCompare : FlagType<0x20000000>; // Compare operands must be swapped.
-def ReverseUSDOT : FlagType<0x40000000>; // Unsigned/signed operands must be swapped.
-def IsUndef : FlagType<0x80000000>; // Codegen `undef` of given type.
-def IsTupleCreate : FlagType<0x100000000>;
-def IsTupleGet : FlagType<0x200000000>;
-def IsTupleSet : FlagType<0x400000000>;
-
-// These must be kept in sync with the flags in include/clang/Basic/TargetBuiltins.h
-class ImmCheckType<int val> {
- int Value = val;
-}
-def ImmCheck0_31 : ImmCheckType<0>; // 0..31 (used for e.g. predicate patterns)
-def ImmCheck1_16 : ImmCheckType<1>; // 1..16
-def ImmCheckExtract : ImmCheckType<2>; // 0..(2048/sizeinbits(elt) - 1)
-def ImmCheckShiftRight : ImmCheckType<3>; // 1..sizeinbits(elt)
-def ImmCheckShiftRightNarrow : ImmCheckType<4>; // 1..sizeinbits(elt)/2
-def ImmCheckShiftLeft : ImmCheckType<5>; // 0..(sizeinbits(elt) - 1)
-def ImmCheck0_7 : ImmCheckType<6>; // 0..7
-def ImmCheckLaneIndex : ImmCheckType<7>; // 0..(128/(1*sizeinbits(elt)) - 1)
-def ImmCheckLaneIndexCompRotate : ImmCheckType<8>; // 0..(128/(2*sizeinbits(elt)) - 1)
-def ImmCheckLaneIndexDot : ImmCheckType<9>; // 0..(128/(4*sizeinbits(elt)) - 1)
-def ImmCheckComplexRot90_270 : ImmCheckType<10>; // [90,270]
-def ImmCheckComplexRotAll90 : ImmCheckType<11>; // [0, 90, 180,270]
-def ImmCheck0_13 : ImmCheckType<12>; // 0..13
-def ImmCheck0_1 : ImmCheckType<13>; // 0..1
-def ImmCheck0_2 : ImmCheckType<14>; // 0..2
-def ImmCheck0_3 : ImmCheckType<15>; // 0..3
-
-class ImmCheck<int arg, ImmCheckType kind, int eltSizeArg = -1> {
- int Arg = arg;
- int EltSizeArg = eltSizeArg;
- ImmCheckType Kind = kind;
-}
-
-class Inst<string n, string p, string t, MergeType mt, string i,
- list<FlagType> ft, list<ImmCheck> ch, MemEltType met> {
- string Name = n;
- string Prototype = p;
- string Types = t;
- string TargetGuard = "sve";
- int Merge = mt.Value;
- string MergeSuffix = mt.Suffix;
- string LLVMIntrinsic = i;
- list<FlagType> Flags = ft;
- list<ImmCheck> ImmChecks = ch;
- int MemEltType = met.Value;
-}
-
-// SInst: Instruction with signed/unsigned suffix (e.g., "s8", "u8")
-class SInst<string n, string p, string t, MergeType mt, string i = "",
- list<FlagType> ft = [], list<ImmCheck> ch = []>
- : Inst<n, p, t, mt, i, ft, ch, MemEltTyDefault> {
-}
-
-// MInst: Instructions which access memory
-class MInst<string n, string p, string t, list<FlagType> f,
- MemEltType met = MemEltTyDefault, string i = "">
- : Inst<n, p, t, MergeNone, i, f, [], met> {
-}
+include "arm_sve_sme_incl.td"
////////////////////////////////////////////////////////////////////////////////
// Loads
@@ -758,49 +514,49 @@ defm SVNEG : SInstZPZ<"svneg", "csil", "aarch64_sve_neg">;
//------------------------------------------------------------------------------
-multiclass SInstZPZZ<string name, string types, string intrinsic, list<FlagType> flags=[]> {
- def _M : SInst<name # "[_{d}]", "dPdd", types, MergeOp1, intrinsic, flags>;
- def _X : SInst<name # "[_{d}]", "dPdd", types, MergeAny, intrinsic, flags>;
- def _Z : SInst<name # "[_{d}]", "dPdd", types, MergeZero, intrinsic, flags>;
-
- def _N_M : SInst<name # "[_n_{d}]", "dPda", types, MergeOp1, intrinsic, flags>;
- def _N_X : SInst<name # "[_n_{d}]", "dPda", types, MergeAny, intrinsic, flags>;
- def _N_Z : SInst<name # "[_n_{d}]", "dPda", types, MergeZero, intrinsic, flags>;
-}
-
-defm SVABD_S : SInstZPZZ<"svabd", "csil", "aarch64_sve_sabd">;
-defm SVABD_U : SInstZPZZ<"svabd", "UcUsUiUl", "aarch64_sve_uabd">;
-defm SVADD : SInstZPZZ<"svadd", "csilUcUsUiUl", "aarch64_sve_add">;
-defm SVDIV_S : SInstZPZZ<"svdiv", "il", "aarch64_sve_sdiv">;
-defm SVDIV_U : SInstZPZZ<"svdiv", "UiUl", "aarch64_sve_udiv">;
-defm SVDIVR_S : SInstZPZZ<"svdivr", "il", "aarch64_sve_sdivr">;
-defm SVDIVR_U : SInstZPZZ<"svdivr", "UiUl", "aarch64_sve_udivr">;
-defm SVMAX_S : SInstZPZZ<"svmax", "csil", "aarch64_sve_smax">;
-defm SVMAX_U : SInstZPZZ<"svmax", "UcUsUiUl", "aarch64_sve_umax">;
-defm SVMIN_S : SInstZPZZ<"svmin", "csil", "aarch64_sve_smin">;
-defm SVMIN_U : SInstZPZZ<"svmin", "UcUsUiUl", "aarch64_sve_umin">;
-defm SVMUL : SInstZPZZ<"svmul", "csilUcUsUiUl", "aarch64_sve_mul">;
-defm SVMULH_S : SInstZPZZ<"svmulh", "csil", "aarch64_sve_smulh">;
-defm SVMULH_U : SInstZPZZ<"svmulh", "UcUsUiUl", "aarch64_sve_umulh">;
-defm SVSUB : SInstZPZZ<"svsub", "csilUcUsUiUl", "aarch64_sve_sub">;
-defm SVSUBR : SInstZPZZ<"svsubr", "csilUcUsUiUl", "aarch64_sve_subr">;
+multiclass SInstZPZZ<string name, string types, string m_intrinsic, string x_intrinsic, list<FlagType> flags=[]> {
+ def _M : SInst<name # "[_{d}]", "dPdd", types, MergeOp1, m_intrinsic, flags>;
+ def _X : SInst<name # "[_{d}]", "dPdd", types, MergeAny, x_intrinsic, flags>;
+ def _Z : SInst<name # "[_{d}]", "dPdd", types, MergeZero, m_intrinsic, flags>;
+
+ def _N_M : SInst<name # "[_n_{d}]", "dPda", types, MergeOp1, m_intrinsic, flags>;
+ def _N_X : SInst<name # "[_n_{d}]", "dPda", types, MergeAny, x_intrinsic, flags>;
+ def _N_Z : SInst<name # "[_n_{d}]", "dPda", types, MergeZero, m_intrinsic, flags>;
+}
+
+defm SVABD_S : SInstZPZZ<"svabd", "csil", "aarch64_sve_sabd", "aarch64_sve_sabd_u">;
+defm SVABD_U : SInstZPZZ<"svabd", "UcUsUiUl", "aarch64_sve_uabd", "aarch64_sve_uabd_u">;
+defm SVADD : SInstZPZZ<"svadd", "csilUcUsUiUl", "aarch64_sve_add", "aarch64_sve_add_u">;
+defm SVDIV_S : SInstZPZZ<"svdiv", "il", "aarch64_sve_sdiv", "aarch64_sve_sdiv_u">;
+defm SVDIV_U : SInstZPZZ<"svdiv", "UiUl", "aarch64_sve_udiv", "aarch64_sve_udiv_u">;
+defm SVDIVR_S : SInstZPZZ<"svdivr", "il", "aarch64_sve_sdivr", "aarch64_sve_sdiv_u", [ReverseMergeAnyBinOp]>;
+defm SVDIVR_U : SInstZPZZ<"svdivr", "UiUl", "aarch64_sve_udivr", "aarch64_sve_udiv_u", [ReverseMergeAnyBinOp]>;
+defm SVMAX_S : SInstZPZZ<"svmax", "csil", "aarch64_sve_smax", "aarch64_sve_smax_u">;
+defm SVMAX_U : SInstZPZZ<"svmax", "UcUsUiUl", "aarch64_sve_umax", "aarch64_sve_umax_u">;
+defm SVMIN_S : SInstZPZZ<"svmin", "csil", "aarch64_sve_smin", "aarch64_sve_smin_u">;
+defm SVMIN_U : SInstZPZZ<"svmin", "UcUsUiUl", "aarch64_sve_umin", "aarch64_sve_umin_u">;
+defm SVMUL : SInstZPZZ<"svmul", "csilUcUsUiUl", "aarch64_sve_mul", "aarch64_sve_mul_u">;
+defm SVMULH_S : SInstZPZZ<"svmulh", "csil", "aarch64_sve_smulh", "aarch64_sve_smulh_u">;
+defm SVMULH_U : SInstZPZZ<"svmulh", "UcUsUiUl", "aarch64_sve_umulh", "aarch64_sve_umulh_u">;
+defm SVSUB : SInstZPZZ<"svsub", "csilUcUsUiUl", "aarch64_sve_sub", "aarch64_sve_sub_u">;
+defm SVSUBR : SInstZPZZ<"svsubr", "csilUcUsUiUl", "aarch64_sve_subr", "aarch64_sve_sub_u", [ReverseMergeAnyBinOp]>;
//------------------------------------------------------------------------------
-multiclass SInstZPZZZ<string name, string types, string intrinsic, list<FlagType> flags=[]> {
- def _M : SInst<name # "[_{d}]", "dPddd", types, MergeOp1, intrinsic, flags>;
- def _X : SInst<name # "[_{d}]", "dPddd", types, MergeAny, intrinsic, flags>;
- def _Z : SInst<name # "[_{d}]", "dPddd", types, MergeZero, intrinsic, flags>;
+multiclass SInstZPZZZ<string name, string types, string m_intrinsic, string x_intrinsic, list<FlagType> flags=[]> {
+ def _M : SInst<name # "[_{d}]", "dPddd", types, MergeOp1, m_intrinsic, flags>;
+ def _X : SInst<name # "[_{d}]", "dPddd", types, MergeAny, x_intrinsic, flags>;
+ def _Z : SInst<name # "[_{d}]", "dPddd", types, MergeZero, m_intrinsic, flags>;
- def _N_M : SInst<name # "[_n_{d}]", "dPdda", types, MergeOp1, intrinsic, flags>;
- def _N_X : SInst<name # "[_n_{d}]", "dPdda", types, MergeAny, intrinsic, flags>;
- def _N_Z : SInst<name # "[_n_{d}]", "dPdda", types, MergeZero, intrinsic, flags>;
+ def _N_M : SInst<name # "[_n_{d}]", "dPdda", types, MergeOp1, m_intrinsic, flags>;
+ def _N_X : SInst<name # "[_n_{d}]", "dPdda", types, MergeAny, x_intrinsic, flags>;
+ def _N_Z : SInst<name # "[_n_{d}]", "dPdda", types, MergeZero, m_intrinsic, flags>;
}
-defm SVMAD : SInstZPZZZ<"svmad", "csilUcUsUiUl", "aarch64_sve_mad">;
-defm SVMLA : SInstZPZZZ<"svmla", "csilUcUsUiUl", "aarch64_sve_mla">;
-defm SVMLS : SInstZPZZZ<"svmls", "csilUcUsUiUl", "aarch64_sve_mls">;
-defm SVMSB : SInstZPZZZ<"svmsb", "csilUcUsUiUl", "aarch64_sve_msb">;
+defm SVMAD : SInstZPZZZ<"svmad", "csilUcUsUiUl", "aarch64_sve_mad", "aarch64_sve_mla_u", [ReverseMergeAnyAccOp]>;
+defm SVMLA : SInstZPZZZ<"svmla", "csilUcUsUiUl", "aarch64_sve_mla", "aarch64_sve_mla_u">;
+defm SVMLS : SInstZPZZZ<"svmls", "csilUcUsUiUl", "aarch64_sve_mls", "aarch64_sve_mls_u">;
+defm SVMSB : SInstZPZZZ<"svmsb", "csilUcUsUiUl", "aarch64_sve_msb", "aarch64_sve_mls_u", [ReverseMergeAnyAccOp]>;
//------------------------------------------------------------------------------
@@ -824,10 +580,10 @@ def SVDOT_LANE_U : SInst<"svdot_lane[_{d}]", "ddqqi", "UiUl", MergeNone, "aarc
////////////////////////////////////////////////////////////////////////////////
// Logical operations
-defm SVAND : SInstZPZZ<"svand", "csilUcUsUiUl", "aarch64_sve_and">;
-defm SVBIC : SInstZPZZ<"svbic", "csilUcUsUiUl", "aarch64_sve_bic">;
-defm SVEOR : SInstZPZZ<"sveor", "csilUcUsUiUl", "aarch64_sve_eor">;
-defm SVORR : SInstZPZZ<"svorr", "csilUcUsUiUl", "aarch64_sve_orr">;
+defm SVAND : SInstZPZZ<"svand", "csilUcUsUiUl", "aarch64_sve_and", "aarch64_sve_and_u">;
+defm SVBIC : SInstZPZZ<"svbic", "csilUcUsUiUl", "aarch64_sve_bic", "aarch64_sve_bic_u">;
+defm SVEOR : SInstZPZZ<"sveor", "csilUcUsUiUl", "aarch64_sve_eor", "aarch64_sve_eor_u">;
+defm SVORR : SInstZPZZ<"svorr", "csilUcUsUiUl", "aarch64_sve_orr", "aarch64_sve_orr_u">;
defm SVCNOT : SInstZPZ<"svcnot", "csilUcUsUiUl", "aarch64_sve_cnot">;
defm SVNOT : SInstZPZ<"svnot", "csilUcUsUiUl", "aarch64_sve_not">;
@@ -837,11 +593,11 @@ defm SVNOT : SInstZPZ<"svnot", "csilUcUsUiUl", "aarch64_sve_not">;
multiclass SInst_SHIFT<string name, string intrinsic, string ts, string wide_ts> {
def _M : SInst<name # "[_{d}]", "dPdu", ts, MergeOp1, intrinsic>;
- def _X : SInst<name # "[_{d}]", "dPdu", ts, MergeAny, intrinsic>;
+ def _X : SInst<name # "[_{d}]", "dPdu", ts, MergeAny, intrinsic # _u>;
def _Z : SInst<name # "[_{d}]", "dPdu", ts, MergeZero, intrinsic>;
def _N_M : SInst<name # "[_n_{d}]", "dPdL", ts, MergeOp1, intrinsic>;
- def _N_X : SInst<name # "[_n_{d}]", "dPdL", ts, MergeAny, intrinsic>;
+ def _N_X : SInst<name # "[_n_{d}]", "dPdL", ts, MergeAny, intrinsic # _u>;
def _N_Z : SInst<name # "[_n_{d}]", "dPdL", ts, MergeZero, intrinsic>;
def _WIDE_M : SInst<name # _wide # "[_{d}]", "dPdg", wide_ts, MergeOp1, intrinsic # _wide>;
@@ -979,18 +735,18 @@ defm SVREVW : SInstZPZ<"svrevw", "lUl", "aarch64_sve_revw">;
defm SVABS_F : SInstZPZ<"svabs", "hfd", "aarch64_sve_fabs">;
defm SVNEG_F : SInstZPZ<"svneg", "hfd", "aarch64_sve_fneg">;
-defm SVABD_F : SInstZPZZ<"svabd", "hfd", "aarch64_sve_fabd">;
-defm SVADD_F : SInstZPZZ<"svadd", "hfd", "aarch64_sve_fadd">;
-defm SVDIV_F : SInstZPZZ<"svdiv", "hfd", "aarch64_sve_fdiv">;
-defm SVDIVR_F : SInstZPZZ<"svdivr", "hfd", "aarch64_sve_fdivr">;
-defm SVMAX_F : SInstZPZZ<"svmax", "hfd", "aarch64_sve_fmax">;
-defm SVMAXNM : SInstZPZZ<"svmaxnm","hfd", "aarch64_sve_fmaxnm">;
-defm SVMIN_F : SInstZPZZ<"svmin", "hfd", "aarch64_sve_fmin">;
-defm SVMINNM : SInstZPZZ<"svminnm","hfd", "aarch64_sve_fminnm">;
-defm SVMUL_F : SInstZPZZ<"svmul", "hfd", "aarch64_sve_fmul">;
-defm SVMULX : SInstZPZZ<"svmulx", "hfd", "aarch64_sve_fmulx">;
-defm SVSUB_F : SInstZPZZ<"svsub", "hfd", "aarch64_sve_fsub">;
-defm SVSUBR_F : SInstZPZZ<"svsubr", "hfd", "aarch64_sve_fsubr">;
+defm SVABD_F : SInstZPZZ<"svabd", "hfd", "aarch64_sve_fabd", "aarch64_sve_fabd_u">;
+defm SVADD_F : SInstZPZZ<"svadd", "hfd", "aarch64_sve_fadd", "aarch64_sve_fadd_u">;
+defm SVDIV_F : SInstZPZZ<"svdiv", "hfd", "aarch64_sve_fdiv", "aarch64_sve_fdiv_u">;
+defm SVDIVR_F : SInstZPZZ<"svdivr", "hfd", "aarch64_sve_fdivr", "aarch64_sve_fdiv_u", [ReverseMergeAnyBinOp]>;
+defm SVMAX_F : SInstZPZZ<"svmax", "hfd", "aarch64_sve_fmax", "aarch64_sve_fmax_u">;
+defm SVMAXNM : SInstZPZZ<"svmaxnm","hfd", "aarch64_sve_fmaxnm", "aarch64_sve_fmaxnm_u">;
+defm SVMIN_F : SInstZPZZ<"svmin", "hfd", "aarch64_sve_fmin", "aarch64_sve_fmin_u">;
+defm SVMINNM : SInstZPZZ<"svminnm","hfd", "aarch64_sve_fminnm", "aarch64_sve_fminnm_u">;
+defm SVMUL_F : SInstZPZZ<"svmul", "hfd", "aarch64_sve_fmul", "aarch64_sve_fmul_u">;
+defm SVMULX : SInstZPZZ<"svmulx", "hfd", "aarch64_sve_fmulx", "aarch64_sve_fmulx_u">;
+defm SVSUB_F : SInstZPZZ<"svsub", "hfd", "aarch64_sve_fsub", "aarch64_sve_fsub_u">;
+defm SVSUBR_F : SInstZPZZ<"svsubr", "hfd", "aarch64_sve_fsubr", "aarch64_sve_fsub_u", [ReverseMergeAnyBinOp]>;
defm SVRECPX : SInstZPZ<"svrecpx", "hfd", "aarch64_sve_frecpx">;
defm SVRINTA : SInstZPZ<"svrinta", "hfd", "aarch64_sve_frinta">;
@@ -1015,14 +771,14 @@ def SVSCALE_N_M : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeOp1, "aarch64_sv
def SVSCALE_N_X : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeAny, "aarch64_sve_fscale">;
def SVSCALE_N_Z : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeZero, "aarch64_sve_fscale">;
-defm SVMAD_F : SInstZPZZZ<"svmad", "hfd", "aarch64_sve_fmad">;
-defm SVMLA_F : SInstZPZZZ<"svmla", "hfd", "aarch64_sve_fmla">;
-defm SVMLS_F : SInstZPZZZ<"svmls", "hfd", "aarch64_sve_fmls">;
-defm SVMSB_F : SInstZPZZZ<"svmsb", "hfd", "aarch64_sve_fmsb">;
-defm SVNMAD_F : SInstZPZZZ<"svnmad", "hfd", "aarch64_sve_fnmad">;
-defm SVNMLA_F : SInstZPZZZ<"svnmla", "hfd", "aarch64_sve_fnmla">;
-defm SVNMLS_F : SInstZPZZZ<"svnmls", "hfd", "aarch64_sve_fnmls">;
-defm SVNMSB_F : SInstZPZZZ<"svnmsb", "hfd", "aarch64_sve_fnmsb">;
+defm SVMAD_F : SInstZPZZZ<"svmad", "hfd", "aarch64_sve_fmad", "aarch64_sve_fmla_u", [ReverseMergeAnyAccOp]>;
+defm SVMLA_F : SInstZPZZZ<"svmla", "hfd", "aarch64_sve_fmla", "aarch64_sve_fmla_u">;
+defm SVMLS_F : SInstZPZZZ<"svmls", "hfd", "aarch64_sve_fmls", "aarch64_sve_fmls_u">;
+defm SVMSB_F : SInstZPZZZ<"svmsb", "hfd", "aarch64_sve_fmsb", "aarch64_sve_fmls_u", [ReverseMergeAnyAccOp]>;
+defm SVNMAD_F : SInstZPZZZ<"svnmad", "hfd", "aarch64_sve_fnmad", "aarch64_sve_fnmla_u", [ReverseMergeAnyAccOp]>;
+defm SVNMLA_F : SInstZPZZZ<"svnmla", "hfd", "aarch64_sve_fnmla", "aarch64_sve_fnmla_u">;
+defm SVNMLS_F : SInstZPZZZ<"svnmls", "hfd", "aarch64_sve_fnmls", "aarch64_sve_fnmls_u">;
+defm SVNMSB_F : SInstZPZZZ<"svnmsb", "hfd", "aarch64_sve_fnmsb", "aarch64_sve_fnmls_u", [ReverseMergeAnyAccOp]>;
def SVCADD_M : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeOp1, "aarch64_sve_fcadd", [], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
def SVCADD_X : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeAny, "aarch64_sve_fcadd", [], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
@@ -1513,21 +1269,21 @@ def SVWHILEHS_U64 : SInst<"svwhilege_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNon
// SVE2 - Uniform DSP operations
let TargetGuard = "sve2" in {
-defm SVQADD_S : SInstZPZZ<"svqadd", "csli", "aarch64_sve_sqadd">;
-defm SVQADD_U : SInstZPZZ<"svqadd", "UcUsUiUl", "aarch64_sve_uqadd">;
-defm SVHADD_S : SInstZPZZ<"svhadd", "csli", "aarch64_sve_shadd">;
-defm SVHADD_U : SInstZPZZ<"svhadd", "UcUsUiUl", "aarch64_sve_uhadd">;
-defm SVRHADD_S : SInstZPZZ<"svrhadd", "csli", "aarch64_sve_srhadd">;
-defm SVRHADD_U : SInstZPZZ<"svrhadd", "UcUsUiUl", "aarch64_sve_urhadd">;
-
-defm SVQSUB_S : SInstZPZZ<"svqsub", "csli", "aarch64_sve_sqsub">;
-defm SVQSUB_U : SInstZPZZ<"svqsub", "UcUsUiUl", "aarch64_sve_uqsub">;
-defm SVQSUBR_S : SInstZPZZ<"svqsubr", "csli", "aarch64_sve_sqsubr">;
-defm SVQSUBR_U : SInstZPZZ<"svqsubr", "UcUsUiUl", "aarch64_sve_uqsubr">;
-defm SVHSUB_S : SInstZPZZ<"svhsub", "csli", "aarch64_sve_shsub">;
-defm SVHSUB_U : SInstZPZZ<"svhsub", "UcUsUiUl", "aarch64_sve_uhsub">;
-defm SVHSUBR_S : SInstZPZZ<"svhsubr", "csli", "aarch64_sve_shsubr">;
-defm SVHSUBR_U : SInstZPZZ<"svhsubr", "UcUsUiUl", "aarch64_sve_uhsubr">;
+defm SVQADD_S : SInstZPZZ<"svqadd", "csli", "aarch64_sve_sqadd", "aarch64_sve_sqadd">;
+defm SVQADD_U : SInstZPZZ<"svqadd", "UcUsUiUl", "aarch64_sve_uqadd", "aarch64_sve_uqadd">;
+defm SVHADD_S : SInstZPZZ<"svhadd", "csli", "aarch64_sve_shadd", "aarch64_sve_shadd">;
+defm SVHADD_U : SInstZPZZ<"svhadd", "UcUsUiUl", "aarch64_sve_uhadd", "aarch64_sve_uhadd">;
+defm SVRHADD_S : SInstZPZZ<"svrhadd", "csli", "aarch64_sve_srhadd", "aarch64_sve_srhadd">;
+defm SVRHADD_U : SInstZPZZ<"svrhadd", "UcUsUiUl", "aarch64_sve_urhadd", "aarch64_sve_urhadd">;
+
+defm SVQSUB_S : SInstZPZZ<"svqsub", "csli", "aarch64_sve_sqsub", "aarch64_sve_sqsub_u">;
+defm SVQSUB_U : SInstZPZZ<"svqsub", "UcUsUiUl", "aarch64_sve_uqsub", "aarch64_sve_uqsub_u">;
+defm SVQSUBR_S : SInstZPZZ<"svqsubr", "csli", "aarch64_sve_sqsubr", "aarch64_sve_sqsub_u", [ReverseMergeAnyBinOp]>;
+defm SVQSUBR_U : SInstZPZZ<"svqsubr", "UcUsUiUl", "aarch64_sve_uqsubr", "aarch64_sve_uqsub_u", [ReverseMergeAnyBinOp]>;
+defm SVHSUB_S : SInstZPZZ<"svhsub", "csli", "aarch64_sve_shsub", "aarch64_sve_shsub">;
+defm SVHSUB_U : SInstZPZZ<"svhsub", "UcUsUiUl", "aarch64_sve_uhsub", "aarch64_sve_uhsub">;
+defm SVHSUBR_S : SInstZPZZ<"svhsubr", "csli", "aarch64_sve_shsubr", "aarch64_sve_shsubr">;
+defm SVHSUBR_U : SInstZPZZ<"svhsubr", "UcUsUiUl", "aarch64_sve_uhsubr", "aarch64_sve_uhsubr">;
defm SVQABS : SInstZPZ<"svqabs", "csil", "aarch64_sve_sqabs">;
defm SVQNEG : SInstZPZ<"svqneg", "csil", "aarch64_sve_sqneg">;
@@ -2102,3 +1858,13 @@ def SVBEXT_N : SInst<"svbext[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sv
def SVBGRP : SInst<"svbgrp[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_bgrp_x">;
def SVBGRP_N : SInst<"svbgrp[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_bgrp_x">;
}
+
+let TargetGuard = "sve2p1" in {
+def SVFCLAMP : SInst<"svclamp[_{d}]", "dddd", "hfd", MergeNone, "aarch64_sve_fclamp", [], []>;
+def SVPTRUE_COUNT : SInst<"svptrue_{d}", "}v", "QcQsQiQl", MergeNone, "aarch64_sve_ptrue_{d}", [IsOverloadNone], []>;
+}
+
+let TargetGuard = "sve2p1" in {
+def SVSCLAMP : SInst<"svclamp[_{d}]", "dddd", "csil", MergeNone, "aarch64_sve_sclamp", [], []>;
+def SVUCLAMP : SInst<"svclamp[_{d}]", "dddd", "UcUsUiUl", MergeNone, "aarch64_sve_uclamp", [], []>;
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_sve_sme_incl.td b/contrib/llvm-project/clang/include/clang/Basic/arm_sve_sme_incl.td
new file mode 100644
index 000000000000..74c9b9266771
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_sve_sme_incl.td
@@ -0,0 +1,281 @@
+//===--- arm_sve_sme_incl.td - ARM SVE/SME compiler interface -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines common properites of TableGen definitions use for both
+// SVE and SME intrinsics.
+//
+// https://developer.arm.com/architectures/system-architectures/software-standards/acle
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction definitions
+//===----------------------------------------------------------------------===//
+// Every intrinsic subclasses "Inst". An intrinsic has a name, a prototype and
+// a sequence of typespecs.
+//
+// The name is the base name of the intrinsic, for example "svld1". This is
+// then mangled by the tblgen backend to add type information ("svld1_s16").
+//
+// A typespec is a sequence of uppercase characters (modifiers) followed by one
+// lowercase character. A typespec encodes a particular "base type" of the
+// intrinsic.
+//
+// An example typespec is "Us" - unsigned short - svuint16_t. The available
+// typespec codes are given below.
+//
+// The string given to an Inst class is a sequence of typespecs. The intrinsic
+// is instantiated for every typespec in the sequence. For example "sdUsUd".
+//
+// The prototype is a string that defines the return type of the intrinsic
+// and the type of each argument. The return type and every argument gets a
+// "modifier" that can change in some way the "base type" of the intrinsic.
+//
+// The modifier 'd' means "default" and does not modify the base type in any
+// way. The available modifiers are given below.
+//
+// Typespecs
+// ---------
+// c: char
+// s: short
+// i: int
+// l: long
+// q: int128_t
+// f: float
+// h: half-float
+// d: double
+// b: bfloat
+
+// Typespec modifiers
+// ------------------
+// P: boolean
+// U: unsigned
+// Q: svcount
+
+// Prototype modifiers
+// -------------------
+// prototype: return (arg, arg, ...)
+//
+// 2,3,4: array of default vectors
+// v: void
+// x: vector of signed integers
+// u: vector of unsigned integers
+// d: default
+// c: const pointer type
+// P: predicate type
+// s: scalar of element type
+// a: scalar of element type (splat to vector type)
+// R: scalar of 1/2 width element type (splat to vector type)
+// r: scalar of 1/4 width element type (splat to vector type)
+// @: unsigned scalar of 1/4 width element type (splat to vector type)
+// e: 1/2 width unsigned elements, 2x element count
+// b: 1/4 width unsigned elements, 4x element count
+// h: 1/2 width elements, 2x element count
+// q: 1/4 width elements, 4x element count
+// o: 4x width elements, 1/4 element count
+//
+// w: vector of element type promoted to 64bits, vector maintains
+// signedness of its element type.
+// f: element type promoted to uint64_t (splat to vector type)
+// j: element type promoted to 64bits (splat to vector type)
+// K: element type bitcast to a signed integer (splat to vector type)
+// L: element type bitcast to an unsigned integer (splat to vector type)
+//
+// i: constant uint64_t
+// k: int32_t
+// l: int64_t
+// m: uint32_t
+// n: uint64_t
+
+// t: svint32_t
+// z: svuint32_t
+// g: svuint64_t
+// O: svfloat16_t
+// M: svfloat32_t
+// N: svfloat64_t
+
+// J: Prefetch type (sv_prfop)
+
+// %: pointer to void
+
+// A: pointer to int8_t
+// B: pointer to int16_t
+// C: pointer to int32_t
+// D: pointer to int64_t
+
+// E: pointer to uint8_t
+// F: pointer to uint16_t
+// G: pointer to uint32_t
+// H: pointer to uint64_t
+
+// Q: const pointer to void
+
+// S: const pointer to int8_t
+// T: const pointer to int16_t
+// U: const pointer to int32_t
+// V: const pointer to int64_t
+//
+// W: const pointer to uint8_t
+// X: const pointer to uint16_t
+// Y: const pointer to uint32_t
+// Z: const pointer to uint64_t
+
+// Prototype modifiers added for SVE2p1
+// }: svcount_t
+
+class MergeType<int val, string suffix=""> {
+ int Value = val;
+ string Suffix = suffix;
+}
+def MergeNone : MergeType<0>;
+def MergeAny : MergeType<1, "_x">;
+def MergeOp1 : MergeType<2, "_m">;
+def MergeZero : MergeType<3, "_z">;
+def MergeAnyExp : MergeType<4, "_x">; // Use merged builtin with explicit
+def MergeZeroExp : MergeType<5, "_z">; // generation of its inactive argument.
+
+class EltType<int val> {
+ int Value = val;
+}
+def EltTyInvalid : EltType<0>;
+def EltTyInt8 : EltType<1>;
+def EltTyInt16 : EltType<2>;
+def EltTyInt32 : EltType<3>;
+def EltTyInt64 : EltType<4>;
+def EltTyInt128 : EltType<5>;
+def EltTyFloat16 : EltType<6>;
+def EltTyFloat32 : EltType<7>;
+def EltTyFloat64 : EltType<8>;
+def EltTyBool8 : EltType<9>;
+def EltTyBool16 : EltType<10>;
+def EltTyBool32 : EltType<11>;
+def EltTyBool64 : EltType<12>;
+def EltTyBFloat16 : EltType<13>;
+
+class MemEltType<int val> {
+ int Value = val;
+}
+def MemEltTyDefault : MemEltType<0>;
+def MemEltTyInt8 : MemEltType<1>;
+def MemEltTyInt16 : MemEltType<2>;
+def MemEltTyInt32 : MemEltType<3>;
+def MemEltTyInt64 : MemEltType<4>;
+
+class FlagType<int val> {
+ int Value = val;
+}
+
+// These must be kept in sync with the flags in utils/TableGen/SveEmitter.h
+// and include/clang/Basic/TargetBuiltins.h
+def NoFlags : FlagType<0x00000000>;
+def FirstEltType : FlagType<0x00000001>;
+// : :
+// : :
+def EltTypeMask : FlagType<0x0000000f>;
+def FirstMemEltType : FlagType<0x00000010>;
+// : :
+// : :
+def MemEltTypeMask : FlagType<0x00000070>;
+def FirstMergeTypeMask : FlagType<0x00000080>;
+// : :
+// : :
+def MergeTypeMask : FlagType<0x00000380>;
+def FirstSplatOperand : FlagType<0x00000400>;
+// : :
+// These flags are used to specify which scalar operand
+// needs to be duplicated/splatted into a vector.
+// : :
+def SplatOperandMask : FlagType<0x00001C00>;
+def IsLoad : FlagType<0x00002000>;
+def IsStore : FlagType<0x00004000>;
+def IsGatherLoad : FlagType<0x00008000>;
+def IsScatterStore : FlagType<0x00010000>;
+def IsStructLoad : FlagType<0x00020000>;
+def IsStructStore : FlagType<0x00040000>;
+def IsZExtReturn : FlagType<0x00080000>; // Return value is sign-extend by default
+def IsOverloadNone : FlagType<0x00100000>; // Intrinsic does not take any overloaded types.
+def IsOverloadWhile : FlagType<0x00200000>; // Use {default type, typeof(operand1)} as overloaded types.
+def IsOverloadWhileRW : FlagType<0x00400000>; // Use {pred(default type), typeof(operand0)} as overloaded types.
+def IsOverloadCvt : FlagType<0x00800000>; // Use {typeof(operand0), typeof(last operand)} as overloaded types.
+def OverloadKindMask : FlagType<0x00E00000>; // When the masked values are all '0', the default type is used as overload type.
+def IsByteIndexed : FlagType<0x01000000>;
+def IsAppendSVALL : FlagType<0x02000000>; // Appends SV_ALL as the last operand.
+def IsInsertOp1SVALL : FlagType<0x04000000>; // Inserts SV_ALL as the second operand.
+def IsPrefetch : FlagType<0x08000000>; // Contiguous prefetches.
+def IsGatherPrefetch : FlagType<0x10000000>;
+def ReverseCompare : FlagType<0x20000000>; // Compare operands must be swapped.
+def ReverseUSDOT : FlagType<0x40000000>; // Unsigned/signed operands must be swapped.
+def IsUndef : FlagType<0x80000000>; // Codegen `undef` of given type.
+def IsTupleCreate : FlagType<0x100000000>;
+def IsTupleGet : FlagType<0x200000000>;
+def IsTupleSet : FlagType<0x400000000>;
+def ReverseMergeAnyBinOp : FlagType<0x800000000>; // e.g. Implement SUBR_X using SUB_X.
+def ReverseMergeAnyAccOp : FlagType<0x1000000000>; // e.g. Implement MSB_X using MLS_X.
+def IsStreaming : FlagType<0x2000000000>;
+def IsStreamingCompatible : FlagType<0x4000000000>;
+def IsSharedZA : FlagType<0x8000000000>;
+def IsPreservesZA : FlagType<0x10000000000>;
+def IsReadZA : FlagType<0x20000000000>;
+def IsWriteZA : FlagType<0x40000000000>;
+
+// These must be kept in sync with the flags in include/clang/Basic/TargetBuiltins.h
+class ImmCheckType<int val> {
+ int Value = val;
+}
+def ImmCheck0_31 : ImmCheckType<0>; // 0..31 (used for e.g. predicate patterns)
+def ImmCheck1_16 : ImmCheckType<1>; // 1..16
+def ImmCheckExtract : ImmCheckType<2>; // 0..(2048/sizeinbits(elt) - 1)
+def ImmCheckShiftRight : ImmCheckType<3>; // 1..sizeinbits(elt)
+def ImmCheckShiftRightNarrow : ImmCheckType<4>; // 1..sizeinbits(elt)/2
+def ImmCheckShiftLeft : ImmCheckType<5>; // 0..(sizeinbits(elt) - 1)
+def ImmCheck0_7 : ImmCheckType<6>; // 0..7
+def ImmCheckLaneIndex : ImmCheckType<7>; // 0..(128/(1*sizeinbits(elt)) - 1)
+def ImmCheckLaneIndexCompRotate : ImmCheckType<8>; // 0..(128/(2*sizeinbits(elt)) - 1)
+def ImmCheckLaneIndexDot : ImmCheckType<9>; // 0..(128/(4*sizeinbits(elt)) - 1)
+def ImmCheckComplexRot90_270 : ImmCheckType<10>; // [90,270]
+def ImmCheckComplexRotAll90 : ImmCheckType<11>; // [0, 90, 180,270]
+def ImmCheck0_13 : ImmCheckType<12>; // 0..13
+def ImmCheck0_1 : ImmCheckType<13>; // 0..1
+def ImmCheck0_2 : ImmCheckType<14>; // 0..2
+def ImmCheck0_3 : ImmCheckType<15>; // 0..3
+def ImmCheck0_0 : ImmCheckType<16>; // 0..0
+def ImmCheck0_15 : ImmCheckType<17>; // 0..15
+def ImmCheck0_255 : ImmCheckType<18>; // 0..255
+
+class ImmCheck<int arg, ImmCheckType kind, int eltSizeArg = -1> {
+ int Arg = arg;
+ int EltSizeArg = eltSizeArg;
+ ImmCheckType Kind = kind;
+}
+
+class Inst<string n, string p, string t, MergeType mt, string i,
+ list<FlagType> ft, list<ImmCheck> ch, MemEltType met> {
+ string Name = n;
+ string Prototype = p;
+ string Types = t;
+ string TargetGuard = "sve";
+ int Merge = mt.Value;
+ string MergeSuffix = mt.Suffix;
+ string LLVMIntrinsic = i;
+ list<FlagType> Flags = ft;
+ list<ImmCheck> ImmChecks = ch;
+ int MemEltType = met.Value;
+}
+
+// SInst: Instruction with signed/unsigned suffix (e.g., "s8", "u8")
+class SInst<string n, string p, string t, MergeType mt, string i = "",
+ list<FlagType> ft = [], list<ImmCheck> ch = []>
+ : Inst<n, p, t, mt, i, ft, ch, MemEltTyDefault> {
+}
+
+// MInst: Instructions which access memory
+class MInst<string n, string p, string t, list<FlagType> f,
+ MemEltType met = MemEltTyDefault, string i = "",
+ list<ImmCheck> ch = []>
+ : Inst<n, p, t, MergeNone, i, f, ch, met> {
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/riscv_sifive_vector.td b/contrib/llvm-project/clang/include/clang/Basic/riscv_sifive_vector.td
new file mode 100644
index 000000000000..0d390be711c8
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/riscv_sifive_vector.td
@@ -0,0 +1,105 @@
+//==--- riscv_sifive_vector.td - RISC-V SiFive VCIX function list ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the builtins for RISC-V SiFive VCIX. See:
+//
+// https://sifive.cdn.prismic.io/sifive/c3829e36-8552-41f0-a841-79945784241b_vcix-spec-software.pdf
+//
+//===----------------------------------------------------------------------===//
+
+include "riscv_vector_common.td"
+
+//===----------------------------------------------------------------------===//
+// Instruction definitions
+//===----------------------------------------------------------------------===//
+
+class VCIXSuffix<string range> {
+ list<string> suffix = !cond(!eq(range, "c"): ["8mf8", "8mf4", "8mf2", "8m1", "8m2", "8m4", "8m8"],
+ !eq(range, "s"): ["16mf4", "16mf2", "16m1", "16m2", "16m4", "16m8"],
+ !eq(range, "i"): ["32mf2", "32m1", "32m2", "32m4", "32m8"],
+ !eq(range, "l"): ["64m1", "64m2", "64m4", "64m8"]);
+}
+
+class VCIXBuiltinSet<string name, string IR_name, string suffix,
+ string prototype, string type_range,
+ list<int> intrinsic_types>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let Name = name;
+ let OverloadedName = name;
+ let IRName = IR_name;
+ let HasMasked = false;
+ let IntrinsicTypes = intrinsic_types;
+}
+
+multiclass VCIXBuiltinSet<string name, string IR_name, string suffix,
+ string prototype, string type_range,
+ list<int> intrinsic_types> {
+ if !find(prototype, "0") then {
+ def : VCIXBuiltinSet<name, IR_name, suffix, prototype, type_range, intrinsic_types>;
+ }
+ def : VCIXBuiltinSet<name # "_se", IR_name # "_se", suffix, prototype, type_range, intrinsic_types>;
+}
+
+multiclass RVVVCIXBuiltinSet<list<string> range, string prototype,
+ list<int> intrinsic_types, bit UseGPR> {
+ foreach r = range in
+ let RequiredFeatures = !if(!and(UseGPR, !eq(r, "l")),
+ ["Xsfvcp", "RV64"], ["Xsfvcp"]) in
+ defm : VCIXBuiltinSet<NAME, NAME, "Uv", prototype, r, intrinsic_types>;
+}
+
+multiclass RVVVCIXBuiltinSetWVType<list<string> range, string prototype,
+ list<int> intrinsic_types, bit UseGPR> {
+ foreach r = range in
+ let RequiredFeatures = !if(!and(UseGPR, !eq(r, "l")),
+ ["Xsfvcp", "RV64"], ["Xsfvcp"]) in
+      // These intrinsics don't have any vector types in their outputs or inputs,
+      // but we still need to emit a vsetvli for them. So we encode a distinct
+      // VTYPE into each intrinsic name, and then we know which vsetvli is
+      // correct.
+ foreach s = VCIXSuffix<r>.suffix in
+        // Since we already encode the VTYPE into the name, just set
+        // Log2LMUL to zero. Otherwise the RISCVVEmitter would expand
+        // lots of redundant intrinsics that have the same names.
+ let Log2LMUL = [0] in
+ def : VCIXBuiltinSet<NAME # "_u" # s, NAME # "_e" # s,
+ "", prototype, r, intrinsic_types>;
+}
+
+let SupportOverloading = false in {
+ defm sf_vc_x_se : RVVVCIXBuiltinSetWVType<["c", "s", "i", "l"], "0KzKzKzUe", [0, 3], /*UseGPR*/1>;
+ defm sf_vc_i_se : RVVVCIXBuiltinSetWVType<["c", "s", "i", "l"], "0KzKzKzKz", [2, 3], /*UseGPR*/0>;
+ defm sf_vc_xv : RVVVCIXBuiltinSet<["csi", "l"], "0KzKzUvUe", [0, 2, 3], /*UseGPR*/1>;
+ defm sf_vc_iv : RVVVCIXBuiltinSet<["csi", "l"], "0KzKzUvKz", [0, 2, 3], /*UseGPR*/0>;
+ defm sf_vc_vv : RVVVCIXBuiltinSet<["csi", "l"], "0KzKzUvUv", [0, 2, 3], /*UseGPR*/0>;
+ defm sf_vc_fv : RVVVCIXBuiltinSet<["si", "l"], "0KzKzUvFe", [0, 2, 3], /*UseGPR*/0>;
+ defm sf_vc_xvv : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvUe", [0, 1, 3], /*UseGPR*/1>;
+ defm sf_vc_ivv : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvKz", [0, 1, 3], /*UseGPR*/0>;
+ defm sf_vc_vvv : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvUv", [0, 1, 3], /*UseGPR*/0>;
+ defm sf_vc_fvv : RVVVCIXBuiltinSet<["si", "l"], "0KzUvUvFe", [0, 1, 3], /*UseGPR*/0>;
+ defm sf_vc_v_x : RVVVCIXBuiltinSet<["csi", "l"], "UvKzKzUe", [-1, 1, 2], /*UseGPR*/1>;
+ defm sf_vc_v_i : RVVVCIXBuiltinSet<["csi", "l"], "UvKzKzKz", [-1, 1, 2], /*UseGPR*/0>;
+ defm sf_vc_v_xv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUe", [-1, 0, 2], /*UseGPR*/1>;
+ defm sf_vc_v_iv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvKz", [-1, 0, 2], /*UseGPR*/0>;
+ defm sf_vc_v_vv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUv", [-1, 0, 2], /*UseGPR*/0>;
+ defm sf_vc_v_fv : RVVVCIXBuiltinSet<["si", "l"], "UvKzUvFe", [-1, 0, 2], /*UseGPR*/0>;
+ defm sf_vc_v_xvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvUe", [-1, 0, 3], /*UseGPR*/1>;
+ defm sf_vc_v_ivv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvKz", [-1, 0, 3], /*UseGPR*/0>;
+ defm sf_vc_v_vvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvUv", [-1, 0, 3], /*UseGPR*/0>;
+ defm sf_vc_v_fvv : RVVVCIXBuiltinSet<["si", "l"], "UvKzUvUvFe", [-1, 0, 3], /*UseGPR*/0>;
+ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ defm sf_vc_xvw : RVVVCIXBuiltinSet<["csi"], "0KzUwUvUe", [0, 1, 2, 3], /*UseGPR*/1>;
+ defm sf_vc_ivw : RVVVCIXBuiltinSet<["csi"], "0KzUwUvKz", [0, 1, 2, 3], /*UseGPR*/0>;
+ defm sf_vc_vvw : RVVVCIXBuiltinSet<["csi"], "0KzUwUvUv", [0, 1, 2, 3], /*UseGPR*/0>;
+ defm sf_vc_fvw : RVVVCIXBuiltinSet<["si"], "0KzUwUvFe", [0, 1, 2, 3], /*UseGPR*/0>;
+ defm sf_vc_v_xvw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvUe", [-1, 0, 2, 3], /*UseGPR*/1>;
+ defm sf_vc_v_ivw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvKz", [-1, 0, 2, 3], /*UseGPR*/0>;
+ defm sf_vc_v_vvw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvUv", [-1, 0, 2, 3], /*UseGPR*/0>;
+ defm sf_vc_v_fvw : RVVVCIXBuiltinSet<["si"], "UwKzUwUvFe", [-1, 0, 2, 3], /*UseGPR*/0>;
+ }
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
index b23e26ecaa57..7e5889812aec 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
@@ -12,233 +12,7 @@
//
//===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// Instruction definitions
-//===----------------------------------------------------------------------===//
-// Each record of the class RVVBuiltin defines a collection of builtins (i.e.
-// "def vadd : RVVBuiltin" will be used to define things like "vadd_vv_i32m1",
-// "vadd_vv_i32m2", etc).
-//
-// The elements of this collection are defined by an instantiation process the
-// range of which is specified by the cross product of the LMUL attribute and
-// every element in the attribute TypeRange. By default builtins have LMUL = [1,
-// 2, 4, 8, 1/2, 1/4, 1/8] so the process is repeated 7 times. In tablegen we
-// use the Log2LMUL [0, 1, 2, 3, -1, -2, -3] to represent the LMUL.
-//
-// LMUL represents the fact that the types of values used by that builtin are
-// values generated by instructions that are executed under that LMUL. However,
-// this does not mean the builtin is necessarily lowered into an instruction
-// that executes under the specified LMUL. An example where this happens are
-// loads and stores of masks. A mask like `vbool8_t` can be generated, for
-// instance, by comparing two `__rvv_int8m1_t` (this is LMUL=1) or comparing two
-// `__rvv_int16m2_t` (this is LMUL=2). The actual load or store, however, will
-// be performed under LMUL=1 because mask registers are not grouped.
-//
-// TypeRange is a non-empty sequence of basic types:
-//
-// c: int8_t (i8)
-// s: int16_t (i16)
-// i: int32_t (i32)
-// l: int64_t (i64)
-// x: float16_t (half)
-// f: float32_t (float)
-// d: float64_t (double)
-//
-// This way, given an LMUL, a record with a TypeRange "sil" will cause the
-// definition of 3 builtins. Each type "t" in the TypeRange (in this example
-// they are int16_t, int32_t, int64_t) is used as a parameter that drives the
-// definition of that particular builtin (for the given LMUL).
-//
-// During the instantiation, types can be transformed or modified using type
-// transformers. Given a type "t" the following primitive type transformers can
-// be applied to it to yield another type.
-//
-// e: type of "t" as is (identity)
-// v: computes a vector type whose element type is "t" for the current LMUL
-// w: computes a vector type identical to what 'v' computes except for the
-// element type which is twice as wide as the element type of 'v'
-// q: computes a vector type identical to what 'v' computes except for the
-// element type which is four times as wide as the element type of 'v'
-// o: computes a vector type identical to what 'v' computes except for the
-// element type which is eight times as wide as the element type of 'v'
-// m: computes a vector type identical to what 'v' computes except for the
-// element type which is bool
-// 0: void type, ignores "t"
-// z: size_t, ignores "t"
-// t: ptrdiff_t, ignores "t"
-// u: unsigned long, ignores "t"
-// l: long, ignores "t"
-//
-// So for instance if t is "i", i.e. int, then "e" will yield int again. "v"
-// will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
-// Accordingly "w" would yield __rvv_int64m2_t.
-//
-// A type transformer can be prefixed by other non-primitive type transformers.
-//
-// P: constructs a pointer to the current type
-// C: adds const to the type
-// K: requires the integer type to be a constant expression
-// U: given an integer type or vector type, computes its unsigned variant
-// I: given a vector type, compute the vector type with integer type
-// elements of the same width
-// F: given a vector type, compute the vector type with floating-point type
-// elements of the same width
-// S: given a vector type, computes its equivalent one for LMUL=1. This is a
-// no-op if the vector was already LMUL=1
-// (Log2EEW:Value): Log2EEW value could be 3/4/5/6 (8/16/32/64), given a
-// vector type (SEW and LMUL) and EEW (8/16/32/64), computes its
-// equivalent integer vector type with EEW and corresponding ELMUL (elmul =
-// (eew/sew) * lmul). For example, vector type is __rvv_float16m4
-// (SEW=16, LMUL=4) and Log2EEW is 3 (EEW=8), and then equivalent vector
-// type is __rvv_uint8m2_t (elmul=(8/16)*4 = 2). Ignore to define a new
-// builtins if its equivalent type has illegal lmul.
-// (FixedSEW:Value): Given a vector type (SEW and LMUL), and computes another
-// vector type which only changed SEW as given value. Ignore to define a new
-// builtin if its equivalent type has illegal lmul or the SEW does not changed.
-// (SFixedLog2LMUL:Value): Smaller Fixed Log2LMUL. Given a vector type (SEW
-// and LMUL), and computes another vector type which only changed LMUL as
-// given value. The new LMUL should be smaller than the old one. Ignore to
-// define a new builtin if its equivalent type has illegal lmul.
-// (LFixedLog2LMUL:Value): Larger Fixed Log2LMUL. Given a vector type (SEW
-// and LMUL), and computes another vector type which only changed LMUL as
-// given value. The new LMUL should be larger than the old one. Ignore to
-// define a new builtin if its equivalent type has illegal lmul.
-//
-// Following with the example above, if t is "i", then "Ue" will yield unsigned
-// int and "Fv" will yield __rvv_float32m1_t (again assuming LMUL=1), Fw would
-// yield __rvv_float64m2_t, etc.
-//
-// Each builtin is then defined by applying each type in TypeRange against the
-// sequence of type transformers described in Suffix and Prototype.
-//
-// The name of the builtin is defined by the Name attribute (which defaults to
-// the name of the class) appended (separated with an underscore) the Suffix
-// attribute. For instance with Name="foo", Suffix = "v" and TypeRange = "il",
-// the builtin generated will be __builtin_rvv_foo_i32m1 and
-// __builtin_rvv_foo_i64m1 (under LMUL=1). If Suffix contains more than one
-// type transformer (say "vv") each of the types is separated with an
-// underscore as in "__builtin_rvv_foo_i32m1_i32m1".
-//
-// The C/C++ prototype of the builtin is defined by the Prototype attribute.
-// Prototype is a non-empty sequence of type transformers, the first of which
-// is the return type of the builtin and the rest are the parameters of the
-// builtin, in order. For instance if Prototype is "wvv" and TypeRange is "si"
-// a first builtin will have type
-// __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t) and the second builtin
-// will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t) (again
-// under LMUL=1).
-//
-// There are a number of attributes that are used to constraint the number and
-// shape of the builtins generated. Refer to the comments below for them.
-
-class PolicyScheme<int val>{
- int Value = val;
-}
-def NonePolicy : PolicyScheme<0>;
-def HasPassthruOperand : PolicyScheme<1>;
-def HasPolicyOperand : PolicyScheme<2>;
-
-class RVVBuiltin<string suffix, string prototype, string type_range,
- string overloaded_suffix = ""> {
- // Base name that will be prepended in __builtin_rvv_ and appended the
- // computed Suffix.
- string Name = NAME;
-
- // If not empty, each instantiated builtin will have this appended after an
- // underscore (_). It is instantiated like Prototype.
- string Suffix = suffix;
-
- // If empty, default OverloadedName is sub string of `Name` which end of first
- // '_'. For example, the default overloaded name is `vadd` for Name `vadd_vv`.
- // It's used for describe some special naming cases.
- string OverloadedName = "";
-
- // If not empty, each OverloadedName will have this appended after an
- // underscore (_). It is instantiated like Prototype.
- string OverloadedSuffix = overloaded_suffix;
-
- // The different variants of the builtin, parameterised with a type.
- string TypeRange = type_range;
-
- // We use each type described in TypeRange and LMUL with prototype to
- // instantiate a specific element of the set of builtins being defined.
- // Prototype attribute defines the C/C++ prototype of the builtin. It is a
- // non-empty sequence of type transformers, the first of which is the return
- // type of the builtin and the rest are the parameters of the builtin, in
- // order. For instance if Prototype is "wvv", TypeRange is "si" and LMUL=1, a
- // first builtin will have type
- // __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t), and the second builtin
- // will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t).
- string Prototype = prototype;
-
- // This builtin has a masked form.
- bit HasMasked = true;
-
- // If HasMasked, this flag states that this builtin has a maskedoff operand. It
- // is always the first operand in builtin and IR intrinsic.
- bit HasMaskedOffOperand = true;
-
- // This builtin has a granted vector length parameter.
- bit HasVL = true;
-
- // The policy scheme for masked intrinsic IR.
- // It could be NonePolicy or HasPolicyOperand.
- // HasPolicyOperand: Has a policy operand. 0 is tail and mask undisturbed, 1 is
- // tail agnostic, 2 is mask undisturbed, and 3 is tail and mask agnostic. The
- // policy operand is located at the last position.
- PolicyScheme MaskedPolicyScheme = HasPolicyOperand;
-
- // The policy scheme for unmasked intrinsic IR.
- // It could be NonePolicy, HasPassthruOperand or HasPolicyOperand.
- // HasPassthruOperand: Has a passthru operand to decide tail policy. If it is
- // poison, tail policy is tail agnostic, otherwise policy is tail undisturbed.
- // HasPolicyOperand: Has a policy operand. 1 is tail agnostic and 0 is tail
- // undisturbed.
- PolicyScheme UnMaskedPolicyScheme = NonePolicy;
-
- // This builtin support tail agnostic and undisturbed policy.
- bit HasTailPolicy = true;
- // This builtin support mask agnostic and undisturbed policy.
- bit HasMaskPolicy = true;
-
- // This builtin prototype with TA or TAMA policy could not support overloading
- // API. Other policy intrinsic functions would support overloading API with
- // suffix `_tu`, `tumu`, `tuma`, `tamu` and `tama`.
- bit SupportOverloading = true;
-
- // This builtin is valid for the given Log2LMULs.
- list<int> Log2LMUL = [0, 1, 2, 3, -1, -2, -3];
-
- // Manual code in clang codegen riscv_vector_builtin_cg.inc
- code ManualCodegen = [{}];
-
- // When emit the automatic clang codegen, it describes what types we have to use
- // to obtain the specific LLVM intrinsic. -1 means the return type, otherwise,
- // k >= 0 meaning the k-th operand (counting from zero) of the codegen'd
- // parameter of the unmasked version. k can't be the mask operand's position.
- list<int> IntrinsicTypes = [];
-
- // If these names are not empty, this is the ID of the LLVM intrinsic
- // we want to lower to.
- string IRName = NAME;
-
- // If HasMasked, this is the ID of the LLVM intrinsic we want to lower to.
- string MaskedIRName = NAME #"_mask";
-
- // Use clang_builtin_alias to save the number of builtins.
- bit HasBuiltinAlias = true;
-
- // Features required to enable for this builtin.
- list<string> RequiredFeatures = [];
-
- // Number of fields for Load/Store Segment instructions.
- int NF = 1;
-}
-
-// This is the code emitted in the header.
-class RVVHeader {
- code HeaderCode;
-}
+include "riscv_vector_common.td"
//===----------------------------------------------------------------------===//
// Basic classes with automatic codegen.
@@ -316,11 +90,21 @@ multiclass RVVSignedBinBuiltinSet
[["vv", "v", "vvv"],
["vx", "v", "vve"]]>;
+multiclass RVVSignedBinBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "v", "vvvu"],
+ ["vx", "v", "vveu"]]>;
+
multiclass RVVUnsignedBinBuiltinSet
: RVVOutOp1BuiltinSet<NAME, "csil",
[["vv", "Uv", "UvUvUv"],
["vx", "Uv", "UvUvUe"]]>;
+multiclass RVVUnsignedBinBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "Uv", "UvUvUvu"],
+ ["vx", "Uv", "UvUvUeu"]]>;
+
multiclass RVVIntBinBuiltinSet
: RVVSignedBinBuiltinSet,
RVVUnsignedBinBuiltinSet;
@@ -335,11 +119,21 @@ multiclass RVVSignedShiftBuiltinSet
[["vv", "v", "vvUv"],
["vx", "v", "vvz"]]>;
+multiclass RVVSignedShiftBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "v", "vvUvu"],
+ ["vx", "v", "vvzu"]]>;
+
multiclass RVVUnsignedShiftBuiltinSet
: RVVOutOp1BuiltinSet<NAME, "csil",
[["vv", "Uv", "UvUvUv"],
["vx", "Uv", "UvUvz"]]>;
+multiclass RVVUnsignedShiftBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "Uv", "UvUvUvu"],
+ ["vx", "Uv", "UvUvzu"]]>;
+
multiclass RVVShiftBuiltinSet
: RVVSignedShiftBuiltinSet,
RVVUnsignedShiftBuiltinSet;
@@ -349,10 +143,22 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
: RVVOutOp0Op1BuiltinSet<NAME, "csil",
[["wv", "v", "vwUv"],
["wx", "v", "vwz"]]>;
+
+ multiclass RVVSignedNShiftBuiltinSetRoundingMode
+ : RVVOutOp0Op1BuiltinSet<NAME, "csil",
+ [["wv", "v", "vwUvu"],
+ ["wx", "v", "vwzu"]]>;
+
multiclass RVVUnsignedNShiftBuiltinSet
: RVVOutOp0Op1BuiltinSet<NAME, "csil",
[["wv", "Uv", "UvUwUv"],
["wx", "Uv", "UvUwz"]]>;
+
+ multiclass RVVUnsignedNShiftBuiltinSetRoundingMode
+ : RVVOutOp0Op1BuiltinSet<NAME, "csil",
+ [["wv", "Uv", "UvUwUvu"],
+ ["wx", "Uv", "UvUwzu"]]>;
+
}
multiclass RVVCarryinBuiltinSet
@@ -405,6 +211,11 @@ let HasMaskedOffOperand = false in {
[["vv", "v", "vvvv"],
["vf", "v", "vvev"]]>;
}
+ multiclass RVVFloatingTerBuiltinSetRoundingMode {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "xfd",
+ [["vv", "v", "vvvvu"],
+ ["vf", "v", "vvevu"]]>;
+ }
}
let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
@@ -413,6 +224,11 @@ let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
[["vv", "w", "wwvv"],
["vf", "w", "wwev"]]>;
}
+ multiclass RVVFloatingWidenTerBuiltinSetRoundingMode {
+ defm "" : RVVOutOp1Op2BuiltinSet<NAME, "xf",
+ [["vv", "w", "wwvvu"],
+ ["vf", "w", "wwevu"]]>;
+ }
}
multiclass RVVFloatingBinBuiltinSet
@@ -420,10 +236,19 @@ multiclass RVVFloatingBinBuiltinSet
[["vv", "v", "vvv"],
["vf", "v", "vve"]]>;
+multiclass RVVFloatingBinBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "xfd",
+ [["vv", "v", "vvvu"],
+ ["vf", "v", "vveu"]]>;
+
multiclass RVVFloatingBinVFBuiltinSet
: RVVOutOp1BuiltinSet<NAME, "xfd",
[["vf", "v", "vve"]]>;
+multiclass RVVFloatingBinVFBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "xfd",
+ [["vf", "v", "vveu"]]>;
+
multiclass RVVFloatingMaskOutBuiltinSet
: RVVOp0Op1BuiltinSet<NAME, "xfd",
[["vv", "vm", "mvv"],
@@ -433,6 +258,21 @@ multiclass RVVFloatingMaskOutVFBuiltinSet
: RVVOp0Op1BuiltinSet<NAME, "fd",
[["vf", "vm", "mve"]]>;
+multiclass RVVConvBuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes> {
+let Name = intrinsic_name,
+ IRName = intrinsic_name,
+ MaskedIRName = intrinsic_name # "_mask",
+ IntrinsicTypes = [-1, 0] in {
+ foreach s_p = suffixes_prototypes in {
+ defvar suffix = s_p[0];
+ defvar prototype = s_p[1];
+ def : RVVBuiltin<suffix, prototype, type_range>;
+ }
+ }
+}
+
+
class RVVMaskBinBuiltin : RVVOutBuiltin<"m", "mmm", "c"> {
let Name = NAME # "_mm";
let HasMasked = false;
@@ -531,10 +371,18 @@ let HasMaskedOffOperand = true in {
defm "" : RVVOutOp0BuiltinSet<NAME, "xfd",
[["vs", "vSv", "SvvSv"]]>;
}
+ multiclass RVVFloatingReductionBuiltinRoundingMode {
+ defm "" : RVVOutOp0BuiltinSet<NAME, "xfd",
+ [["vs", "vSv", "SvvSvu"]]>;
+ }
multiclass RVVFloatingWidenReductionBuiltin {
defm "" : RVVOutOp0BuiltinSet<NAME, "xf",
[["vs", "vSw", "SwvSw"]]>;
}
+ multiclass RVVFloatingWidenReductionBuiltinRoundingMode {
+ defm "" : RVVOutOp0BuiltinSet<NAME, "xf",
+ [["vs", "vSw", "SwvSwu"]]>;
+ }
}
multiclass RVVIntReductionBuiltinSet
@@ -598,11 +446,21 @@ multiclass RVVFloatingWidenBinBuiltinSet
[["vv", "w", "wvv"],
["vf", "w", "wve"]]>;
+multiclass RVVFloatingWidenBinBuiltinSetRoundingMode
+ : RVVWidenBuiltinSet<NAME, "xf",
+ [["vv", "w", "wvvu"],
+ ["vf", "w", "wveu"]]>;
+
multiclass RVVFloatingWidenOp0BinBuiltinSet
: RVVWidenWOp0BuiltinSet<NAME # "_w", "xf",
[["wv", "w", "wwv"],
["wf", "w", "wwe"]]>;
+multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode
+ : RVVWidenWOp0BuiltinSet<NAME # "_w", "xf",
+ [["wv", "w", "wwvu"],
+ ["wf", "w", "wweu"]]>;
+
defvar TypeList = ["c","s","i","l","x","f","d"];
defvar EEWList = [["8", "(Log2EEW:3)"],
["16", "(Log2EEW:4)"],
@@ -918,269 +776,6 @@ class PVString<int nf, bit signed> {
!eq(nf, 8): !if(signed, "PvPvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUvPUv"));
}
-multiclass RVVUnitStridedSegLoad<string op> {
- foreach type = TypeList in {
- defvar eew = !cond(!eq(type, "c") : "8",
- !eq(type, "s") : "16",
- !eq(type, "i") : "32",
- !eq(type, "l") : "64",
- !eq(type, "x") : "16",
- !eq(type, "f") : "32",
- !eq(type, "d") : "64");
- foreach nf = NFList in {
- let Name = op # nf # "e" # eew # "_v",
- IRName = op # nf,
- MaskedIRName = op # nf # "_mask",
- NF = nf,
- ManualCodegen = [{
- {
- ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
- IntrinsicTypes = {ResultType, Ops.back()->getType()};
- SmallVector<llvm::Value*, 12> Operands;
-
- // Please refer to comment under 'defvar NFList' in this file
- if ((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
- (!IsMasked && PolicyAttrs & RVV_VTA))
- Operands.append(NF, llvm::PoisonValue::get(ResultType));
- else {
- if (IsMasked)
- Operands.append(Ops.begin() + NF + 1, Ops.begin() + 2 * NF + 1);
- else // Unmasked
- Operands.append(Ops.begin() + NF, Ops.begin() + 2 * NF);
- }
- unsigned PtrOperandIdx = IsMasked ?
- ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ? NF + 1 : 2 * NF + 1 :
- (PolicyAttrs & RVV_VTA) ? NF : 2 * NF;
- Value *PtrOperand = Ops[PtrOperandIdx];
- Value *VLOperand = Ops[PtrOperandIdx + 1];
- Operands.push_back(PtrOperand);
- if (IsMasked)
- Operands.push_back(Ops[NF]);
- Operands.push_back(VLOperand);
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- clang::CharUnits Align =
- CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
- llvm::Value *V;
- for (unsigned I = 0; I < NF; ++I) {
- llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {I});
- V = Builder.CreateStore(Val, Address(Ops[I], Val->getType(), Align));
- }
- return V;
- }
- }] in {
- defvar PV = PVString<nf, /*signed=*/true>.S;
- defvar PUV = PVString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0" # PV # "PCe", type>;
- if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0" # PUV # "PCUe", type>;
- }
- }
- }
- }
-}
-
-multiclass RVVUnitStridedSegLoadFF<string op> {
- foreach type = TypeList in {
- defvar eew = !cond(!eq(type, "c") : "8",
- !eq(type, "s") : "16",
- !eq(type, "i") : "32",
- !eq(type, "l") : "64",
- !eq(type, "x") : "16",
- !eq(type, "f") : "32",
- !eq(type, "d") : "64");
- foreach nf = NFList in {
- let Name = op # nf # "e" # eew # "ff_v",
- IRName = op # nf # "ff",
- MaskedIRName = op # nf # "ff_mask",
- NF = nf,
- ManualCodegen = [{
- {
- ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
- IntrinsicTypes = {ResultType, Ops.back()->getType()};
- SmallVector<llvm::Value*, 12> Operands;
-
- // Please refer to comment under 'defvar NFList' in this file
- if ((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
- (!IsMasked && PolicyAttrs & RVV_VTA))
- Operands.append(NF, llvm::PoisonValue::get(ResultType));
- else {
- if (IsMasked)
- Operands.append(Ops.begin() + NF + 1, Ops.begin() + 2 * NF + 1);
- else // Unmasked
- Operands.append(Ops.begin() + NF, Ops.begin() + 2 * NF);
- }
- unsigned PtrOperandIdx = IsMasked ?
- ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ? NF + 1 : 2 * NF + 1 :
- (PolicyAttrs & RVV_VTA) ? NF : 2 * NF;
- Value *PtrOperand = Ops[PtrOperandIdx];
- Value *NewVLOperand = Ops[PtrOperandIdx + 1];
- Value *VLOperand = Ops[PtrOperandIdx + 2];
- Operands.push_back(PtrOperand);
- if (IsMasked)
- Operands.push_back(Ops[NF]);
- Operands.push_back(VLOperand);
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- clang::CharUnits Align =
- CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
- for (unsigned I = 0; I < NF; ++I) {
- llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {I});
- Builder.CreateStore(Val, Address(Ops[I], Val->getType(), Align));
- }
- // Store new_vl.
- llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {NF});
- return Builder.CreateStore(Val, Address(NewVLOperand, Val->getType(), Align));
- }
- }] in {
- defvar PV = PVString<nf, /*signed=*/true>.S;
- defvar PUV = PVString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0" # PV # "PCe" # "Pz", type>;
- if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0" # PUV # "PCUe" # "Pz", type>;
- }
- }
- }
- }
-}
-
-multiclass RVVStridedSegLoad<string op> {
- foreach type = TypeList in {
- defvar eew = !cond(!eq(type, "c") : "8",
- !eq(type, "s") : "16",
- !eq(type, "i") : "32",
- !eq(type, "l") : "64",
- !eq(type, "x") : "16",
- !eq(type, "f") : "32",
- !eq(type, "d") : "64");
- foreach nf = NFList in {
- let Name = op # nf # "e" # eew # "_v",
- IRName = op # nf,
- MaskedIRName = op # nf # "_mask",
- NF = nf,
- ManualCodegen = [{
- {
- ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
- IntrinsicTypes = {ResultType, Ops.back()->getType()};
- SmallVector<llvm::Value*, 12> Operands;
-
- // Please refer to comment under 'defvar NFList' in this file
- if ((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
- (!IsMasked && PolicyAttrs & RVV_VTA))
- Operands.append(NF, llvm::PoisonValue::get(ResultType));
- else {
- if (IsMasked)
- Operands.append(Ops.begin() + NF + 1, Ops.begin() + 2 * NF + 1);
- else // Unmasked
- Operands.append(Ops.begin() + NF, Ops.begin() + 2 * NF);
- }
- unsigned PtrOperandIdx = IsMasked ?
- ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ? NF + 1 : 2 * NF + 1 :
- (PolicyAttrs & RVV_VTA) ? NF : 2 * NF;
- Value *PtrOperand = Ops[PtrOperandIdx];
- Value *StrideOperand = Ops[PtrOperandIdx + 1];
- Value *VLOperand = Ops[PtrOperandIdx + 2];
- Operands.push_back(PtrOperand);
- Operands.push_back(StrideOperand);
- if (IsMasked)
- Operands.push_back(Ops[NF]);
- Operands.push_back(VLOperand);
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- clang::CharUnits Align =
- CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
- llvm::Value *V;
- for (unsigned I = 0; I < NF; ++I) {
- llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {I});
- V = Builder.CreateStore(Val, Address(Ops[I], Val->getType(), Align));
- }
- return V;
- }
- }] in {
- defvar PV = PVString<nf, /*signed=*/true>.S;
- defvar PUV = PVString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0" # PV # "PCe" # "t", type>;
- if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0" # PUV # "PCUe" # "t", type>;
- }
- }
- }
- }
-}
-
-multiclass RVVIndexedSegLoad<string op> {
- foreach type = TypeList in {
- foreach eew_info = EEWList in {
- defvar eew = eew_info[0];
- defvar eew_type = eew_info[1];
- foreach nf = NFList in {
- let Name = op # nf # "ei" # eew # "_v",
- IRName = op # nf,
- MaskedIRName = op # nf # "_mask",
- NF = nf,
- ManualCodegen = [{
- {
- ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
- SmallVector<llvm::Value*, 12> Operands;
-
- // Please refer to comment under 'defvar NFList' in this file
- if ((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
- (!IsMasked && PolicyAttrs & RVV_VTA))
- Operands.append(NF, llvm::PoisonValue::get(ResultType));
- else {
- if (IsMasked)
- Operands.append(Ops.begin() + NF + 1, Ops.begin() + 2 * NF + 1);
- else // Unmasked
- Operands.append(Ops.begin() + NF, Ops.begin() + 2 * NF);
- }
- unsigned PtrOperandIdx = IsMasked ?
- ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ? NF + 1 : 2 * NF + 1 :
- (PolicyAttrs & RVV_VTA) ? NF : 2 * NF;
- Value *PtrOperand = Ops[PtrOperandIdx];
- Value *IndexOperand = Ops[PtrOperandIdx + 1];
- Value *VLOperand = Ops[PtrOperandIdx + 2];
- Operands.push_back(PtrOperand);
- Operands.push_back(IndexOperand);
- if (IsMasked)
- Operands.push_back(Ops[NF]);
- Operands.push_back(VLOperand);
- if (IsMasked)
- Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
- IntrinsicTypes = {ResultType, IndexOperand->getType(), Ops.back()->getType()};
-
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- clang::CharUnits Align =
- CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
- llvm::Value *V;
- for (unsigned I = 0; I < NF; ++I) {
- llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {I});
- V = Builder.CreateStore(Val, Address(Ops[I], Val->getType(), Align));
- }
- return V;
- }
- }] in {
- defvar PV = PVString<nf, /*signed=*/true>.S;
- defvar PUV = PVString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0" # PV # "PCe" # eew_type # "Uv", type>;
- if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0" # PUV # "PCUe" # eew_type # "Uv", type>;
- }
- }
- }
- }
- }
-}
-
class VString<int nf, bit signed> {
string S = !cond(!eq(nf, 2): !if(signed, "vv", "UvUv"),
!eq(nf, 3): !if(signed, "vvv", "UvUvUv"),
@@ -1191,139 +786,6 @@ class VString<int nf, bit signed> {
!eq(nf, 8): !if(signed, "vvvvvvvv", "UvUvUvUvUvUvUvUv"));
}
-multiclass RVVUnitStridedSegStore<string op> {
- foreach type = TypeList in {
- defvar eew = !cond(!eq(type, "c") : "8",
- !eq(type, "s") : "16",
- !eq(type, "i") : "32",
- !eq(type, "l") : "64",
- !eq(type, "x") : "16",
- !eq(type, "f") : "32",
- !eq(type, "d") : "64");
- foreach nf = NFList in {
- let Name = op # nf # "e" # eew # "_v",
- IRName = op # nf,
- MaskedIRName = op # nf # "_mask",
- NF = nf,
- HasMaskedOffOperand = false,
- MaskedPolicyScheme = NonePolicy,
- ManualCodegen = [{
- {
- if (IsMasked) {
- // Builtin: (mask, ptr, val0, val1, ..., vl)
- // Intrinsic: (val0, val1, ..., ptr, mask, vl)
- std::rotate(Ops.begin(), Ops.begin() + 2, Ops.end() - 1);
- std::swap(Ops[NF], Ops[NF + 1]);
- IntrinsicTypes = {Ops[0]->getType(), Ops[NF + 2]->getType()};
- assert(Ops.size() == NF + 3);
- } else {
- // Builtin: (ptr, val0, val1, ..., vl)
- // Intrinsic: (val0, val1, ..., ptr, vl)
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- IntrinsicTypes = {Ops[0]->getType(), Ops[NF + 1]->getType()};
- assert(Ops.size() == NF + 2);
- }
- }
- }] in {
- defvar V = VString<nf, /*signed=*/true>.S;
- defvar UV = VString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0Pe" # V, type>;
- if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0PUe" # UV, type>;
- }
- }
- }
- }
-}
-
-multiclass RVVStridedSegStore<string op> {
- foreach type = TypeList in {
- defvar eew = !cond(!eq(type, "c") : "8",
- !eq(type, "s") : "16",
- !eq(type, "i") : "32",
- !eq(type, "l") : "64",
- !eq(type, "x") : "16",
- !eq(type, "f") : "32",
- !eq(type, "d") : "64");
- foreach nf = NFList in {
- let Name = op # nf # "e" # eew # "_v",
- IRName = op # nf,
- MaskedIRName = op # nf # "_mask",
- NF = nf,
- HasMaskedOffOperand = false,
- MaskedPolicyScheme = NonePolicy,
- ManualCodegen = [{
- {
- if (IsMasked) {
- // Builtin: (mask, ptr, stride, val0, val1, ..., vl).
- // Intrinsic: (val0, val1, ..., ptr, stride, mask, vl)
- std::rotate(Ops.begin(), Ops.begin() + 3, Ops.end() - 1);
- std::rotate(Ops.begin() + NF, Ops.begin() + NF + 1, Ops.begin() + NF + 3);
- assert(Ops.size() == NF + 4);
- } else {
- // Builtin: (ptr, stride, val0, val1, ..., vl).
- // Intrinsic: (val0, val1, ..., ptr, stride, vl)
- std::rotate(Ops.begin(), Ops.begin() + 2, Ops.end() - 1);
- assert(Ops.size() == NF + 3);
- }
- IntrinsicTypes = {Ops[0]->getType(), Ops[NF + 1]->getType()};
- }
- }] in {
- defvar V = VString<nf, /*signed=*/true>.S;
- defvar UV = VString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0Pet" # V, type>;
- if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0PUet" # UV, type>;
- }
- }
- }
- }
-}
-
-multiclass RVVIndexedSegStore<string op> {
- foreach type = TypeList in {
- foreach eew_info = EEWList in {
- defvar eew = eew_info[0];
- defvar eew_type = eew_info[1];
- foreach nf = NFList in {
- let Name = op # nf # "ei" # eew # "_v",
- IRName = op # nf,
- MaskedIRName = op # nf # "_mask",
- NF = nf,
- HasMaskedOffOperand = false,
- MaskedPolicyScheme = NonePolicy,
- ManualCodegen = [{
- {
- if (IsMasked) {
- // Builtin: (mask, ptr, index, val0, val1, ..., vl)
- // Intrinsic: (val0, val1, ..., ptr, index, mask, vl)
- std::rotate(Ops.begin(), Ops.begin() + 3, Ops.end() - 1);
- std::rotate(Ops.begin() + NF, Ops.begin() + NF + 1, Ops.begin() + NF + 3);
- IntrinsicTypes = {Ops[0]->getType(),
- Ops[NF + 1]->getType(), Ops[NF + 3]->getType()};
- assert(Ops.size() == NF + 4);
- } else {
- // Builtin: (ptr, index, val0, val1, ..., vl)
- // Intrinsic: (val0, val1, ..., ptr, index, vl)
- std::rotate(Ops.begin(), Ops.begin() + 2, Ops.end() - 1);
- IntrinsicTypes = {Ops[0]->getType(),
- Ops[NF + 1]->getType(), Ops[NF + 2]->getType()};
- assert(Ops.size() == NF + 3);
- }
- }
- }] in {
- defvar V = VString<nf, /*signed=*/true>.S;
- defvar UV = VString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0Pe" # eew_type # "Uv" # V, type>;
- if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0PUe" # eew_type # "Uv" # UV, type>;
- }
- }
- }
- }
- }
-}
-
multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> {
let Name = NAME,
IRName = IR,
@@ -1504,7 +966,7 @@ multiclass RVVPseudoVNCVTBuiltin<string IR, string MName, string type_range,
if (PolicyAttrs & RVV_VTA)
Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
}
- Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(Ops.back()->getType()));
+ Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(Ops.back()->getType()));
if (IsMasked) {
Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
// maskedoff, op1, xlen, mask, vl
@@ -1721,20 +1183,429 @@ defm vle16ff: RVVVLEFFBuiltin<["s","x"]>;
defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>;
defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
+multiclass RVVUnitStridedSegLoadTuple<string op> {
+ foreach type = TypeList in {
+ defvar eew = !cond(!eq(type, "c") : "8",
+ !eq(type, "s") : "16",
+ !eq(type, "i") : "32",
+ !eq(type, "l") : "64",
+ !eq(type, "x") : "16",
+ !eq(type, "f") : "32",
+ !eq(type, "d") : "64");
+ foreach nf = NFList in {
+ let Name = op # nf # "e" # eew # "_v",
+ IRName = op # nf,
+ MaskedIRName = op # nf # "_mask",
+ NF = nf,
+ ManualCodegen = [{
+ {
+ llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
+ IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
+ SmallVector<llvm::Value*, 12> Operands;
+
+ bool NoPassthru =
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
+ (!IsMasked && (PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
+
+ if (NoPassthru) { // Push poison into passthru
+ Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ } else { // Push intrinsics operands into passthru
+ llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
+ for (unsigned I = 0; I < NF; ++I)
+ Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ }
+
+ Operands.push_back(Ops[Offset]); // Ptr
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 1]); // VL
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+
+ llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+ if (ReturnValue.isNull())
+ return LoadValue;
+ else
+ return Builder.CreateStore(LoadValue, ReturnValue.getValue());
+ }
+ }] in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", T # "vPCe", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<T # "Uv", T # "UvPCUe", type>;
+ }
+ }
+ }
+ }
+}
+
+multiclass RVVUnitStridedSegStoreTuple<string op> {
+ foreach type = TypeList in {
+ defvar eew = !cond(!eq(type, "c") : "8",
+ !eq(type, "s") : "16",
+ !eq(type, "i") : "32",
+ !eq(type, "l") : "64",
+ !eq(type, "x") : "16",
+ !eq(type, "f") : "32",
+ !eq(type, "d") : "64");
+ foreach nf = NFList in {
+ let Name = op # nf # "e" # eew # "_v",
+ IRName = op # nf,
+ MaskedIRName = op # nf # "_mask",
+ NF = nf,
+ HasMaskedOffOperand = false,
+ ManualCodegen = [{
+ {
+ // Masked
+ // Builtin: (mask, ptr, v_tuple, vl)
+ // Intrinsic: (val0, val1, ..., ptr, mask, vl)
+ // Unmasked
+ // Builtin: (ptr, v_tuple, vl)
+ // Intrinsic: (val0, val1, ..., ptr, vl)
+ unsigned Offset = IsMasked ? 1 : 0;
+ llvm::Value *VTupleOperand = Ops[Offset + 1];
+
+ SmallVector<llvm::Value*, 12> Operands;
+ for (unsigned I = 0; I < NF; ++I) {
+ llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
+ Operands.push_back(V);
+ }
+ Operands.push_back(Ops[Offset]); // Ptr
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 2]); // VL
+
+ IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+ }] in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", "0Pe" # T # "v", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<T # "Uv", "0PUe" # T # "Uv", type>;
+ }
+ }
+ }
+ }
+}
+
+multiclass RVVUnitStridedSegLoadFFTuple<string op> {
+ foreach type = TypeList in {
+ defvar eew = !cond(!eq(type, "c") : "8",
+ !eq(type, "s") : "16",
+ !eq(type, "i") : "32",
+ !eq(type, "l") : "64",
+ !eq(type, "x") : "16",
+ !eq(type, "f") : "32",
+ !eq(type, "d") : "64");
+ foreach nf = NFList in {
+ let Name = op # nf # "e" # eew # "ff_v",
+ IRName = op # nf # "ff",
+ MaskedIRName = op # nf # "ff_mask",
+ NF = nf,
+ ManualCodegen = [{
+ {
+ llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
+ IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
+ SmallVector<llvm::Value*, 12> Operands;
+
+ bool NoPassthru =
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
+ (!IsMasked && (PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
+
+ if (NoPassthru) { // Push poison into passthru
+ Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ } else { // Push intrinsics operands into passthru
+ llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
+ for (unsigned I = 0; I < NF; ++I)
+ Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ }
+
+ Operands.push_back(Ops[Offset]); // Ptr
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 2]); // vl
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+
+ llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+ // Get alignment from the new vl operand
+ clang::CharUnits Align =
+ CGM.getNaturalPointeeTypeAlignment(E->getArg(Offset + 1)->getType());
+
+ llvm::Value *ReturnTuple = llvm::PoisonValue::get(ResultType);
+ for (unsigned I = 0; I < NF; ++I) {
+ llvm::Value *V = Builder.CreateExtractValue(LoadValue, {I});
+ ReturnTuple = Builder.CreateInsertValue(ReturnTuple, V, {I});
+ }
+
+ // Store new_vl
+ llvm::Value *V = Builder.CreateExtractValue(LoadValue, {NF});
+ Builder.CreateStore(V, Address(Ops[Offset + 1], V->getType(), Align));
+
+ if (ReturnValue.isNull())
+ return ReturnTuple;
+ else
+ return Builder.CreateStore(ReturnTuple, ReturnValue.getValue());
+ }
+ }] in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", T # "vPCePz", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<T # "Uv", T # "UvPCUePz", type>;
+ }
+ }
+ }
+ }
+}
+
+multiclass RVVStridedSegLoadTuple<string op> {
+ foreach type = TypeList in {
+ defvar eew = !cond(!eq(type, "c") : "8",
+ !eq(type, "s") : "16",
+ !eq(type, "i") : "32",
+ !eq(type, "l") : "64",
+ !eq(type, "x") : "16",
+ !eq(type, "f") : "32",
+ !eq(type, "d") : "64");
+ foreach nf = NFList in {
+ let Name = op # nf # "e" # eew # "_v",
+ IRName = op # nf,
+ MaskedIRName = op # nf # "_mask",
+ NF = nf,
+ ManualCodegen = [{
+ {
+ llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
+ IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
+ SmallVector<llvm::Value*, 12> Operands;
+
+ bool NoPassthru =
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
+ (!IsMasked && (PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
+
+ if (NoPassthru) { // Push poison into passthru
+ Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ } else { // Push intrinsics operands into passthru
+ llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
+ for (unsigned I = 0; I < NF; ++I)
+ Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ }
+
+ Operands.push_back(Ops[Offset]); // Ptr
+ Operands.push_back(Ops[Offset + 1]); // Stride
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 2]); // VL
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+
+ if (ReturnValue.isNull())
+ return LoadValue;
+ else
+ return Builder.CreateStore(LoadValue, ReturnValue.getValue());
+ }
+ }] in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", T # "vPCet", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<T # "Uv", T # "UvPCUet", type>;
+ }
+ }
+ }
+ }
+}
+
+multiclass RVVStridedSegStoreTuple<string op> {
+ foreach type = TypeList in {
+ defvar eew = !cond(!eq(type, "c") : "8",
+ !eq(type, "s") : "16",
+ !eq(type, "i") : "32",
+ !eq(type, "l") : "64",
+ !eq(type, "x") : "16",
+ !eq(type, "f") : "32",
+ !eq(type, "d") : "64");
+ foreach nf = NFList in {
+ let Name = op # nf # "e" # eew # "_v",
+ IRName = op # nf,
+ MaskedIRName = op # nf # "_mask",
+ NF = nf,
+ HasMaskedOffOperand = false,
+ MaskedPolicyScheme = NonePolicy,
+ ManualCodegen = [{
+ {
+ // Masked
+ // Builtin: (mask, ptr, stride, v_tuple, vl)
+ // Intrinsic: (val0, val1, ..., ptr, stride, mask, vl)
+ // Unmasked
+ // Builtin: (ptr, stride, v_tuple, vl)
+ // Intrinsic: (val0, val1, ..., ptr, stride, vl)
+ unsigned Offset = IsMasked ? 1 : 0;
+ llvm::Value *VTupleOperand = Ops[Offset + 2];
+
+ SmallVector<llvm::Value*, 12> Operands;
+ for (unsigned I = 0; I < NF; ++I) {
+ llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
+ Operands.push_back(V);
+ }
+ Operands.push_back(Ops[Offset]); // Ptr
+ Operands.push_back(Ops[Offset + 1]); // Stride
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 3]); // VL
+
+ IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+ }] in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", "0Pet" # T # "v", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<T # "Uv", "0PUet" # T # "Uv", type>;
+ }
+ }
+ }
+ }
+}
+
+multiclass RVVIndexedSegLoadTuple<string op> {
+ foreach type = TypeList in {
+ foreach eew_info = EEWList in {
+ defvar eew = eew_info[0];
+ defvar eew_type = eew_info[1];
+ foreach nf = NFList in {
+ let Name = op # nf # "ei" # eew # "_v",
+ IRName = op # nf,
+ MaskedIRName = op # nf # "_mask",
+ NF = nf,
+ ManualCodegen = [{
+ {
+ llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
+ IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
+ SmallVector<llvm::Value*, 12> Operands;
+
+ bool NoPassthru =
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
+ (!IsMasked && (PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
+
+ if (NoPassthru) { // Push poison into passthru
+ Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ } else { // Push intrinsics operands into passthru
+ llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
+ for (unsigned I = 0; I < NF; ++I)
+ Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ }
+
+ Operands.push_back(Ops[Offset]); // Ptr
+ Operands.push_back(Ops[Offset + 1]); // Idx
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 2]); // VL
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ElementVectorType, Ops[Offset + 1]->getType(),
+ Ops.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+
+ if (ReturnValue.isNull())
+ return LoadValue;
+ else
+ return Builder.CreateStore(LoadValue, ReturnValue.getValue());
+ }
+ }] in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", T # "vPCe" # eew_type # "Uv", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<T # "Uv", T # "UvPCUe" # eew_type # "Uv", type>;
+ }
+ }
+ }
+ }
+ }
+}
+
+multiclass RVVIndexedSegStoreTuple<string op> {
+ foreach type = TypeList in {
+ foreach eew_info = EEWList in {
+ defvar eew = eew_info[0];
+ defvar eew_type = eew_info[1];
+ foreach nf = NFList in {
+ let Name = op # nf # "ei" # eew # "_v",
+ IRName = op # nf,
+ MaskedIRName = op # nf # "_mask",
+ NF = nf,
+ HasMaskedOffOperand = false,
+ MaskedPolicyScheme = NonePolicy,
+ ManualCodegen = [{
+ {
+ // Masked
+ // Builtin: (mask, ptr, index, v_tuple, vl)
+ // Intrinsic: (val0, val1, ..., ptr, index, mask, vl)
+ // Unmasked
+ // Builtin: (ptr, index, v_tuple, vl)
+ // Intrinsic: (val0, val1, ..., ptr, index, vl)
+ unsigned Offset = IsMasked ? 1 : 0;
+ llvm::Value *VTupleOperand = Ops[Offset + 2];
+
+ SmallVector<llvm::Value*, 12> Operands;
+ for (unsigned I = 0; I < NF; ++I) {
+ llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
+ Operands.push_back(V);
+ }
+ Operands.push_back(Ops[Offset]); // Ptr
+ Operands.push_back(Ops[Offset + 1]); // Idx
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 3]); // VL
+
+ IntrinsicTypes = {Operands[0]->getType(), Ops[Offset + 1]->getType(),
+ Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+ }] in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", "0Pe" # eew_type # "Uv" # T # "v", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<T # "Uv", "0PUe" # eew_type # "Uv" # T # "Uv", type>;
+ }
+ }
+ }
+ }
+ }
+}
+
// 7.8 Vector Load/Store Segment Instructions
-let UnMaskedPolicyScheme = HasPassthruOperand in {
-defm : RVVUnitStridedSegLoad<"vlseg">;
-defm : RVVUnitStridedSegLoadFF<"vlseg">;
-defm : RVVStridedSegLoad<"vlsseg">;
-defm : RVVIndexedSegLoad<"vluxseg">;
-defm : RVVIndexedSegLoad<"vloxseg">;
+let UnMaskedPolicyScheme = HasPassthruOperand,
+ IsTuple = true in {
+ defm : RVVUnitStridedSegLoadTuple<"vlseg">;
+ defm : RVVUnitStridedSegLoadFFTuple<"vlseg">;
+ defm : RVVStridedSegLoadTuple<"vlsseg">;
+ defm : RVVIndexedSegLoadTuple<"vluxseg">;
+ defm : RVVIndexedSegLoadTuple<"vloxseg">;
}
+
let UnMaskedPolicyScheme = NonePolicy,
- MaskedPolicyScheme = NonePolicy in {
-defm : RVVUnitStridedSegStore<"vsseg">;
-defm : RVVStridedSegStore<"vssseg">;
-defm : RVVIndexedSegStore<"vsuxseg">;
-defm : RVVIndexedSegStore<"vsoxseg">;
+ MaskedPolicyScheme = NonePolicy,
+ IsTuple = true in {
+defm : RVVUnitStridedSegStoreTuple<"vsseg">;
+defm : RVVStridedSegStoreTuple<"vssseg">;
+defm : RVVIndexedSegStoreTuple<"vsuxseg">;
+defm : RVVIndexedSegStoreTuple<"vsoxseg">;
}
// 12. Vector Integer Arithmetic Instructions
@@ -1842,13 +1713,11 @@ defm vmax : RVVSignedBinBuiltinSet;
// 12.10. Vector Single-Width Integer Multiply Instructions
defm vmul : RVVIntBinBuiltinSet;
-let RequiredFeatures = ["FullMultiply"] in {
defm vmulh : RVVSignedBinBuiltinSet;
defm vmulhu : RVVUnsignedBinBuiltinSet;
defm vmulhsu : RVVOutOp1BuiltinSet<"vmulhsu", "csil",
[["vv", "v", "vvUv"],
["vx", "v", "vvUe"]]>;
-}
// 12.11. Vector Integer Divide Instructions
defm vdivu : RVVUnsignedBinBuiltinSet;
@@ -1928,6 +1797,17 @@ let HasMasked = false,
}
// 13. Vector Fixed-Point Arithmetic Instructions
+let HeaderCode =
+[{
+enum __RISCV_VXRM {
+ __RISCV_VXRM_RNU = 0,
+ __RISCV_VXRM_RNE = 1,
+ __RISCV_VXRM_RDN = 2,
+ __RISCV_VXRM_ROD = 3,
+};
+}] in
+def vxrm_enum : RVVHeader;
+
// 13.1. Vector Single-Width Saturating Add and Subtract
let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vsaddu : RVVUnsignedBinBuiltinSet;
@@ -1935,80 +1815,434 @@ defm vsadd : RVVSignedBinBuiltinSet;
defm vssubu : RVVUnsignedBinBuiltinSet;
defm vssub : RVVSignedBinBuiltinSet;
-// 13.2. Vector Single-Width Averaging Add and Subtract
-defm vaaddu : RVVUnsignedBinBuiltinSet;
-defm vaadd : RVVSignedBinBuiltinSet;
-defm vasubu : RVVUnsignedBinBuiltinSet;
-defm vasub : RVVSignedBinBuiltinSet;
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
-// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
-let RequiredFeatures = ["FullMultiply"] in {
-defm vsmul : RVVSignedBinBuiltinSet;
-}
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
-// 13.4. Vector Single-Width Scaling Shift Instructions
-defm vssrl : RVVUnsignedShiftBuiltinSet;
-defm vssra : RVVSignedShiftBuiltinSet;
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
-// 13.5. Vector Narrowing Fixed-Point Clip Instructions
-defm vnclipu : RVVUnsignedNShiftBuiltinSet;
-defm vnclip : RVVSignedNShiftBuiltinSet;
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ Operands.push_back(Ops[Offset + 2]); // vxrm
+ Operands.push_back(Ops[Offset + 3]); // vl
+
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), Ops.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ // 13.2. Vector Single-Width Averaging Add and Subtract
+ defm vaaddu : RVVUnsignedBinBuiltinSetRoundingMode;
+ defm vaadd : RVVSignedBinBuiltinSetRoundingMode;
+ defm vasubu : RVVUnsignedBinBuiltinSetRoundingMode;
+ defm vasub : RVVSignedBinBuiltinSetRoundingMode;
+
+ // 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+ defm vsmul : RVVSignedBinBuiltinSetRoundingMode;
+
+ // 13.4. Vector Single-Width Scaling Shift Instructions
+ defm vssrl : RVVUnsignedShiftBuiltinSetRoundingMode;
+ defm vssra : RVVSignedShiftBuiltinSetRoundingMode;
+}
+
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ Operands.push_back(Ops[Offset + 2]); // vxrm
+ Operands.push_back(Ops[Offset + 3]); // vl
+
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
+ Ops.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ // 13.5. Vector Narrowing Fixed-Point Clip Instructions
+ defm vnclipu : RVVUnsignedNShiftBuiltinSetRoundingMode;
+ defm vnclip : RVVSignedNShiftBuiltinSetRoundingMode;
+}
+}
// 14. Vector Floating-Point Instructions
-// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm vfadd : RVVFloatingBinBuiltinSet;
-defm vfsub : RVVFloatingBinBuiltinSet;
-defm vfrsub : RVVFloatingBinVFBuiltinSet;
+let HeaderCode =
+[{
+enum __RISCV_FRM {
+ __RISCV_FRM_RNE = 0,
+ __RISCV_FRM_RTZ = 1,
+ __RISCV_FRM_RDN = 2,
+ __RISCV_FRM_RUP = 3,
+ __RISCV_FRM_RMM = 4,
+};
+}] in def frm_enum : RVVHeader;
+
+let UnMaskedPolicyScheme = HasPassthruOperand in {
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp = IsMasked ?
+ (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+ (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 2]); // frm
+ Operands.push_back(Ops[Offset + 3]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ }
-// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
-// Widening FP add/subtract, 2*SEW = SEW +/- SEW
-defm vfwadd : RVVFloatingWidenBinBuiltinSet;
-defm vfwsub : RVVFloatingWidenBinBuiltinSet;
-// Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
-defm vfwadd : RVVFloatingWidenOp0BinBuiltinSet;
-defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet;
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
-defm vfmul : RVVFloatingBinBuiltinSet;
-defm vfdiv : RVVFloatingBinBuiltinSet;
-defm vfrdiv : RVVFloatingBinVFBuiltinSet;
+ IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(),
+ Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = true in {
+ // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
+ defm vfadd : RVVFloatingBinBuiltinSetRoundingMode;
+ defm vfsub : RVVFloatingBinBuiltinSetRoundingMode;
+ defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode;
+
+ // 14.3. Vector Widening Floating-Point Add/Subtract Instructions
+ // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
+ defm vfwadd : RVVFloatingWidenOp0BinBuiltinSetRoundingMode;
+ defm vfwsub : RVVFloatingWidenOp0BinBuiltinSetRoundingMode;
+
+ // 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
+ defm vfmul : RVVFloatingBinBuiltinSetRoundingMode;
+ defm vfdiv : RVVFloatingBinBuiltinSetRoundingMode;
+ defm vfrdiv : RVVFloatingBinVFBuiltinSetRoundingMode;
+ }
+ // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
+ defm vfadd : RVVFloatingBinBuiltinSet;
+ defm vfsub : RVVFloatingBinBuiltinSet;
+ defm vfrsub : RVVFloatingBinVFBuiltinSet;
+
+ // 14.3. Vector Widening Floating-Point Add/Subtract Instructions
+ // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
+ defm vfwadd : RVVFloatingWidenOp0BinBuiltinSet;
+ defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet;
+
+ // 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
+ defm vfmul : RVVFloatingBinBuiltinSet;
+ defm vfdiv : RVVFloatingBinBuiltinSet;
+ defm vfrdiv : RVVFloatingBinVFBuiltinSet;
+}
+
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp = IsMasked ?
+ (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+ (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 2]); // frm
+ Operands.push_back(Ops[Offset + 3]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ }
-// 14.5. Vector Widening Floating-Point Multiply
-let Log2LMUL = [-2, -1, 0, 1, 2] in {
- defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "xf",
- [["vv", "w", "wvv"],
- ["vf", "w", "wve"]]>;
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
+ Ops.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = true in {
+ // 14.3. Vector Widening Floating-Point Add/Subtract Instructions
+ // Widening FP add/subtract, 2*SEW = SEW +/- SEW
+ defm vfwadd : RVVFloatingWidenBinBuiltinSetRoundingMode;
+ defm vfwsub : RVVFloatingWidenBinBuiltinSetRoundingMode;
+
+ // 14.5. Vector Widening Floating-Point Multiply
+ let Log2LMUL = [-2, -1, 0, 1, 2] in {
+ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "xf",
+ [["vv", "w", "wvvu"],
+ ["vf", "w", "wveu"]]>;
+ }
+ }
+ // 14.3. Vector Widening Floating-Point Add/Subtract Instructions
+ // Widening FP add/subtract, 2*SEW = SEW +/- SEW
+ defm vfwadd : RVVFloatingWidenBinBuiltinSet;
+ defm vfwsub : RVVFloatingWidenBinBuiltinSet;
+
+ // 14.5. Vector Widening Floating-Point Multiply
+ let Log2LMUL = [-2, -1, 0, 1, 2] in {
+ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "xf",
+ [["vv", "w", "wvv"],
+ ["vf", "w", "wve"]]>;
+ }
}
}
-// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+
let UnMaskedPolicyScheme = HasPolicyOperand in {
-defm vfmacc : RVVFloatingTerBuiltinSet;
-defm vfnmacc : RVVFloatingTerBuiltinSet;
-defm vfmsac : RVVFloatingTerBuiltinSet;
-defm vfnmsac : RVVFloatingTerBuiltinSet;
-defm vfmadd : RVVFloatingTerBuiltinSet;
-defm vfnmadd : RVVFloatingTerBuiltinSet;
-defm vfmsub : RVVFloatingTerBuiltinSet;
-defm vfnmsub : RVVFloatingTerBuiltinSet;
-
-// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
-defm vfwmacc : RVVFloatingWidenTerBuiltinSet;
-defm vfwnmacc : RVVFloatingWidenTerBuiltinSet;
-defm vfwmsac : RVVFloatingWidenTerBuiltinSet;
-defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
-}
-
-// 14.8. Vector Floating-Point Square-Root Instruction
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
+
+ unsigned Offset = IsMasked ? 2 : 1;
+
+ Operands.push_back(Ops[IsMasked ? 1 : 0]); // passthrough
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 2]); // frm
+ Operands.push_back(Ops[Offset + 3]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ }
+
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
+ Operands.back()->getType()};
+
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = 1 in {
+ // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+ defm vfmacc : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfnmacc : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfmsac : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfnmsac : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfmadd : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfnmadd : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfmsub : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfnmsub : RVVFloatingTerBuiltinSetRoundingMode;
+ }
+ // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+ defm vfmacc : RVVFloatingTerBuiltinSet;
+ defm vfnmacc : RVVFloatingTerBuiltinSet;
+ defm vfmsac : RVVFloatingTerBuiltinSet;
+ defm vfnmsac : RVVFloatingTerBuiltinSet;
+ defm vfmadd : RVVFloatingTerBuiltinSet;
+ defm vfnmadd : RVVFloatingTerBuiltinSet;
+ defm vfmsub : RVVFloatingTerBuiltinSet;
+ defm vfnmsub : RVVFloatingTerBuiltinSet;
+}
+
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
+
+ unsigned Offset = IsMasked ? 2 : 1;
+
+ Operands.push_back(Ops[IsMasked ? 1 : 0]); // passthrough
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 2]); // frm
+ Operands.push_back(Ops[Offset + 3]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ }
+
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
+ Operands.back()->getType()};
+
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = 1 in {
+ // 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
+ defm vfwmacc : RVVFloatingWidenTerBuiltinSetRoundingMode;
+ defm vfwnmacc : RVVFloatingWidenTerBuiltinSetRoundingMode;
+ defm vfwmsac : RVVFloatingWidenTerBuiltinSetRoundingMode;
+ defm vfwnmsac : RVVFloatingWidenTerBuiltinSetRoundingMode;
+ }
+ // 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
+ defm vfwmacc : RVVFloatingWidenTerBuiltinSet;
+ defm vfwnmacc : RVVFloatingWidenTerBuiltinSet;
+ defm vfwmsac : RVVFloatingWidenTerBuiltinSet;
+ defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
+}
+
+}
+
let UnMaskedPolicyScheme = HasPassthruOperand in {
-def vfsqrt : RVVFloatingUnaryVVBuiltin;
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, round_mode, vl)
+ // Masked: (passthru, op0, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp = IsMasked ?
+ (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4) :
+ (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
+
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 1]); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 1]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 1]); // vl
+ }
+
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = 1 in {
+ // 14.8. Vector Floating-Point Square-Root Instruction
+ defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "xfd", [["v", "v", "vvu"]]>;
+
+ // 14.10. Vector Floating-Point Reciprocal Estimate Instruction
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "xfd", [["v", "v", "vvu"]]>;
+ }
+ // 14.8. Vector Floating-Point Square-Root Instruction
+ defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "xfd", [["v", "v", "vv"]]>;
+
+ // 14.10. Vector Floating-Point Reciprocal Estimate Instruction
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "xfd", [["v", "v", "vv"]]>;
+}
// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
def vfrsqrt7 : RVVFloatingUnaryVVBuiltin;
-// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
-def vfrec7 : RVVFloatingUnaryVVBuiltin;
-
// 14.11. Vector Floating-Point MIN/MAX Instructions
defm vfmin : RVVFloatingBinBuiltinSet;
defm vfmax : RVVFloatingBinBuiltinSet;
@@ -2064,17 +2298,11 @@ let HasMasked = false,
// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
let UnMaskedPolicyScheme = HasPassthruOperand in {
-def vfcvt_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_xu">;
-def vfcvt_x_f_v : RVVConvToSignedBuiltin<"vfcvt_x">;
def vfcvt_rtz_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_rtz_xu">;
def vfcvt_rtz_x_f_v : RVVConvToSignedBuiltin<"vfcvt_rtz_x">;
-def vfcvt_f_xu_v : RVVConvBuiltin<"Fv", "FvUv", "sil", "vfcvt_f">;
-def vfcvt_f_x_v : RVVConvBuiltin<"Fv", "Fvv", "sil", "vfcvt_f">;
// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
- def vfwcvt_xu_f_v : RVVConvToWidenUnsignedBuiltin<"vfwcvt_xu">;
- def vfwcvt_x_f_v : RVVConvToWidenSignedBuiltin<"vfwcvt_x">;
def vfwcvt_rtz_xu_f_v : RVVConvToWidenUnsignedBuiltin<"vfwcvt_rtz_xu">;
def vfwcvt_rtz_x_f_v : RVVConvToWidenSignedBuiltin<"vfwcvt_rtz_x">;
def vfwcvt_f_xu_v : RVVConvBuiltin<"Fw", "FwUv", "csi", "vfwcvt_f">;
@@ -2084,15 +2312,139 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
- def vfncvt_xu_f_w : RVVConvToNarrowingUnsignedBuiltin<"vfncvt_xu">;
- def vfncvt_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_x">;
def vfncvt_rtz_xu_f_w : RVVConvToNarrowingUnsignedBuiltin<"vfncvt_rtz_xu">;
def vfncvt_rtz_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_rtz_x">;
- def vfncvt_f_xu_w : RVVConvBuiltin<"Fv", "FvUw", "csi", "vfncvt_f">;
- def vfncvt_f_x_w : RVVConvBuiltin<"Fv", "Fvw", "csi", "vfncvt_f">;
- def vfncvt_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_f">;
def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_rod_f">;
}
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, frm, vl)
+ // Masked: (passthru, op0, mask, frm, vl, policy)
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp = IsMasked ?
+ (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4) :
+ (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
+
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 1]); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 1]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 1]); // vl
+ }
+
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
+ Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = 1 in {
+ // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
+ let OverloadedName = "vfcvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfcvt_x_f_v", "xfd", [["Iv", "Ivvu"]]>;
+ let OverloadedName = "vfcvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfcvt_xu_f_v", "xfd", [["Uv", "Uvvu"]]>;
+ let OverloadedName = "vfcvt_f" in {
+ defm :
+ RVVConvBuiltinSet<"vfcvt_f_x_v", "sil", [["Fv", "Fvvu"]]>;
+ defm :
+ RVVConvBuiltinSet<"vfcvt_f_xu_v", "sil", [["Fv", "FvUvu"]]>;
+ }
+
+ // 14.18. Widening Floating-Point/Integer Type-Convert Instructions
+ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ let OverloadedName = "vfwcvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfwcvt_x_f_v", "xf", [["Iw", "Iwvu"]]>;
+ let OverloadedName = "vfwcvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfwcvt_xu_f_v", "xf", [["Uw", "Uwvu"]]>;
+ }
+ // 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
+ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ let OverloadedName = "vfncvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_x_f_w", "csi", [["Iv", "IvFwu"]]>;
+ let OverloadedName = "vfncvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_xu_f_w", "csi", [["Uv", "UvFwu"]]>;
+ let OverloadedName = "vfncvt_f" in {
+ defm :
+ RVVConvBuiltinSet<"vfncvt_f_x_w", "csi", [["Fv", "Fvwu"]]>;
+ defm :
+ RVVConvBuiltinSet<"vfncvt_f_xu_w", "csi", [["Fv", "FvUwu"]]>;
+ }
+ let OverloadedName = "vfncvt_f" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_f_f_w", "xf", [["v", "vwu"]]>;
+ }
+ }
+
+ // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
+ let OverloadedName = "vfcvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfcvt_x_f_v", "xfd", [["Iv", "Ivv"]]>;
+ let OverloadedName = "vfcvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfcvt_xu_f_v", "xfd", [["Uv", "Uvv"]]>;
+ let OverloadedName = "vfcvt_f" in {
+ defm :
+ RVVConvBuiltinSet<"vfcvt_f_x_v", "sil", [["Fv", "Fvv"]]>;
+ defm :
+ RVVConvBuiltinSet<"vfcvt_f_xu_v", "sil", [["Fv", "FvUv"]]>;
+ }
+
+ // 14.18. Widening Floating-Point/Integer Type-Convert Instructions
+ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ let OverloadedName = "vfwcvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfwcvt_x_f_v", "xf", [["Iw", "Iwv"]]>;
+ let OverloadedName = "vfwcvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfwcvt_xu_f_v", "xf", [["Uw", "Uwv"]]>;
+ }
+ // 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
+ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ let OverloadedName = "vfncvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_x_f_w", "csi", [["Iv", "IvFw"]]>;
+ let OverloadedName = "vfncvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_xu_f_w", "csi", [["Uv", "UvFw"]]>;
+ let OverloadedName = "vfncvt_f" in {
+ defm :
+ RVVConvBuiltinSet<"vfncvt_f_x_w", "csi", [["Fv", "Fvw"]]>;
+ defm :
+ RVVConvBuiltinSet<"vfncvt_f_xu_w", "csi", [["Fv", "FvUw"]]>;
+ }
+ let OverloadedName = "vfncvt_f" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_f_f_w", "xf", [["v", "vw"]]>;
+ }
+}
}
// 15. Vector Reduction Operations
@@ -2121,12 +2473,65 @@ let HasMaskedOffOperand = true in {
// 15.3. Vector Single-Width Floating-Point Reduction Instructions
defm vfredmax : RVVFloatingReductionBuiltin;
defm vfredmin : RVVFloatingReductionBuiltin;
-defm vfredusum : RVVFloatingReductionBuiltin;
-defm vfredosum : RVVFloatingReductionBuiltin;
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp = IsMasked ?
+ (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+ (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 2]); // frm
+ Operands.push_back(Ops[Offset + 3]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ }
+
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
+ Ops.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = 1 in {
+ // 15.3. Vector Single-Width Floating-Point Reduction Instructions
+ defm vfredusum : RVVFloatingReductionBuiltinRoundingMode;
+ defm vfredosum : RVVFloatingReductionBuiltinRoundingMode;
+
+ // 15.4. Vector Widening Floating-Point Reduction Instructions
+ defm vfwredusum : RVVFloatingWidenReductionBuiltinRoundingMode;
+ defm vfwredosum : RVVFloatingWidenReductionBuiltinRoundingMode;
+ }
+ // 15.3. Vector Single-Width Floating-Point Reduction Instructions
+ defm vfredusum : RVVFloatingReductionBuiltin;
+ defm vfredosum : RVVFloatingReductionBuiltin;
-// 15.4. Vector Widening Floating-Point Reduction Instructions
-defm vfwredusum : RVVFloatingWidenReductionBuiltin;
-defm vfwredosum : RVVFloatingWidenReductionBuiltin;
+ // 15.4. Vector Widening Floating-Point Reduction Instructions
+ defm vfwredusum : RVVFloatingWidenReductionBuiltin;
+ defm vfwredosum : RVVFloatingWidenReductionBuiltin;
+}
}
// 16. Vector Mask Instructions
@@ -2256,6 +2661,36 @@ let HasMasked = false,
let HasMasked = false, HasVL = false, IRName = "" in {
let Name = "vreinterpret_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
+ if (ResultType->isIntOrIntVectorTy(1) ||
+ Ops[0]->getType()->isIntOrIntVectorTy(1)) {
+ assert(isa<ScalableVectorType>(ResultType) &&
+ isa<ScalableVectorType>(Ops[0]->getType()));
+
+ LLVMContext &Context = CGM.getLLVMContext();
+ ScalableVectorType *Boolean64Ty =
+ ScalableVectorType::get(llvm::Type::getInt1Ty(Context), 64);
+
+ if (ResultType->isIntOrIntVectorTy(1)) {
+ // Casting from m1 vector integer -> vector boolean
+ // Ex: <vscale x 8 x i8>
+ // --(bitcast)--------> <vscale x 64 x i1>
+ // --(vector_extract)-> <vscale x 8 x i1>
+ llvm::Value *BitCast = Builder.CreateBitCast(Ops[0], Boolean64Ty);
+ return Builder.CreateExtractVector(ResultType, BitCast,
+ ConstantInt::get(Int64Ty, 0));
+ } else {
+ // Casting from vector boolean -> m1 vector integer
+ // Ex: <vscale x 1 x i1>
+ // --(vector_insert)-> <vscale x 64 x i1>
+ // --(bitcast)-------> <vscale x 8 x i8>
+ llvm::Value *Boolean64Val =
+ Builder.CreateInsertVector(Boolean64Ty,
+ llvm::PoisonValue::get(Boolean64Ty),
+ Ops[0],
+ ConstantInt::get(Int64Ty, 0));
+ return Builder.CreateBitCast(Boolean64Val, ResultType);
+ }
+ }
return Builder.CreateBitCast(Ops[0], ResultType);
}] in {
// Reinterpret between different type under the same SEW and LMUL
@@ -2274,6 +2709,53 @@ let HasMasked = false, HasVL = false, IRName = "" in {
def vreinterpret_u_ # dst_sew : RVVBuiltin<"Uv" # dst_sew # "Uv",
dst_sew # "UvUv", "csil", dst_sew # "Uv">;
}
+
+ // Existing users of FixedSEW - the reinterpretation between different SEW
+ // and same LMUL has the implicit assumption that if FixedSEW is set to the
+ // given element width, then the type will be identified as invalid, thus
+ // skipping definition of reinterpret of SEW=8 to SEW=8. However this blocks
+ // our usage here of defining all possible combinations of a fixed SEW to
+ // any boolean. So we need to separately define SEW=8 here.
+ // Reinterpret from LMUL=1 integer type to vector boolean type
+ def vreintrepret_m1_b8_signed :
+ RVVBuiltin<"Svm",
+ "mSv",
+ "c", "m">;
+ def vreintrepret_m1_b8_usigned :
+ RVVBuiltin<"USvm",
+ "mUSv",
+ "c", "m">;
+
+ // Reinterpret from vector boolean type to LMUL=1 integer type
+ def vreintrepret_b8_m1_signed :
+ RVVBuiltin<"mSv",
+ "Svm",
+ "c", "Sv">;
+ def vreintrepret_b8_m1_usigned :
+ RVVBuiltin<"mUSv",
+ "USvm",
+ "c", "USv">;
+
+ foreach dst_sew = ["16", "32", "64"] in {
+ // Reinterpret from LMUL=1 integer type to vector boolean type
+ def vreinterpret_m1_b # dst_sew # _signed:
+ RVVBuiltin<"(FixedSEW:" # dst_sew # ")Svm",
+ "m(FixedSEW:" # dst_sew # ")Sv",
+ "c", "m">;
+ def vreinterpret_m1_b # dst_sew # _unsigned:
+ RVVBuiltin<"(FixedSEW:" # dst_sew # ")USvm",
+ "m(FixedSEW:" # dst_sew # ")USv",
+ "c", "m">;
+ // Reinterpret from vector boolean type to LMUL=1 integer type
+ def vreinterpret_b # dst_sew # _m1_signed:
+ RVVBuiltin<"m(FixedSEW:" # dst_sew # ")Sv",
+ "(FixedSEW:" # dst_sew # ")Svm",
+ "c", "(FixedSEW:" # dst_sew # ")Sv">;
+ def vreinterpret_b # dst_sew # _m1_unsigned:
+ RVVBuiltin<"m(FixedSEW:" # dst_sew # ")USv",
+ "(FixedSEW:" # dst_sew # ")USvm",
+ "c", "(FixedSEW:" # dst_sew # ")USv">;
+ }
}
let Name = "vundefined", SupportOverloading = false,
@@ -2290,10 +2772,8 @@ let HasMasked = false, HasVL = false, IRName = "" in {
let Name = "vlmul_trunc_v", OverloadedName = "vlmul_trunc",
MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{ {
- ID = Intrinsic::vector_extract;
- IntrinsicTypes = {ResultType, Ops[0]->getType()};
- Ops.push_back(ConstantInt::get(Int64Ty, 0));
- return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+ return Builder.CreateExtractVector(ResultType, Ops[0],
+ ConstantInt::get(Int64Ty, 0));
} }] in {
foreach dst_lmul = ["(SFixedLog2LMUL:-3)", "(SFixedLog2LMUL:-2)", "(SFixedLog2LMUL:-1)",
"(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
@@ -2309,12 +2789,9 @@ let HasMasked = false, HasVL = false, IRName = "" in {
let Name = "vlmul_ext_v", OverloadedName = "vlmul_ext",
MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- ID = Intrinsic::vector_insert;
- IntrinsicTypes = {ResultType, Ops[0]->getType()};
- Ops.push_back(llvm::PoisonValue::get(ResultType));
- std::swap(Ops[0], Ops[1]);
- Ops.push_back(ConstantInt::get(Int64Ty, 0));
- return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+ return Builder.CreateInsertVector(ResultType,
+ llvm::PoisonValue::get(ResultType),
+ Ops[0], ConstantInt::get(Int64Ty, 0));
}] in {
foreach dst_lmul = ["(LFixedLog2LMUL:-2)", "(LFixedLog2LMUL:-1)", "(LFixedLog2LMUL:-0)",
"(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
@@ -2328,7 +2805,11 @@ let HasMasked = false, HasVL = false, IRName = "" in {
let Name = "vget_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
- ID = Intrinsic::vector_extract;
+ if (isa<StructType>(Ops[0]->getType())) // For tuple type
+ // Extract value from index (operand 1) of vtuple (operand 0)
+ return Builder.CreateExtractValue(
+ Ops[0],
+ {(unsigned)cast<ConstantInt>(Ops[1])->getZExtValue()});
auto *VecTy = cast<ScalableVectorType>(ResultType);
auto *OpVecTy = cast<ScalableVectorType>(Ops[0]->getType());
// Mask to only valid indices.
@@ -2339,21 +2820,28 @@ let HasMasked = false, HasVL = false, IRName = "" in {
Ops[1] = Builder.CreateMul(Ops[1],
ConstantInt::get(Ops[1]->getType(),
VecTy->getMinNumElements()));
- IntrinsicTypes = {ResultType, Ops[0]->getType()};
- return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+ return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
}
}] in {
foreach dst_lmul = ["(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "csilxfd", dst_lmul # "v">;
def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUvKz", "csil", dst_lmul # "Uv">;
}
+ foreach nf = NFList in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "vv", "v" # T # "vKz", "csilxfd", "v">;
+ def : RVVBuiltin<T # "UvUv", "Uv" # T # "UvKz", "csil", "Uv">;
+ }
}
- let Name = "vset_v", Log2LMUL = [0, 1, 2], MaskedPolicyScheme = NonePolicy,
+ let Name = "vset_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
- ID = Intrinsic::vector_insert;
- IntrinsicTypes = {ResultType, Ops[2]->getType()};
+ if (isa<StructType>(ResultType)) // For tuple type
+ // Insert value (operand 2) into index (operand 1) of vtuple (operand 0)
+ return Builder.CreateInsertValue(
+ Ops[0], Ops[2],
+ {(unsigned)cast<ConstantInt>(Ops[1])->getZExtValue()});
auto *ResVecTy = cast<ScalableVectorType>(ResultType);
auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
// Mask to only valid indices.
@@ -2364,13 +2852,19 @@ let HasMasked = false, HasVL = false, IRName = "" in {
Ops[1] = Builder.CreateMul(Ops[1],
ConstantInt::get(Ops[1]->getType(),
VecTy->getMinNumElements()));
- std::swap(Ops[1], Ops[2]);
- return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+ return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
}
}] in {
- foreach dst_lmul = ["(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
- def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "v" # dst_lmul # "vKzv", "csilxfd">;
- def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "Uv" # dst_lmul #"UvKzUv", "csil">;
+ let Log2LMUL = [0, 1, 2] in {
+ foreach dst_lmul = ["(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
+ def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "v" # dst_lmul # "vKzv", "csilxfd">;
+ def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "Uv" # dst_lmul #"UvKzUv", "csil">;
+ }
+ }
+ foreach nf = NFList in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<"v" # T # "v", T # "v" # T # "vKzv", "csilxfd">;
+ def : RVVBuiltin<"Uv" # T # "Uv", T # "Uv" # T # "UvKzUv", "csil">;
}
}
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/riscv_vector_common.td b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector_common.td
new file mode 100644
index 000000000000..e276e4c3c409
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector_common.td
@@ -0,0 +1,246 @@
+//==------ riscv_vector_common.td - RISC-V V-ext builtin class ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines RVV builtin base class for RISC-V V-extension.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction definitions
+//===----------------------------------------------------------------------===//
+// Each record of the class RVVBuiltin defines a collection of builtins (i.e.
+// "def vadd : RVVBuiltin" will be used to define things like "vadd_vv_i32m1",
+// "vadd_vv_i32m2", etc).
+//
+// The elements of this collection are defined by an instantiation process the
+// range of which is specified by the cross product of the LMUL attribute and
+// every element in the attribute TypeRange. By default builtins have LMUL = [1,
+// 2, 4, 8, 1/2, 1/4, 1/8] so the process is repeated 7 times. In tablegen we
+// use the Log2LMUL [0, 1, 2, 3, -1, -2, -3] to represent the LMUL.
+//
+// LMUL represents the fact that the types of values used by that builtin are
+// values generated by instructions that are executed under that LMUL. However,
+// this does not mean the builtin is necessarily lowered into an instruction
+// that executes under the specified LMUL. An example where this happens are
+// loads and stores of masks. A mask like `vbool8_t` can be generated, for
+// instance, by comparing two `__rvv_int8m1_t` (this is LMUL=1) or comparing two
+// `__rvv_int16m2_t` (this is LMUL=2). The actual load or store, however, will
+// be performed under LMUL=1 because mask registers are not grouped.
+//
+// TypeRange is a non-empty sequence of basic types:
+//
+// c: int8_t (i8)
+// s: int16_t (i16)
+// i: int32_t (i32)
+// l: int64_t (i64)
+// x: float16_t (half)
+// f: float32_t (float)
+// d: float64_t (double)
+//
+// This way, given an LMUL, a record with a TypeRange "sil" will cause the
+// definition of 3 builtins. Each type "t" in the TypeRange (in this example
+// they are int16_t, int32_t, int64_t) is used as a parameter that drives the
+// definition of that particular builtin (for the given LMUL).
+//
+// During the instantiation, types can be transformed or modified using type
+// transformers. Given a type "t" the following primitive type transformers can
+// be applied to it to yield another type.
+//
+// e: type of "t" as is (identity)
+// v: computes a vector type whose element type is "t" for the current LMUL
+// w: computes a vector type identical to what 'v' computes except for the
+// element type which is twice as wide as the element type of 'v'
+// q: computes a vector type identical to what 'v' computes except for the
+// element type which is four times as wide as the element type of 'v'
+// o: computes a vector type identical to what 'v' computes except for the
+// element type which is eight times as wide as the element type of 'v'
+// m: computes a vector type identical to what 'v' computes except for the
+// element type which is bool
+// 0: void type, ignores "t"
+// z: size_t, ignores "t"
+// t: ptrdiff_t, ignores "t"
+// u: unsigned long, ignores "t"
+// l: long, ignores "t"
+//
+// So for instance if t is "i", i.e. int, then "e" will yield int again. "v"
+// will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
+// Accordingly "w" would yield __rvv_int64m2_t.
+//
+// A type transformer can be prefixed by other non-primitive type transformers.
+//
+// P: constructs a pointer to the current type
+// C: adds const to the type
+// K: requires the integer type to be a constant expression
+// U: given an integer type or vector type, computes its unsigned variant
+// I: given a vector type, compute the vector type with integer type
+// elements of the same width
+// F: given a vector type, compute the vector type with floating-point type
+// elements of the same width
+// S: given a vector type, computes its equivalent one for LMUL=1. This is a
+// no-op if the vector was already LMUL=1
+// (Log2EEW:Value): Log2EEW value could be 3/4/5/6 (8/16/32/64), given a
+// vector type (SEW and LMUL) and EEW (8/16/32/64), computes its
+// equivalent integer vector type with EEW and corresponding ELMUL (elmul =
+// (eew/sew) * lmul). For example, vector type is __rvv_float16m4
+// (SEW=16, LMUL=4) and Log2EEW is 3 (EEW=8), and then equivalent vector
+// type is __rvv_uint8m2_t (elmul=(8/16)*4 = 2). Skip defining a new
+// builtin if its equivalent type has an illegal LMUL.
+// (FixedSEW:Value): Given a vector type (SEW and LMUL), computes another
+// vector type with only the SEW changed to the given value. Skip defining a
+// new builtin if its equivalent type has an illegal LMUL or an unchanged SEW.
+// (SFixedLog2LMUL:Value): Smaller Fixed Log2LMUL. Given a vector type (SEW
+// and LMUL), computes another vector type with only the LMUL changed to the
+// given value. The new LMUL must be smaller than the old one. Skip defining
+// a new builtin if its equivalent type has an illegal LMUL.
+// (LFixedLog2LMUL:Value): Larger Fixed Log2LMUL. Given a vector type (SEW
+// and LMUL), computes another vector type with only the LMUL changed to the
+// given value. The new LMUL must be larger than the old one. Skip defining
+// a new builtin if its equivalent type has an illegal LMUL.
+//
+// Following with the example above, if t is "i", then "Ue" will yield unsigned
+// int and "Fv" will yield __rvv_float32m1_t (again assuming LMUL=1), Fw would
+// yield __rvv_float64m2_t, etc.
+//
+// Each builtin is then defined by applying each type in TypeRange against the
+// sequence of type transformers described in Suffix and Prototype.
+//
+// The name of the builtin is defined by the Name attribute (which defaults to
+// the name of the class) appended (separated with an underscore) the Suffix
+// attribute. For instance with Name="foo", Suffix = "v" and TypeRange = "il",
+// the builtin generated will be __builtin_rvv_foo_i32m1 and
+// __builtin_rvv_foo_i64m1 (under LMUL=1). If Suffix contains more than one
+// type transformer (say "vv") each of the types is separated with an
+// underscore as in "__builtin_rvv_foo_i32m1_i32m1".
+//
+// The C/C++ prototype of the builtin is defined by the Prototype attribute.
+// Prototype is a non-empty sequence of type transformers, the first of which
+// is the return type of the builtin and the rest are the parameters of the
+// builtin, in order. For instance if Prototype is "wvv" and TypeRange is "si"
+// a first builtin will have type
+// __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t) and the second builtin
+// will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t) (again
+// under LMUL=1).
+//
+// There are a number of attributes that are used to constraint the number and
+// shape of the builtins generated. Refer to the comments below for them.
+
+class PolicyScheme<int val>{
+ int Value = val;
+}
+def NonePolicy : PolicyScheme<0>;
+def HasPassthruOperand : PolicyScheme<1>;
+def HasPolicyOperand : PolicyScheme<2>;
+
+class RVVBuiltin<string suffix, string prototype, string type_range,
+ string overloaded_suffix = ""> {
+ // Base name that will be prepended in __builtin_rvv_ and appended the
+ // computed Suffix.
+ string Name = NAME;
+
+ // If not empty, each instantiated builtin will have this appended after an
+ // underscore (_). It is instantiated like Prototype.
+ string Suffix = suffix;
+
+  // If empty, the default OverloadedName is the substring of `Name` ending at
+  // the first '_'. For example, the default overloaded name is `vadd` for Name
+  // `vadd_vv`. It's used to describe some special naming cases.
+ string OverloadedName = "";
+
+ // If not empty, each OverloadedName will have this appended after an
+ // underscore (_). It is instantiated like Prototype.
+ string OverloadedSuffix = overloaded_suffix;
+
+ // The different variants of the builtin, parameterised with a type.
+ string TypeRange = type_range;
+
+ // We use each type described in TypeRange and LMUL with prototype to
+ // instantiate a specific element of the set of builtins being defined.
+ // Prototype attribute defines the C/C++ prototype of the builtin. It is a
+ // non-empty sequence of type transformers, the first of which is the return
+ // type of the builtin and the rest are the parameters of the builtin, in
+ // order. For instance if Prototype is "wvv", TypeRange is "si" and LMUL=1, a
+ // first builtin will have type
+ // __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t), and the second builtin
+ // will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t).
+ string Prototype = prototype;
+
+ // This builtin has a masked form.
+ bit HasMasked = true;
+
+ // If HasMasked, this flag states that this builtin has a maskedoff operand. It
+ // is always the first operand in builtin and IR intrinsic.
+ bit HasMaskedOffOperand = true;
+
+ // This builtin has a granted vector length parameter.
+ bit HasVL = true;
+
+ // The policy scheme for masked intrinsic IR.
+ // It could be NonePolicy or HasPolicyOperand.
+ // HasPolicyOperand: Has a policy operand. 0 is tail and mask undisturbed, 1 is
+ // tail agnostic, 2 is mask undisturbed, and 3 is tail and mask agnostic. The
+ // policy operand is located at the last position.
+ PolicyScheme MaskedPolicyScheme = HasPolicyOperand;
+
+ // The policy scheme for unmasked intrinsic IR.
+ // It could be NonePolicy, HasPassthruOperand or HasPolicyOperand.
+ // HasPassthruOperand: Has a passthru operand to decide tail policy. If it is
+ // poison, tail policy is tail agnostic, otherwise policy is tail undisturbed.
+ // HasPolicyOperand: Has a policy operand. 1 is tail agnostic and 0 is tail
+ // undisturbed.
+ PolicyScheme UnMaskedPolicyScheme = NonePolicy;
+
+ // This builtin support tail agnostic and undisturbed policy.
+ bit HasTailPolicy = true;
+ // This builtin support mask agnostic and undisturbed policy.
+ bit HasMaskPolicy = true;
+
+ // This builtin prototype with TA or TAMA policy could not support overloading
+ // API. Other policy intrinsic functions would support overloading API with
+ // suffix `_tu`, `tumu`, `tuma`, `tamu` and `tama`.
+ bit SupportOverloading = true;
+
+ // This builtin is valid for the given Log2LMULs.
+ list<int> Log2LMUL = [0, 1, 2, 3, -1, -2, -3];
+
+ // Manual code in clang codegen riscv_vector_builtin_cg.inc
+ code ManualCodegen = [{}];
+
+  // When emitting the automatic clang codegen, this describes which types to use
+  // to obtain the specific LLVM intrinsic. -1 means the return type; otherwise,
+  // k >= 0 means the k-th operand (counting from zero) of the codegen'd
+  // parameter of the unmasked version. k can't be the mask operand's position.
+ list<int> IntrinsicTypes = [];
+
+ // If these names are not empty, this is the ID of the LLVM intrinsic
+ // we want to lower to.
+ string IRName = NAME;
+
+ // If HasMasked, this is the ID of the LLVM intrinsic we want to lower to.
+ string MaskedIRName = NAME #"_mask";
+
+ // Use clang_builtin_alias to save the number of builtins.
+ bit HasBuiltinAlias = true;
+
+ // Features required to enable for this builtin.
+ list<string> RequiredFeatures = [];
+
+ // Number of fields for Load/Store Segment instructions.
+ int NF = 1;
+
+ // Set to true if the builtin is associated with tuple types.
+ bit IsTuple = false;
+
+ // Set to true if the builtin has a parameter that models floating-point
+ // rounding mode control
+ bit HasFRMRoundModeOp = false;
+}
+
+// This is the code emitted in the header.
+class RVVHeader {
+ code HeaderCode;
+}
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/BackendUtil.h b/contrib/llvm-project/clang/include/clang/CodeGen/BackendUtil.h
index d97af65a3d01..cdbfe4ca5e65 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/BackendUtil.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/BackendUtil.h
@@ -16,8 +16,12 @@
namespace llvm {
class BitcodeModule;
template <typename T> class Expected;
+ template <typename T> class IntrusiveRefCntPtr;
class Module;
class MemoryBufferRef;
+ namespace vfs {
+ class FileSystem;
+ } // namespace vfs
}
namespace clang {
@@ -40,6 +44,7 @@ namespace clang {
const CodeGenOptions &CGOpts,
const TargetOptions &TOpts, const LangOptions &LOpts,
StringRef TDesc, llvm::Module *M, BackendAction Action,
+ llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
std::unique_ptr<raw_pwrite_stream> OS);
void EmbedBitcode(llvm::Module *M, const CodeGenOptions &CGOpts,
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h b/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h
index c042bcd9fc5f..39c7a578c8c4 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h
@@ -742,7 +742,7 @@ public:
/// Set the maximum vector width in the arguments.
void setMaxVectorWidth(unsigned Width) {
assert(llvm::isPowerOf2_32(Width) && "Expected power of 2 vector");
- MaxVectorWidth = llvm::countTrailingZeros(Width) + 1;
+ MaxVectorWidth = llvm::countr_zero(Width) + 1;
}
void Profile(llvm::FoldingSetNodeID &ID) {
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenAction.h b/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenAction.h
index b5721344046d..7ad2988e589e 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenAction.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenAction.h
@@ -53,6 +53,9 @@ private:
std::unique_ptr<llvm::Module> loadModule(llvm::MemoryBufferRef MBRef);
+ /// Load bitcode modules to link into our module from the options.
+ bool loadLinkModules(CompilerInstance &CI);
+
protected:
/// Create a new code generation action. If the optional \p _VMContext
/// parameter is supplied, the action uses it without taking ownership,
@@ -80,7 +83,7 @@ public:
CodeGenerator *getCodeGenerator() const;
- BackendConsumer *BEConsumer;
+ BackendConsumer *BEConsumer = nullptr;
};
class EmitAssemblyAction : public CodeGenAction {
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h b/contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h
index c13e052149d9..7a02d8725885 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h
@@ -32,7 +32,7 @@ class ObjectFilePCHContainerWriter : public PCHContainerWriter {
/// A PCHContainerReader implementation that uses LLVM to
/// wraps Clang modules inside a COFF, ELF, or Mach-O container.
class ObjectFilePCHContainerReader : public PCHContainerReader {
- StringRef getFormat() const override { return "obj"; }
+ ArrayRef<StringRef> getFormats() const override;
/// Returns the serialized AST inside the PCH container Buffer.
StringRef ExtractPCH(llvm::MemoryBufferRef Buffer) const override;
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Action.h b/contrib/llvm-project/clang/include/clang/Driver/Action.h
index f8b0621543ca..04fa8b01b418 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Action.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Action.h
@@ -75,9 +75,10 @@ public:
OffloadPackagerJobClass,
LinkerWrapperJobClass,
StaticLibJobClass,
+ BinaryAnalyzeJobClass,
JobClassFirst = PreprocessJobClass,
- JobClassLast = StaticLibJobClass
+ JobClassLast = BinaryAnalyzeJobClass
};
// The offloading kind determines if this action is binded to a particular
@@ -674,6 +675,17 @@ public:
}
};
+class BinaryAnalyzeJobAction : public JobAction {
+ void anchor() override;
+
+public:
+ BinaryAnalyzeJobAction(Action *Input, types::ID Type);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == BinaryAnalyzeJobClass;
+ }
+};
+
} // namespace driver
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Compilation.h b/contrib/llvm-project/clang/include/clang/Driver/Compilation.h
index f58b5a8cc9fd..36ae85c42451 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Compilation.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Compilation.h
@@ -112,6 +112,9 @@ class Compilation {
/// only be removed if we crash.
ArgStringMap FailureResultFiles;
+ /// -ftime-trace result files.
+ ArgStringMap TimeTraceFiles;
+
/// Optional redirection for stdin, stdout, stderr.
std::vector<std::optional<StringRef>> Redirects;
@@ -269,6 +272,14 @@ public:
return Name;
}
+ const char *getTimeTraceFile(const JobAction *JA) const {
+ return TimeTraceFiles.lookup(JA);
+ }
+ void addTimeTraceFile(const char *Name, const JobAction *JA) {
+ assert(!TimeTraceFiles.contains(JA));
+ TimeTraceFiles[JA] = Name;
+ }
+
/// CleanupFile - Delete a given file.
///
/// \param IssueErrors - Report failures as errors.
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Distro.h b/contrib/llvm-project/clang/include/clang/Driver/Distro.h
index 1aaf93ddb7c4..8291f6575a71 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Distro.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Distro.h
@@ -9,8 +9,8 @@
#ifndef LLVM_CLANG_DRIVER_DISTRO_H
#define LLVM_CLANG_DRIVER_DISTRO_H
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace driver {
@@ -77,6 +77,7 @@ public:
UbuntuJammy,
UbuntuKinetic,
UbuntuLunar,
+ UbuntuMantic,
UnknownDistro
};
@@ -128,7 +129,7 @@ public:
}
bool IsUbuntu() const {
- return DistroVal >= UbuntuHardy && DistroVal <= UbuntuLunar;
+ return DistroVal >= UbuntuHardy && DistroVal <= UbuntuMantic;
}
bool IsAlpineLinux() const { return DistroVal == AlpineLinux; }
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Driver.h b/contrib/llvm-project/clang/include/clang/Driver/Driver.h
index 4bbb113b6cf5..e3e98bad9912 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Driver.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Driver.h
@@ -21,6 +21,7 @@
#include "clang/Driver/Types.h"
#include "clang/Driver/Util.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Option/Arg.h"
@@ -193,6 +194,9 @@ public:
/// The file to log CC_PRINT_PROC_STAT_FILE output to, if enabled.
std::string CCPrintStatReportFilename;
+ /// The file to log CC_PRINT_INTERNAL_STAT_FILE output to, if enabled.
+ std::string CCPrintInternalStatReportFilename;
+
/// The file to log CC_PRINT_OPTIONS output to, if enabled.
std::string CCPrintOptionsFilename;
@@ -257,11 +261,16 @@ public:
/// performance report to CC_PRINT_PROC_STAT_FILE or to stdout.
unsigned CCPrintProcessStats : 1;
+ /// Set CC_PRINT_INTERNAL_STAT mode, which causes the driver to dump internal
+ /// performance report to CC_PRINT_INTERNAL_STAT_FILE or to stdout.
+ unsigned CCPrintInternalStats : 1;
+
/// Pointer to the ExecuteCC1Tool function, if available.
/// When the clangDriver lib is used through clang.exe, this provides a
/// shortcut for executing the -cc1 command-line directly, in the same
/// process.
- typedef int (*CC1ToolFunc)(SmallVectorImpl<const char *> &ArgV);
+ using CC1ToolFunc =
+ llvm::function_ref<int(SmallVectorImpl<const char *> &ArgV)>;
CC1ToolFunc CC1Main = nullptr;
private:
@@ -286,6 +295,12 @@ private:
/// Arguments originated from command line.
std::unique_ptr<llvm::opt::InputArgList> CLOptions;
+ /// If this is non-null, the driver will prepend this argument before
+ /// reinvoking clang. This is useful for the llvm-driver where clang's
+ /// realpath will be to the llvm binary and not clang, so it must pass
+ /// "clang" as its first argument.
+ const char *PrependArg;
+
/// Whether to check that input files exist when constructing compilation
/// jobs.
unsigned CheckInputsExist : 1;
@@ -383,6 +398,9 @@ public:
bool getProbePrecompiled() const { return ProbePrecompiled; }
void setProbePrecompiled(bool Value) { ProbePrecompiled = Value; }
+ const char *getPrependArg() const { return PrependArg; }
+ void setPrependArg(const char *Value) { PrependArg = Value; }
+
void setTargetAndMode(const ParsedClangName &TM) { ClangNameParts = TM; }
const std::string &getTitle() { return DriverTitle; }
@@ -616,10 +634,19 @@ public:
/// Returns the default name for linked images (e.g., "a.out").
const char *getDefaultImageName() const;
- // Creates a temp file with $Prefix-%%%%%%.$Suffix
+ /// Creates a temp file.
+ /// 1. If \p MultipleArch is false or \p BoundArch is empty, the temp file is
+ /// in the temporary directory with name $Prefix-%%%%%%.$Suffix.
+ /// 2. If \p MultipleArch is true and \p BoundArch is not empty,
+ /// 2a. If \p NeedUniqueDirectory is false, the temp file is in the
+ /// temporary directory with name $Prefix-$BoundArch-%%%%%.$Suffix.
+ /// 2b. If \p NeedUniqueDirectory is true, the temp file is in a unique
+ /// subdirectory with a random name under the temporary directory, and
+ /// the temp file itself has name $Prefix-$BoundArch.$Suffix.
const char *CreateTempFile(Compilation &C, StringRef Prefix, StringRef Suffix,
bool MultipleArchs = false,
- StringRef BoundArch = {}) const;
+ StringRef BoundArch = {},
+ bool NeedUniqueDirectory = false) const;
/// GetNamedOutputPath - Return the name to use for the output of
/// the action \p JA. The result is appended to the compilation's
@@ -788,6 +815,16 @@ llvm::StringRef getDriverMode(StringRef ProgName, ArrayRef<const char *> Args);
/// Checks whether the value produced by getDriverMode is for CL mode.
bool IsClangCL(StringRef DriverMode);
+/// Expand response files from a clang driver or cc1 invocation.
+///
+/// \param Args The arguments that will be expanded.
+/// \param ClangCLMode Whether clang is in CL mode.
+/// \param Alloc Allocator for new arguments.
+/// \param FS Filesystem to use when expanding files.
+llvm::Error expandResponseFiles(SmallVectorImpl<const char *> &Args,
+ bool ClangCLMode, llvm::BumpPtrAllocator &Alloc,
+ llvm::vfs::FileSystem *FS = nullptr);
+
} // end namespace driver
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Job.h b/contrib/llvm-project/clang/include/clang/Driver/Job.h
index e3fa92d6ad5f..df9449463c53 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Job.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Job.h
@@ -116,6 +116,9 @@ class Command {
/// The executable to run.
const char *Executable;
+ /// Optional argument to prepend.
+ const char *PrependArg;
+
/// The list of program arguments (not including the implicit first
/// argument, which will be the executable).
llvm::opt::ArgStringList Arguments;
@@ -169,7 +172,8 @@ public:
Command(const Action &Source, const Tool &Creator,
ResponseFileSupport ResponseSupport, const char *Executable,
const llvm::opt::ArgStringList &Arguments, ArrayRef<InputInfo> Inputs,
- ArrayRef<InputInfo> Outputs = std::nullopt);
+ ArrayRef<InputInfo> Outputs = std::nullopt,
+ const char *PrependArg = nullptr);
// FIXME: This really shouldn't be copyable, but is currently copied in some
// error handling in Driver::generateCompilationDiagnostics.
Command(const Command &) = default;
@@ -242,7 +246,8 @@ public:
ResponseFileSupport ResponseSupport, const char *Executable,
const llvm::opt::ArgStringList &Arguments,
ArrayRef<InputInfo> Inputs,
- ArrayRef<InputInfo> Outputs = std::nullopt);
+ ArrayRef<InputInfo> Outputs = std::nullopt,
+ const char *PrependArg = nullptr);
void Print(llvm::raw_ostream &OS, const char *Terminator, bool Quote,
CrashReportInfo *CrashInfo = nullptr) const override;
@@ -253,23 +258,6 @@ public:
void setEnvironment(llvm::ArrayRef<const char *> NewEnvironment) override;
};
-/// Like Command, but always pretends that the wrapped command succeeded.
-class ForceSuccessCommand : public Command {
-public:
- ForceSuccessCommand(const Action &Source_, const Tool &Creator_,
- ResponseFileSupport ResponseSupport,
- const char *Executable_,
- const llvm::opt::ArgStringList &Arguments_,
- ArrayRef<InputInfo> Inputs,
- ArrayRef<InputInfo> Outputs = std::nullopt);
-
- void Print(llvm::raw_ostream &OS, const char *Terminator, bool Quote,
- CrashReportInfo *CrashInfo = nullptr) const override;
-
- int Execute(ArrayRef<std::optional<StringRef>> Redirects, std::string *ErrMsg,
- bool *ExecutionFailed) const override;
-};
-
/// JobList - A sequence of jobs to perform.
class JobList {
public:
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Multilib.h b/contrib/llvm-project/clang/include/clang/Driver/Multilib.h
index cf2dbf6ff58a..1416559414f8 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Multilib.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Multilib.h
@@ -13,7 +13,9 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/SourceMgr.h"
#include <cassert>
#include <functional>
#include <string>
@@ -24,7 +26,9 @@ namespace clang {
namespace driver {
/// This corresponds to a single GCC Multilib, or a segment of one controlled
-/// by a command line flag
+/// by a command line flag.
+/// See also MultilibBuilder for building a multilib by mutating it
+/// incrementally.
class Multilib {
public:
using flags_list = std::vector<std::string>;
@@ -34,74 +38,35 @@ private:
std::string OSSuffix;
std::string IncludeSuffix;
flags_list Flags;
- int Priority;
public:
+ /// GCCSuffix, OSSuffix & IncludeSuffix will be appended directly to the
+ /// sysroot string so they must either be empty or begin with a '/' character.
+ /// This is enforced with an assert in the constructor.
Multilib(StringRef GCCSuffix = {}, StringRef OSSuffix = {},
- StringRef IncludeSuffix = {}, int Priority = 0);
+ StringRef IncludeSuffix = {},
+ const flags_list &Flags = flags_list());
/// Get the detected GCC installation path suffix for the multi-arch
/// target variant. Always starts with a '/', unless empty
- const std::string &gccSuffix() const {
- assert(GCCSuffix.empty() ||
- (StringRef(GCCSuffix).front() == '/' && GCCSuffix.size() > 1));
- return GCCSuffix;
- }
-
- /// Set the GCC installation path suffix.
- Multilib &gccSuffix(StringRef S);
+ const std::string &gccSuffix() const { return GCCSuffix; }
/// Get the detected os path suffix for the multi-arch
/// target variant. Always starts with a '/', unless empty
- const std::string &osSuffix() const {
- assert(OSSuffix.empty() ||
- (StringRef(OSSuffix).front() == '/' && OSSuffix.size() > 1));
- return OSSuffix;
- }
-
- /// Set the os path suffix.
- Multilib &osSuffix(StringRef S);
+ const std::string &osSuffix() const { return OSSuffix; }
/// Get the include directory suffix. Always starts with a '/', unless
/// empty
- const std::string &includeSuffix() const {
- assert(IncludeSuffix.empty() ||
- (StringRef(IncludeSuffix).front() == '/' && IncludeSuffix.size() > 1));
- return IncludeSuffix;
- }
-
- /// Set the include directory suffix
- Multilib &includeSuffix(StringRef S);
+ const std::string &includeSuffix() const { return IncludeSuffix; }
/// Get the flags that indicate or contraindicate this multilib's use
- /// All elements begin with either '+' or '-'
+ /// All elements begin with either '-' or '!'
const flags_list &flags() const { return Flags; }
- flags_list &flags() { return Flags; }
-
- /// Returns the multilib priority. When more than one multilib matches flags,
- /// the one with the highest priority is selected, with 0 being the default.
- int priority() const { return Priority; }
-
- /// Add a flag to the flags list
- /// \p Flag must be a flag accepted by the driver with its leading '-' removed,
- /// and replaced with either:
- /// '-' which contraindicates using this multilib with that flag
- /// or:
- /// '+' which promotes using this multilib in the presence of that flag
- /// otherwise '-print-multi-lib' will not emit them correctly.
- Multilib &flag(StringRef F) {
- assert(F.front() == '+' || F.front() == '-');
- Flags.push_back(std::string(F));
- return *this;
- }
LLVM_DUMP_METHOD void dump() const;
/// print summary of the Multilib
void print(raw_ostream &OS) const;
- /// Check whether any of the 'against' flags contradict the 'for' flags.
- bool isValid() const;
-
/// Check whether the default is selected
bool isDefault() const
{ return GCCSuffix.empty() && OSSuffix.empty() && IncludeSuffix.empty(); }
@@ -111,63 +76,57 @@ public:
raw_ostream &operator<<(raw_ostream &OS, const Multilib &M);
+/// See also MultilibSetBuilder for combining multilibs into a set.
class MultilibSet {
public:
using multilib_list = std::vector<Multilib>;
- using iterator = multilib_list::iterator;
using const_iterator = multilib_list::const_iterator;
using IncludeDirsFunc =
std::function<std::vector<std::string>(const Multilib &M)>;
using FilterCallback = llvm::function_ref<bool(const Multilib &)>;
+ /// Uses regular expressions to simplify flags used for multilib selection.
+ /// For example, we may wish both -mfloat-abi=soft and -mfloat-abi=softfp to
+ /// be treated as -mfloat-abi=soft.
+ struct FlagMatcher {
+ std::string Match;
+ std::vector<std::string> Flags;
+ };
+
private:
multilib_list Multilibs;
+ std::vector<FlagMatcher> FlagMatchers;
IncludeDirsFunc IncludeCallback;
IncludeDirsFunc FilePathsCallback;
public:
MultilibSet() = default;
+ MultilibSet(multilib_list &&Multilibs,
+ std::vector<FlagMatcher> &&FlagMatchers = {})
+ : Multilibs(Multilibs), FlagMatchers(FlagMatchers) {}
- /// Add an optional Multilib segment
- MultilibSet &Maybe(const Multilib &M);
-
- /// Add a set of mutually incompatible Multilib segments
- MultilibSet &Either(const Multilib &M1, const Multilib &M2);
- MultilibSet &Either(const Multilib &M1, const Multilib &M2,
- const Multilib &M3);
- MultilibSet &Either(const Multilib &M1, const Multilib &M2,
- const Multilib &M3, const Multilib &M4);
- MultilibSet &Either(const Multilib &M1, const Multilib &M2,
- const Multilib &M3, const Multilib &M4,
- const Multilib &M5);
- MultilibSet &Either(ArrayRef<Multilib> Ms);
+ const multilib_list &getMultilibs() { return Multilibs; }
/// Filter out some subset of the Multilibs using a user defined callback
MultilibSet &FilterOut(FilterCallback F);
- /// Filter out those Multilibs whose gccSuffix matches the given expression
- MultilibSet &FilterOut(const char *Regex);
-
/// Add a completed Multilib to the set
void push_back(const Multilib &M);
- /// Union this set of multilibs with another
- void combineWith(const MultilibSet &MS);
-
- /// Remove all of the multilibs from the set
- void clear() { Multilibs.clear(); }
-
- iterator begin() { return Multilibs.begin(); }
const_iterator begin() const { return Multilibs.begin(); }
-
- iterator end() { return Multilibs.end(); }
const_iterator end() const { return Multilibs.end(); }
- /// Pick the best multilib in the set, \returns false if none are compatible
- bool select(const Multilib::flags_list &Flags, Multilib &M) const;
+ /// Select compatible variants, \returns false if none are compatible
+ bool select(const Multilib::flags_list &Flags,
+ llvm::SmallVector<Multilib> &) const;
unsigned size() const { return Multilibs.size(); }
+ /// Get the given flags plus flags found by matching them against the
+ /// FlagMatchers and choosing the Flags of each accordingly. The select method
+ /// calls this method so in most cases it's not necessary to call it directly.
+ llvm::StringSet<> expandFlags(const Multilib::flags_list &) const;
+
LLVM_DUMP_METHOD void dump() const;
void print(raw_ostream &OS) const;
@@ -185,12 +144,9 @@ public:
const IncludeDirsFunc &filePathsCallback() const { return FilePathsCallback; }
-private:
- /// Apply the filter to Multilibs and return the subset that remains
- static multilib_list filterCopy(FilterCallback F, const multilib_list &Ms);
-
- /// Apply the filter to the multilib_list, removing those that don't match
- static void filterInPlace(FilterCallback F, multilib_list &Ms);
+ static llvm::ErrorOr<MultilibSet>
+ parseYaml(llvm::MemoryBufferRef, llvm::SourceMgr::DiagHandlerTy = nullptr,
+ void *DiagHandlerCtxt = nullptr);
};
raw_ostream &operator<<(raw_ostream &OS, const MultilibSet &MS);
diff --git a/contrib/llvm-project/clang/include/clang/Driver/MultilibBuilder.h b/contrib/llvm-project/clang/include/clang/Driver/MultilibBuilder.h
new file mode 100644
index 000000000000..61596c5c573f
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Driver/MultilibBuilder.h
@@ -0,0 +1,134 @@
+//===- MultilibBuilder.h
+//-----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DRIVER_MULTILIBBUILDER_H
+#define LLVM_CLANG_DRIVER_MULTILIBBUILDER_H
+
+#include "clang/Driver/Multilib.h"
+
+namespace clang {
+namespace driver {
+
+/// This corresponds to a single GCC multilib, or a segment of one controlled
+/// by a command line flag. This class can be used to create a Multilib, and
+/// contains helper functions to mutate it before creating a Multilib instance
+/// with makeMultilib().
+class MultilibBuilder {
+public:
+ using flags_list = std::vector<std::string>;
+
+private:
+ std::string GCCSuffix;
+ std::string OSSuffix;
+ std::string IncludeSuffix;
+ flags_list Flags;
+
+public:
+ MultilibBuilder(StringRef GCCSuffix, StringRef OSSuffix,
+ StringRef IncludeSuffix);
+
+ /// Initializes GCCSuffix, OSSuffix & IncludeSuffix to the same value.
+ MultilibBuilder(StringRef Suffix = {});
+
+ /// Get the detected GCC installation path suffix for the multi-arch
+ /// target variant. Always starts with a '/', unless empty
+ const std::string &gccSuffix() const {
+ assert(GCCSuffix.empty() ||
+ (StringRef(GCCSuffix).front() == '/' && GCCSuffix.size() > 1));
+ return GCCSuffix;
+ }
+
+ /// Set the GCC installation path suffix.
+ MultilibBuilder &gccSuffix(StringRef S);
+
+ /// Get the detected os path suffix for the multi-arch
+ /// target variant. Always starts with a '/', unless empty
+ const std::string &osSuffix() const {
+ assert(OSSuffix.empty() ||
+ (StringRef(OSSuffix).front() == '/' && OSSuffix.size() > 1));
+ return OSSuffix;
+ }
+
+ /// Set the os path suffix.
+ MultilibBuilder &osSuffix(StringRef S);
+
+ /// Get the include directory suffix. Always starts with a '/', unless
+ /// empty
+ const std::string &includeSuffix() const {
+ assert(IncludeSuffix.empty() || (StringRef(IncludeSuffix).front() == '/' &&
+ IncludeSuffix.size() > 1));
+ return IncludeSuffix;
+ }
+
+ /// Set the include directory suffix
+ MultilibBuilder &includeSuffix(StringRef S);
+
+ /// Get the flags that indicate or contraindicate this multilib's use
+ /// All elements begin with either '-' or '!'
+ const flags_list &flags() const { return Flags; }
+ flags_list &flags() { return Flags; }
+
+ /// Add a flag to the flags list
+ /// \p Flag must be a flag accepted by the driver.
+ /// \p Disallow defines whether the flag is negated and therefore disallowed.
+ MultilibBuilder &flag(StringRef Flag, bool Disallow = false);
+
+ Multilib makeMultilib() const;
+
+ /// Check whether any of the 'against' flags contradict the 'for' flags.
+ bool isValid() const;
+
+ /// Check whether the default is selected
+ bool isDefault() const {
+ return GCCSuffix.empty() && OSSuffix.empty() && IncludeSuffix.empty();
+ }
+};
+
+/// This class can be used to create a MultilibSet, and contains helper
+/// functions to add combinations of multilibs before creating a MultilibSet
+/// instance with makeMultilibSet().
+class MultilibSetBuilder {
+public:
+ using multilib_list = std::vector<MultilibBuilder>;
+
+ MultilibSetBuilder() = default;
+
+ /// Add an optional Multilib segment
+ MultilibSetBuilder &Maybe(const MultilibBuilder &M);
+
+ /// Add a set of mutually incompatible Multilib segments
+ MultilibSetBuilder &Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2);
+ MultilibSetBuilder &Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2,
+ const MultilibBuilder &M3);
+ MultilibSetBuilder &Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2,
+ const MultilibBuilder &M3,
+ const MultilibBuilder &M4);
+ MultilibSetBuilder &Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2,
+ const MultilibBuilder &M3,
+ const MultilibBuilder &M4,
+ const MultilibBuilder &M5);
+ MultilibSetBuilder &Either(ArrayRef<MultilibBuilder> Ms);
+
+ /// Filter out those Multilibs whose gccSuffix matches the given expression
+ MultilibSetBuilder &FilterOut(const char *Regex);
+
+ MultilibSet makeMultilibSet() const;
+
+private:
+ multilib_list Multilibs;
+};
+
+} // namespace driver
+} // namespace clang
+
+#endif // LLVM_CLANG_DRIVER_MULTILIBBUILDER_H
diff --git a/contrib/llvm-project/clang/include/clang/Driver/OffloadBundler.h b/contrib/llvm-project/clang/include/clang/Driver/OffloadBundler.h
index d7f927c22381..28473c53662d 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/OffloadBundler.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/OffloadBundler.h
@@ -17,8 +17,8 @@
#ifndef LLVM_CLANG_DRIVER_OFFLOADBUNDLER_H
#define LLVM_CLANG_DRIVER_OFFLOADBUNDLER_H
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Error.h"
+#include "llvm/TargetParser/Triple.h"
#include <string>
#include <vector>
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Options.h b/contrib/llvm-project/clang/include/clang/Driver/Options.h
index f7ee154b7a7a..54c6f5faa37c 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Options.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Options.h
@@ -38,6 +38,7 @@ enum ClangFlags {
DXCOption = (1 << 17),
CLDXCOption = (1 << 18),
Ignored = (1 << 19),
+ TargetSpecific = (1 << 20),
};
enum ID {
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Options.td b/contrib/llvm-project/clang/include/clang/Driver/Options.td
index 652c15afcce8..229f6141c750 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Options.td
+++ b/contrib/llvm-project/clang/include/clang/Driver/Options.td
@@ -75,6 +75,10 @@ def FlangOnlyOption : OptionFlag;
// FC1Option - This option should be accepted by flang -fc1.
def FC1Option : OptionFlag;
+// This is a target-specific option for compilation. Using it on an unsupported
+// target will lead to an err_drv_unsupported_opt_for_target error.
+def TargetSpecific : OptionFlag;
+
// A short name to show in documentation. The name will be interpreted as rST.
class DocName<string name> { string DocName = name; }
@@ -89,6 +93,8 @@ class DocFlatten { bit DocFlatten = 1; }
// GCC compatibility.
class IgnoredGCCCompat : Flags<[HelpHidden]> {}
+class TargetSpecific : Flags<[TargetSpecific]> {}
+
/////////
// Groups
@@ -185,7 +191,7 @@ def m_wasm_Features_Driver_Group : OptionGroup<"<wasm driver features group>">,
def m_x86_Features_Group : OptionGroup<"<x86 features group>">,
Group<m_Group>, Flags<[CoreOption]>, DocName<"X86">;
def m_riscv_Features_Group : OptionGroup<"<riscv features group>">,
- Group<m_Group>, DocName<"RISCV">;
+ Group<m_Group>, DocName<"RISC-V">;
def m_libc_Group : OptionGroup<"<m libc group>">, Group<m_mips_Features_Group>,
Flags<[HelpHidden]>;
@@ -235,12 +241,20 @@ def clang_ignored_f_Group : OptionGroup<"<clang ignored f group>">,
def clang_ignored_m_Group : OptionGroup<"<clang ignored m group>">,
Group<m_Group>, Flags<[Ignored]>;
+// Unsupported flang groups
+def flang_ignored_w_Group : OptionGroup<"<flang ignored W group>">,
+ Group<W_Group>, Flags<[FlangOnlyOption, Ignored]>;
+
// Group for clang options in the process of deprecation.
// Please include the version that deprecated the flag as comment to allow
// easier garbage collection.
def clang_ignored_legacy_options_Group : OptionGroup<"<clang legacy flags>">,
Group<f_Group>, Flags<[Ignored]>;
+def LongDouble_Group : OptionGroup<"<LongDouble group>">, Group<m_Group>,
+ DocName<"Long double flags">,
+ DocBrief<[{Selects the long double implementation}]>;
+
// Retired with clang-5.0
def : Flag<["-"], "fslp-vectorize-aggressive">, Group<clang_ignored_legacy_options_Group>;
def : Flag<["-"], "fno-slp-vectorize-aggressive">, Group<clang_ignored_legacy_options_Group>;
@@ -482,6 +496,40 @@ multiclass BoolGOption<string flag_base, KeyPathAndMacro kpm,
Group<g_Group>;
}
+// Works like BoolOption except without marshalling
+multiclass BoolOptionWithoutMarshalling<string prefix = "", string spelling_base,
+ FlagDef flag1_base, FlagDef flag2_base,
+ BothFlags suffix = BothFlags<[], "">> {
+ defvar flag1 = FlagDefExpanded<ApplySuffix<flag1_base, suffix>.Result, prefix,
+ NAME, spelling_base>;
+
+ defvar flag2 = FlagDefExpanded<ApplySuffix<flag2_base, suffix>.Result, prefix,
+ NAME, spelling_base>;
+
+ // The flags must have different polarity, different values, and only
+ // one can be implied.
+ assert !xor(flag1.Polarity, flag2.Polarity),
+ "the flags must have different polarity: flag1: " #
+ flag1.Polarity # ", flag2: " # flag2.Polarity;
+ assert !ne(flag1.Value, flag2.Value),
+ "the flags must have different values: flag1: " #
+ flag1.Value # ", flag2: " # flag2.Value;
+ assert !not(!and(flag1.CanBeImplied, flag2.CanBeImplied)),
+ "only one of the flags can be implied: flag1: " #
+ flag1.CanBeImplied # ", flag2: " # flag2.CanBeImplied;
+
+ defvar implied = !if(flag1.CanBeImplied, flag1, flag2);
+
+ def flag1.RecordName : Flag<["-"], flag1.Spelling>, Flags<flag1.OptionFlags>,
+ HelpText<flag1.Help>,
+ ImpliedByAnyOf<implied.ImpliedBy, implied.ValueAsCode>
+ {}
+ def flag2.RecordName : Flag<["-"], flag2.Spelling>, Flags<flag2.OptionFlags>,
+ HelpText<flag2.Help>,
+ ImpliedByAnyOf<implied.ImpliedBy, implied.ValueAsCode>
+ {}
+}
+
// FIXME: Diagnose if target does not support protected visibility.
class MarshallingInfoVisibility<KeyPathAndMacro kpm, code default>
: MarshallingInfoEnum<kpm, default>,
@@ -679,7 +727,7 @@ def E : Flag<["-"], "E">, Flags<[NoXarchOption,CC1Option, FlangOption, FC1Option
HelpText<"Only run the preprocessor">;
def F : JoinedOrSeparate<["-"], "F">, Flags<[RenderJoined,CC1Option]>,
HelpText<"Add directory to framework include search path">;
-def G : JoinedOrSeparate<["-"], "G">, Flags<[NoXarchOption]>, Group<m_Group>,
+def G : JoinedOrSeparate<["-"], "G">, Flags<[NoXarchOption,TargetSpecific]>, Group<m_Group>,
MetaVarName<"<size>">, HelpText<"Put objects of at most <size> bytes "
"into small data section (MIPS / Hexagon)">;
def G_EQ : Joined<["-"], "G=">, Flags<[NoXarchOption]>, Group<m_Group>, Alias<G>;
@@ -929,8 +977,10 @@ def cxx_isystem : JoinedOrSeparate<["-"], "cxx-isystem">, Group<clang_i_Group>,
MetaVarName<"<directory>">;
def c : Flag<["-"], "c">, Flags<[NoXarchOption, FlangOption]>, Group<Action_Group>,
HelpText<"Only run preprocess, compile, and assemble steps">;
-def fconvergent_functions : Flag<["-"], "fconvergent-functions">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Assume functions may be convergent">;
+defm convergent_functions : BoolFOption<"convergent-functions",
+ LangOpts<"ConvergentFunctions">, DefaultFalse,
+ NegFlag<SetFalse, [], "Assume all functions may be convergent.">,
+ PosFlag<SetTrue, [CC1Option]>>;
def gpu_use_aux_triple_only : Flag<["--"], "gpu-use-aux-triple-only">,
InternalDriverOpt, HelpText<"Prepare '-aux-triple' only without populating "
@@ -987,6 +1037,17 @@ defm cuda_short_ptr : BoolFOption<"cuda-short-ptr",
TargetOpts<"NVPTXUseShortPointers">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Use 32-bit pointers for accessing const/local/shared address spaces">,
NegFlag<SetFalse>>;
+def mprintf_kind_EQ : Joined<["-"], "mprintf-kind=">, Group<m_Group>,
+ HelpText<"Specify the printf lowering scheme (AMDGPU only), allowed values are "
+ "\"hostcall\"(printing happens during kernel execution, this scheme "
+ "relies on hostcalls which require system to support pcie atomics) "
+ "and \"buffered\"(printing happens after all kernel threads exit, "
+ "this uses a printf buffer and does not rely on pcie atomic support)">,
+ Flags<[CC1Option]>,
+ Values<"hostcall,buffered">,
+ NormalizedValuesScope<"TargetOptions::AMDGPUPrintfKind">,
+ NormalizedValues<["Hostcall", "Buffered"]>,
+ MarshallingInfoEnum<TargetOpts<"AMDGPUPrintfKindVal">, "Hostcall">;
def fgpu_default_stream_EQ : Joined<["-"], "fgpu-default-stream=">,
HelpText<"Specify default stream. The default value is 'legacy'. (HIP only)">,
Flags<[CC1Option]>,
@@ -1063,6 +1124,10 @@ def gpu_bundle_output : Flag<["--"], "gpu-bundle-output">,
Group<f_Group>, HelpText<"Bundle output files of HIP device compilation">;
def no_gpu_bundle_output : Flag<["--"], "no-gpu-bundle-output">,
Group<f_Group>, HelpText<"Do not bundle output files of HIP device compilation">;
+def fhip_emit_relocatable : Flag<["-"], "fhip-emit-relocatable">, Group<f_Group>,
+ HelpText<"Compile HIP source to relocatable">;
+def fno_hip_emit_relocatable : Flag<["-"], "fno-hip-emit-relocatable">, Group<f_Group>,
+ HelpText<"Do not override toolchain to compile HIP source to relocatable">;
def cuid_EQ : Joined<["-"], "cuid=">, Flags<[CC1Option]>,
HelpText<"An ID for compilation unit, which should be the same for the same "
"compilation unit but different for different compilation units. "
@@ -1102,6 +1167,10 @@ def module_dependency_dir : Separate<["-"], "module-dependency-dir">,
def dsym_dir : JoinedOrSeparate<["-"], "dsym-dir">,
Flags<[NoXarchOption, RenderAsInput]>,
HelpText<"Directory to output dSYM's (if any) to">, MetaVarName<"<dir>">;
+// GCC style -dumpdir. We intentionally don't implement the less useful -dumpbase{,-ext}.
+def dumpdir : Separate<["-"], "dumpdir">, Flags<[CC1Option]>,
+ MetaVarName<"<dumppfx>">,
+  HelpText<"Use <dumppfx> as a prefix to form auxiliary and dump file names">;
def dumpmachine : Flag<["-"], "dumpmachine">;
def dumpspecs : Flag<["-"], "dumpspecs">, Flags<[Unsupported]>;
def dumpversion : Flag<["-"], "dumpversion">;
@@ -1129,9 +1198,12 @@ def extract_api : Flag<["-"], "extract-api">, Flags<[CC1Option]>, Group<Action_G
HelpText<"Extract API information">;
def product_name_EQ: Joined<["--"], "product-name=">, Flags<[CC1Option]>,
MarshallingInfoString<FrontendOpts<"ProductName">>;
-def extract_api_ignores_EQ: Joined<["--"], "extract-api-ignores=">, Flags<[CC1Option]>,
- HelpText<"File containing a new line separated list of API symbols to ignore when extracting API information.">,
- MarshallingInfoString<FrontendOpts<"ExtractAPIIgnoresFile">>;
+def emit_symbol_graph_EQ: JoinedOrSeparate<["--"], "emit-symbol-graph=">, Flags<[CC1Option]>,
+ HelpText<"Generate Extract API information as a side effect of compilation.">,
+ MarshallingInfoString<FrontendOpts<"SymbolGraphOutputDir">>;
+def extract_api_ignores_EQ: CommaJoined<["--"], "extract-api-ignores=">, Flags<[CC1Option]>,
+ HelpText<"Comma separated list of files containing a new line separated list of API symbols to ignore when extracting API information.">,
+ MarshallingInfoStringVector<FrontendOpts<"ExtractAPIIgnoresFileList">>;
def e : JoinedOrSeparate<["-"], "e">, Flags<[LinkerInput]>, Group<Link_Group>;
def fmax_tokens_EQ : Joined<["-"], "fmax-tokens=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Max total number of preprocessed tokens for -Wmax-tokens.">,
@@ -1177,10 +1249,17 @@ defm xl_pragma_pack : BoolFOption<"xl-pragma-pack",
def shared_libsan : Flag<["-"], "shared-libsan">,
HelpText<"Dynamically link the sanitizer runtime">;
def static_libsan : Flag<["-"], "static-libsan">,
- HelpText<"Statically link the sanitizer runtime">;
+ HelpText<"Statically link the sanitizer runtime (Not supported for ASan, TSan or UBSan on darwin)">;
def : Flag<["-"], "shared-libasan">, Alias<shared_libsan>;
def fasm : Flag<["-"], "fasm">, Group<f_Group>;
+defm assume_unique_vtables : BoolFOption<"assume-unique-vtables",
+ CodeGenOpts<"AssumeUniqueVTables">, DefaultTrue,
+ PosFlag<SetTrue>,
+ NegFlag<SetFalse, [CC1Option],
+ "Disable optimizations based on vtable pointer identity">,
+ BothFlags<[CoreOption]>>;
+
def fassume_sane_operator_new : Flag<["-"], "fassume-sane-operator-new">, Group<f_Group>;
def fastcp : Flag<["-"], "fastcp">, Group<f_Group>;
def fastf : Flag<["-"], "fastf">, Group<f_Group>;
@@ -1188,9 +1267,7 @@ def fast : Flag<["-"], "fast">, Group<f_Group>;
def fasynchronous_unwind_tables : Flag<["-"], "fasynchronous-unwind-tables">, Group<f_Group>;
defm double_square_bracket_attributes : BoolFOption<"double-square-bracket-attributes",
- LangOpts<"DoubleSquareBracketAttributes">, Default<!strconcat(cpp11.KeyPath, "||", c2x.KeyPath)>,
- PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [], "Disable">,
- BothFlags<[NoXarchOption, CC1Option], " '[[]]' attributes in all C and C++ language modes">>;
+ LangOpts<"DoubleSquareBracketAttributes">, DefaultTrue, PosFlag<SetTrue>, NegFlag<SetFalse>>;
defm autolink : BoolFOption<"autolink",
CodeGenOpts<"Autolink">, DefaultTrue,
@@ -1203,10 +1280,10 @@ defm autolink : BoolFOption<"autolink",
def offload_EQ : CommaJoined<["--"], "offload=">, Flags<[NoXarchOption]>,
HelpText<"Specify comma-separated list of offloading target triples (CUDA and HIP only)">;
-// C++ Coroutines TS
-defm coroutines_ts : BoolFOption<"coroutines-ts",
+// C++ Coroutines
+defm coroutines : BoolFOption<"coroutines",
LangOpts<"Coroutines">, Default<cpp20.KeyPath>,
- PosFlag<SetTrue, [CC1Option], "Enable support for the C++ Coroutines TS">,
+ PosFlag<SetTrue, [CC1Option], "Enable support for the C++ Coroutines">,
NegFlag<SetFalse>>;
defm coro_aligned_allocation : BoolFOption<"coro-aligned-allocation",
@@ -1224,7 +1301,7 @@ defm experimental_library : BoolFOption<"experimental-library",
NegFlag<SetFalse>>;
def fembed_offload_object_EQ : Joined<["-"], "fembed-offload-object=">,
- Group<f_Group>, Flags<[NoXarchOption, CC1Option]>,
+ Group<f_Group>, Flags<[NoXarchOption, CC1Option, FC1Option]>,
HelpText<"Embed Offloading device-side binary into host object file as a section.">,
MarshallingInfoStringVector<CodeGenOpts<"OffloadObjects">>;
def fembed_bitcode_EQ : Joined<["-"], "fembed-bitcode=">,
@@ -1344,22 +1421,20 @@ def fno_profile_instr_use : Flag<["-"], "fno-profile-instr-use">,
HelpText<"Disable using instrumentation data for profile-guided optimization">;
def fno_profile_use : Flag<["-"], "fno-profile-use">,
Alias<fno_profile_instr_use>;
-defm profile_arcs : BoolFOption<"profile-arcs",
- CodeGenOpts<"EmitGcovArcs">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option, LinkOption]>, NegFlag<SetFalse>>;
-defm test_coverage : BoolFOption<"test-coverage",
- CodeGenOpts<"EmitGcovNotes">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>>;
+def ftest_coverage : Flag<["-"], "ftest-coverage">, Group<f_Group>,
+ HelpText<"Produce gcov notes files (*.gcno)">;
+def fno_test_coverage : Flag<["-"], "fno-test-coverage">, Group<f_Group>;
+def fprofile_arcs : Flag<["-"], "fprofile-arcs">, Group<f_Group>,
+ HelpText<"Instrument code to produce gcov data files (*.gcda)">;
+def fno_profile_arcs : Flag<["-"], "fno-profile-arcs">, Group<f_Group>;
def fprofile_filter_files_EQ : Joined<["-"], "fprofile-filter-files=">,
Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Instrument only functions from files where names match any regex separated by a semi-colon">,
- MarshallingInfoString<CodeGenOpts<"ProfileFilterFiles">>,
- ShouldParseIf<!strconcat(fprofile_arcs.KeyPath, "||", ftest_coverage.KeyPath)>;
+ MarshallingInfoString<CodeGenOpts<"ProfileFilterFiles">>;
def fprofile_exclude_files_EQ : Joined<["-"], "fprofile-exclude-files=">,
Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Instrument only functions from files where names don't match all the regexes separated by a semi-colon">,
- MarshallingInfoString<CodeGenOpts<"ProfileExcludeFiles">>,
- ShouldParseIf<!strconcat(fprofile_arcs.KeyPath, "||", ftest_coverage.KeyPath)>;
+ MarshallingInfoString<CodeGenOpts<"ProfileExcludeFiles">>;
def fprofile_update_EQ : Joined<["-"], "fprofile-update=">,
Group<f_Group>, Flags<[CC1Option, CoreOption]>, Values<"atomic,prefer-atomic,single">,
MetaVarName<"<method>">, HelpText<"Set update method of profile counters">,
@@ -1373,7 +1448,8 @@ def forder_file_instrumentation : Flag<["-"], "forder-file-instrumentation">,
HelpText<"Generate instrumented code to collect order file into default.profraw file (overridden by '=' form of option or LLVM_PROFILE_FILE env var)">;
def fprofile_list_EQ : Joined<["-"], "fprofile-list=">,
Group<f_Group>, Flags<[CC1Option, CoreOption]>,
- HelpText<"Filename defining the list of functions/files to instrument">,
+ HelpText<"Filename defining the list of functions/files to instrument. "
+ "The file uses the sanitizer special case list format.">,
MarshallingInfoStringVector<LangOpts<"ProfileListFiles">>;
def fprofile_function_groups : Joined<["-"], "fprofile-function-groups=">,
Group<f_Group>, Flags<[CC1Option]>, MetaVarName<"<N>">,
@@ -1429,6 +1505,13 @@ def fcomment_block_commands : CommaJoined<["-"], "fcomment-block-commands=">, Gr
def fparse_all_comments : Flag<["-"], "fparse-all-comments">, Group<f_clang_Group>, Flags<[CC1Option]>,
MarshallingInfoFlag<LangOpts<"CommentOpts.ParseAllComments">>;
def frecord_command_line : Flag<["-"], "frecord-command-line">,
+ DocBrief<[{Generate a section named ".GCC.command.line" containing the clang
+driver command-line. After linking, the section may contain multiple command
+lines, which will be individually terminated by null bytes. Separate arguments
+within a command line are combined with spaces; spaces and backslashes within an
+argument are escaped with backslashes. This format differs from the format of
+the equivalent section produced by GCC with the -frecord-gcc-switches flag.
+This option is currently only supported on ELF targets.}]>,
Group<f_clang_Group>;
def fno_record_command_line : Flag<["-"], "fno-record-command-line">,
Group<f_clang_Group>;
@@ -1436,7 +1519,10 @@ def : Flag<["-"], "frecord-gcc-switches">, Alias<frecord_command_line>;
def : Flag<["-"], "fno-record-gcc-switches">, Alias<fno_record_command_line>;
def fcommon : Flag<["-"], "fcommon">, Group<f_Group>,
Flags<[CoreOption, CC1Option]>, HelpText<"Place uninitialized global variables in a common block">,
- MarshallingInfoNegativeFlag<CodeGenOpts<"NoCommon">>;
+ MarshallingInfoNegativeFlag<CodeGenOpts<"NoCommon">>,
+ DocBrief<[{Place definitions of variables with no storage class and no initializer
+(tentative definitions) in a common block, instead of generating individual
+zero-initialized definitions (default -fno-common).}]>;
def fcompile_resource_EQ : Joined<["-"], "fcompile-resource=">, Group<f_Group>;
defm complete_member_pointers : BoolOption<"f", "complete-member-pointers",
LangOpts<"CompleteMemberPointers">, DefaultFalse,
@@ -1454,13 +1540,18 @@ defm constant_cfstrings : BoolFOption<"constant-cfstrings",
NegFlag<SetTrue, [CC1Option], "Disable creation of CodeFoundation-type constant strings">,
PosFlag<SetFalse>>;
def fconstant_string_class_EQ : Joined<["-"], "fconstant-string-class=">, Group<f_Group>;
-def fconstexpr_depth_EQ : Joined<["-"], "fconstexpr-depth=">, Group<f_Group>;
-def fconstexpr_steps_EQ : Joined<["-"], "fconstexpr-steps=">, Group<f_Group>;
+def fconstexpr_depth_EQ : Joined<["-"], "fconstexpr-depth=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Set the maximum depth of recursive constexpr function calls">,
+ MarshallingInfoInt<LangOpts<"ConstexprCallDepth">, "512">;
+def fconstexpr_steps_EQ : Joined<["-"], "fconstexpr-steps=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Set the maximum number of steps in constexpr function evaluation">,
+ MarshallingInfoInt<LangOpts<"ConstexprStepLimit">, "1048576">;
def fexperimental_new_constant_interpreter : Flag<["-"], "fexperimental-new-constant-interpreter">, Group<f_Group>,
HelpText<"Enable the experimental new constant interpreter">, Flags<[CC1Option]>,
MarshallingInfoFlag<LangOpts<"EnableNewConstInterp">>;
-def fconstexpr_backtrace_limit_EQ : Joined<["-"], "fconstexpr-backtrace-limit=">,
- Group<f_Group>;
+def fconstexpr_backtrace_limit_EQ : Joined<["-"], "fconstexpr-backtrace-limit=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Set the maximum number of entries to print in a constexpr evaluation backtrace (0 = no limit)">,
+ MarshallingInfoInt<DiagnosticOpts<"ConstexprBacktraceLimit">, "DiagnosticOptions::DefaultConstexprBacktraceLimit">;
def fcrash_diagnostics_EQ : Joined<["-"], "fcrash-diagnostics=">, Group<f_clang_Group>, Flags<[NoArgumentUnused, CoreOption]>,
HelpText<"Set level of crash diagnostic reporting, (option: off, compiler, all)">;
def fcrash_diagnostics : Flag<["-"], "fcrash-diagnostics">, Group<f_clang_Group>, Flags<[NoArgumentUnused, CoreOption]>,
@@ -1522,6 +1613,11 @@ def fdiagnostics_show_template_tree : Flag<["-"], "fdiagnostics-show-template-tr
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Print a template comparison tree for differing templates">,
MarshallingInfoFlag<DiagnosticOpts<"ShowTemplateTree">>;
+defm safe_buffer_usage_suggestions : BoolFOption<"safe-buffer-usage-suggestions",
+ DiagnosticOpts<"ShowSafeBufferUsageSuggestions">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option],
+ "Display suggestions to update code associated with -Wunsafe-buffer-usage warnings">,
+ NegFlag<SetFalse>>;
def fdiscard_value_names : Flag<["-"], "fdiscard-value-names">, Group<f_clang_Group>,
HelpText<"Discard value names in LLVM IR">, Flags<[NoXarchOption]>;
def fno_discard_value_names : Flag<["-"], "fno-discard-value-names">, Group<f_clang_Group>,
@@ -1552,7 +1648,7 @@ def femit_all_decls : Flag<["-"], "femit-all-decls">, Group<f_Group>, Flags<[CC1
defm emulated_tls : BoolFOption<"emulated-tls",
CodeGenOpts<"EmulatedTLS">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Use emutls functions to access thread_local variables">,
- NegFlag<SetFalse>, BothFlags<[CC1Option]>>;
+ NegFlag<SetFalse>>;
def fencoding_EQ : Joined<["-"], "fencoding=">, Group<f_Group>;
def ferror_limit_EQ : Joined<["-"], "ferror-limit=">, Group<f_Group>, Flags<[CoreOption]>;
defm exceptions : BoolFOption<"exceptions",
@@ -1595,6 +1691,15 @@ def ffloat16_excess_precision_EQ : Joined<["-"], "ffloat16-excess-precision=">,
Values<"standard,fast,none">, NormalizedValuesScope<"LangOptions">,
NormalizedValues<["FPP_Standard", "FPP_Fast", "FPP_None"]>,
MarshallingInfoEnum<LangOpts<"Float16ExcessPrecision">, "FPP_Standard">;
+def fbfloat16_excess_precision_EQ : Joined<["-"], "fbfloat16-excess-precision=">,
+ Group<f_Group>, Flags<[CC1Option, NoDriverOption]>,
+ HelpText<"Allows control over excess precision on targets where native "
+ "support for BFloat16 precision types is not available. By default, excess "
+ "precision is used to calculate intermediate results following the "
+ "rules specified in ISO C99.">,
+ Values<"standard,fast,none">, NormalizedValuesScope<"LangOptions">,
+ NormalizedValues<["FPP_Standard", "FPP_Fast", "FPP_None"]>,
+ MarshallingInfoEnum<LangOpts<"BFloat16ExcessPrecision">, "FPP_Standard">;
def : Flag<["-"], "fexpensive-optimizations">, Group<clang_ignored_gcc_optimization_f_Group>;
def : Flag<["-"], "fno-expensive-optimizations">, Group<clang_ignored_gcc_optimization_f_Group>;
def fextdirs_EQ : Joined<["-"], "fextdirs=">, Group<f_Group>;
@@ -1656,7 +1761,11 @@ defm force_enable_int128 : BoolFOption<"force-enable-int128",
defm keep_static_consts : BoolFOption<"keep-static-consts",
CodeGenOpts<"KeepStaticConsts">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Keep">, NegFlag<SetFalse, [], "Don't keep">,
- BothFlags<[NoXarchOption], " static const variables if unused">>;
+ BothFlags<[NoXarchOption], " static const variables even if unused">>;
+defm keep_persistent_storage_variables : BoolFOption<"keep-persistent-storage-variables",
+ CodeGenOpts<"KeepPersistentStorageVariables">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
+ BothFlags<[NoXarchOption], " keeping all variables that have a persistent storage duration, including global, static and thread-local variables, to guarantee that they can be directly addressed">>;
defm fixed_point : BoolFOption<"fixed-point",
LangOpts<"FixedPoint">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
@@ -1672,6 +1781,10 @@ defm memory_profile : OptInCC1FFlag<"memory-profile", "Enable", "Disable", " hea
def fmemory_profile_EQ : Joined<["-"], "fmemory-profile=">,
Group<f_Group>, Flags<[CC1Option]>, MetaVarName<"<directory>">,
HelpText<"Enable heap memory profiling and dump results into <directory>">;
+def fmemory_profile_use_EQ : Joined<["-"], "fmemory-profile-use=">,
+ Group<f_Group>, Flags<[CC1Option, CoreOption]>, MetaVarName<"<pathname>">,
+ HelpText<"Use memory profile for profile-guided memory optimization">,
+ MarshallingInfoString<CodeGenOpts<"MemoryProfileUsePath">>;
// Begin sanitizer flags. These should all be core options exposed in all driver
// modes.
@@ -1692,9 +1805,6 @@ def : Joined<["-"], "fsanitize-blacklist=">,
def fsanitize_system_ignorelist_EQ : Joined<["-"], "fsanitize-system-ignorelist=">,
HelpText<"Path to system ignorelist file for sanitizers">, Flags<[CC1Option]>;
-def : Joined<["-"], "fsanitize-system-blacklist=">,
- HelpText<"Alias for -fsanitize-system-ignorelist=">,
- Flags<[CC1Option, HelpHidden]>, Alias<fsanitize_system_ignorelist_EQ>;
def fno_sanitize_ignorelist : Flag<["-"], "fno-sanitize-ignorelist">,
Group<f_clang_Group>, HelpText<"Don't use ignorelist file for sanitizers">;
@@ -1725,6 +1835,10 @@ def fexperimental_sanitize_metadata_EQ : CommaJoined<["-"], "fexperimental-sanit
def fno_experimental_sanitize_metadata_EQ : CommaJoined<["-"], "fno-experimental-sanitize-metadata=">,
Group<f_Group>, Flags<[CoreOption]>,
HelpText<"Disable emitting metadata for binary analysis sanitizers">;
+def fexperimental_sanitize_metadata_ignorelist_EQ : Joined<["-"], "fexperimental-sanitize-metadata-ignorelist=">,
+ Group<f_Group>, Flags<[CoreOption]>,
+ HelpText<"Disable sanitizer metadata for modules and functions that match the provided special case list">,
+ MarshallingInfoStringVector<CodeGenOpts<"SanitizeMetadataIgnorelistFiles">>;
def fsanitize_memory_track_origins_EQ : Joined<["-"], "fsanitize-memory-track-origins=">,
Group<f_clang_Group>,
HelpText<"Enable origins tracking in MemorySanitizer">,
@@ -1743,6 +1857,10 @@ def fsanitize_address_outline_instrumentation : Flag<["-"], "fsanitize-address-o
def fno_sanitize_address_outline_instrumentation : Flag<["-"], "fno-sanitize-address-outline-instrumentation">,
Group<f_clang_Group>,
HelpText<"Use default code inlining logic for the address sanitizer">;
+defm sanitize_stable_abi
+ : OptInCC1FFlag<"sanitize-stable-abi", "Stable ", "Conventional ",
+ "ABI instrumentation for sanitizer runtime. Default: Conventional">;
+
def fsanitize_memtag_mode_EQ : Joined<["-"], "fsanitize-memtag-mode=">,
Group<f_clang_Group>,
HelpText<"Set default MTE mode to 'sync' (default) or 'async'">;
@@ -1782,6 +1900,17 @@ defm sanitize_address_poison_custom_array_cookie : BoolOption<"f", "sanitize-add
CodeGenOpts<"SanitizeAddressPoisonCustomArrayCookie">, DefaultFalse,
PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [], "Disable">,
BothFlags<[], " poisoning array cookies when using custom operator new[] in AddressSanitizer">>,
+ DocBrief<[{Enable "poisoning" array cookies when allocating arrays with a
+custom operator new\[\] in Address Sanitizer, preventing accesses to the
+cookies from user code. An array cookie is a small implementation-defined
+header added to certain array allocations to record metadata such as the
+length of the array. Accesses to array cookies from user code are technically
+allowed by the standard but are more likely to be the result of an
+out-of-bounds array access.
+
+An operator new\[\] is "custom" if it is not one of the allocation functions
+provided by the C++ standard library. Array cookies from non-custom allocation
+functions are always poisoned.}]>,
Group<f_clang_Group>;
defm sanitize_address_globals_dead_stripping : BoolOption<"f", "sanitize-address-globals-dead-stripping",
CodeGenOpts<"SanitizeAddressGlobalsDeadStripping">, DefaultFalse,
@@ -1797,7 +1926,10 @@ defm sanitize_address_use_odr_indicator : BoolOption<"f", "sanitize-address-use-
def sanitize_address_destructor_EQ
: Joined<["-"], "fsanitize-address-destructor=">,
Flags<[CC1Option]>,
- HelpText<"Set destructor type used in ASan instrumentation">,
+ HelpText<"Set the kind of module destructors emitted by "
+ "AddressSanitizer instrumentation. These destructors are "
+ "emitted to unregister instrumented global variables when "
+ "code is unloaded (e.g. via `dlclose()`).">,
Group<f_clang_Group>,
Values<"none,global">,
NormalizedValuesScope<"llvm::AsanDtorKind">,
@@ -1869,6 +2001,10 @@ def fsanitize_cfi_icall_generalize_pointers : Flag<["-"], "fsanitize-cfi-icall-g
Group<f_clang_Group>,
HelpText<"Generalize pointers in CFI indirect call type signature checks">,
MarshallingInfoFlag<CodeGenOpts<"SanitizeCfiICallGeneralizePointers">>;
+def fsanitize_cfi_icall_normalize_integers : Flag<["-"], "fsanitize-cfi-icall-experimental-normalize-integers">,
+ Group<f_clang_Group>,
+ HelpText<"Normalize integers in CFI indirect call type signature checks">,
+ MarshallingInfoFlag<CodeGenOpts<"SanitizeCfiICallNormalizeIntegers">>;
defm sanitize_cfi_canonical_jump_tables : BoolOption<"f", "sanitize-cfi-canonical-jump-tables",
CodeGenOpts<"SanitizeCfiCanonicalJumpTables">, DefaultFalse,
PosFlag<SetTrue, [], "Make">, NegFlag<SetFalse, [CoreOption, NoXarchOption], "Do not make">,
@@ -1929,16 +2065,24 @@ defm approx_func : BoolFOption<"approx-func", LangOpts<"ApproxFunc">, DefaultFal
NegFlag<SetFalse>>;
defm finite_math_only : BoolFOption<"finite-math-only",
LangOpts<"FiniteMathOnly">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "", [cl_finite_math_only.KeyPath, ffast_math.KeyPath]>,
+ PosFlag<SetTrue, [CC1Option], "Allow floating-point optimizations that "
+ "assume arguments and results are not NaNs or +-inf. This defines "
+ "the \\_\\_FINITE\\_MATH\\_ONLY\\_\\_ preprocessor macro.",
+ [cl_finite_math_only.KeyPath, ffast_math.KeyPath]>,
NegFlag<SetFalse>>;
defm signed_zeros : BoolFOption<"signed-zeros",
LangOpts<"NoSignedZero">, DefaultFalse,
NegFlag<SetTrue, [CC1Option, FC1Option, FlangOption], "Allow optimizations that ignore the sign of floating point zeros",
[cl_no_signed_zeros.KeyPath, funsafe_math_optimizations.KeyPath]>,
PosFlag<SetFalse>>;
-def fhonor_nans : Flag<["-"], "fhonor-nans">, Group<f_Group>;
+def fhonor_nans : Flag<["-"], "fhonor-nans">, Group<f_Group>,
+ HelpText<"Specify that floating-point optimizations are not allowed that "
+ "assume arguments and results are not NANs.">;
def fno_honor_nans : Flag<["-"], "fno-honor-nans">, Group<f_Group>;
-def fhonor_infinities : Flag<["-"], "fhonor-infinities">, Group<f_Group>;
+def fhonor_infinities : Flag<["-"], "fhonor-infinities">,
+ Group<f_Group>,
+ HelpText<"Specify that floating-point optimizations are not allowed that "
+ "assume arguments and results are not +-inf.">;
def fno_honor_infinities : Flag<["-"], "fno-honor-infinities">, Group<f_Group>;
// This option was originally misspelt "infinites" [sic].
def : Flag<["-"], "fhonor-infinites">, Alias<fhonor_infinities>;
@@ -1989,19 +2133,25 @@ defm delete_null_pointer_checks : BoolFOption<"delete-null-pointer-checks",
CodeGenOpts<"NullPointerIsValid">, DefaultFalse,
NegFlag<SetTrue, [CC1Option], "Do not treat usage of null pointers as undefined behavior">,
PosFlag<SetFalse, [], "Treat usage of null pointers as undefined behavior (default)">,
- BothFlags<[CoreOption]>>;
-
-def frewrite_map_file_EQ : Joined<["-"], "frewrite-map-file=">,
- Group<f_Group>,
- Flags<[NoXarchOption, CC1Option]>,
- MarshallingInfoStringVector<CodeGenOpts<"RewriteMapFiles">>;
+ BothFlags<[CoreOption]>>,
+ DocBrief<[{When enabled, treat null pointer dereference, creation of a reference to null,
+or passing a null pointer to a function parameter annotated with the "nonnull"
+attribute as undefined behavior. (And, thus the optimizer may assume that any
+pointer used in such a way must not have been null and optimize away the
+branches accordingly.) On by default.}]>;
defm use_line_directives : BoolFOption<"use-line-directives",
PreprocessorOutputOpts<"UseLineDirectives">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Use #line in preprocessed output">, NegFlag<SetFalse>>;
defm minimize_whitespace : BoolFOption<"minimize-whitespace",
PreprocessorOutputOpts<"MinimizeWhitespace">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Minimize whitespace when emitting preprocessor output">, NegFlag<SetFalse>>;
+ PosFlag<SetTrue, [CC1Option], "Ignore the whitespace from the input file "
+ "when emitting preprocessor output. It will only contain whitespace "
+      "when necessary, e.g. to keep two minus signs from merging into "
+ "an increment operator. Useful with the -P option to normalize "
+ "whitespace such that two files with only formatting changes are "
+ "equal.\n\nOnly valid with -E on C-like inputs and incompatible "
+ "with -traditional-cpp.">, NegFlag<SetFalse>>;
def ffreestanding : Flag<["-"], "ffreestanding">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Assert that the compilation takes place in a freestanding environment">,
@@ -2049,7 +2199,7 @@ def fexperimental_isel : Flag<["-"], "fexperimental-isel">, Group<f_clang_Group>
Alias<fglobal_isel>;
def fexperimental_strict_floating_point : Flag<["-"], "fexperimental-strict-floating-point">,
Group<f_clang_Group>, Flags<[CC1Option]>,
- HelpText<"Enables experimental strict floating point in LLVM.">,
+ HelpText<"Enables the use of non-default rounding modes and non-default exception handling on targets that are not currently ready.">,
MarshallingInfoFlag<LangOpts<"ExpStrictFP">>;
def finput_charset_EQ : Joined<["-"], "finput-charset=">, Flags<[FlangOption, FC1Option]>, Group<f_Group>,
HelpText<"Specify the default character set for source files">;
@@ -2130,16 +2280,14 @@ defm xray_ignore_loops : BoolFOption<"xray-ignore-loops",
NegFlag<SetFalse>>;
defm xray_function_index : BoolFOption<"xray-function-index",
- CodeGenOpts<"XRayOmitFunctionIndex">, DefaultTrue,
+ CodeGenOpts<"XRayFunctionIndex">, DefaultTrue,
+ PosFlag<SetTrue, []>,
NegFlag<SetFalse, [CC1Option], "Omit function index section at the"
- " expense of single-function patching performance">,
- PosFlag<SetTrue>>;
+ " expense of single-function patching performance">>;
def fxray_link_deps : Flag<["-"], "fxray-link-deps">, Group<f_Group>,
- Flags<[CC1Option]>,
- HelpText<"Tells clang to add the link dependencies for XRay.">;
-def fnoxray_link_deps : Flag<["-"], "fnoxray-link-deps">, Group<f_Group>,
- Flags<[CC1Option]>;
+ HelpText<"Link XRay runtime library when -fxray-instrument is specified (default)">;
+def fno_xray_link_deps : Flag<["-"], "fno-xray-link-deps">, Group<f_Group>;
def fxray_instrumentation_bundle :
Joined<["-"], "fxray-instrumentation-bundle=">,
@@ -2194,14 +2342,19 @@ def flax_vector_conversions : Flag<["-"], "flax-vector-conversions">, Group<f_Gr
def flimited_precision_EQ : Joined<["-"], "flimited-precision=">, Group<f_Group>;
def fapple_link_rtlib : Flag<["-"], "fapple-link-rtlib">, Group<f_Group>,
HelpText<"Force linking the clang builtins runtime library">;
-def flto_EQ : Joined<["-"], "flto=">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
+def flto_EQ : Joined<["-"], "flto=">, Flags<[CoreOption, CC1Option, FC1Option, FlangOption]>, Group<f_Group>,
HelpText<"Set LTO mode">, Values<"thin,full">;
def flto_EQ_jobserver : Flag<["-"], "flto=jobserver">, Group<f_Group>,
Alias<flto_EQ>, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">;
def flto_EQ_auto : Flag<["-"], "flto=auto">, Group<f_Group>,
Alias<flto_EQ>, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">;
-def flto : Flag<["-"], "flto">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
+def flto : Flag<["-"], "flto">, Flags<[CoreOption, CC1Option, FC1Option, FlangOption]>, Group<f_Group>,
Alias<flto_EQ>, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">;
+defm unified_lto : BoolFOption<"unified-lto",
+ CodeGenOpts<"UnifiedLTO">, DefaultFalse,
+ PosFlag<SetTrue, [], "Use the unified LTO pipeline">,
+ NegFlag<SetFalse, [], "Use distinct LTO pipelines">,
+ BothFlags<[CC1Option], "">>;
def fno_lto : Flag<["-"], "fno-lto">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
HelpText<"Disable LTO mode (default)">;
def foffload_lto_EQ : Joined<["-"], "foffload-lto=">, Flags<[CoreOption]>, Group<f_Group>,
@@ -2222,8 +2375,20 @@ def fthin_link_bitcode_EQ : Joined<["-"], "fthin-link-bitcode=">,
Flags<[CoreOption, CC1Option]>, Group<f_Group>,
HelpText<"Write minimized bitcode to <file> for the ThinLTO thin link only">,
MarshallingInfoString<CodeGenOpts<"ThinLinkBitcodeFile">>;
+defm fat_lto_objects : BoolFOption<"fat-lto-objects",
+ CodeGenOpts<"FatLTO">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Enable">,
+ NegFlag<SetFalse, [CC1Option], "Disable">,
+ BothFlags<[CC1Option], " fat LTO object support">>;
def fmacro_backtrace_limit_EQ : Joined<["-"], "fmacro-backtrace-limit=">,
- Group<f_Group>, Flags<[NoXarchOption, CoreOption]>;
+ Group<f_Group>, Flags<[NoXarchOption, CC1Option, CoreOption]>,
+ HelpText<"Set the maximum number of entries to print in a macro expansion backtrace (0 = no limit)">,
+ MarshallingInfoInt<DiagnosticOpts<"MacroBacktraceLimit">, "DiagnosticOptions::DefaultMacroBacktraceLimit">;
+def fcaret_diagnostics_max_lines_EQ :
+ Joined<["-"], "fcaret-diagnostics-max-lines=">,
+ Group<f_Group>, Flags<[CC1Option, CoreOption]>,
+ HelpText<"Set the maximum number of source lines to show in a caret diagnostic (0 = no limit).">,
+ MarshallingInfoInt<DiagnosticOpts<"SnippetLineLimit">, "DiagnosticOptions::DefaultSnippetLineLimit">;
defm merge_all_constants : BoolFOption<"merge-all-constants",
CodeGenOpts<"MergeAllConstants">, DefaultFalse,
PosFlag<SetTrue, [CC1Option, CoreOption], "Allow">, NegFlag<SetFalse, [], "Disallow">,
@@ -2388,11 +2553,8 @@ def fimplicit_module_maps : Flag <["-"], "fimplicit-module-maps">, Group<f_Group
Flags<[NoXarchOption, CC1Option, CoreOption]>,
HelpText<"Implicitly search the file system for module map files.">,
MarshallingInfoFlag<HeaderSearchOpts<"ImplicitModuleMaps">>;
-def fmodules_ts : Flag <["-"], "fmodules-ts">, Group<f_Group>,
- Flags<[CC1Option]>, HelpText<"Enable support for the C++ Modules TS">,
- MarshallingInfoFlag<LangOpts<"ModulesTS">>;
defm modules : BoolFOption<"modules",
- LangOpts<"Modules">, Default<!strconcat(fmodules_ts.KeyPath, "||", fcxx_modules.KeyPath)>,
+ LangOpts<"Modules">, Default<fcxx_modules.KeyPath>,
PosFlag<SetTrue, [CC1Option], "Enable the 'modules' language feature">,
NegFlag<SetFalse>, BothFlags<[NoXarchOption, CoreOption]>>;
def fmodule_maps : Flag <["-"], "fmodule-maps">, Flags<[CoreOption]>, Alias<fimplicit_module_maps>;
@@ -2431,6 +2593,10 @@ defm modules_search_all : BoolFOption<"modules-search-all",
defm implicit_modules : BoolFOption<"implicit-modules",
LangOpts<"ImplicitModules">, DefaultTrue,
NegFlag<SetFalse, [CC1Option]>, PosFlag<SetTrue>, BothFlags<[NoXarchOption,CoreOption]>>;
+def fno_modules_check_relocated : Joined<["-"], "fno-modules-check-relocated">,
+ Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Skip checks for relocated modules when loading PCM files">,
+ MarshallingInfoNegativeFlag<PreprocessorOpts<"ModulesCheckRelocated">>;
def fretain_comments_from_system_headers : Flag<["-"], "fretain-comments-from-system-headers">, Group<f_Group>, Flags<[CC1Option]>,
MarshallingInfoFlag<LangOpts<"RetainCommentsFromSystemHeaders">>;
def fmodule_header : Flag <["-"], "fmodule-header">, Group<f_Group>,
@@ -2473,10 +2639,10 @@ def fno_experimental_isel : Flag<["-"], "fno-experimental-isel">, Group<f_clang_
Alias<fno_global_isel>;
def fveclib : Joined<["-"], "fveclib=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use the given vector functions library">,
- Values<"Accelerate,libmvec,MASSV,SVML,SLEEF,Darwin_libsystem_m,none">,
+ Values<"Accelerate,libmvec,MASSV,SVML,SLEEF,Darwin_libsystem_m,ArmPL,none">,
NormalizedValuesScope<"CodeGenOptions">,
NormalizedValues<["Accelerate", "LIBMVEC", "MASSV", "SVML", "SLEEF",
- "Darwin_libsystem_m", "NoLibrary"]>,
+ "Darwin_libsystem_m", "ArmPL", "NoLibrary"]>,
MarshallingInfoEnum<CodeGenOpts<"VecLib">, "NoLibrary">;
def fno_lax_vector_conversions : Flag<["-"], "fno-lax-vector-conversions">, Group<f_Group>,
Alias<flax_vector_conversions_EQ>, AliasArgs<["none"]>;
@@ -2503,6 +2669,10 @@ defm operator_names : BoolFOption<"operator-names",
def fdiagnostics_absolute_paths : Flag<["-"], "fdiagnostics-absolute-paths">, Group<f_Group>,
Flags<[CC1Option, CoreOption]>, HelpText<"Print absolute paths in diagnostics">,
MarshallingInfoFlag<DiagnosticOpts<"AbsolutePath">>;
+defm diagnostics_show_line_numbers : BoolFOption<"diagnostics-show-line-numbers",
+ DiagnosticOpts<"ShowLineNumbers">, DefaultTrue,
+ NegFlag<SetFalse, [CC1Option], "Show line numbers in diagnostic code snippets">,
+ PosFlag<SetTrue>>;
def fno_stack_protector : Flag<["-"], "fno-stack-protector">, Group<f_Group>,
HelpText<"Disable the use of stack protectors">;
def fno_strict_aliasing : Flag<["-"], "fno-strict-aliasing">, Group<f_Group>,
@@ -2598,12 +2768,16 @@ defm objc_avoid_heapify_local_blocks : BoolFOption<"objc-avoid-heapify-local-blo
NegFlag<SetFalse, [], "Don't try">,
BothFlags<[CC1Option, NoDriverOption], " to avoid heapifying local blocks">>;
-def fomit_frame_pointer : Flag<["-"], "fomit-frame-pointer">, Group<f_Group>;
+def fomit_frame_pointer : Flag<["-"], "fomit-frame-pointer">, Group<f_Group>,
+ HelpText<"Omit the frame pointer from functions that don't need it. "
+ "Some stack unwinding cases, such as profilers and sanitizers, may prefer specifying -fno-omit-frame-pointer. "
+ "On many targets, -O1 and higher omit the frame pointer by default. "
+ "-m[no-]omit-leaf-frame-pointer takes precedence for leaf functions">;
def fopenmp : Flag<["-"], "fopenmp">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, FlangOption, FC1Option]>,
HelpText<"Parse OpenMP pragmas and generate parallel code.">;
def fno_openmp : Flag<["-"], "fno-openmp">, Group<f_Group>, Flags<[NoArgumentUnused]>;
-def fopenmp_version_EQ : Joined<["-"], "fopenmp-version=">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused]>,
- HelpText<"Set OpenMP version (e.g. 45 for OpenMP 4.5, 50 for OpenMP 5.0). Default value is 50.">;
+def fopenmp_version_EQ : Joined<["-"], "fopenmp-version=">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, FlangOption, FC1Option]>,
+ HelpText<"Set OpenMP version (e.g. 45 for OpenMP 4.5, 50 for OpenMP 5.0). Default value is 50 for Clang and 11 for Flang">;
defm openmp_extensions: BoolFOption<"openmp-extensions",
LangOpts<"OpenMPExtensions">, DefaultTrue,
PosFlag<SetTrue, [CC1Option, NoArgumentUnused],
@@ -2636,26 +2810,39 @@ def fopenmp_cuda_blocks_per_sm_EQ : Joined<["-"], "fopenmp-cuda-blocks-per-sm=">
Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
def fopenmp_cuda_teams_reduction_recs_num_EQ : Joined<["-"], "fopenmp-cuda-teams-reduction-recs-num=">, Group<f_Group>,
Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
-def fopenmp_target_debug : Flag<["-"], "fopenmp-target-debug">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused]>,
+
+//===----------------------------------------------------------------------===//
+// Shared cc1 + fc1 OpenMP Target Options
+//===----------------------------------------------------------------------===//
+
+let Flags = [CC1Option, FC1Option, NoArgumentUnused] in {
+let Group = f_Group in {
+
+def fopenmp_target_debug : Flag<["-"], "fopenmp-target-debug">,
HelpText<"Enable debugging in the OpenMP offloading device RTL">;
-def fno_openmp_target_debug : Flag<["-"], "fno-openmp-target-debug">, Group<f_Group>, Flags<[NoArgumentUnused]>;
-def fopenmp_target_debug_EQ : Joined<["-"], "fopenmp-target-debug=">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
-def fopenmp_assume_teams_oversubscription : Flag<["-"], "fopenmp-assume-teams-oversubscription">,
- Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
-def fopenmp_assume_threads_oversubscription : Flag<["-"], "fopenmp-assume-threads-oversubscription">,
- Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
-def fno_openmp_assume_teams_oversubscription : Flag<["-"], "fno-openmp-assume-teams-oversubscription">,
- Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
-def fno_openmp_assume_threads_oversubscription : Flag<["-"], "fno-openmp-assume-threads-oversubscription">,
- Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
-def fopenmp_assume_no_thread_state : Flag<["-"], "fopenmp-assume-no-thread-state">, Group<f_Group>,
- Flags<[CC1Option, NoArgumentUnused, HelpHidden]>,
+def fno_openmp_target_debug : Flag<["-"], "fno-openmp-target-debug">;
+
+} // let Group = f_Group
+} // let Flags = [CC1Option, FC1Option, NoArgumentUnused]
+
+let Flags = [CC1Option, FC1Option, NoArgumentUnused, HelpHidden] in {
+let Group = f_Group in {
+
+def fopenmp_target_debug_EQ : Joined<["-"], "fopenmp-target-debug=">;
+def fopenmp_assume_teams_oversubscription : Flag<["-"], "fopenmp-assume-teams-oversubscription">;
+def fopenmp_assume_threads_oversubscription : Flag<["-"], "fopenmp-assume-threads-oversubscription">;
+def fno_openmp_assume_teams_oversubscription : Flag<["-"], "fno-openmp-assume-teams-oversubscription">;
+def fno_openmp_assume_threads_oversubscription : Flag<["-"], "fno-openmp-assume-threads-oversubscription">;
+def fopenmp_assume_no_thread_state : Flag<["-"], "fopenmp-assume-no-thread-state">,
HelpText<"Assert no thread in a parallel region modifies an ICV">,
MarshallingInfoFlag<LangOpts<"OpenMPNoThreadState">>;
-def fopenmp_assume_no_nested_parallelism : Flag<["-"], "fopenmp-assume-no-nested-parallelism">, Group<f_Group>,
- Flags<[CC1Option, NoArgumentUnused, HelpHidden]>,
+def fopenmp_assume_no_nested_parallelism : Flag<["-"], "fopenmp-assume-no-nested-parallelism">,
HelpText<"Assert no nested parallel regions in the GPU">,
MarshallingInfoFlag<LangOpts<"OpenMPNoNestedParallelism">>;
+
+} // let Group = f_Group
+} // let Flags = [CC1Option, FC1Option, NoArgumentUnused, HelpHidden]
+
def fopenmp_offload_mandatory : Flag<["-"], "fopenmp-offload-mandatory">, Group<f_Group>,
Flags<[CC1Option, NoArgumentUnused]>,
HelpText<"Do not create a host fallback if offloading to the device fails.">,
@@ -2678,11 +2865,11 @@ def offload_new_driver : Flag<["--"], "offload-new-driver">, Flags<[CC1Option]>,
MarshallingInfoFlag<LangOpts<"OffloadingNewDriver">>, HelpText<"Use the new driver for offloading compilation.">;
def no_offload_new_driver : Flag<["--"], "no-offload-new-driver">, Flags<[CC1Option]>, Group<f_Group>,
HelpText<"Don't Use the new driver for offloading compilation.">;
-def offload_device_only : Flag<["--"], "offload-device-only">,
+def offload_device_only : Flag<["--"], "offload-device-only">, Flags<[FlangOption]>,
HelpText<"Only compile for the offloading device.">;
-def offload_host_only : Flag<["--"], "offload-host-only">,
+def offload_host_only : Flag<["--"], "offload-host-only">, Flags<[FlangOption]>,
HelpText<"Only compile for the offloading host.">;
-def offload_host_device : Flag<["--"], "offload-host-device">,
+def offload_host_device : Flag<["--"], "offload-host-device">, Flags<[FlangOption]>,
HelpText<"Only compile for the offloading host.">;
def cuda_device_only : Flag<["--"], "cuda-device-only">, Alias<offload_device_only>,
HelpText<"Compile CUDA code for device only">;
@@ -2809,7 +2996,9 @@ defm show_source_location : BoolFOption<"show-source-location",
defm spell_checking : BoolFOption<"spell-checking",
LangOpts<"SpellChecking">, DefaultTrue,
NegFlag<SetFalse, [CC1Option], "Disable spell-checking">, PosFlag<SetTrue>>;
-def fspell_checking_limit_EQ : Joined<["-"], "fspell-checking-limit=">, Group<f_Group>;
+def fspell_checking_limit_EQ : Joined<["-"], "fspell-checking-limit=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Set the maximum number of times to perform spell checking on unrecognized identifiers (0 = no limit)">,
+ MarshallingInfoInt<DiagnosticOpts<"SpellCheckingLimit">, "DiagnosticOptions::DefaultSpellCheckingLimit">;
def fsigned_bitfields : Flag<["-"], "fsigned-bitfields">, Group<f_Group>;
defm signed_char : BoolFOption<"signed-char",
LangOpts<"CharIsSigned">, DefaultTrue,
@@ -2824,7 +3013,8 @@ def fstack_protector_all : Flag<["-"], "fstack-protector-all">, Group<f_Group>,
defm stack_clash_protection : BoolFOption<"stack-clash-protection",
CodeGenOpts<"StackClashProtector">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
- BothFlags<[], " stack clash protection">>;
+ BothFlags<[], " stack clash protection">>,
+ DocBrief<"Instrument stack allocation to prevent stack clash attacks">;
def fstack_protector_strong : Flag<["-"], "fstack-protector-strong">, Group<f_Group>,
HelpText<"Enable stack protectors for some functions vulnerable to stack smashing. "
"Compared to -fstack-protector, this uses a stronger heuristic "
@@ -2878,12 +3068,16 @@ def fsyntax_only : Flag<["-"], "fsyntax-only">,
Flags<[NoXarchOption,CoreOption,CC1Option,FC1Option,FlangOption]>, Group<Action_Group>,
HelpText<"Run the preprocessor, parser and semantic analysis stages">;
def ftabstop_EQ : Joined<["-"], "ftabstop=">, Group<f_Group>;
-def ftemplate_depth_EQ : Joined<["-"], "ftemplate-depth=">, Group<f_Group>;
-def ftemplate_depth_ : Joined<["-"], "ftemplate-depth-">, Group<f_Group>;
-def ftemplate_backtrace_limit_EQ : Joined<["-"], "ftemplate-backtrace-limit=">,
- Group<f_Group>;
-def foperator_arrow_depth_EQ : Joined<["-"], "foperator-arrow-depth=">,
- Group<f_Group>;
+def ftemplate_depth_EQ : Joined<["-"], "ftemplate-depth=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Set the maximum depth of recursive template instantiation">,
+ MarshallingInfoInt<LangOpts<"InstantiationDepth">, "1024">;
+def : Joined<["-"], "ftemplate-depth-">, Group<f_Group>, Alias<ftemplate_depth_EQ>;
+def ftemplate_backtrace_limit_EQ : Joined<["-"], "ftemplate-backtrace-limit=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Set the maximum number of entries to print in a template instantiation backtrace (0 = no limit)">,
+ MarshallingInfoInt<DiagnosticOpts<"TemplateBacktraceLimit">, "DiagnosticOptions::DefaultTemplateBacktraceLimit">;
+def foperator_arrow_depth_EQ : Joined<["-"], "foperator-arrow-depth=">, Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Maximum number of 'operator->'s to call for a member access">,
+ MarshallingInfoInt<LangOpts<"ArrowDepth">, "256">;
def fsave_optimization_record : Flag<["-"], "fsave-optimization-record">,
Group<f_Group>, HelpText<"Generate a YAML optimization record file">;
@@ -2946,8 +3140,7 @@ def ftime_trace : Flag<["-"], "ftime-trace">, Group<f_Group>,
Turn on time profiler. Generates JSON file based on output filename. Results
can be analyzed with chrome://tracing or `Speedscope App
<https://www.speedscope.app>`_ for flamegraph visualization.}]>,
- Flags<[CC1Option, CoreOption]>,
- MarshallingInfoFlag<FrontendOpts<"TimeTrace">>;
+ Flags<[CoreOption]>;
def ftime_trace_granularity_EQ : Joined<["-"], "ftime-trace-granularity=">, Group<f_Group>,
HelpText<"Minimum time granularity (in microseconds) traced by time profiler">,
Flags<[CC1Option, CoreOption]>,
@@ -3050,7 +3243,7 @@ def mdefault_visibility_export_mapping_EQ : Joined<["-"], "mdefault-visibility-e
NormalizedValuesScope<"LangOptions::DefaultVisiblityExportMapping">,
NormalizedValues<["None", "Explicit", "All"]>,
HelpText<"Mapping between default visibility and export">,
- Group<m_Group>, Flags<[CC1Option]>,
+ Group<m_Group>, Flags<[CC1Option,TargetSpecific]>,
MarshallingInfoEnum<LangOpts<"DefaultVisibilityExportMapping">,"None">;
defm new_infallible : BoolFOption<"new-infallible",
LangOpts<"NewInfallible">, DefaultFalse,
@@ -3068,7 +3261,10 @@ defm split_lto_unit : BoolFOption<"split-lto-unit",
defm force_emit_vtables : BoolFOption<"force-emit-vtables",
CodeGenOpts<"ForceEmitVTables">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Emits more virtual tables to improve devirtualization">,
- NegFlag<SetFalse>, BothFlags<[CoreOption]>>;
+ NegFlag<SetFalse>, BothFlags<[CoreOption]>>,
+ DocBrief<[{In order to improve devirtualization, forces emitting of vtables even in
+modules where it isn't necessary. It causes more inline virtual functions
+to be emitted.}]>;
defm virtual_function_elimination : BoolFOption<"virtual-function-elimination",
CodeGenOpts<"VirtualFunctionElimination">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Enables dead virtual function elimination optimization. Requires -flto=full">,
@@ -3164,11 +3360,13 @@ def fdebug_default_version: Joined<["-"], "fdebug-default-version=">, Group<f_Gr
def fdebug_prefix_map_EQ
: Joined<["-"], "fdebug-prefix-map=">, Group<f_Group>,
Flags<[CC1Option,CC1AsOption]>,
- HelpText<"remap file source paths in debug info">;
+ MetaVarName<"<old>=<new>">,
+ HelpText<"For paths in debug info, remap directory <old> to <new>. If multiple options match a path, the last option wins">;
def fcoverage_prefix_map_EQ
: Joined<["-"], "fcoverage-prefix-map=">, Group<f_Group>,
Flags<[CC1Option]>,
- HelpText<"remap file source paths in coverage mapping">;
+ MetaVarName<"<old>=<new>">,
+ HelpText<"remap file source paths <old> to <new> in coverage mapping. If there are multiple options, prefix replacement is applied in reverse order starting from the last one">;
def ffile_prefix_map_EQ
: Joined<["-"], "ffile-prefix-map=">, Group<f_Group>,
HelpText<"remap file source paths in debug info, predefined preprocessor "
@@ -3187,10 +3385,13 @@ def femit_dwarf_unwind_EQ : Joined<["-"], "femit-dwarf-unwind=">,
NormalizedValues<["Always", "NoCompactUnwind", "Default"]>,
NormalizedValuesScope<"llvm::EmitDwarfUnwindType">,
MarshallingInfoEnum<CodeGenOpts<"EmitDwarfUnwind">, "Default">;
+defm emit_compact_unwind_non_canonical : BoolFOption<"emit-compact-unwind-non-canonical",
+ CodeGenOpts<"EmitCompactUnwindNonCanonical">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option, CC1AsOption], "Try emitting Compact-Unwind for non-canonical entries. Maybe overriden by other constraints">, NegFlag<SetFalse>>;
def g_Flag : Flag<["-"], "g">, Group<g_Group>,
- HelpText<"Generate source-level debug information">;
+ Flags<[CoreOption,FlangOption]>, HelpText<"Generate source-level debug information">;
def gline_tables_only : Flag<["-"], "gline-tables-only">, Group<gN_Group>,
- Flags<[CoreOption]>, HelpText<"Emit debug line number tables only">;
+ Flags<[CoreOption,FlangOption]>, HelpText<"Emit debug line number tables only">;
def gline_directives_only : Flag<["-"], "gline-directives-only">, Group<gN_Group>,
Flags<[CoreOption]>, HelpText<"Emit debug line info directives only">;
def gmlt : Flag<["-"], "gmlt">, Alias<gline_tables_only>;
@@ -3259,17 +3460,21 @@ def : Flag<["-"], "grecord-gcc-switches">, Alias<grecord_command_line>;
def : Flag<["-"], "gno-record-gcc-switches">, Alias<gno_record_command_line>;
defm strict_dwarf : BoolOption<"g", "strict-dwarf",
CodeGenOpts<"DebugStrictDwarf">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>, BothFlags<[CoreOption]>>,
+ PosFlag<SetTrue, [CC1Option], "Restrict DWARF features to those defined in "
+ "the specified version, avoiding features from later versions.">,
+ NegFlag<SetFalse>, BothFlags<[CoreOption]>>,
Group<g_flags_Group>;
defm column_info : BoolOption<"g", "column-info",
CodeGenOpts<"DebugColumnInfo">, DefaultTrue,
NegFlag<SetFalse, [CC1Option]>, PosFlag<SetTrue>, BothFlags<[CoreOption]>>,
Group<g_flags_Group>;
-def gsplit_dwarf : Flag<["-"], "gsplit-dwarf">, Group<g_flags_Group>;
+def gsplit_dwarf : Flag<["-"], "gsplit-dwarf">, Group<g_flags_Group>,
+ Flags<[CoreOption]>;
def gsplit_dwarf_EQ : Joined<["-"], "gsplit-dwarf=">, Group<g_flags_Group>,
- HelpText<"Set DWARF fission mode">,
+ Flags<[CoreOption]>, HelpText<"Set DWARF fission mode">,
Values<"split,single">;
-def gno_split_dwarf : Flag<["-"], "gno-split-dwarf">, Group<g_flags_Group>;
+def gno_split_dwarf : Flag<["-"], "gno-split-dwarf">, Group<g_flags_Group>,
+ Flags<[CoreOption]>;
def gsimple_template_names : Flag<["-"], "gsimple-template-names">, Group<g_flags_Group>;
def gsimple_template_names_EQ
: Joined<["-"], "gsimple-template-names=">,
@@ -3306,7 +3511,7 @@ def headerpad__max__install__names : Joined<["-"], "headerpad_max_install_names"
def help : Flag<["-", "--"], "help">, Flags<[CC1Option,CC1AsOption, FC1Option,
FlangOption]>, HelpText<"Display available options">,
MarshallingInfoFlag<FrontendOpts<"ShowHelp">>;
-def ibuiltininc : Flag<["-"], "ibuiltininc">,
+def ibuiltininc : Flag<["-"], "ibuiltininc">, Group<clang_i_Group>,
HelpText<"Enable builtin #include directories even when -nostdinc is used "
"before or after -ibuiltininc. "
"Using -nobuiltininc after the option disables it">;
@@ -3361,62 +3566,73 @@ def iwithsysroot : JoinedOrSeparate<["-"], "iwithsysroot">, Group<clang_i_Group>
Flags<[CC1Option]>;
def ivfsoverlay : JoinedOrSeparate<["-"], "ivfsoverlay">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Overlay the virtual filesystem described by file over the real file system">;
+def vfsoverlay : JoinedOrSeparate<["-", "--"], "vfsoverlay">, Flags<[CC1Option, CoreOption]>,
+ HelpText<"Overlay the virtual filesystem described by file over the real file system. "
+ "Additionally, pass this overlay file to the linker if it supports it">;
def imultilib : Separate<["-"], "imultilib">, Group<gfortran_Group>;
+def K : Flag<["-"], "K">, Flags<[LinkerInput]>;
def keep__private__externs : Flag<["-"], "keep_private_externs">;
def l : JoinedOrSeparate<["-"], "l">, Flags<[LinkerInput, RenderJoined]>,
Group<Link_Group>;
def lazy__framework : Separate<["-"], "lazy_framework">, Flags<[LinkerInput]>;
def lazy__library : Separate<["-"], "lazy_library">, Flags<[LinkerInput]>;
-def mlittle_endian : Flag<["-"], "mlittle-endian">, Flags<[NoXarchOption]>;
+def mlittle_endian : Flag<["-"], "mlittle-endian">, Group<m_Group>, Flags<[NoXarchOption,TargetSpecific]>;
def EL : Flag<["-"], "EL">, Alias<mlittle_endian>;
-def mbig_endian : Flag<["-"], "mbig-endian">, Flags<[NoXarchOption]>;
+def mbig_endian : Flag<["-"], "mbig-endian">, Group<m_Group>, Flags<[NoXarchOption,TargetSpecific]>;
def EB : Flag<["-"], "EB">, Alias<mbig_endian>;
def m16 : Flag<["-"], "m16">, Group<m_Group>, Flags<[NoXarchOption, CoreOption]>;
def m32 : Flag<["-"], "m32">, Group<m_Group>, Flags<[NoXarchOption, CoreOption]>;
+def maix32 : Flag<["-"], "maix32">, Group<m_Group>, Flags<[NoXarchOption]>;
def mqdsp6_compat : Flag<["-"], "mqdsp6-compat">, Group<m_Group>, Flags<[NoXarchOption,CC1Option]>,
HelpText<"Enable hexagon-qdsp6 backward compatibility">,
MarshallingInfoFlag<LangOpts<"HexagonQdsp6Compat">>;
def m64 : Flag<["-"], "m64">, Group<m_Group>, Flags<[NoXarchOption, CoreOption]>;
+def maix64 : Flag<["-"], "maix64">, Group<m_Group>, Flags<[NoXarchOption]>;
def mx32 : Flag<["-"], "mx32">, Group<m_Group>, Flags<[NoXarchOption, CoreOption]>;
-def mabi_EQ : Joined<["-"], "mabi=">, Group<m_Group>;
def miamcu : Flag<["-"], "miamcu">, Group<m_Group>, Flags<[NoXarchOption, CoreOption]>,
HelpText<"Use Intel MCU ABI">;
def mno_iamcu : Flag<["-"], "mno-iamcu">, Group<m_Group>, Flags<[NoXarchOption, CoreOption]>;
def malign_functions_EQ : Joined<["-"], "malign-functions=">, Group<clang_ignored_m_Group>;
def malign_loops_EQ : Joined<["-"], "malign-loops=">, Group<clang_ignored_m_Group>;
def malign_jumps_EQ : Joined<["-"], "malign-jumps=">, Group<clang_ignored_m_Group>;
-def malign_branch_EQ : CommaJoined<["-"], "malign-branch=">, Group<m_Group>, Flags<[NoXarchOption]>,
+
+let Flags = [TargetSpecific] in {
+def mabi_EQ : Joined<["-"], "mabi=">, Group<m_Group>;
+def malign_branch_EQ : CommaJoined<["-"], "malign-branch=">, Group<m_Group>,
HelpText<"Specify types of branches to align">;
-def malign_branch_boundary_EQ : Joined<["-"], "malign-branch-boundary=">, Group<m_Group>, Flags<[NoXarchOption]>,
+def malign_branch_boundary_EQ : Joined<["-"], "malign-branch-boundary=">, Group<m_Group>,
HelpText<"Specify the boundary's size to align branches">;
-def mpad_max_prefix_size_EQ : Joined<["-"], "mpad-max-prefix-size=">, Group<m_Group>, Flags<[NoXarchOption]>,
+def mpad_max_prefix_size_EQ : Joined<["-"], "mpad-max-prefix-size=">, Group<m_Group>,
HelpText<"Specify maximum number of prefixes to use for padding">;
-def mbranches_within_32B_boundaries : Flag<["-"], "mbranches-within-32B-boundaries">, Flags<[NoXarchOption]>, Group<m_Group>,
+def mbranches_within_32B_boundaries : Flag<["-"], "mbranches-within-32B-boundaries">, Group<m_Group>,
HelpText<"Align selected branches (fused, jcc, jmp) within 32-byte boundary">;
def mfancy_math_387 : Flag<["-"], "mfancy-math-387">, Group<clang_ignored_m_Group>;
def mlong_calls : Flag<["-"], "mlong-calls">, Group<m_Group>,
HelpText<"Generate branches with extended addressability, usually via indirect jumps.">;
+} // let Flags = [TargetSpecific]
def mdouble_EQ : Joined<["-"], "mdouble=">, Group<m_Group>,
MetaVarName<"<n">, Values<"32,64">, Flags<[CC1Option]>,
HelpText<"Force double to be <n> bits">,
MarshallingInfoInt<LangOpts<"DoubleSize">, "0">;
-def LongDouble_Group : OptionGroup<"<LongDouble group>">, Group<m_Group>,
- DocName<"Long double flags">,
- DocBrief<[{Selects the long double implementation}]>;
def mlong_double_64 : Flag<["-"], "mlong-double-64">, Group<LongDouble_Group>, Flags<[CC1Option]>,
HelpText<"Force long double to be 64 bits">;
def mlong_double_80 : Flag<["-"], "mlong-double-80">, Group<LongDouble_Group>, Flags<[CC1Option]>,
HelpText<"Force long double to be 80 bits, padded to 128 bits for storage">;
def mlong_double_128 : Flag<["-"], "mlong-double-128">, Group<LongDouble_Group>, Flags<[CC1Option]>,
HelpText<"Force long double to be 128 bits">;
+let Flags = [TargetSpecific] in {
def mno_long_calls : Flag<["-"], "mno-long-calls">, Group<m_Group>,
HelpText<"Restore the default behaviour of not generating long calls">;
+} // let Flags = [TargetSpecific]
def mexecute_only : Flag<["-"], "mexecute-only">, Group<m_arm_Features_Group>,
HelpText<"Disallow generation of data access to code sections (ARM only)">;
def mno_execute_only : Flag<["-"], "mno-execute-only">, Group<m_arm_Features_Group>,
HelpText<"Allow generation of data access to code sections (ARM only)">;
-def mtp_mode_EQ : Joined<["-"], "mtp=">, Group<m_arm_Features_Group>, Values<"soft,cp15,el0,el1,el2,el3">,
- HelpText<"Thread pointer access method (AArch32/AArch64 only)">;
+let Flags = [TargetSpecific] in {
+def mtp_mode_EQ : Joined<["-"], "mtp=">, Group<m_arm_Features_Group>, Values<"soft,cp15,tpidrurw,tpidruro,tpidrprw,el0,el1,el2,el3,tpidr_el0,tpidr_el1,tpidr_el2,tpidr_el3,tpidrro_el0">,
+ HelpText<"Thread pointer access method. "
+ "For AArch32: 'soft' uses a function call, or 'tpidrurw', 'tpidruro' or 'tpidrprw' use the three CP15 registers. 'cp15' is an alias for 'tpidruro'. "
+ "For AArch64: 'tpidr_el0', 'tpidr_el1', 'tpidr_el2', 'tpidr_el3' or 'tpidrro_el0' use the five system registers. 'elN' is an alias for 'tpidr_elN'.">;
def mpure_code : Flag<["-"], "mpure-code">, Alias<mexecute_only>; // Alias for GCC compatibility
def mno_pure_code : Flag<["-"], "mno-pure-code">, Alias<mno_execute_only>;
def mtvos_version_min_EQ : Joined<["-"], "mtvos-version-min=">, Group<m_Group>;
@@ -3424,9 +3640,11 @@ def mappletvos_version_min_EQ : Joined<["-"], "mappletvos-version-min=">, Alias<
def mtvos_simulator_version_min_EQ : Joined<["-"], "mtvos-simulator-version-min=">;
def mappletvsimulator_version_min_EQ : Joined<["-"], "mappletvsimulator-version-min=">, Alias<mtvos_simulator_version_min_EQ>;
def mwatchos_version_min_EQ : Joined<["-"], "mwatchos-version-min=">, Group<m_Group>;
-def mwatchos_simulator_version_min_EQ : Joined<["-"], "mwatchos-simulator-version-min=">;
+def mwatchos_simulator_version_min_EQ : Joined<["-"], "mwatchos-simulator-version-min=">, Group<m_Group>;
def mwatchsimulator_version_min_EQ : Joined<["-"], "mwatchsimulator-version-min=">, Alias<mwatchos_simulator_version_min_EQ>;
-def march_EQ : Joined<["-"], "march=">, Group<m_Group>, Flags<[CoreOption]>;
+} // let Flags = [TargetSpecific]
+def march_EQ : Joined<["-"], "march=">, Group<m_Group>, Flags<[CoreOption,TargetSpecific]>,
+ HelpText<"For a list of available architectures for the target use '-mcpu=help'">;
def masm_EQ : Joined<["-"], "masm=">, Group<m_Group>, Flags<[NoXarchOption]>;
def inline_asm_EQ : Joined<["-"], "inline-asm=">, Group<m_Group>, Flags<[CC1Option]>,
Values<"att,intel">,
@@ -3441,16 +3659,18 @@ def mtls_size_EQ : Joined<["-"], "mtls-size=">, Group<m_Group>, Flags<[NoXarchOp
def mimplicit_it_EQ : Joined<["-"], "mimplicit-it=">, Group<m_Group>;
def mdefault_build_attributes : Joined<["-"], "mdefault-build-attributes">, Group<m_Group>;
def mno_default_build_attributes : Joined<["-"], "mno-default-build-attributes">, Group<m_Group>;
+let Flags = [TargetSpecific] in {
def mconstant_cfstrings : Flag<["-"], "mconstant-cfstrings">, Group<clang_ignored_m_Group>;
-def mconsole : Joined<["-"], "mconsole">, Group<m_Group>, Flags<[NoXarchOption]>;
-def mwindows : Joined<["-"], "mwindows">, Group<m_Group>, Flags<[NoXarchOption]>;
-def mdll : Joined<["-"], "mdll">, Group<m_Group>, Flags<[NoXarchOption]>;
-def municode : Joined<["-"], "municode">, Group<m_Group>, Flags<[NoXarchOption]>;
-def mthreads : Joined<["-"], "mthreads">, Group<m_Group>, Flags<[NoXarchOption]>;
-def mguard_EQ : Joined<["-"], "mguard=">, Group<m_Group>, Flags<[NoXarchOption]>,
+def mconsole : Joined<["-"], "mconsole">, Group<m_Group>;
+def mwindows : Joined<["-"], "mwindows">, Group<m_Group>;
+def mdll : Joined<["-"], "mdll">, Group<m_Group>;
+def municode : Joined<["-"], "municode">, Group<m_Group>;
+def mthreads : Joined<["-"], "mthreads">, Group<m_Group>;
+def mguard_EQ : Joined<["-"], "mguard=">, Group<m_Group>,
HelpText<"Enable or disable Control Flow Guard checks and guard tables emission">,
Values<"none,cf,cf-nochecks">;
-def mcpu_EQ : Joined<["-"], "mcpu=">, Group<m_Group>;
+def mcpu_EQ : Joined<["-"], "mcpu=">, Group<m_Group>,
+ HelpText<"For a list of available CPUs for the target use '-mcpu=help'">;
def mmcu_EQ : Joined<["-"], "mmcu=">, Group<m_Group>;
def msim : Flag<["-"], "msim">, Group<m_Group>;
def mdynamic_no_pic : Joined<["-"], "mdynamic-no-pic">, Group<m_Group>;
@@ -3458,29 +3678,35 @@ def mfix_and_continue : Flag<["-"], "mfix-and-continue">, Group<clang_ignored_m_
def mieee_fp : Flag<["-"], "mieee-fp">, Group<clang_ignored_m_Group>;
def minline_all_stringops : Flag<["-"], "minline-all-stringops">, Group<clang_ignored_m_Group>;
def mno_inline_all_stringops : Flag<["-"], "mno-inline-all-stringops">, Group<clang_ignored_m_Group>;
+} // let Flags = [TargetSpecific]
def malign_double : Flag<["-"], "malign-double">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Align doubles to two words in structs (x86 only)">,
MarshallingInfoFlag<LangOpts<"AlignDouble">>;
+let Flags = [TargetSpecific] in {
def mfloat_abi_EQ : Joined<["-"], "mfloat-abi=">, Group<m_Group>, Values<"soft,softfp,hard">;
def mfpmath_EQ : Joined<["-"], "mfpmath=">, Group<m_Group>;
def mfpu_EQ : Joined<["-"], "mfpu=">, Group<m_Group>;
def mhwdiv_EQ : Joined<["-"], "mhwdiv=">, Group<m_Group>;
def mhwmult_EQ : Joined<["-"], "mhwmult=">, Group<m_Group>;
+} // let Flags = [TargetSpecific]
def mglobal_merge : Flag<["-"], "mglobal-merge">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Enable merging of globals">;
+let Flags = [TargetSpecific] in {
def mhard_float : Flag<["-"], "mhard-float">, Group<m_Group>;
def mios_version_min_EQ : Joined<["-"], "mios-version-min=">,
Group<m_Group>, HelpText<"Set iOS deployment target">;
def : Joined<["-"], "miphoneos-version-min=">,
Group<m_Group>, Alias<mios_version_min_EQ>;
-def mios_simulator_version_min_EQ : Joined<["-"], "mios-simulator-version-min=">;
+def mios_simulator_version_min_EQ : Joined<["-"], "mios-simulator-version-min=">, Group<m_Group>;
def : Joined<["-"], "miphonesimulator-version-min=">, Alias<mios_simulator_version_min_EQ>;
def mkernel : Flag<["-"], "mkernel">, Group<m_Group>;
-def mlinker_version_EQ : Joined<["-"], "mlinker-version=">,
- Flags<[NoXarchOption]>;
+def mlinker_version_EQ : Joined<["-"], "mlinker-version=">, Group<m_Group>, Flags<[NoXarchOption]>;
+} // let Flags = [TargetSpecific]
def mllvm : Separate<["-"], "mllvm">,Flags<[CC1Option,CC1AsOption,CoreOption,FC1Option,FlangOption]>,
HelpText<"Additional arguments to forward to LLVM's option processing">,
MarshallingInfoStringVector<FrontendOpts<"LLVMArgs">>;
+def : Joined<["-"], "mllvm=">, Flags<[CoreOption,FlangOption]>, Alias<mllvm>,
+ HelpText<"Alias for -mllvm">, MetaVarName<"<arg>">;
def mmlir : Separate<["-"], "mmlir">, Flags<[CoreOption,FC1Option,FlangOption]>,
HelpText<"Additional arguments to forward to MLIR's option processing">;
def ffuchsia_api_level_EQ : Joined<["-"], "ffuchsia-api-level=">,
@@ -3514,6 +3740,8 @@ def mstack_probe_size : Joined<["-"], "mstack-probe-size=">, Group<m_Group>, Fla
MarshallingInfoInt<CodeGenOpts<"StackProbeSize">, "4096">;
def mstack_arg_probe : Flag<["-"], "mstack-arg-probe">, Group<m_Group>,
HelpText<"Enable stack probes">;
+def mzos_sys_include_EQ : Joined<["-"], "mzos-sys-include=">, MetaVarName<"<SysInclude>">,
+ HelpText<"Path to system headers on z/OS">;
def mno_stack_arg_probe : Flag<["-"], "mno-stack-arg-probe">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Disable stack probes which are enabled by default">,
MarshallingInfoFlag<CodeGenOpts<"NoStackArgProbe">>;
@@ -3528,6 +3756,12 @@ def meabi : Separate<["-"], "meabi">, Group<m_Group>, Flags<[CC1Option]>,
NormalizedValues<["Default", "EABI4", "EABI5", "GNU"]>;
def mtargetos_EQ : Joined<["-"], "mtargetos=">, Group<m_Group>,
HelpText<"Set the deployment target to be the specified OS and OS version">;
+def mzos_hlq_le_EQ : Joined<["-"], "mzos-hlq-le=">, MetaVarName<"<LeHLQ>">,
+ HelpText<"High level qualifier for z/OS Language Environment datasets">;
+def mzos_hlq_clang_EQ : Joined<["-"], "mzos-hlq-clang=">, MetaVarName<"<ClangHLQ>">,
+ HelpText<"High level qualifier for z/OS C++RT side deck datasets">;
+def mzos_hlq_csslib_EQ : Joined<["-"], "mzos-hlq-csslib=">, MetaVarName<"<CsslibHLQ>">,
+ HelpText<"High level qualifier for z/OS CSSLIB dataset">;
def mno_constant_cfstrings : Flag<["-"], "mno-constant-cfstrings">, Group<m_Group>;
def mno_global_merge : Flag<["-"], "mno-global-merge">, Group<m_Group>, Flags<[CC1Option]>,
@@ -3539,9 +3773,11 @@ def mno_tls_direct_seg_refs : Flag<["-"], "mno-tls-direct-seg-refs">, Group<m_Gr
HelpText<"Disable direct TLS access through segment registers">,
MarshallingInfoFlag<CodeGenOpts<"IndirectTlsSegRefs">>;
def mno_relax_all : Flag<["-"], "mno-relax-all">, Group<m_Group>;
+let Flags = [TargetSpecific] in {
def mno_rtd: Flag<["-"], "mno-rtd">, Group<m_Group>;
def mno_soft_float : Flag<["-"], "mno-soft-float">, Group<m_Group>;
def mno_stackrealign : Flag<["-"], "mno-stackrealign">, Group<m_Group>;
+} // let Flags = [TargetSpecific]
def mretpoline : Flag<["-"], "mretpoline">, Group<m_Group>, Flags<[CoreOption,NoXarchOption]>;
def mno_retpoline : Flag<["-"], "mno-retpoline">, Group<m_Group>, Flags<[CoreOption,NoXarchOption]>;
@@ -3570,28 +3806,41 @@ def mno_relax : Flag<["-"], "mno-relax">, Group<m_Group>,
def msmall_data_limit_EQ : Joined<["-"], "msmall-data-limit=">, Group<m_Group>,
Alias<G>,
HelpText<"Put global and static data smaller than the limit into a special section">;
+let Flags = [TargetSpecific] in {
def msave_restore : Flag<["-"], "msave-restore">, Group<m_riscv_Features_Group>,
HelpText<"Enable using library calls for save and restore">;
def mno_save_restore : Flag<["-"], "mno-save-restore">, Group<m_riscv_Features_Group>,
HelpText<"Disable using library calls for save and restore">;
+} // let Flags = [TargetSpecific]
def mcmodel_EQ_medlow : Flag<["-"], "mcmodel=medlow">, Group<m_Group>,
Flags<[CC1Option]>, Alias<mcmodel_EQ>, AliasArgs<["small"]>,
HelpText<"Equivalent to -mcmodel=small, compatible with RISC-V gcc.">;
def mcmodel_EQ_medany : Flag<["-"], "mcmodel=medany">, Group<m_Group>,
Flags<[CC1Option]>, Alias<mcmodel_EQ>, AliasArgs<["medium"]>,
HelpText<"Equivalent to -mcmodel=medium, compatible with RISC-V gcc.">;
+let Flags = [TargetSpecific] in {
def menable_experimental_extensions : Flag<["-"], "menable-experimental-extensions">, Group<m_Group>,
HelpText<"Enable use of experimental RISC-V extensions.">;
-
-def munaligned_access : Flag<["-"], "munaligned-access">, Group<m_arm_Features_Group>,
- HelpText<"Allow memory accesses to be unaligned (AArch32/AArch64 only)">;
-def mno_unaligned_access : Flag<["-"], "mno-unaligned-access">, Group<m_arm_Features_Group>,
- HelpText<"Force all memory accesses to be aligned (AArch32/AArch64 only)">;
+def mrvv_vector_bits_EQ : Joined<["-"], "mrvv-vector-bits=">, Group<m_Group>,
+ HelpText<"Specify the size in bits of an RVV vector register. Defaults to "
+ "the vector length agnostic value of \"scalable\". Accepts power of "
+ "2 values between 64 and 65536. Also accepts \"zvl\" "
+ "to use the value implied by -march/-mcpu. Value will be reflected "
+ "in __riscv_v_fixed_vlen preprocessor define (RISC-V only)">;
+
+def munaligned_access : Flag<["-"], "munaligned-access">, Group<m_Group>,
+ HelpText<"Allow memory accesses to be unaligned (AArch32/AArch64/LoongArch only)">;
+def mno_unaligned_access : Flag<["-"], "mno-unaligned-access">, Group<m_Group>,
+ HelpText<"Force all memory accesses to be aligned (AArch32/AArch64/LoongArch only)">;
+} // let Flags = [TargetSpecific]
def mstrict_align : Flag<["-"], "mstrict-align">, Alias<mno_unaligned_access>, Flags<[CC1Option,HelpHidden]>,
HelpText<"Force all memory accesses to be aligned (same as mno-unaligned-access)">;
+def mno_strict_align : Flag<["-"], "mno-strict-align">, Alias<munaligned_access>, Flags<[CC1Option,HelpHidden]>,
+ HelpText<"Allow memory accesses to be unaligned (same as munaligned-access)">;
+let Flags = [TargetSpecific] in {
def mno_thumb : Flag<["-"], "mno-thumb">, Group<m_arm_Features_Group>;
def mrestrict_it: Flag<["-"], "mrestrict-it">, Group<m_arm_Features_Group>,
- HelpText<"Disallow generation of complex IT blocks.">;
+ HelpText<"Disallow generation of complex IT blocks. It is off by default.">;
def mno_restrict_it: Flag<["-"], "mno-restrict-it">, Group<m_arm_Features_Group>,
HelpText<"Allow generation of complex IT blocks.">;
def marm : Flag<["-"], "marm">, Alias<mno_thumb>;
@@ -3605,6 +3854,7 @@ def mnocrc : Flag<["-"], "mnocrc">, Group<m_arm_Features_Group>,
HelpText<"Disallow use of CRC instructions (ARM only)">;
def mno_neg_immediates: Flag<["-"], "mno-neg-immediates">, Group<m_arm_Features_Group>,
HelpText<"Disallow converting instructions with negative immediates to their negation or inversion.">;
+} // let Flags = [TargetSpecific]
def mcmse : Flag<["-"], "mcmse">, Group<m_arm_Features_Group>,
Flags<[NoXarchOption,CC1Option]>,
HelpText<"Allow use of CMSE (Armv8-M Security Extensions)">,
@@ -3619,6 +3869,7 @@ defm aapcs_bitfield_width : BoolOption<"f", "aapcs-bitfield-width",
BothFlags<[NoXarchOption, CC1Option], " the AAPCS standard requirement stating that"
" volatile bit-field width is dictated by the field container type. (ARM only).">>,
Group<m_arm_Features_Group>;
+let Flags = [TargetSpecific] in {
def mframe_chain : Joined<["-"], "mframe-chain=">,
Group<m_arm_Features_Group>, Values<"none,aapcs,aapcs+leaf">,
HelpText<"Select the frame chain model used to emit frame records (Arm only).">;
@@ -3669,25 +3920,27 @@ foreach i = {8-15,18} in
def msve_vector_bits_EQ : Joined<["-"], "msve-vector-bits=">, Group<m_aarch64_Features_Group>,
HelpText<"Specify the size in bits of an SVE vector register. Defaults to the"
" vector length agnostic value of \"scalable\". (AArch64 only)">;
+} // let Flags = [TargetSpecific]
def mvscale_min_EQ : Joined<["-"], "mvscale-min=">,
Group<m_aarch64_Features_Group>, Flags<[NoXarchOption,CC1Option]>,
- HelpText<"Specify the vscale minimum. Defaults to \"1\". (AArch64 only)">,
+ HelpText<"Specify the vscale minimum. Defaults to \"1\". (AArch64/RISC-V only)">,
MarshallingInfoInt<LangOpts<"VScaleMin">>;
def mvscale_max_EQ : Joined<["-"], "mvscale-max=">,
Group<m_aarch64_Features_Group>, Flags<[NoXarchOption,CC1Option]>,
HelpText<"Specify the vscale maximum. Defaults to the"
- " vector length agnostic value of \"0\". (AArch64 only)">,
+ " vector length agnostic value of \"0\". (AArch64/RISC-V only)">,
MarshallingInfoInt<LangOpts<"VScaleMax">>;
def msign_return_address_EQ : Joined<["-"], "msign-return-address=">,
Flags<[CC1Option]>, Group<m_Group>, Values<"none,all,non-leaf">,
HelpText<"Select return address signing scope">;
+let Flags = [TargetSpecific] in {
def mbranch_protection_EQ : Joined<["-"], "mbranch-protection=">,
Group<m_Group>,
HelpText<"Enforce targets of indirect branches and function returns">;
-def mharden_sls_EQ : Joined<["-"], "mharden-sls=">,
+def mharden_sls_EQ : Joined<["-"], "mharden-sls=">, Group<m_Group>,
HelpText<"Select straight-line speculation hardening scope (ARM/AArch64/X86"
" only). <arg> must be: all, none, retbr(ARM/AArch64),"
" blr(ARM/AArch64), comdat(ARM/AArch64), nocomdat(ARM/AArch64),"
@@ -3719,7 +3972,12 @@ def mextended_const : Flag<["-"], "mextended-const">, Group<m_wasm_Features_Grou
def mno_extended_const : Flag<["-"], "mno-extended-const">, Group<m_wasm_Features_Group>;
def mexec_model_EQ : Joined<["-"], "mexec-model=">, Group<m_wasm_Features_Driver_Group>,
Values<"command,reactor">,
- HelpText<"Execution model (WebAssembly only)">;
+ HelpText<"Execution model (WebAssembly only)">,
+ DocBrief<"Select between \"command\" and \"reactor\" executable models. "
+ "Commands have a main-function which scopes the lifetime of the "
+ "program. Reactors are activated and remain active until "
+ "explicitly terminated.">;
+} // let Flags = [TargetSpecific]
defm amdgpu_ieee : BoolOption<"m", "amdgpu-ieee",
CodeGenOpts<"EmitIEEENaNCompliantInsts">, DefaultTrue,
@@ -3737,10 +3995,6 @@ def mcode_object_version_EQ : Joined<["-"], "mcode-object-version=">, Group<m_Gr
NormalizedValues<["COV_None", "COV_2", "COV_3", "COV_4", "COV_5"]>,
MarshallingInfoEnum<TargetOpts<"CodeObjectVersion">, "COV_4">;
-defm code_object_v3_legacy : SimpleMFlag<"code-object-v3",
- "Legacy option to specify code object ABI V3",
- "Legacy option to specify code object ABI V2",
- " (AMDGPU only)">;
defm cumode : SimpleMFlag<"cumode",
"Specify CU wavefront", "Specify WGP wavefront",
" execution mode (AMDGPU only)", m_amdgpu_Features_Group>;
@@ -3752,12 +4006,17 @@ defm wavefrontsize64 : SimpleMFlag<"wavefrontsize64",
defm unsafe_fp_atomics : BoolOption<"m", "unsafe-fp-atomics",
TargetOpts<"AllowAMDGPUUnsafeFPAtomics">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable unsafe floating point atomic instructions (AMDGPU only)">,
+ PosFlag<SetTrue, [CC1Option], "Enable generation of unsafe floating point "
+ "atomic instructions. May generate more efficient code, but may not "
+ "respect rounding and denormal modes, and may give incorrect results "
+ "for certain memory destinations. (AMDGPU only)">,
NegFlag<SetFalse>>, Group<m_Group>;
def faltivec : Flag<["-"], "faltivec">, Group<f_Group>, Flags<[NoXarchOption]>;
def fno_altivec : Flag<["-"], "fno-altivec">, Group<f_Group>, Flags<[NoXarchOption]>;
-def maltivec : Flag<["-"], "maltivec">, Group<m_ppc_Features_Group>;
+let Flags = [TargetSpecific] in {
+def maltivec : Flag<["-"], "maltivec">, Group<m_ppc_Features_Group>,
+ HelpText<"Enable AltiVec vector initializer syntax">;
def mno_altivec : Flag<["-"], "mno-altivec">, Group<m_ppc_Features_Group>;
def mpcrel: Flag<["-"], "mpcrel">, Group<m_ppc_Features_Group>;
def mno_pcrel: Flag<["-"], "mno-pcrel">, Group<m_ppc_Features_Group>;
@@ -3766,15 +4025,12 @@ def mno_prefixed: Flag<["-"], "mno-prefixed">, Group<m_ppc_Features_Group>;
def mspe : Flag<["-"], "mspe">, Group<m_ppc_Features_Group>;
def mno_spe : Flag<["-"], "mno-spe">, Group<m_ppc_Features_Group>;
def mefpu2 : Flag<["-"], "mefpu2">, Group<m_ppc_Features_Group>;
-def mabi_EQ_vec_extabi : Flag<["-"], "mabi=vec-extabi">, Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"Enable the extended Altivec ABI on AIX (AIX only). Uses volatile and nonvolatile vector registers">,
- MarshallingInfoFlag<LangOpts<"EnableAIXExtendedAltivecABI">>;
-def mabi_EQ_vec_default : Flag<["-"], "mabi=vec-default">, Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"Enable the default Altivec ABI on AIX (AIX only). Uses only volatile vector registers.">;
+} // let Flags = [TargetSpecific]
def mabi_EQ_quadword_atomics : Flag<["-"], "mabi=quadword-atomics">,
Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Enable quadword atomics ABI on AIX (AIX PPC64 only). Uses lqarx/stqcx. instructions.">,
MarshallingInfoFlag<LangOpts<"EnableAIXQuadwordAtomicsABI">>;
+let Flags = [TargetSpecific] in {
def mvsx : Flag<["-"], "mvsx">, Group<m_ppc_Features_Group>;
def mno_vsx : Flag<["-"], "mno-vsx">, Group<m_ppc_Features_Group>;
def msecure_plt : Flag<["-"], "msecure-plt">, Group<m_ppc_Features_Group>;
@@ -3816,7 +4072,11 @@ def mno_mfocrf : Flag<["-"], "mno-mfocrf">, Group<m_ppc_Features_Group>;
def mno_mfcrf : Flag<["-"], "mno-mfcrf">, Alias<mno_mfocrf>;
def mpopcntd : Flag<["-"], "mpopcntd">, Group<m_ppc_Features_Group>;
def mno_popcntd : Flag<["-"], "mno-popcntd">, Group<m_ppc_Features_Group>;
-def mcrbits : Flag<["-"], "mcrbits">, Group<m_ppc_Features_Group>;
+def mcrbits : Flag<["-"], "mcrbits">, Group<m_ppc_Features_Group>,
+ HelpText<"Control the CR-bit tracking feature on PowerPC. ``-mcrbits`` "
+ "(the enablement of CR-bit tracking support) is the default for "
+ "POWER8 and above, as well as for all other CPUs when "
+ "optimization is applied (-O2 and above).">;
def mno_crbits : Flag<["-"], "mno-crbits">, Group<m_ppc_Features_Group>;
def minvariant_function_descriptors :
Flag<["-"], "minvariant-function-descriptors">, Group<m_ppc_Features_Group>;
@@ -3837,15 +4097,26 @@ def mrop_protect : Flag<["-"], "mrop-protect">,
Group<m_ppc_Features_Group>;
def mprivileged : Flag<["-"], "mprivileged">,
Group<m_ppc_Features_Group>;
+} // let Flags = [TargetSpecific]
def maix_struct_return : Flag<["-"], "maix-struct-return">,
Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"Return all structs in memory (PPC32 only)">;
+ HelpText<"Return all structs in memory (PPC32 only)">,
+ DocBrief<"Override the default ABI for 32-bit targets to return all "
+ "structs in memory, as in the Power 32-bit ABI for Linux (2011), "
+ "and on AIX and Darwin.">;
def msvr4_struct_return : Flag<["-"], "msvr4-struct-return">,
Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"Return small structs in registers (PPC32 only)">;
-
+ HelpText<"Return small structs in registers (PPC32 only)">,
+ DocBrief<"Override the default ABI for 32-bit targets to return small "
+ "structs in registers, as in the System V ABI (1995).">;
+def mxcoff_roptr : Flag<["-"], "mxcoff-roptr">, Group<m_Group>, Flags<[CC1Option,TargetSpecific]>,
+ HelpText<"Place constant objects with relocatable address values in the RO data section and add -bforceimprw to the linker flags (AIX only)">;
+def mno_xcoff_roptr : Flag<["-"], "mno-xcoff-roptr">, Group<m_Group>, TargetSpecific;
+
+let Flags = [TargetSpecific] in {
def mvx : Flag<["-"], "mvx">, Group<m_Group>;
def mno_vx : Flag<["-"], "mno-vx">, Group<m_Group>;
+} // let Flags = [TargetSpecific]
defm zvector : BoolFOption<"zvector",
LangOpts<"ZVector">, DefaultFalse,
@@ -3854,9 +4125,11 @@ defm zvector : BoolFOption<"zvector",
def mzvector : Flag<["-"], "mzvector">, Alias<fzvector>;
def mno_zvector : Flag<["-"], "mno-zvector">, Alias<fno_zvector>;
+def mxcoff_build_id_EQ : Joined<["-"], "mxcoff-build-id=">, Group<Link_Group>, MetaVarName<"<0xHEXSTRING>">,
+ HelpText<"On AIX, request creation of a build-id string, \"0xHEXSTRING\", in the string table of the loader section inside the linked binary">;
def mignore_xcoff_visibility : Flag<["-"], "mignore-xcoff-visibility">, Group<m_Group>,
HelpText<"Not emit the visibility attribute for asm in AIX OS or give all symbols 'unspecified' visibility in XCOFF object file">,
- Flags<[CC1Option]>;
+ Flags<[CC1Option,TargetSpecific]>;
defm backchain : BoolOption<"m", "backchain",
CodeGenOpts<"Backchain">, DefaultFalse,
PosFlag<SetTrue, [], "Link stack frames through backchain on System Z">,
@@ -3897,8 +4170,12 @@ def mno_outline_atomics : Flag<["-"], "mno-outline-atomics">, Group<f_clang_Grou
def mno_implicit_float : Flag<["-"], "mno-implicit-float">, Group<m_Group>,
HelpText<"Don't generate implicit floating point or vector instructions">;
def mimplicit_float : Flag<["-"], "mimplicit-float">, Group<m_Group>;
-def mrecip : Flag<["-"], "mrecip">, Group<m_Group>;
+def mrecip : Flag<["-"], "mrecip">, Group<m_Group>,
+ HelpText<"Equivalent to '-mrecip=all'">;
def mrecip_EQ : CommaJoined<["-"], "mrecip=">, Group<m_Group>, Flags<[CC1Option]>,
+ HelpText<"Control use of approximate reciprocal and reciprocal square root instructions followed by <n> iterations of "
+ "Newton-Raphson refinement. "
+ "<value> = ( ['!'] ['vec-'] ('rcp'|'sqrt') [('h'|'s'|'d')] [':'<n>] ) | 'all' | 'default' | 'none'">,
MarshallingInfoStringVector<CodeGenOpts<"Reciprocals">>;
def mprefer_vector_width_EQ : Joined<["-"], "mprefer-vector-width=">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Specifies preferred vector width for auto-vectorization. Defaults to 'none' which allows target specific decisions.">,
@@ -3928,6 +4205,8 @@ def mpacked_stack : Flag<["-"], "mpacked-stack">, HelpText<"Use packed stack lay
Flags<[CC1Option]>, Group<m_Group>,
MarshallingInfoFlag<CodeGenOpts<"PackedStack">>;
def mno_packed_stack : Flag<["-"], "mno-packed-stack">, Flags<[CC1Option]>, Group<m_Group>;
+
+let Flags = [TargetSpecific] in {
def mips16 : Flag<["-"], "mips16">, Group<m_mips_Features_Group>;
def mno_mips16 : Flag<["-"], "mno-mips16">, Group<m_mips_Features_Group>;
def mmicromips : Flag<["-"], "mmicromips">, Group<m_mips_Features_Group>;
@@ -3943,10 +4222,12 @@ def mno_check_zero_division : Flag<["-"], "mno-check-zero-division">,
def mfix4300 : Flag<["-"], "mfix4300">, Group<m_mips_Features_Group>;
def mcompact_branches_EQ : Joined<["-"], "mcompact-branches=">,
Group<m_mips_Features_Group>;
+} // let Flags = [TargetSpecific]
def mbranch_likely : Flag<["-"], "mbranch-likely">, Group<m_Group>,
IgnoredGCCCompat;
def mno_branch_likely : Flag<["-"], "mno-branch-likely">, Group<m_Group>,
IgnoredGCCCompat;
+let Flags = [TargetSpecific] in {
def mindirect_jump_EQ : Joined<["-"], "mindirect-jump=">,
Group<m_mips_Features_Group>,
HelpText<"Change indirect jump instructions to inhibit speculation">;
@@ -4012,6 +4293,7 @@ def mvirt : Flag<["-"], "mvirt">, Group<m_mips_Features_Group>;
def mno_virt : Flag<["-"], "mno-virt">, Group<m_mips_Features_Group>;
def mginv : Flag<["-"], "mginv">, Group<m_mips_Features_Group>;
def mno_ginv : Flag<["-"], "mno-ginv">, Group<m_mips_Features_Group>;
+} // let Flags = [TargetSpecific]
def mips1 : Flag<["-"], "mips1">,
Alias<march_EQ>, AliasArgs<["mips1"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips1">, Flags<[HelpHidden]>;
@@ -4093,12 +4375,13 @@ def no_cpp_precomp : Flag<["-"], "no-cpp-precomp">, Group<clang_ignored_f_Group>
def no_integrated_cpp : Flag<["-", "--"], "no-integrated-cpp">, Flags<[NoXarchOption]>;
def no_pedantic : Flag<["-", "--"], "no-pedantic">, Group<pedantic_Group>;
def no__dead__strip__inits__and__terms : Flag<["-"], "no_dead_strip_inits_and_terms">;
-def nobuiltininc : Flag<["-"], "nobuiltininc">, Flags<[CC1Option, CoreOption]>,
+def nobuiltininc : Flag<["-"], "nobuiltininc">, Flags<[CC1Option, CoreOption]>, Group<IncludePath_Group>,
HelpText<"Disable builtin #include directories">,
MarshallingInfoNegativeFlag<HeaderSearchOpts<"UseBuiltinIncludes">>;
-def nogpuinc : Flag<["-"], "nogpuinc">, HelpText<"Do not add include paths for CUDA/HIP and"
+def nogpuinc : Flag<["-"], "nogpuinc">, Group<IncludePath_Group>,
+ HelpText<"Do not add include paths for CUDA/HIP and"
" do not include the default CUDA/HIP wrapper headers">;
-def nohipwrapperinc : Flag<["-"], "nohipwrapperinc">,
+def nohipwrapperinc : Flag<["-"], "nohipwrapperinc">, Group<IncludePath_Group>,
HelpText<"Do not include the default HIP wrapper headers and include paths">;
def : Flag<["-"], "nocudainc">, Alias<nogpuinc>;
def nogpulib : Flag<["-"], "nogpulib">, MarshallingInfoFlag<LangOpts<"NoGPULib">>,
@@ -4115,9 +4398,9 @@ def noprebind : Flag<["-"], "noprebind">;
def noprofilelib : Flag<["-"], "noprofilelib">;
def noseglinkedit : Flag<["-"], "noseglinkedit">;
def nostartfiles : Flag<["-"], "nostartfiles">, Group<Link_Group>;
-def nostdinc : Flag<["-"], "nostdinc">, Flags<[CoreOption]>;
-def nostdlibinc : Flag<["-"], "nostdlibinc">;
-def nostdincxx : Flag<["-"], "nostdinc++">, Flags<[CC1Option]>,
+def nostdinc : Flag<["-"], "nostdinc">, Flags<[CoreOption]>, Group<IncludePath_Group>;
+def nostdlibinc : Flag<["-"], "nostdlibinc">, Group<IncludePath_Group>;
+def nostdincxx : Flag<["-"], "nostdinc++">, Flags<[CC1Option]>, Group<IncludePath_Group>,
HelpText<"Disable standard #include directories for the C++ standard library">,
MarshallingInfoNegativeFlag<HeaderSearchOpts<"UseStandardCXXIncludes">>;
def nostdlib : Flag<["-"], "nostdlib">, Group<Link_Group>;
@@ -4138,6 +4421,7 @@ def pedantic_errors : Flag<["-", "--"], "pedantic-errors">, Group<pedantic_Group
MarshallingInfoFlag<DiagnosticOpts<"PedanticErrors">>;
def pedantic : Flag<["-", "--"], "pedantic">, Group<pedantic_Group>, Flags<[CC1Option,FlangOption,FC1Option]>,
HelpText<"Warn on language extensions">, MarshallingInfoFlag<DiagnosticOpts<"Pedantic">>;
+def p : Flag<["-"], "p">, HelpText<"Enable mcount instrumentation with prof">;
def pg : Flag<["-"], "pg">, HelpText<"Enable mcount instrumentation">, Flags<[CC1Option]>,
MarshallingInfoFlag<CodeGenOpts<"InstrumentForProfiling">>;
def pipe : Flag<["-", "--"], "pipe">,
@@ -4155,6 +4439,8 @@ def print_libgcc_file_name : Flag<["-", "--"], "print-libgcc-file-name">,
"library (\"libgcc.a\" or \"libclang_rt.builtins.*.a\")">;
def print_multi_directory : Flag<["-", "--"], "print-multi-directory">;
def print_multi_lib : Flag<["-", "--"], "print-multi-lib">;
+def print_multi_flags : Flag<["-", "--"], "print-multi-flags-experimental">,
+ HelpText<"Print the flags used for selecting multilibs (experimental)">;
def print_multi_os_directory : Flag<["-", "--"], "print-multi-os-directory">,
Flags<[Unsupported]>;
def print_target_triple : Flag<["-", "--"], "print-target-triple">,
@@ -4184,7 +4470,6 @@ defm pthread : BoolOption<"", "pthread",
LangOpts<"POSIXThreads">, DefaultFalse,
PosFlag<SetTrue, [], "Support POSIX threads in generated code">,
NegFlag<SetFalse>, BothFlags<[CC1Option]>>;
-def p : Flag<["-"], "p">;
def pie : Flag<["-"], "pie">, Group<Link_Group>;
def static_pie : Flag<["-"], "static-pie">, Group<Link_Group>;
def read__only__relocs : Separate<["-"], "read_only_relocs">;
@@ -4204,24 +4489,20 @@ def rpath : Separate<["-"], "rpath">, Flags<[LinkerInput]>, Group<Link_Group>;
def rtlib_EQ : Joined<["-", "--"], "rtlib=">,
HelpText<"Compiler runtime library to use">;
def frtlib_add_rpath: Flag<["-"], "frtlib-add-rpath">, Flags<[NoArgumentUnused]>,
- HelpText<"Add -rpath with architecture-specific resource directory to the linker flags">;
+ HelpText<"Add -rpath with architecture-specific resource directory to the linker flags. "
+ "When --hip-link is specified, also add -rpath with HIP runtime library directory to the linker flags">;
def fno_rtlib_add_rpath: Flag<["-"], "fno-rtlib-add-rpath">, Flags<[NoArgumentUnused]>,
- HelpText<"Do not add -rpath with architecture-specific resource directory to the linker flags">;
+ HelpText<"Do not add -rpath with architecture-specific resource directory to the linker flags. "
+ "When --hip-link is specified, do not add -rpath with HIP runtime library directory to the linker flags">;
def offload_add_rpath: Flag<["--"], "offload-add-rpath">, Flags<[NoArgumentUnused]>,
- HelpText<"Add -rpath with HIP runtime library directory to the linker flags">;
+ Alias<frtlib_add_rpath>;
def no_offload_add_rpath: Flag<["--"], "no-offload-add-rpath">, Flags<[NoArgumentUnused]>,
- HelpText<"Do not add -rpath with HIP runtime library directory to the linker flags">;
-defm openmp_implicit_rpath: BoolFOption<"openmp-implicit-rpath",
- LangOpts<"OpenMP">,
- DefaultTrue,
- PosFlag<SetTrue, [], "Set rpath on OpenMP executables">,
- NegFlag<SetFalse>,
- BothFlags<[NoArgumentUnused]>>;
+ Alias<frtlib_add_rpath>;
def r : Flag<["-"], "r">, Flags<[LinkerInput,NoArgumentUnused]>,
Group<Link_Group>;
-def save_temps_EQ : Joined<["-", "--"], "save-temps=">, Flags<[CC1Option, FlangOption, NoXarchOption]>,
+def save_temps_EQ : Joined<["-", "--"], "save-temps=">, Flags<[CC1Option, FlangOption, FC1Option, NoXarchOption]>,
HelpText<"Save intermediate compilation results.">;
-def save_temps : Flag<["-", "--"], "save-temps">, Flags<[FlangOption, NoXarchOption]>,
+def save_temps : Flag<["-", "--"], "save-temps">, Flags<[FlangOption, FC1Option, NoXarchOption]>,
Alias<save_temps_EQ>, AliasArgs<["cwd"]>,
HelpText<"Save intermediate compilation results">;
def save_stats_EQ : Joined<["-", "--"], "save-stats=">, Flags<[NoXarchOption]>,
@@ -4296,8 +4577,8 @@ def print_supported_cpus : Flag<["-", "--"], "print-supported-cpus">,
HelpText<"Print supported cpu models for the given target (if target is not specified,"
" it will print the supported cpus for the default target)">,
MarshallingInfoFlag<FrontendOpts<"PrintSupportedCPUs">>;
-def mcpu_EQ_QUESTION : Flag<["-"], "mcpu=?">, Alias<print_supported_cpus>;
-def mtune_EQ_QUESTION : Flag<["-"], "mtune=?">, Alias<print_supported_cpus>;
+def : Flag<["-"], "mcpu=help">, Alias<print_supported_cpus>;
+def : Flag<["-"], "mtune=help">, Alias<print_supported_cpus>;
def time : Flag<["-"], "time">,
HelpText<"Time individual commands">;
def traditional_cpp : Flag<["-", "--"], "traditional-cpp">, Flags<[CC1Option]>,
@@ -4490,6 +4771,7 @@ def _write_user_dependencies : Flag<["--"], "write-user-dependencies">, Alias<MM
def _ : Joined<["--"], "">, Flags<[Unsupported]>;
// Hexagon feature flags.
+let Flags = [TargetSpecific] in {
def mieee_rnd_near : Flag<["-"], "mieee-rnd-near">,
Group<m_hexagon_Features_Group>;
def mv5 : Flag<["-"], "mv5">, Group<m_hexagon_Features_Group>, Alias<mcpu_EQ>,
@@ -4541,8 +4823,9 @@ def mhexagon_hvx_ieee_fp : Flag<["-"], "mhvx-ieee-fp">,
def mno_hexagon_hvx_ieee_fp : Flag<["-"], "mno-hvx-ieee-fp">,
Group<m_hexagon_Features_Group>,
HelpText<"Disable Hexagon HVX IEEE floating-point">;
-def ffixed_r19: Flag<["-"], "ffixed-r19">,
+def ffixed_r19: Flag<["-"], "ffixed-r19">, Group<f_Group>,
HelpText<"Reserve register r19 (Hexagon only)">;
+} // let Flags = [TargetSpecific]
def mmemops : Flag<["-"], "mmemops">, Group<m_hexagon_Features_Group>,
Flags<[CC1Option]>, HelpText<"Enable generation of memop instructions">;
def mno_memops : Flag<["-"], "mno-memops">, Group<m_hexagon_Features_Group>,
@@ -4563,6 +4846,7 @@ def mcabac: Flag<["-"], "mcabac">, Group<m_hexagon_Features_Group>,
HelpText<"Enable CABAC instructions">;
// SPARC feature flags
+let Flags = [TargetSpecific] in {
def mfpu : Flag<["-"], "mfpu">, Group<m_sparc_Features_Group>;
def mno_fpu : Flag<["-"], "mno-fpu">, Group<m_sparc_Features_Group>;
def mfsmuld : Flag<["-"], "mfsmuld">, Group<m_sparc_Features_Group>;
@@ -4577,8 +4861,10 @@ def mvis3 : Flag<["-"], "mvis3">, Group<m_sparc_Features_Group>;
def mno_vis3 : Flag<["-"], "mno-vis3">, Group<m_sparc_Features_Group>;
def mhard_quad_float : Flag<["-"], "mhard-quad-float">, Group<m_sparc_Features_Group>;
def msoft_quad_float : Flag<["-"], "msoft-quad-float">, Group<m_sparc_Features_Group>;
+} // let Flags = [TargetSpecific]
// M68k features flags
+let Flags = [TargetSpecific] in {
def m68000 : Flag<["-"], "m68000">, Group<m_m68k_Features_Group>;
def m68010 : Flag<["-"], "m68010">, Group<m_m68k_Features_Group>;
def m68020 : Flag<["-"], "m68020">, Group<m_m68k_Features_Group>;
@@ -4586,12 +4872,15 @@ def m68030 : Flag<["-"], "m68030">, Group<m_m68k_Features_Group>;
def m68040 : Flag<["-"], "m68040">, Group<m_m68k_Features_Group>;
def m68060 : Flag<["-"], "m68060">, Group<m_m68k_Features_Group>;
+def m68881 : Flag<["-"], "m68881">, Group<m_m68k_Features_Group>;
+
foreach i = {0-6} in
def ffixed_a#i : Flag<["-"], "ffixed-a"#i>, Group<m_m68k_Features_Group>,
HelpText<"Reserve the a"#i#" register (M68k only)">;
foreach i = {0-7} in
def ffixed_d#i : Flag<["-"], "ffixed-d"#i>, Group<m_m68k_Features_Group>,
HelpText<"Reserve the d"#i#" register (M68k only)">;
+} // let Flags = [TargetSpecific]
// X86 feature flags
def mx87 : Flag<["-"], "mx87">, Group<m_x86_Features_Group>;
@@ -4607,6 +4896,8 @@ def m3dnowa : Flag<["-"], "m3dnowa">, Group<m_x86_Features_Group>;
def mno_3dnowa : Flag<["-"], "mno-3dnowa">, Group<m_x86_Features_Group>;
def mamx_bf16 : Flag<["-"], "mamx-bf16">, Group<m_x86_Features_Group>;
def mno_amx_bf16 : Flag<["-"], "mno-amx-bf16">, Group<m_x86_Features_Group>;
+def mamx_complex : Flag<["-"], "mamx-complex">, Group<m_x86_Features_Group>;
+def mno_amx_complex : Flag<["-"], "mno-amx-complex">, Group<m_x86_Features_Group>;
def mamx_fp16 : Flag<["-"], "mamx-fp16">, Group<m_x86_Features_Group>;
def mno_amx_fp16 : Flag<["-"], "mno-amx-fp16">, Group<m_x86_Features_Group>;
def mamx_int8 : Flag<["-"], "mamx-int8">, Group<m_x86_Features_Group>;
@@ -4674,6 +4965,8 @@ def mavxifma : Flag<["-"], "mavxifma">, Group<m_x86_Features_Group>;
def mno_avxifma : Flag<["-"], "mno-avxifma">, Group<m_x86_Features_Group>;
def mavxneconvert : Flag<["-"], "mavxneconvert">, Group<m_x86_Features_Group>;
def mno_avxneconvert : Flag<["-"], "mno-avxneconvert">, Group<m_x86_Features_Group>;
+def mavxvnniint16 : Flag<["-"], "mavxvnniint16">, Group<m_x86_Features_Group>;
+def mno_avxvnniint16 : Flag<["-"], "mno-avxvnniint16">, Group<m_x86_Features_Group>;
def mavxvnniint8 : Flag<["-"], "mavxvnniint8">, Group<m_x86_Features_Group>;
def mno_avxvnniint8 : Flag<["-"], "mno-avxvnniint8">, Group<m_x86_Features_Group>;
def mavxvnni : Flag<["-"], "mavxvnni">, Group<m_x86_Features_Group>;
@@ -4770,6 +5063,12 @@ def msgx : Flag<["-"], "msgx">, Group<m_x86_Features_Group>;
def mno_sgx : Flag<["-"], "mno-sgx">, Group<m_x86_Features_Group>;
def msha : Flag<["-"], "msha">, Group<m_x86_Features_Group>;
def mno_sha : Flag<["-"], "mno-sha">, Group<m_x86_Features_Group>;
+def msha512 : Flag<["-"], "msha512">, Group<m_x86_Features_Group>;
+def mno_sha512 : Flag<["-"], "mno-sha512">, Group<m_x86_Features_Group>;
+def msm3 : Flag<["-"], "msm3">, Group<m_x86_Features_Group>;
+def mno_sm3 : Flag<["-"], "mno-sm3">, Group<m_x86_Features_Group>;
+def msm4 : Flag<["-"], "msm4">, Group<m_x86_Features_Group>;
+def mno_sm4 : Flag<["-"], "mno-sm4">, Group<m_x86_Features_Group>;
def mtbm : Flag<["-"], "mtbm">, Group<m_x86_Features_Group>;
def mno_tbm : Flag<["-"], "mno-tbm">, Group<m_x86_Features_Group>;
def mtsxldtrk : Flag<["-"], "mtsxldtrk">, Group<m_x86_Features_Group>;
@@ -4826,6 +5125,10 @@ multiclass BooleanFFlag<string name> {
def fno_#NAME : Flag<["-"], "fno-"#name>;
}
+multiclass FlangIgnoredDiagOpt<string name> {
+ def unsupported_warning_w#NAME : Flag<["-", "--"], "W"#name>, Group<flang_ignored_w_Group>;
+}
+
defm : BooleanFFlag<"keep-inline-functions">, Group<clang_ignored_gcc_optimization_f_Group>;
def fprofile_dir : Joined<["-"], "fprofile-dir=">, Group<f_Group>;
@@ -4850,12 +5153,15 @@ def falign_jumps_EQ : Joined<["-"], "falign-jumps=">, Group<clang_ignored_gcc_op
// ignore it for now to avoid breaking builds that use it.
def fdiagnostics_show_location_EQ : Joined<["-"], "fdiagnostics-show-location=">, Group<clang_ignored_f_Group>;
-defm fcheck_new : BooleanFFlag<"check-new">, Group<clang_ignored_f_Group>;
+defm check_new : BoolOption<"f", "check-new",
+ LangOpts<"CheckNew">, DefaultFalse,
+ PosFlag<SetTrue, [], "Do not assume C++ operator new may not return NULL">,
+ NegFlag<SetFalse>, BothFlags<[CC1Option]>>;
+
defm caller_saves : BooleanFFlag<"caller-saves">, Group<clang_ignored_gcc_optimization_f_Group>;
defm reorder_blocks : BooleanFFlag<"reorder-blocks">, Group<clang_ignored_gcc_optimization_f_Group>;
defm branch_count_reg : BooleanFFlag<"branch-count-reg">, Group<clang_ignored_gcc_optimization_f_Group>;
defm default_inline : BooleanFFlag<"default-inline">, Group<clang_ignored_gcc_optimization_f_Group>;
-defm fat_lto_objects : BooleanFFlag<"fat-lto-objects">, Group<clang_ignored_gcc_optimization_f_Group>;
defm float_store : BooleanFFlag<"float-store">, Group<clang_ignored_gcc_optimization_f_Group>;
defm friend_injection : BooleanFFlag<"friend-injection">, Group<clang_ignored_f_Group>;
defm function_attribute_list : BooleanFFlag<"function-attribute-list">, Group<clang_ignored_f_Group>;
@@ -4881,7 +5187,10 @@ defm ipa_cp : BooleanFFlag<"ipa-cp">,
defm ivopts : BooleanFFlag<"ivopts">, Group<clang_ignored_gcc_optimization_f_Group>;
defm semantic_interposition : BoolFOption<"semantic-interposition",
LangOpts<"SemanticInterposition">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>>;
+ PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>>,
+ DocBrief<[{Enable semantic interposition. Semantic interposition allows for the
+interposition of a symbol by another at runtime, thus preventing a range of
+inter-procedural optimisation.}]>;
defm non_call_exceptions : BooleanFFlag<"non-call-exceptions">, Group<clang_ignored_f_Group>;
defm peel_loops : BooleanFFlag<"peel-loops">, Group<clang_ignored_gcc_optimization_f_Group>;
defm permissive : BooleanFFlag<"permissive">, Group<clang_ignored_f_Group>;
@@ -4983,10 +5292,38 @@ defm recursive : BooleanFFlag<"recursive">, Group<gfortran_Group>;
defm repack_arrays : BooleanFFlag<"repack-arrays">, Group<gfortran_Group>;
defm second_underscore : BooleanFFlag<"second-underscore">, Group<gfortran_Group>;
defm sign_zero : BooleanFFlag<"sign-zero">, Group<gfortran_Group>;
-defm stack_arrays : BooleanFFlag<"stack-arrays">, Group<gfortran_Group>;
-defm underscoring : BooleanFFlag<"underscoring">, Group<gfortran_Group>;
defm whole_file : BooleanFFlag<"whole-file">, Group<gfortran_Group>;
+// -W <arg> options unsupported by the flang compiler
+// If any of these options are passed into flang's compiler driver,
+// a warning will be raised and the argument will be claimed
+defm : FlangIgnoredDiagOpt<"extra">;
+defm : FlangIgnoredDiagOpt<"aliasing">;
+defm : FlangIgnoredDiagOpt<"ampersand">;
+defm : FlangIgnoredDiagOpt<"array-bounds">;
+defm : FlangIgnoredDiagOpt<"c-binding-type">;
+defm : FlangIgnoredDiagOpt<"character-truncation">;
+defm : FlangIgnoredDiagOpt<"conversion">;
+defm : FlangIgnoredDiagOpt<"do-subscript">;
+defm : FlangIgnoredDiagOpt<"function-elimination">;
+defm : FlangIgnoredDiagOpt<"implicit-interface">;
+defm : FlangIgnoredDiagOpt<"implicit-procedure">;
+defm : FlangIgnoredDiagOpt<"intrinsic-shadow">;
+defm : FlangIgnoredDiagOpt<"use-without-only">;
+defm : FlangIgnoredDiagOpt<"intrinsics-std">;
+defm : FlangIgnoredDiagOpt<"line-truncation">;
+defm : FlangIgnoredDiagOpt<"no-align-commons">;
+defm : FlangIgnoredDiagOpt<"no-overwrite-recursive">;
+defm : FlangIgnoredDiagOpt<"no-tabs">;
+defm : FlangIgnoredDiagOpt<"real-q-constant">;
+defm : FlangIgnoredDiagOpt<"surprising">;
+defm : FlangIgnoredDiagOpt<"underflow">;
+defm : FlangIgnoredDiagOpt<"unused-parameter">;
+defm : FlangIgnoredDiagOpt<"realloc-lhs">;
+defm : FlangIgnoredDiagOpt<"realloc-lhs-all">;
+defm : FlangIgnoredDiagOpt<"frontend-loop-interchange">;
+defm : FlangIgnoredDiagOpt<"target-lifetime">;
+
// C++ SYCL options
def fsycl : Flag<["-"], "fsycl">, Flags<[NoXarchOption, CoreOption]>,
Group<sycl_Group>, HelpText<"Enables SYCL kernels compilation for device">;
@@ -4997,9 +5334,14 @@ def fno_sycl : Flag<["-"], "fno-sycl">, Flags<[NoXarchOption, CoreOption]>,
// FLangOption + NoXarchOption
//===----------------------------------------------------------------------===//
-def flang_experimental_exec : Flag<["-"], "flang-experimental-exec">,
- Flags<[FlangOption, FlangOnlyOption, NoXarchOption, HelpHidden]>,
- HelpText<"Enable support for generating executables (experimental)">;
+def flang_experimental_hlfir : Flag<["-"], "flang-experimental-hlfir">,
+ Flags<[FlangOption, FC1Option, FlangOnlyOption, NoXarchOption, HelpHidden]>,
+ HelpText<"Use HLFIR lowering (experimental)">;
+
+def flang_experimental_polymorphism : Flag<["-"], "flang-experimental-polymorphism">,
+ Flags<[FlangOption, FC1Option, FlangOnlyOption, NoXarchOption, HelpHidden]>,
+ HelpText<"Enable Fortran 2003 polymorphism (experimental)">;
+
//===----------------------------------------------------------------------===//
// FLangOption + CoreOption + NoXarchOption
@@ -5042,7 +5384,7 @@ def fopenacc : Flag<["-"], "fopenacc">, Group<f_Group>,
def fdefault_double_8 : Flag<["-"],"fdefault-double-8">, Group<f_Group>,
HelpText<"Set the default double precision kind to an 8 byte wide type">;
def fdefault_integer_8 : Flag<["-"],"fdefault-integer-8">, Group<f_Group>,
- HelpText<"Set the default integer kind to an 8 byte wide type">;
+ HelpText<"Set the default integer and logical kind to an 8 byte wide type">;
def fdefault_real_8 : Flag<["-"],"fdefault-real-8">, Group<f_Group>,
HelpText<"Set the default real kind to an 8 byte wide type">;
def flarge_sizes : Flag<["-"],"flarge-sizes">, Group<f_Group>,
@@ -5059,10 +5401,17 @@ defm backslash : OptInFC1FFlag<"backslash", "Specify that backslash in string in
defm xor_operator : OptInFC1FFlag<"xor-operator", "Enable .XOR. as a synonym of .NEQV.">;
defm logical_abbreviations : OptInFC1FFlag<"logical-abbreviations", "Enable logical abbreviations">;
defm implicit_none : OptInFC1FFlag<"implicit-none", "No implicit typing allowed unless overridden by IMPLICIT statements">;
+defm underscoring : OptInFC1FFlag<"underscoring", "Appends one trailing underscore to external names">;
def fno_automatic : Flag<["-"], "fno-automatic">, Group<f_Group>,
HelpText<"Implies the SAVE attribute for non-automatic local objects in subprograms unless RECURSIVE">;
+defm stack_arrays : BoolOptionWithoutMarshalling<"f", "stack-arrays",
+ PosFlag<SetTrue, [], "Attempt to allocate array temporaries on the stack, no matter their size">,
+ NegFlag<SetFalse, [], "Allocate array temporaries on the heap (default)">>;
+defm loop_versioning : BoolOptionWithoutMarshalling<"f", "version-loops-for-stride",
+ PosFlag<SetTrue, [], "Create unit-strided versions of loops">,
+ NegFlag<SetFalse, [], "Do not create unit-strided loops (default)">>;
} // let Flags = [FC1Option, FlangOption, FlangOnlyOption]
def J : JoinedOrSeparate<["-"], "J">,
@@ -5124,9 +5473,12 @@ def fno_reformat : Flag<["-"], "fno-reformat">, Group<Preprocessor_Group>,
HelpText<"Dump the cooked character stream in -E mode">;
defm analyzed_objects_for_unparse : OptOutFC1FFlag<"analyzed-objects-for-unparse", "", "Do not use the analyzed objects when unparsing">;
-def emit_mlir : Flag<["-"], "emit-mlir">, Group<Action_Group>,
- HelpText<"Build the parse tree, then lower it to MLIR">;
-def emit_fir : Flag<["-"], "emit-fir">, Alias<emit_mlir>;
+def emit_fir : Flag<["-"], "emit-fir">, Group<Action_Group>,
+ HelpText<"Build the parse tree, then lower it to FIR">;
+def emit_mlir : Flag<["-"], "emit-mlir">, Alias<emit_fir>;
+
+def emit_hlfir : Flag<["-"], "emit-hlfir">, Group<Action_Group>,
+ HelpText<"Build the parse tree, then lower it to HLFIR">;
} // let Flags = [FC1Option, FlangOnlyOption]
@@ -5376,12 +5728,12 @@ def mrelocation_model : Separate<["-"], "mrelocation-model">,
NormalizedValuesScope<"llvm::Reloc">,
NormalizedValues<["Static", "PIC_", "ROPI", "RWPI", "ROPI_RWPI", "DynamicNoPIC"]>,
MarshallingInfoEnum<CodeGenOpts<"RelocationModel">, "PIC_">;
+def debug_info_kind_EQ : Joined<["-"], "debug-info-kind=">;
} // let Flags = [CC1Option, CC1AsOption, FC1Option, NoDriverOption]
let Flags = [CC1Option, CC1AsOption, NoDriverOption] in {
-def debug_info_kind_EQ : Joined<["-"], "debug-info-kind=">;
def debug_info_macro : Flag<["-"], "debug-info-macro">,
HelpText<"Emit macro debug information">,
MarshallingInfoFlag<CodeGenOpts<"MacroDebugInfo">>;
@@ -5441,6 +5793,9 @@ def as_secure_log_file : Separate<["-"], "as-secure-log-file">,
let Flags = [CC1Option, NoDriverOption] in {
+def llvm_verify_each : Flag<["-"], "llvm-verify-each">,
+ HelpText<"Run the LLVM verifier after every LLVM pass">,
+ MarshallingInfoFlag<CodeGenOpts<"VerifyEach">>;
def disable_llvm_verifier : Flag<["-"], "disable-llvm-verifier">,
HelpText<"Don't run the LLVM IR verifier pass">,
MarshallingInfoNegativeFlag<CodeGenOpts<"VerifyModule">>;
@@ -5486,14 +5841,12 @@ def fmerge_functions : Flag<["-"], "fmerge-functions">,
MarshallingInfoFlag<CodeGenOpts<"MergeFunctions">>;
def coverage_data_file : Separate<["-"], "coverage-data-file">,
HelpText<"Emit coverage data to this filename.">,
- MarshallingInfoString<CodeGenOpts<"CoverageDataFile">>,
- ShouldParseIf<!strconcat(fprofile_arcs.KeyPath, "||", ftest_coverage.KeyPath)>;
+ MarshallingInfoString<CodeGenOpts<"CoverageDataFile">>;
def coverage_data_file_EQ : Joined<["-"], "coverage-data-file=">,
Alias<coverage_data_file>;
def coverage_notes_file : Separate<["-"], "coverage-notes-file">,
HelpText<"Emit coverage notes to this filename.">,
- MarshallingInfoString<CodeGenOpts<"CoverageNotesFile">>,
- ShouldParseIf<!strconcat(fprofile_arcs.KeyPath, "||", ftest_coverage.KeyPath)>;
+ MarshallingInfoString<CodeGenOpts<"CoverageNotesFile">>;
def coverage_notes_file_EQ : Joined<["-"], "coverage-notes-file=">,
Alias<coverage_notes_file>;
def coverage_version_EQ : Joined<["-"], "coverage-version=">,
@@ -5522,6 +5875,9 @@ def mframe_pointer_EQ : Joined<["-"], "mframe-pointer=">,
def mabi_EQ_ieeelongdouble : Flag<["-"], "mabi=ieeelongdouble">,
HelpText<"Use IEEE 754 quadruple-precision for long double">,
MarshallingInfoFlag<LangOpts<"PPCIEEELongDouble">>;
+def mabi_EQ_vec_extabi : Flag<["-"], "mabi=vec-extabi">,
+ HelpText<"Enable the extended Altivec ABI on AIX. Use volatile and nonvolatile vector registers">,
+ MarshallingInfoFlag<LangOpts<"EnableAIXExtendedAltivecABI">>;
def mfloat_abi : Separate<["-"], "mfloat-abi">,
HelpText<"The float ABI to use">,
MarshallingInfoString<CodeGenOpts<"FloatABI">>;
@@ -5706,11 +6062,11 @@ def fctor_dtor_return_this : Flag<["-"], "fctor-dtor-return-this">,
"and non-deleting destructors. (No effect on Microsoft ABI)">,
MarshallingInfoFlag<CodeGenOpts<"CtorDtorReturnThis">>;
-defm experimental_assignment_tracking :
- BoolOption<"f", "experimental-assignment-tracking",
- CodeGenOpts<"EnableAssignmentTracking">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>, BothFlags<[CoreOption]>>,
- Group<f_Group>;
+def fexperimental_assignment_tracking_EQ : Joined<["-"], "fexperimental-assignment-tracking=">,
+ Group<f_Group>, CodeGenOpts<"EnableAssignmentTracking">,
+ NormalizedValuesScope<"CodeGenOptions::AssignmentTrackingOpts">,
+ Values<"disabled,enabled,forced">, NormalizedValues<["Disabled","Enabled","Forced"]>,
+ MarshallingInfoEnum<CodeGenOpts<"AssignmentTrackingMode">, "Enabled">;
} // let Flags = [CC1Option, NoDriverOption]
@@ -5723,6 +6079,9 @@ let Flags = [CC1Option, NoDriverOption] in {
def sys_header_deps : Flag<["-"], "sys-header-deps">,
HelpText<"Include system headers in dependency output">,
MarshallingInfoFlag<DependencyOutputOpts<"IncludeSystemHeaders">>;
+def canonical_system_headers : Flag<["-"], "canonical-system-headers">,
+ HelpText<"Canonicalize system headers in dependency output">,
+ MarshallingInfoFlag<DependencyOutputOpts<"CanonicalSystemHeaders">>;
def module_file_deps : Flag<["-"], "module-file-deps">,
HelpText<"Include module files in dependency output">,
MarshallingInfoFlag<DependencyOutputOpts<"IncludeModuleFiles">>;
@@ -5774,22 +6133,6 @@ def ftabstop : Separate<["-"], "ftabstop">, MetaVarName<"<N>">,
def ferror_limit : Separate<["-"], "ferror-limit">, MetaVarName<"<N>">,
HelpText<"Set the maximum number of errors to emit before stopping (0 = no limit).">,
MarshallingInfoInt<DiagnosticOpts<"ErrorLimit">>;
-def fmacro_backtrace_limit : Separate<["-"], "fmacro-backtrace-limit">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of entries to print in a macro expansion backtrace (0 = no limit).">,
- MarshallingInfoInt<DiagnosticOpts<"MacroBacktraceLimit">, "DiagnosticOptions::DefaultMacroBacktraceLimit">;
-def ftemplate_backtrace_limit : Separate<["-"], "ftemplate-backtrace-limit">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of entries to print in a template instantiation backtrace (0 = no limit).">,
- MarshallingInfoInt<DiagnosticOpts<"TemplateBacktraceLimit">, "DiagnosticOptions::DefaultTemplateBacktraceLimit">;
-def fconstexpr_backtrace_limit : Separate<["-"], "fconstexpr-backtrace-limit">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of entries to print in a constexpr evaluation backtrace (0 = no limit).">,
- MarshallingInfoInt<DiagnosticOpts<"ConstexprBacktraceLimit">, "DiagnosticOptions::DefaultConstexprBacktraceLimit">;
-def fspell_checking_limit : Separate<["-"], "fspell-checking-limit">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of times to perform spell checking on unrecognized identifiers (0 = no limit).">,
- MarshallingInfoInt<DiagnosticOpts<"SpellCheckingLimit">, "DiagnosticOptions::DefaultSpellCheckingLimit">;
-def fcaret_diagnostics_max_lines :
- Separate<["-"], "fcaret-diagnostics-max-lines">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of source lines to show in a caret diagnostic">,
- MarshallingInfoInt<DiagnosticOpts<"SnippetLineLimit">, "DiagnosticOptions::DefaultSnippetLineLimit">;
def verify_EQ : CommaJoined<["-"], "verify=">,
MetaVarName<"<prefixes>">,
HelpText<"Verify diagnostic output using comment directives that start with"
@@ -5871,13 +6214,6 @@ defm enable_noundef_analysis : BoolOption<"",
PosFlag<SetTrue, [], "Enable">,
NegFlag<SetFalse, [], "Disable">,
BothFlags<[], " analyzing function argument and return types for mandatory definedness">>;
-defm opaque_pointers : BoolOption<"",
- "opaque-pointers",
- CodeGenOpts<"OpaquePointers">,
- DefaultTrue,
- PosFlag<SetTrue, [], "Enable">,
- NegFlag<SetFalse, [], "Disable">,
- BothFlags<[], " opaque pointers">>;
def discard_value_names : Flag<["-"], "discard-value-names">,
HelpText<"Discard value names in LLVM IR">,
MarshallingInfoFlag<CodeGenOpts<"DiscardValueNames">>;
@@ -5928,14 +6264,14 @@ defm fimplicit_modules_use_lock : BoolOption<"f", "implicit-modules-use-lock",
PosFlag<SetTrue, [],
"Use filesystem locks for implicit modules builds to avoid "
"duplicating work in competing clang invocations.">>;
-// FIXME: We only need this in C++ modules / Modules TS if we might textually
+// FIXME: We only need this in C++ modules if we might textually
// enter a different module (eg, when building a header unit).
def fmodules_local_submodule_visibility :
Flag<["-"], "fmodules-local-submodule-visibility">,
HelpText<"Enforce name visibility rules across submodules of the same "
"top-level module.">,
MarshallingInfoFlag<LangOpts<"ModulesLocalVisibility">>,
- ImpliedByAnyOf<[fmodules_ts.KeyPath, fcxx_modules.KeyPath]>;
+ ImpliedByAnyOf<[fcxx_modules.KeyPath]>;
def fmodules_codegen :
Flag<["-"], "fmodules-codegen">,
HelpText<"Generate code for uses of this module that assumes an explicit "
@@ -6063,6 +6399,9 @@ def print_stats : Flag<["-"], "print-stats">,
def stats_file : Joined<["-"], "stats-file=">,
HelpText<"Filename to write statistics to">,
MarshallingInfoString<FrontendOpts<"StatsFile">>;
+def stats_file_append : Flag<["-"], "stats-file-append">,
+ HelpText<"If stats should be appended to stats-file instead of overwriting it">,
+ MarshallingInfoFlag<FrontendOpts<"AppendStats">>;
def fdump_record_layouts_simple : Flag<["-"], "fdump-record-layouts-simple">,
HelpText<"Dump record layout information in a simple form used for testing">,
MarshallingInfoFlag<LangOpts<"DumpRecordLayoutsSimple">>;
@@ -6240,18 +6579,6 @@ def ftype_visibility : Joined<["-"], "ftype-visibility=">,
def fapply_global_visibility_to_externs : Flag<["-"], "fapply-global-visibility-to-externs">,
HelpText<"Apply global symbol visibility to external declarations without an explicit visibility">,
MarshallingInfoFlag<LangOpts<"SetVisibilityForExternDecls">>;
-def ftemplate_depth : Separate<["-"], "ftemplate-depth">,
- HelpText<"Maximum depth of recursive template instantiation">,
- MarshallingInfoInt<LangOpts<"InstantiationDepth">, "1024">;
-def foperator_arrow_depth : Separate<["-"], "foperator-arrow-depth">,
- HelpText<"Maximum number of 'operator->'s to call for a member access">,
- MarshallingInfoInt<LangOpts<"ArrowDepth">, "256">;
-def fconstexpr_depth : Separate<["-"], "fconstexpr-depth">,
- HelpText<"Maximum depth of recursive constexpr function calls">,
- MarshallingInfoInt<LangOpts<"ConstexprCallDepth">, "512">;
-def fconstexpr_steps : Separate<["-"], "fconstexpr-steps">,
- HelpText<"Maximum number of steps in constexpr function evaluation">,
- MarshallingInfoInt<LangOpts<"ConstexprStepLimit">, "1048576">;
def fbracket_depth : Separate<["-"], "fbracket-depth">,
HelpText<"Maximum nesting level for parentheses, brackets, and braces">,
MarshallingInfoInt<LangOpts<"BracketDepth">, "256">;
@@ -6442,12 +6769,15 @@ def fno_cuda_host_device_constexpr : Flag<["-"], "fno-cuda-host-device-constexpr
// OpenMP Options
//===----------------------------------------------------------------------===//
-def fopenmp_is_device : Flag<["-"], "fopenmp-is-device">,
- HelpText<"Generate code only for an OpenMP target device.">,
- Flags<[CC1Option, NoDriverOption]>;
+let Flags = [CC1Option, FC1Option, NoDriverOption] in {
+
+def fopenmp_is_target_device : Flag<["-"], "fopenmp-is-target-device">,
+ HelpText<"Generate code only for an OpenMP target device.">;
+def : Flag<["-"], "fopenmp-is-device">, Alias<fopenmp_is_target_device>;
def fopenmp_host_ir_file_path : Separate<["-"], "fopenmp-host-ir-file-path">,
- HelpText<"Path to the IR file produced by the frontend for the host.">,
- Flags<[CC1Option, NoDriverOption]>;
+ HelpText<"Path to the IR file produced by the frontend for the host.">;
+
+} // let Flags = [CC1Option, FC1Option, NoDriverOption]
//===----------------------------------------------------------------------===//
// SYCL Options
@@ -6958,6 +7288,16 @@ def _SLASH_Gv : CLFlag<"Gv">,
def _SLASH_Gregcall : CLFlag<"Gregcall">,
HelpText<"Set __regcall as a default calling convention">;
+// GNU Driver aliases
+
+def : Separate<["-"], "Xmicrosoft-visualc-tools-root">, Alias<_SLASH_vctoolsdir>;
+def : Separate<["-"], "Xmicrosoft-visualc-tools-version">,
+ Alias<_SLASH_vctoolsversion>;
+def : Separate<["-"], "Xmicrosoft-windows-sdk-root">,
+ Alias<_SLASH_winsdkdir>;
+def : Separate<["-"], "Xmicrosoft-windows-sdk-version">,
+ Alias<_SLASH_winsdkversion>;
+
// Ignored:
def _SLASH_analyze_ : CLIgnoredFlag<"analyze-">;
@@ -7112,3 +7452,7 @@ def dxc_entrypoint : Option<["--", "/", "-"], "E", KIND_JOINED_OR_SEPARATE>,
Group<dxc_Group>,
Flags<[DXCOption, NoXarchOption]>,
HelpText<"Entry point name">;
+def dxc_validator_path_EQ : Joined<["--"], "dxv-path=">, Group<dxc_Group>,
+ HelpText<"DXIL validator installation path">;
+def dxc_disable_validation : DXCFlag<"Vd">,
+ HelpText<"Disable validation">;
diff --git a/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h b/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h
index 52889c3fe189..047b50626c44 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h
@@ -30,6 +30,7 @@ class SanitizerArgs {
std::vector<std::string> SystemIgnorelistFiles;
std::vector<std::string> CoverageAllowlistFiles;
std::vector<std::string> CoverageIgnorelistFiles;
+ std::vector<std::string> BinaryMetadataIgnorelistFiles;
int CoverageFeatures = 0;
int BinaryMetadataFeatures = 0;
int MsanTrackOrigins = 0;
@@ -37,9 +38,11 @@ class SanitizerArgs {
bool MsanParamRetval = true;
bool CfiCrossDso = false;
bool CfiICallGeneralizePointers = false;
+ bool CfiICallNormalizeIntegers = false;
bool CfiCanonicalJumpTables = false;
int AsanFieldPadding = 0;
bool SharedRuntime = false;
+ bool StableABI = false;
bool AsanUseAfterScope = true;
bool AsanPoisonCustomArrayCookie = false;
bool AsanGlobalsDeadStripping = false;
@@ -117,6 +120,10 @@ public:
return MemtagMode;
}
+ bool hasShadowCallStack() const {
+ return Sanitizers.has(SanitizerKind::ShadowCallStack);
+ }
+
bool requiresPIE() const;
bool needsUnwindTables() const;
bool needsLTO() const;
diff --git a/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h b/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h
index f75f35dc9e65..e3fcbd9322b0 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h
@@ -9,7 +9,6 @@
#ifndef LLVM_CLANG_DRIVER_TOOLCHAIN_H
#define LLVM_CLANG_DRIVER_TOOLCHAIN_H
-#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Sanitizers.h"
@@ -21,11 +20,12 @@
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/Frontend/Debug/Options.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/TargetParser/Triple.h"
#include <cassert>
#include <climits>
#include <memory>
@@ -187,7 +187,7 @@ private:
protected:
MultilibSet Multilibs;
- Multilib SelectedMultilib;
+ llvm::SmallVector<Multilib> SelectedMultilibs;
ToolChain(const Driver &D, const llvm::Triple &T,
const llvm::opt::ArgList &Args);
@@ -283,7 +283,21 @@ public:
const MultilibSet &getMultilibs() const { return Multilibs; }
- const Multilib &getMultilib() const { return SelectedMultilib; }
+ const llvm::SmallVector<Multilib> &getSelectedMultilibs() const {
+ return SelectedMultilibs;
+ }
+
+ /// Get flags suitable for multilib selection, based on the provided clang
+ /// command line arguments. The command line arguments aren't suitable to be
+ /// used directly for multilib selection because they are not normalized and
+ /// normalization is a complex process. The result of this function is similar
+ /// to clang command line arguments except that the list of arguments is
+ /// incomplete. Only certain command line arguments are processed. If more
+ /// command line arguments are needed for multilib selection then this
+ /// function should be extended.
+ /// To allow users to find out what flags are returned, clang accepts a
+ /// -print-multi-flags-experimental argument.
+ Multilib::flags_list getMultilibFlags(const llvm::opt::ArgList &) const;
SanitizerArgs getSanitizerArgs(const llvm::opt::ArgList &JobArgs) const;
@@ -398,7 +412,7 @@ public:
/// IsIntegratedAssemblerDefault - Does this tool chain enable -integrated-as
/// by default.
- virtual bool IsIntegratedAssemblerDefault() const { return false; }
+ virtual bool IsIntegratedAssemblerDefault() const { return true; }
/// IsIntegratedBackendDefault - Does this tool chain enable
/// -fintegrated-objemitter by default.
@@ -492,9 +506,9 @@ public:
// Returns target specific standard library paths.
path_list getStdlibPaths() const;
- // Returns <ResourceDir>/lib/<OSName>/<arch>. This is used by runtimes (such
- // as OpenMP) to find arch-specific libraries.
- std::string getArchSpecificLibPath() const;
+ // Returns <ResourceDir>/lib/<OSName>/<arch> or <ResourceDir>/lib/<triple>.
+ // This is used by runtimes (such as OpenMP) to find arch-specific libraries.
+ virtual path_list getArchSpecificLibPaths() const;
// Returns <OSname> part of above.
virtual StringRef getOSLibName() const;
@@ -534,8 +548,8 @@ public:
virtual void CheckObjCARC() const {}
/// Get the default debug info format. Typically, this is DWARF.
- virtual codegenoptions::DebugInfoFormat getDefaultDebugFormat() const {
- return codegenoptions::DIF_DWARF;
+ virtual llvm::codegenoptions::DebugInfoFormat getDefaultDebugFormat() const {
+ return llvm::codegenoptions::DIF_DWARF;
}
/// UseDwarfDebugFlags - Embed the compile options to clang into the Dwarf
@@ -571,8 +585,9 @@ public:
}
/// Adjust debug information kind considering all passed options.
- virtual void adjustDebugInfoKind(codegenoptions::DebugInfoKind &DebugInfoKind,
- const llvm::opt::ArgList &Args) const {}
+ virtual void
+ adjustDebugInfoKind(llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
+ const llvm::opt::ArgList &Args) const {}
/// GetExceptionModel - Return the tool chain exception model.
virtual llvm::ExceptionHandling
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Types.def b/contrib/llvm-project/clang/include/clang/Driver/Types.def
index 2960f0ba5e9b..aaea3ec0f9c8 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Types.def
+++ b/contrib/llvm-project/clang/include/clang/Driver/Types.def
@@ -107,4 +107,5 @@ TYPE("dependencies", Dependencies, INVALID, "d", phases
TYPE("cuda-fatbin", CUDA_FATBIN, INVALID, "fatbin", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("hip-fatbin", HIP_FATBIN, INVALID, "hipfb", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("api-information", API_INFO, INVALID, "json", phases::Precompile)
+TYPE("dx-container", DX_CONTAINER, INVALID, "dxo", phases::Compile, phases::Backend)
TYPE("none", Nothing, INVALID, nullptr, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
diff --git a/contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h b/contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h
index 6ed99a127669..bdd3d979547e 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h
@@ -25,15 +25,8 @@ class XRayArgs {
std::vector<std::string> ExtraDeps;
std::vector<std::string> Modes;
XRayInstrSet InstrumentationBundle;
- bool XRayInstrument = false;
- int InstructionThreshold = 200;
- bool XRayAlwaysEmitCustomEvents = false;
- bool XRayAlwaysEmitTypedEvents = false;
+ llvm::opt::Arg *XRayInstrument = nullptr;
bool XRayRT = true;
- bool XRayIgnoreLoops = false;
- bool XRayFunctionIndex;
- int XRayFunctionGroups = 1;
- int XRaySelectedFunctionGroup = 0;
public:
/// Parses the XRay arguments from an argument list.
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/API.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/API.h
index f3c1cce3fe40..a18879f39645 100644
--- a/contrib/llvm-project/clang/include/clang/ExtractAPI/API.h
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/API.h
@@ -26,9 +26,9 @@
#include "clang/ExtractAPI/DeclarationFragments.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
+#include "llvm/TargetParser/Triple.h"
#include <memory>
#include <type_traits>
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/APIIgnoresList.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/APIIgnoresList.h
index 43c546102a2d..3eee8e336cb6 100644
--- a/contrib/llvm-project/clang/include/clang/ExtractAPI/APIIgnoresList.h
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/APIIgnoresList.h
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
///
/// \file This file defines APIIgnoresList which is a type that allows querying
-/// a file containing symbols to ignore when extracting API information.
+/// files containing symbols to ignore when extracting API information.
///
//===----------------------------------------------------------------------===//
@@ -44,11 +44,13 @@ struct IgnoresFileNotFound : public llvm::ErrorInfo<IgnoresFileNotFound> {
/// A type that provides access to a new line separated list of symbol names to
/// ignore when extracting API information.
struct APIIgnoresList {
- /// The API to use for generating from the file at \p IgnoresFilePath.
+ using FilePathList = std::vector<std::string>;
+
+ /// The API to use for generating from the files at \p IgnoresFilePathList.
///
/// \returns an initialized APIIgnoresList or an Error.
- static llvm::Expected<APIIgnoresList> create(llvm::StringRef IgnoresFilePath,
- FileManager &FM);
+ static llvm::Expected<APIIgnoresList>
+ create(const FilePathList &IgnoresFilePathList, FileManager &FM);
APIIgnoresList() = default;
@@ -58,14 +60,14 @@ struct APIIgnoresList {
private:
using SymbolNameList = llvm::SmallVector<llvm::StringRef, 32>;
+ using BufferList = llvm::SmallVector<std::unique_ptr<llvm::MemoryBuffer>>;
- APIIgnoresList(SymbolNameList SymbolsToIgnore,
- std::unique_ptr<llvm::MemoryBuffer> Buffer)
- : SymbolsToIgnore(std::move(SymbolsToIgnore)), Buffer(std::move(Buffer)) {
- }
+ APIIgnoresList(SymbolNameList SymbolsToIgnore, BufferList Buffers)
+ : SymbolsToIgnore(std::move(SymbolsToIgnore)),
+ Buffers(std::move(Buffers)) {}
SymbolNameList SymbolsToIgnore;
- std::unique_ptr<llvm::MemoryBuffer> Buffer;
+ BufferList Buffers;
};
} // namespace extractapi
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/AvailabilityInfo.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/AvailabilityInfo.h
index a258bc52c125..0af373135b66 100644
--- a/contrib/llvm-project/clang/include/clang/ExtractAPI/AvailabilityInfo.h
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/AvailabilityInfo.h
@@ -33,12 +33,14 @@ struct AvailabilityInfo {
VersionTuple Introduced;
VersionTuple Deprecated;
VersionTuple Obsoleted;
+ bool Unavailable;
AvailabilityInfo() = default;
AvailabilityInfo(StringRef Domain, VersionTuple I, VersionTuple D,
- VersionTuple O)
- : Domain(Domain), Introduced(I), Deprecated(D), Obsoleted(O) {}
+ VersionTuple O, bool U)
+ : Domain(Domain), Introduced(I), Deprecated(D), Obsoleted(O),
+ Unavailable(U) {}
};
class AvailabilitySet {
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/DeclarationFragments.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/DeclarationFragments.h
index a5db4d23e8b5..82f0c42ab8aa 100644
--- a/contrib/llvm-project/clang/include/clang/ExtractAPI/DeclarationFragments.h
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/DeclarationFragments.h
@@ -97,8 +97,37 @@ public:
Declaration(Declaration) {}
};
+ using FragmentIterator = std::vector<Fragment>::iterator;
+ using ConstFragmentIterator = std::vector<Fragment>::const_iterator;
+
const std::vector<Fragment> &getFragments() const { return Fragments; }
+ FragmentIterator begin() { return Fragments.begin(); }
+
+ FragmentIterator end() { return Fragments.end(); }
+
+ ConstFragmentIterator cbegin() const { return Fragments.cbegin(); }
+
+ ConstFragmentIterator cend() const { return Fragments.cend(); }
+
+ // Add a new Fragment at an arbitrary offset.
+ DeclarationFragments &insert(FragmentIterator It, StringRef Spelling,
+ FragmentKind Kind,
+ StringRef PreciseIdentifier = "",
+ const Decl *Declaration = nullptr) {
+ Fragments.insert(It,
+ Fragment(Spelling, Kind, PreciseIdentifier, Declaration));
+ return *this;
+ }
+
+ DeclarationFragments &insert(FragmentIterator It,
+ DeclarationFragments &&Other) {
+ Fragments.insert(It, std::make_move_iterator(Other.Fragments.begin()),
+ std::make_move_iterator(Other.Fragments.end()));
+ Other.Fragments.clear();
+ return *this;
+ }
+
/// Append a new Fragment to the end of the Fragments.
///
/// \returns a reference to the DeclarationFragments object itself after
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIActionBase.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIActionBase.h
new file mode 100644
index 000000000000..ac4f391db5f1
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIActionBase.h
@@ -0,0 +1,54 @@
+//===- ExtractAPI/ExtractAPIActionBase.h -----------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the ExtractAPIActionBase class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EXTRACTAPI_ACTION_BASE_H
+#define LLVM_CLANG_EXTRACTAPI_ACTION_BASE_H
+
+#include "clang/ExtractAPI/API.h"
+#include "clang/ExtractAPI/APIIgnoresList.h"
+
+namespace clang {
+
+/// Base class to be used by front end actions to generate ExtarctAPI info
+///
+/// Deriving from this class equips an action with all the necessary tools to
+/// generate ExractAPI information in form of symbol-graphs
+class ExtractAPIActionBase {
+protected:
+ /// A representation of the APIs this action extracts.
+ std::unique_ptr<extractapi::APISet> API;
+
+ /// A stream to the output file of this action.
+ std::unique_ptr<raw_pwrite_stream> OS;
+
+ /// The product this action is extracting API information for.
+ std::string ProductName;
+
+ /// The synthesized input buffer that contains all the provided input header
+ /// files.
+ std::unique_ptr<llvm::MemoryBuffer> Buffer;
+
+ /// The list of symbols to ignore during serialization
+ extractapi::APIIgnoresList IgnoresList;
+
+ /// Implements EndSourceFileAction for Symbol-Graph generation
+ ///
+ /// Use the serializer to generate output symbol graph files from
+ /// the information gathered during the execution of Action.
+ void ImplEndSourceFileAction();
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_EXTRACTAPI_ACTION_BASE_H
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIVisitor.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIVisitor.h
index f6546fb4776a..f0882afb5a61 100644
--- a/contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIVisitor.h
@@ -14,24 +14,28 @@
#ifndef LLVM_CLANG_EXTRACTAPI_EXTRACT_API_VISITOR_H
#define LLVM_CLANG_EXTRACTAPI_EXTRACT_API_VISITOR_H
+#include "llvm/ADT/FunctionExtras.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/SourceManager.h"
#include "clang/ExtractAPI/API.h"
-#include "llvm/ADT/FunctionExtras.h"
+#include "clang/ExtractAPI/TypedefUnderlyingTypeResolver.h"
+#include "llvm/ADT/StringRef.h"
+#include <type_traits>
namespace clang {
namespace extractapi {
+namespace impl {
-/// The RecursiveASTVisitor to traverse symbol declarations and collect API
-/// information.
-class ExtractAPIVisitor : public RecursiveASTVisitor<ExtractAPIVisitor> {
-public:
- ExtractAPIVisitor(ASTContext &Context,
- llvm::unique_function<bool(SourceLocation)> LocationChecker,
- APISet &API)
- : Context(Context), API(API),
- LocationChecker(std::move(LocationChecker)) {}
+template <typename Derived>
+class ExtractAPIVisitorBase : public RecursiveASTVisitor<Derived> {
+protected:
+ ExtractAPIVisitorBase(ASTContext &Context, APISet &API)
+ : Context(Context), API(API) {}
+public:
const APISet &getAPI() const { return API; }
bool VisitVarDecl(const VarDecl *Decl);
@@ -50,7 +54,11 @@ public:
bool VisitObjCCategoryDecl(const ObjCCategoryDecl *Decl);
-private:
+ bool shouldDeclBeIncluded(const Decl *Decl) const;
+
+ const RawComment *fetchRawCommentForDecl(const Decl *Decl) const;
+
+protected:
/// Collect API information for the enum constants and associate with the
/// parent enum.
void recordEnumConstants(EnumRecord *EnumRecord,
@@ -77,9 +85,616 @@ private:
void recordObjCProtocols(ObjCContainerRecord *Container,
ObjCInterfaceDecl::protocol_range Protocols);
+
ASTContext &Context;
APISet &API;
- llvm::unique_function<bool(SourceLocation)> LocationChecker;
+
+ StringRef getTypedefName(const TagDecl *Decl) {
+ if (const auto *TypedefDecl = Decl->getTypedefNameForAnonDecl())
+ return TypedefDecl->getName();
+
+ return {};
+ }
+
+ bool isInSystemHeader(const Decl *D) {
+ return Context.getSourceManager().isInSystemHeader(D->getLocation());
+ }
+
+private:
+ Derived &getDerivedExtractAPIVisitor() {
+ return *static_cast<Derived *>(this);
+ }
+};
+
+template <typename T>
+static void modifyRecords(const T &Records, const StringRef &Name) {
+ for (const auto &Record : Records) {
+ if (Name == Record.second.get()->Name) {
+ auto &DeclFragment = Record.second->Declaration;
+ DeclFragment.insert(DeclFragment.begin(), " ",
+ DeclarationFragments::FragmentKind::Text);
+ DeclFragment.insert(DeclFragment.begin(), "typedef",
+ DeclarationFragments::FragmentKind::Keyword, "",
+ nullptr);
+ DeclFragment.insert(--DeclFragment.end(), " { ... } ",
+ DeclarationFragments::FragmentKind::Text);
+ DeclFragment.insert(--DeclFragment.end(), Name,
+ DeclarationFragments::FragmentKind::Identifier);
+ break;
+ }
+ }
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitVarDecl(const VarDecl *Decl) {
+ // skip function parameters.
+ if (isa<ParmVarDecl>(Decl))
+ return true;
+
+ // Skip non-global variables in records (struct/union/class).
+ if (Decl->getDeclContext()->isRecord())
+ return true;
+
+ // Skip local variables inside function or method.
+ if (!Decl->isDefinedOutsideFunctionOrMethod())
+ return true;
+
+ // If this is a template but not specialization or instantiation, skip.
+ if (Decl->getASTContext().getTemplateOrSpecializationInfo(Decl) &&
+ Decl->getTemplateSpecializationKind() == TSK_Undeclared)
+ return true;
+
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the variable.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForVar(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ // Add the global variable record to the API set.
+ API.addGlobalVar(Name, USR, Loc, AvailabilitySet(Decl), Linkage, Comment,
+ Declaration, SubHeading, isInSystemHeader(Decl));
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitFunctionDecl(
+ const FunctionDecl *Decl) {
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(Decl)) {
+ // Skip member function in class templates.
+ if (Method->getParent()->getDescribedClassTemplate() != nullptr)
+ return true;
+
+ // Skip methods in records.
+ for (const auto &P : Context.getParents(*Method)) {
+ if (P.template get<CXXRecordDecl>())
+ return true;
+ }
+
+ // Skip ConstructorDecl and DestructorDecl.
+ if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
+ return true;
+ }
+
+ // Skip templated functions.
+ switch (Decl->getTemplatedKind()) {
+ case FunctionDecl::TK_NonTemplate:
+ case FunctionDecl::TK_DependentNonTemplate:
+ break;
+ case FunctionDecl::TK_MemberSpecialization:
+ case FunctionDecl::TK_FunctionTemplateSpecialization:
+ if (auto *TemplateInfo = Decl->getTemplateSpecializationInfo()) {
+ if (!TemplateInfo->isExplicitInstantiationOrSpecialization())
+ return true;
+ }
+ break;
+ case FunctionDecl::TK_FunctionTemplate:
+ case FunctionDecl::TK_DependentFunctionTemplateSpecialization:
+ return true;
+ }
+
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments, sub-heading, and signature of the function.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForFunction(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ FunctionSignature Signature =
+ DeclarationFragmentsBuilder::getFunctionSignature(Decl);
+
+ // Add the function record to the API set.
+ API.addGlobalFunction(Name, USR, Loc, AvailabilitySet(Decl), Linkage, Comment,
+ Declaration, SubHeading, Signature,
+ isInSystemHeader(Decl));
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitEnumDecl(const EnumDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ SmallString<128> QualifiedNameBuffer;
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ if (Name.empty())
+ Name = getTypedefName(Decl);
+ if (Name.empty()) {
+ llvm::raw_svector_ostream OS(QualifiedNameBuffer);
+ Decl->printQualifiedName(OS);
+ Name = QualifiedNameBuffer.str();
+ }
+
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the enum.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForEnum(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ EnumRecord *EnumRecord =
+ API.addEnum(API.copyString(Name), USR, Loc, AvailabilitySet(Decl),
+ Comment, Declaration, SubHeading, isInSystemHeader(Decl));
+
+ // Now collect information about the enumerators in this enum.
+ getDerivedExtractAPIVisitor().recordEnumConstants(EnumRecord,
+ Decl->enumerators());
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitRecordDecl(const RecordDecl *Decl) {
+ // Skip C++ structs/classes/unions
+ // TODO: support C++ records
+ if (isa<CXXRecordDecl>(Decl))
+ return true;
+
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ if (Name.empty())
+ Name = getTypedefName(Decl);
+ if (Name.empty())
+ return true;
+
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the struct.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForStruct(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ StructRecord *StructRecord =
+ API.addStruct(Name, USR, Loc, AvailabilitySet(Decl), Comment, Declaration,
+ SubHeading, isInSystemHeader(Decl));
+
+ // Now collect information about the fields in this struct.
+ getDerivedExtractAPIVisitor().recordStructFields(StructRecord,
+ Decl->fields());
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitObjCInterfaceDecl(
+ const ObjCInterfaceDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the interface.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCInterface(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ // Collect super class information.
+ SymbolReference SuperClass;
+ if (const auto *SuperClassDecl = Decl->getSuperClass()) {
+ SuperClass.Name = SuperClassDecl->getObjCRuntimeNameAsString();
+ SuperClass.USR = API.recordUSR(SuperClassDecl);
+ }
+
+ ObjCInterfaceRecord *ObjCInterfaceRecord = API.addObjCInterface(
+ Name, USR, Loc, AvailabilitySet(Decl), Linkage, Comment, Declaration,
+ SubHeading, SuperClass, isInSystemHeader(Decl));
+
+ // Record all methods (selectors). This doesn't include automatically
+ // synthesized property methods.
+ getDerivedExtractAPIVisitor().recordObjCMethods(ObjCInterfaceRecord,
+ Decl->methods());
+ getDerivedExtractAPIVisitor().recordObjCProperties(ObjCInterfaceRecord,
+ Decl->properties());
+ getDerivedExtractAPIVisitor().recordObjCInstanceVariables(ObjCInterfaceRecord,
+ Decl->ivars());
+ getDerivedExtractAPIVisitor().recordObjCProtocols(ObjCInterfaceRecord,
+ Decl->protocols());
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitObjCProtocolDecl(
+ const ObjCProtocolDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the protocol.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCProtocol(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ ObjCProtocolRecord *ObjCProtocolRecord =
+ API.addObjCProtocol(Name, USR, Loc, AvailabilitySet(Decl), Comment,
+ Declaration, SubHeading, isInSystemHeader(Decl));
+
+ getDerivedExtractAPIVisitor().recordObjCMethods(ObjCProtocolRecord,
+ Decl->methods());
+ getDerivedExtractAPIVisitor().recordObjCProperties(ObjCProtocolRecord,
+ Decl->properties());
+ getDerivedExtractAPIVisitor().recordObjCProtocols(ObjCProtocolRecord,
+ Decl->protocols());
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitTypedefNameDecl(
+ const TypedefNameDecl *Decl) {
+ // Skip ObjC Type Parameter for now.
+ if (isa<ObjCTypeParamDecl>(Decl))
+ return true;
+
+ if (!Decl->isDefinedOutsideFunctionOrMethod())
+ return true;
+
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Add the notion of typedef for tag type (struct or enum) of the same name.
+ if (const ElaboratedType *ET =
+ dyn_cast<ElaboratedType>(Decl->getUnderlyingType())) {
+ if (const TagType *TagTy = dyn_cast<TagType>(ET->desugar())) {
+ if (Decl->getName() == TagTy->getDecl()->getName()) {
+ if (TagTy->getDecl()->isStruct()) {
+ modifyRecords(API.getStructs(), Decl->getName());
+ }
+ if (TagTy->getDecl()->isEnum()) {
+ modifyRecords(API.getEnums(), Decl->getName());
+ }
+ }
+ }
+ }
+
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ QualType Type = Decl->getUnderlyingType();
+ SymbolReference SymRef =
+ TypedefUnderlyingTypeResolver(Context).getSymbolReferenceForType(Type,
+ API);
+
+ API.addTypedef(Name, USR, Loc, AvailabilitySet(Decl), Comment,
+ DeclarationFragmentsBuilder::getFragmentsForTypedef(Decl),
+ DeclarationFragmentsBuilder::getSubHeading(Decl), SymRef,
+ isInSystemHeader(Decl));
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitObjCCategoryDecl(
+ const ObjCCategoryDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+ // Build declaration fragments and sub-heading for the category.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCCategory(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ const ObjCInterfaceDecl *InterfaceDecl = Decl->getClassInterface();
+ SymbolReference Interface(InterfaceDecl->getName(),
+ API.recordUSR(InterfaceDecl));
+
+ ObjCCategoryRecord *ObjCCategoryRecord = API.addObjCCategory(
+ Name, USR, Loc, AvailabilitySet(Decl), Comment, Declaration, SubHeading,
+ Interface, isInSystemHeader(Decl));
+
+ getDerivedExtractAPIVisitor().recordObjCMethods(ObjCCategoryRecord,
+ Decl->methods());
+ getDerivedExtractAPIVisitor().recordObjCProperties(ObjCCategoryRecord,
+ Decl->properties());
+ getDerivedExtractAPIVisitor().recordObjCInstanceVariables(ObjCCategoryRecord,
+ Decl->ivars());
+ getDerivedExtractAPIVisitor().recordObjCProtocols(ObjCCategoryRecord,
+ Decl->protocols());
+
+ return true;
+}
+
+/// Collect API information for the enum constants and associate with the
+/// parent enum.
+template <typename Derived>
+void ExtractAPIVisitorBase<Derived>::recordEnumConstants(
+ EnumRecord *EnumRecord, const EnumDecl::enumerator_range Constants) {
+ for (const auto *Constant : Constants) {
+ // Collect symbol information.
+ StringRef Name = Constant->getName();
+ StringRef USR = API.recordUSR(Constant);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Constant->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Constant))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the enum constant.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForEnumConstant(Constant);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Constant);
+
+ API.addEnumConstant(EnumRecord, Name, USR, Loc, AvailabilitySet(Constant),
+ Comment, Declaration, SubHeading,
+ isInSystemHeader(Constant));
+ }
+}
+
+/// Collect API information for the struct fields and associate with the
+/// parent struct.
+template <typename Derived>
+void ExtractAPIVisitorBase<Derived>::recordStructFields(
+ StructRecord *StructRecord, const RecordDecl::field_range Fields) {
+ for (const auto *Field : Fields) {
+ // Collect symbol information.
+ StringRef Name = Field->getName();
+ StringRef USR = API.recordUSR(Field);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Field->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Field))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the struct field.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForField(Field);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Field);
+
+ API.addStructField(StructRecord, Name, USR, Loc, AvailabilitySet(Field),
+ Comment, Declaration, SubHeading,
+ isInSystemHeader(Field));
+ }
+}
+
+/// Collect API information for the Objective-C methods and associate with the
+/// parent container.
+template <typename Derived>
+void ExtractAPIVisitorBase<Derived>::recordObjCMethods(
+ ObjCContainerRecord *Container,
+ const ObjCContainerDecl::method_range Methods) {
+ for (const auto *Method : Methods) {
+ // Don't record selectors for properties.
+ if (Method->isPropertyAccessor())
+ continue;
+
+ StringRef Name = API.copyString(Method->getSelector().getAsString());
+ StringRef USR = API.recordUSR(Method);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Method->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Method))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments, sub-heading, and signature for the method.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCMethod(Method);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Method);
+ FunctionSignature Signature =
+ DeclarationFragmentsBuilder::getFunctionSignature(Method);
+
+ API.addObjCMethod(Container, Name, USR, Loc, AvailabilitySet(Method),
+ Comment, Declaration, SubHeading, Signature,
+ Method->isInstanceMethod(), isInSystemHeader(Method));
+ }
+}
+
+template <typename Derived>
+void ExtractAPIVisitorBase<Derived>::recordObjCProperties(
+ ObjCContainerRecord *Container,
+ const ObjCContainerDecl::prop_range Properties) {
+ for (const auto *Property : Properties) {
+ StringRef Name = Property->getName();
+ StringRef USR = API.recordUSR(Property);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Property->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Property))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the property.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCProperty(Property);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Property);
+
+ StringRef GetterName =
+ API.copyString(Property->getGetterName().getAsString());
+ StringRef SetterName =
+ API.copyString(Property->getSetterName().getAsString());
+
+ // Get the attributes for property.
+ unsigned Attributes = ObjCPropertyRecord::NoAttr;
+ if (Property->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_readonly)
+ Attributes |= ObjCPropertyRecord::ReadOnly;
+
+ API.addObjCProperty(
+ Container, Name, USR, Loc, AvailabilitySet(Property), Comment,
+ Declaration, SubHeading,
+ static_cast<ObjCPropertyRecord::AttributeKind>(Attributes), GetterName,
+ SetterName, Property->isOptional(),
+ !(Property->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_class),
+ isInSystemHeader(Property));
+ }
+}
+
+template <typename Derived>
+void ExtractAPIVisitorBase<Derived>::recordObjCInstanceVariables(
+ ObjCContainerRecord *Container,
+ const llvm::iterator_range<
+ DeclContext::specific_decl_iterator<ObjCIvarDecl>>
+ Ivars) {
+ for (const auto *Ivar : Ivars) {
+ StringRef Name = Ivar->getName();
+ StringRef USR = API.recordUSR(Ivar);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Ivar->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Ivar))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the instance variable.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForField(Ivar);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Ivar);
+
+ ObjCInstanceVariableRecord::AccessControl Access =
+ Ivar->getCanonicalAccessControl();
+
+ API.addObjCInstanceVariable(Container, Name, USR, Loc,
+ AvailabilitySet(Ivar), Comment, Declaration,
+ SubHeading, Access, isInSystemHeader(Ivar));
+ }
+}
+
+template <typename Derived>
+void ExtractAPIVisitorBase<Derived>::recordObjCProtocols(
+ ObjCContainerRecord *Container,
+ ObjCInterfaceDecl::protocol_range Protocols) {
+ for (const auto *Protocol : Protocols)
+ Container->Protocols.emplace_back(Protocol->getName(),
+ API.recordUSR(Protocol));
+}
+
+} // namespace impl
+
+/// The RecursiveASTVisitor to traverse symbol declarations and collect API
+/// information.
+template <typename Derived = void>
+class ExtractAPIVisitor
+ : public impl::ExtractAPIVisitorBase<std::conditional_t<
+ std::is_same_v<Derived, void>, ExtractAPIVisitor<>, Derived>> {
+ using Base = impl::ExtractAPIVisitorBase<std::conditional_t<
+ std::is_same_v<Derived, void>, ExtractAPIVisitor<>, Derived>>;
+
+public:
+ ExtractAPIVisitor(ASTContext &Context, APISet &API) : Base(Context, API) {}
+
+ bool shouldDeclBeIncluded(const Decl *D) const { return true; }
+ const RawComment *fetchRawCommentForDecl(const Decl *D) const {
+ return this->Context.getRawCommentForDeclNoCache(D);
+ }
};
} // namespace extractapi
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/FrontendActions.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/FrontendActions.h
index e946b33abbd9..c67864aac9af 100644
--- a/contrib/llvm-project/clang/include/clang/ExtractAPI/FrontendActions.h
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/FrontendActions.h
@@ -7,41 +7,27 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// This file defines the ExtractAPIAction frontend action.
+/// This file defines the ExtractAPIAction and WrappingExtractAPIAction frontend
+/// actions.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_EXTRACTAPI_FRONTEND_ACTIONS_H
#define LLVM_CLANG_EXTRACTAPI_FRONTEND_ACTIONS_H
-#include "clang/ExtractAPI/API.h"
-#include "clang/ExtractAPI/APIIgnoresList.h"
+#include "clang/ExtractAPI/ExtractAPIActionBase.h"
#include "clang/Frontend/FrontendAction.h"
namespace clang {
/// ExtractAPIAction sets up the output file and creates the ExtractAPIVisitor.
-class ExtractAPIAction : public ASTFrontendAction {
+class ExtractAPIAction : public ASTFrontendAction,
+ private ExtractAPIActionBase {
protected:
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) override;
private:
- /// A representation of the APIs this action extracts.
- std::unique_ptr<extractapi::APISet> API;
-
- /// A stream to the output file of this action.
- std::unique_ptr<raw_pwrite_stream> OS;
-
- /// The product this action is extracting API information for.
- std::string ProductName;
-
- /// The synthesized input buffer that contains all the provided input header
- /// files.
- std::unique_ptr<llvm::MemoryBuffer> Buffer;
-
- /// The list of symbols to ignore during serialization
- extractapi::APIIgnoresList IgnoresList;
/// The input file originally provided on the command line.
///
@@ -62,10 +48,46 @@ private:
/// emit them in this callback.
void EndSourceFileAction() override;
+ static StringRef getInputBufferName() { return "<extract-api-includes>"; }
+
static std::unique_ptr<llvm::raw_pwrite_stream>
CreateOutputFile(CompilerInstance &CI, StringRef InFile);
+};
- static StringRef getInputBufferName() { return "<extract-api-includes>"; }
+/// Wrap ExtractAPIAction on top of a pre-existing action
+///
+/// Used when the ExtractAPI action needs to be executed as a side effect of a
+/// regular compilation job. Unlike ExtractAPIAction, this is meant to be used
+/// on regular source files (.m, .c files) instead of header files.
+class WrappingExtractAPIAction : public WrapperFrontendAction,
+ private ExtractAPIActionBase {
+public:
+ WrappingExtractAPIAction(std::unique_ptr<FrontendAction> WrappedAction)
+ : WrapperFrontendAction(std::move(WrappedAction)) {}
+
+protected:
+ /// Create ExtractAPI consumer multiplexed on another consumer.
+ ///
+ /// This allows us to execute the ExtractAPI action on top of the wrapped action.
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override;
+
+private:
+ /// Flag to check if the wrapper front end action's consumer is
+ /// created or not
+ bool CreatedASTConsumer = false;
+
+ void EndSourceFile() override { FrontendAction::EndSourceFile(); }
+
+ /// Called after executing the action on the synthesized input buffer.
+ ///
+ /// Executes both Wrapper and ExtractAPIBase end source file
+ /// actions. This is the place where all the gathered symbol graph
+ /// information is emitted.
+ void EndSourceFileAction() override;
+
+ static std::unique_ptr<llvm::raw_pwrite_stream>
+ CreateOutputFile(CompilerInstance &CI, StringRef InFile);
};
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SerializerBase.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SerializerBase.h
index d8aa826e3f4f..006e92be2955 100644
--- a/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SerializerBase.h
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SerializerBase.h
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// This file defines the ExtractAPI APISerializer interface.
+/// This file defines the ExtractAPI APISetVisitor interface.
///
//===----------------------------------------------------------------------===//
@@ -15,47 +15,107 @@
#define LLVM_CLANG_EXTRACTAPI_SERIALIZATION_SERIALIZERBASE_H
#include "clang/ExtractAPI/API.h"
-#include "clang/ExtractAPI/APIIgnoresList.h"
-#include "llvm/Support/raw_ostream.h"
namespace clang {
namespace extractapi {
-/// Common options to customize the serializer output.
-struct APISerializerOption {
- /// Do not include unnecessary whitespaces to save space.
- bool Compact;
-};
-
-/// The base interface of serializers for API information.
-class APISerializer {
+/// The base interface of visitors for API information.
+template <typename Derived> class APISetVisitor {
public:
- /// Serialize the API information to \p os.
- virtual void serialize(raw_ostream &os) = 0;
+ void traverseAPISet() {
+ getDerived()->traverseGlobalVariableRecords();
-protected:
- const APISet &API;
+ getDerived()->traverseGlobalFunctionRecords();
+
+ getDerived()->traverseEnumRecords();
+
+ getDerived()->traverseStructRecords();
+
+ getDerived()->traverseObjCInterfaces();
+
+ getDerived()->traverseObjCProtocols();
+
+ getDerived()->traverseMacroDefinitionRecords();
+
+ getDerived()->traverseTypedefRecords();
+ }
+
+ void traverseGlobalFunctionRecords() {
+ for (const auto &GlobalFunction : API.getGlobalFunctions())
+ getDerived()->visitGlobalFunctionRecord(*GlobalFunction.second);
+ }
+
+ void traverseGlobalVariableRecords() {
+ for (const auto &GlobalVariable : API.getGlobalVariables())
+ getDerived()->visitGlobalVariableRecord(*GlobalVariable.second);
+ }
+
+ void traverseEnumRecords() {
+ for (const auto &Enum : API.getEnums())
+ getDerived()->visitEnumRecord(*Enum.second);
+ }
- /// The list of symbols to ignore.
- ///
- /// Note: This should be consulted before emitting a symbol.
- const APIIgnoresList &IgnoresList;
+ void traverseStructRecords() {
+ for (const auto &Struct : API.getStructs())
+ getDerived()->visitStructRecord(*Struct.second);
+ }
- APISerializerOption Options;
+ void traverseObjCInterfaces() {
+ for (const auto &Interface : API.getObjCInterfaces())
+ getDerived()->visitObjCContainerRecord(*Interface.second);
+ }
+
+ void traverseObjCProtocols() {
+ for (const auto &Protocol : API.getObjCProtocols())
+ getDerived()->visitObjCContainerRecord(*Protocol.second);
+ }
+
+ void traverseMacroDefinitionRecords() {
+ for (const auto &Macro : API.getMacros())
+ getDerived()->visitMacroDefinitionRecord(*Macro.second);
+ }
+
+ void traverseTypedefRecords() {
+ for (const auto &Typedef : API.getTypedefs())
+ getDerived()->visitTypedefRecord(*Typedef.second);
+ }
+
+ /// Visit a global function record.
+ void visitGlobalFunctionRecord(const GlobalFunctionRecord &Record){};
+
+ /// Visit a global variable record.
+ void visitGlobalVariableRecord(const GlobalVariableRecord &Record){};
+
+ /// Visit an enum record.
+ void visitEnumRecord(const EnumRecord &Record){};
+
+ /// Visit a struct record.
+ void visitStructRecord(const StructRecord &Record){};
+
+ /// Visit an Objective-C container record.
+ void visitObjCContainerRecord(const ObjCContainerRecord &Record){};
+
+ /// Visit a macro definition record.
+ void visitMacroDefinitionRecord(const MacroDefinitionRecord &Record){};
+
+ /// Visit a typedef record.
+ void visitTypedefRecord(const TypedefRecord &Record){};
+
+protected:
+ const APISet &API;
public:
- APISerializer() = delete;
- APISerializer(const APISerializer &) = delete;
- APISerializer(APISerializer &&) = delete;
- APISerializer &operator=(const APISerializer &) = delete;
- APISerializer &operator=(APISerializer &&) = delete;
+ APISetVisitor() = delete;
+ APISetVisitor(const APISetVisitor &) = delete;
+ APISetVisitor(APISetVisitor &&) = delete;
+ APISetVisitor &operator=(const APISetVisitor &) = delete;
+ APISetVisitor &operator=(APISetVisitor &&) = delete;
protected:
- APISerializer(const APISet &API, const APIIgnoresList &IgnoresList,
- APISerializerOption Options = {})
- : API(API), IgnoresList(IgnoresList), Options(Options) {}
+ APISetVisitor(const APISet &API) : API(API) {}
+ ~APISetVisitor() = default;
- virtual ~APISerializer() = default;
+ Derived *getDerived() { return static_cast<Derived *>(this); };
};
} // namespace extractapi
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h
index 55c7bb32054b..e77903f8ba08 100644
--- a/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h
@@ -9,8 +9,8 @@
/// \file
/// This file defines the SymbolGraphSerializer class.
///
-/// Implement an APISerializer for the Symbol Graph format for ExtractAPI.
-/// See https://github.com/apple/swift-docc-symbolkit.
+/// Implement an APISetVisitor to serialize the APISet into the Symbol Graph
+/// format for ExtractAPI. See https://github.com/apple/swift-docc-symbolkit.
///
//===----------------------------------------------------------------------===//
@@ -31,14 +31,18 @@ namespace extractapi {
using namespace llvm::json;
-/// The serializer that organizes API information in the Symbol Graph format.
+/// Common options to customize the visitor output.
+struct SymbolGraphSerializerOption {
+ /// Do not include unnecessary whitespaces to save space.
+ bool Compact;
+};
+
+/// The visitor that organizes API information in the Symbol Graph format.
///
/// The Symbol Graph format (https://github.com/apple/swift-docc-symbolkit)
/// models an API set as a directed graph, where nodes are symbol declarations,
/// and edges are relationships between the connected symbols.
-class SymbolGraphSerializer : public APISerializer {
- virtual void anchor();
-
+class SymbolGraphSerializer : public APISetVisitor<SymbolGraphSerializer> {
/// A JSON array of formatted symbols in \c APISet.
Array Symbols;
@@ -48,7 +52,7 @@ class SymbolGraphSerializer : public APISerializer {
/// The Symbol Graph format version used by this serializer.
static const VersionTuple FormatVersion;
- /// Indicates whether child symbols should be serialized. This is mainly
+ /// Indicates whether child symbols should be visited. This is mainly
/// useful for \c serializeSingleSymbolSGF.
bool ShouldRecurse;
@@ -59,15 +63,14 @@ public:
/// Symbol Graph.
Object serialize();
- /// Implement the APISerializer::serialize interface. Wrap serialize(void) and
- /// write out the serialized JSON object to \p os.
- void serialize(raw_ostream &os) override;
+ /// Wrap serialize(void) and write out the serialized JSON object to \p os.
+ void serialize(raw_ostream &os);
/// Serialize a single symbol SGF. This is primarily used for libclang.
///
/// \returns an optional JSON Object representing the payload that libclang
/// expects for providing symbol information for a single symbol. If this is
- /// not a known symbol returns \c None.
+ /// not a known symbol returns \c std::nullopt.
static std::optional<Object> serializeSingleSymbolSGF(StringRef USR,
const APISet &API);
@@ -136,35 +139,44 @@ private:
void serializeRelationship(RelationshipKind Kind, SymbolReference Source,
SymbolReference Target);
- /// Serialize a global function record.
- void serializeGlobalFunctionRecord(const GlobalFunctionRecord &Record);
+protected:
+ /// The list of symbols to ignore.
+ ///
+ /// Note: This should be consulted before emitting a symbol.
+ const APIIgnoresList &IgnoresList;
+
+ SymbolGraphSerializerOption Options;
+
+public:
+ /// Visit a global function record.
+ void visitGlobalFunctionRecord(const GlobalFunctionRecord &Record);
- /// Serialize a global variable record.
- void serializeGlobalVariableRecord(const GlobalVariableRecord &Record);
+ /// Visit a global variable record.
+ void visitGlobalVariableRecord(const GlobalVariableRecord &Record);
- /// Serialize an enum record.
- void serializeEnumRecord(const EnumRecord &Record);
+ /// Visit an enum record.
+ void visitEnumRecord(const EnumRecord &Record);
- /// Serialize a struct record.
- void serializeStructRecord(const StructRecord &Record);
+ /// Visit a struct record.
+ void visitStructRecord(const StructRecord &Record);
- /// Serialize an Objective-C container record.
- void serializeObjCContainerRecord(const ObjCContainerRecord &Record);
+ /// Visit an Objective-C container record.
+ void visitObjCContainerRecord(const ObjCContainerRecord &Record);
- /// Serialize a macro definition record.
- void serializeMacroDefinitionRecord(const MacroDefinitionRecord &Record);
+ /// Visit a macro definition record.
+ void visitMacroDefinitionRecord(const MacroDefinitionRecord &Record);
- /// Serialize a typedef record.
- void serializeTypedefRecord(const TypedefRecord &Record);
+ /// Visit a typedef record.
+ void visitTypedefRecord(const TypedefRecord &Record);
+ /// Serialize a single record.
void serializeSingleRecord(const APIRecord *Record);
-public:
SymbolGraphSerializer(const APISet &API, const APIIgnoresList &IgnoresList,
- APISerializerOption Options = {},
+ SymbolGraphSerializerOption Options = {},
bool ShouldRecurse = true)
- : APISerializer(API, IgnoresList, Options), ShouldRecurse(ShouldRecurse) {
- }
+ : APISetVisitor(API), ShouldRecurse(ShouldRecurse),
+ IgnoresList(IgnoresList), Options(Options) {}
};
} // namespace extractapi
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/TypedefUnderlyingTypeResolver.h
index 54aa11c354c0..54aa11c354c0 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.h
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/TypedefUnderlyingTypeResolver.h
diff --git a/contrib/llvm-project/clang/include/clang/Format/Format.h b/contrib/llvm-project/clang/include/clang/Format/Format.h
index 7a313460d888..8bcf6e6f58f1 100755
--- a/contrib/llvm-project/clang/include/clang/Format/Format.h
+++ b/contrib/llvm-project/clang/include/clang/Format/Format.h
@@ -92,9 +92,10 @@ struct FormatStyle {
/// )
/// \endcode
///
- /// \warning
- /// Note: This currently only applies to parentheses.
- /// \endwarning
+ /// \note
+ /// This currently only applies to braced initializer lists (when
+ /// ``Cpp11BracedListStyle`` is ``true``) and parentheses.
+ /// \endnote
BAS_BlockIndent,
};
@@ -133,8 +134,10 @@ struct FormatStyle {
/// if not ``None``, when using initialization for an array of structs
/// aligns the fields into columns.
///
- /// NOTE: As of clang-format 15 this option only applied to arrays with equal
- /// number of columns per row.
+ /// \note
+ /// As of clang-format 15 this option only applied to arrays with equal
+ /// number of columns per row.
+ /// \endnote
///
/// \version 13
ArrayInitializerAlignmentStyle AlignArrayOfStructures;
@@ -296,6 +299,103 @@ struct FormatStyle {
/// \version 3.8
AlignConsecutiveStyle AlignConsecutiveDeclarations;
+ /// Alignment options.
+ ///
+ struct ShortCaseStatementsAlignmentStyle {
+ /// Whether aligning is enabled.
+ /// \code
+ /// true:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ /// default: return "";
+ /// }
+ ///
+ /// false:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ /// default: return "";
+ /// }
+ /// \endcode
+ bool Enabled;
+ /// Whether to align across empty lines.
+ /// \code
+ /// true:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ ///
+ /// default: return "";
+ /// }
+ ///
+ /// false:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ ///
+ /// default: return "";
+ /// }
+ /// \endcode
+ bool AcrossEmptyLines;
+ /// Whether to align across comments.
+ /// \code
+ /// true:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ /// /* A comment. */
+ /// default: return "";
+ /// }
+ ///
+ /// false:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ /// /* A comment. */
+ /// default: return "";
+ /// }
+ /// \endcode
+ bool AcrossComments;
+    /// Whether aligned case labels are aligned on the colon, or on the
+    /// tokens after the colon.
+ /// \code
+ /// true:
+ /// switch (level) {
+ /// case log::info : return "info:";
+ /// case log::warning: return "warning:";
+ /// default : return "";
+ /// }
+ ///
+ /// false:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ /// default: return "";
+ /// }
+ /// \endcode
+ bool AlignCaseColons;
+ bool operator==(const ShortCaseStatementsAlignmentStyle &R) const {
+ return Enabled == R.Enabled && AcrossEmptyLines == R.AcrossEmptyLines &&
+ AcrossComments == R.AcrossComments &&
+ AlignCaseColons == R.AlignCaseColons;
+ }
+ };
+
+ /// Style of aligning consecutive short case labels.
+ /// Only applies if ``AllowShortCaseLabelsOnASingleLine`` is ``true``.
+ ///
+ /// \code{.yaml}
+ /// # Example of usage:
+ /// AlignConsecutiveShortCaseStatements:
+ /// Enabled: true
+ /// AcrossEmptyLines: true
+ /// AcrossComments: true
+ /// AlignCaseColons: false
+ /// \endcode
+ /// \version 17
+ ShortCaseStatementsAlignmentStyle AlignConsecutiveShortCaseStatements;
+
/// Different styles for aligning escaped newlines.
enum EscapedNewlineAlignmentStyle : int8_t {
/// Don't align escaped newlines.
@@ -439,8 +539,10 @@ struct FormatStyle {
/// Control of trailing comments.
///
- /// NOTE: As of clang-format 16 this option is not a bool but can be set
- /// to the options. Conventional bool options still can be parsed as before.
+ /// \note
+ /// As of clang-format 16 this option is not a bool but can be set
+ /// to the options. Conventional bool options still can be parsed as before.
+ /// \endnote
///
/// \code{.yaml}
/// # Example of usage:
@@ -944,6 +1046,39 @@ struct FormatStyle {
/// \version 12
BitFieldColonSpacingStyle BitFieldColonSpacing;
+ /// The number of columns to use to indent the contents of braced init lists.
+ /// If unset, ``ContinuationIndentWidth`` is used.
+ /// \code
+ /// AlignAfterOpenBracket: AlwaysBreak
+ /// BracedInitializerIndentWidth: 2
+ ///
+ /// void f() {
+ /// SomeClass c{
+ /// "foo",
+ /// "bar",
+ /// "baz",
+ /// };
+ /// auto s = SomeStruct{
+ /// .foo = "foo",
+ /// .bar = "bar",
+ /// .baz = "baz",
+ /// };
+ /// SomeArrayT a[3] = {
+ /// {
+ /// foo,
+ /// bar,
+ /// },
+ /// {
+ /// foo,
+ /// bar,
+ /// },
+ /// SomeArrayT{},
+ /// };
+ /// }
+ /// \endcode
+ /// \version 17
+ std::optional<unsigned> BracedInitializerIndentWidth;
+
/// Different ways to wrap braces after control statements.
enum BraceWrappingAfterControlStatementStyle : int8_t {
/// Never wrap braces after a control statement.
@@ -1060,8 +1195,10 @@ struct FormatStyle {
/// \endcode
bool AfterNamespace;
/// Wrap ObjC definitions (interfaces, implementations...).
- /// \note @autoreleasepool and @synchronized blocks are wrapped
- /// according to `AfterControlStatement` flag.
+ /// \note
+ /// @autoreleasepool and @synchronized blocks are wrapped
+ /// according to ``AfterControlStatement`` flag.
+ /// \endnote
bool AfterObjCDeclaration;
/// Wrap struct definitions.
/// \code
@@ -1170,9 +1307,10 @@ struct FormatStyle {
bool IndentBraces;
/// If ``false``, empty function body can be put on a single line.
/// This option is used only if the opening brace of the function has
- /// already been wrapped, i.e. the `AfterFunction` brace wrapping mode is
+ /// already been wrapped, i.e. the ``AfterFunction`` brace wrapping mode is
/// set, and the function could/should not be put on a single line (as per
- /// `AllowShortFunctionsOnASingleLine` and constructor formatting options).
+ /// ``AllowShortFunctionsOnASingleLine`` and constructor formatting
+ /// options).
/// \code
/// false: true:
/// int f() vs. int f()
@@ -1183,7 +1321,7 @@ struct FormatStyle {
bool SplitEmptyFunction;
/// If ``false``, empty record (e.g. class, struct or union) body
/// can be put on a single line. This option is used only if the opening
- /// brace of the record has already been wrapped, i.e. the `AfterClass`
+ /// brace of the record has already been wrapped, i.e. the ``AfterClass``
/// (for classes) brace wrapping mode is set.
/// \code
/// false: true:
@@ -1195,7 +1333,7 @@ struct FormatStyle {
bool SplitEmptyRecord;
/// If ``false``, empty namespace body can be put on a single line.
/// This option is used only if the opening brace of the namespace has
- /// already been wrapped, i.e. the `AfterNamespace` brace wrapping mode is
+ /// already been wrapped, i.e. the ``AfterNamespace`` brace wrapping mode is
/// set.
/// \code
/// false: true:
@@ -1252,11 +1390,13 @@ struct FormatStyle {
/// \version 16
AttributeBreakingStyle BreakAfterAttributes;
- /// If ``true``, clang-format will always break after a Json array `[`
- /// otherwise it will scan until the closing `]` to determine if it should add
- /// newlines between elements (prettier compatible).
+ /// If ``true``, clang-format will always break after a Json array ``[``
+ /// otherwise it will scan until the closing ``]`` to determine if it should
+ /// add newlines between elements (prettier compatible).
///
- /// NOTE: This is currently only for formatting JSON.
+ /// \note
+ /// This is currently only for formatting JSON.
+ /// \endnote
/// \code
/// true: false:
/// [ vs. [1, 2, 3, 4]
@@ -1751,7 +1891,7 @@ struct FormatStyle {
/// } // namespace N
/// \endcode
BS_WebKit,
- /// Configure each individual brace in `BraceWrapping`.
+ /// Configure each individual brace in ``BraceWrapping``.
BS_Custom
};
@@ -1768,7 +1908,7 @@ struct FormatStyle {
BBCDS_Never,
/// Breaking between template declaration and ``concept`` is allowed. The
/// actual behavior depends on the content and line breaking rules and
- /// penalities.
+ /// penalties.
BBCDS_Allowed,
/// Always break before ``concept``, putting it in the line after the
/// template declaration.
@@ -2146,8 +2286,10 @@ struct FormatStyle {
/// made, clang-format analyzes whether there are other bin-packed cases in
/// the input file and act accordingly.
///
- /// NOTE: This is an experimental flag, that might go away or be renamed. Do
- /// not use this in config files, etc. Use at your own risk.
+ /// \note
+ /// This is an experimental flag, that might go away or be renamed. Do
+ /// not use this in config files, etc. Use at your own risk.
+ /// \endnote
/// \version 3.7
bool ExperimentalAutoDetectBinPacking;
@@ -2423,9 +2565,9 @@ struct FormatStyle {
/// and ``while``) in C++ unless the control statements are inside macro
/// definitions or the braces would enclose preprocessor directives.
/// \warning
- /// Setting this option to `true` could lead to incorrect code formatting due
- /// to clang-format's lack of complete semantic information. As such, extra
- /// care should be taken to review code changes made by this option.
+ /// Setting this option to ``true`` could lead to incorrect code formatting
+ /// due to clang-format's lack of complete semantic information. As such,
+ /// extra care should be taken to review code changes made by this option.
/// \endwarning
/// \code
/// false: true:
@@ -2642,6 +2784,10 @@ struct FormatStyle {
bool JavaScriptWrapImports;
// clang-format on
+ /// Keep empty lines (up to ``MaxEmptyLinesToKeep``) at end of file.
+ /// \version 17
+ bool KeepEmptyLinesAtEOF;
+
/// If true, the empty line at the start of blocks is kept.
/// \code
/// true: false:
@@ -2670,6 +2816,11 @@ struct FormatStyle {
/// [](SomeReallyLongLambdaSignatureArgument foo) {
/// return;
/// });
+ ///
+ /// someMethod(someOtherMethod(
+ /// [](SomeReallyLongLambdaSignatureArgument foo) {
+ /// return;
+ /// }));
/// \endcode
LBI_OuterScope,
};
@@ -2678,11 +2829,7 @@ struct FormatStyle {
/// causes the lambda body to be indented one additional level relative to
/// the indentation level of the signature. ``OuterScope`` forces the lambda
/// body to be indented one additional level relative to the parent scope
- /// containing the lambda signature. For callback-heavy code, it may improve
- /// readability to have the signature indented two levels and to use
- /// ``OuterScope``. The KJ style guide requires ``OuterScope``.
- /// `KJ style guide
- /// <https://github.com/capnproto/capnproto/blob/master/style-guide.md>`_
+ /// containing the lambda signature.
/// \version 13
LambdaBodyIndentationKind LambdaBodyIndentation;
@@ -2779,6 +2926,46 @@ struct FormatStyle {
/// \version 3.7
std::string MacroBlockEnd;
+ /// A list of macros of the form \c <definition>=<expansion> .
+ ///
+ /// Code will be parsed with macros expanded, in order to determine how to
+ /// interpret and format the macro arguments.
+ ///
+ /// For example, the code:
+ /// \code
+ /// A(a*b);
+ /// \endcode
+ ///
+ /// will usually be interpreted as a call to a function A, and the
+ /// multiplication expression will be formatted as ``a * b``.
+ ///
+ /// If we specify the macro definition:
+ /// \code{.yaml}
+ /// Macros:
+ /// - A(x)=x
+ /// \endcode
+ ///
+ /// the code will now be parsed as a declaration of the variable b of type a*,
+ /// and formatted as ``a* b`` (depending on pointer-binding rules).
+ ///
+ /// Features and restrictions:
+ /// * Both function-like macros and object-like macros are supported.
+ /// * Macro arguments must be used exactly once in the expansion.
+ /// * No recursive expansion; macros referencing other macros will be
+ /// ignored.
+ /// * Overloading by arity is supported: for example, given the macro
+ /// definitions A=x, A()=y, A(a)=a
+ ///
+ /// \code
+ /// A; -> x;
+ /// A(); -> y;
+ /// A(z); -> z;
+ /// A(a, b); // will not be expanded.
+ /// \endcode
+ ///
+ /// \version 17.0
+ std::vector<std::string> Macros;
+
/// The maximum number of consecutive empty lines to keep.
/// \code
/// MaxEmptyLinesToKeep: 1 vs. MaxEmptyLinesToKeep: 0
@@ -2963,6 +3150,21 @@ struct FormatStyle {
/// cccccccccccccccccccc()
/// \endcode
PCIS_NextLine,
+ /// Put all constructor initializers on the next line if they fit.
+ /// Otherwise, put each one on its own line.
+ /// \code
+ /// Constructor()
+ /// : a(), b()
+ ///
+ /// Constructor()
+ /// : aaaaaaaaaaaaaaaaaaaa(), bbbbbbbbbbbbbbbbbbbb(), ddddddddddddd()
+ ///
+ /// Constructor()
+ /// : aaaaaaaaaaaaaaaaaaaa(),
+ /// bbbbbbbbbbbbbbbbbbbb(),
+ /// cccccccccccccccccccc()
+ /// \endcode
+ PCIS_NextLineOnly,
};
/// The pack constructor initializers style to use.
@@ -3085,7 +3287,7 @@ struct FormatStyle {
/// Different ways to arrange specifiers and qualifiers (e.g. const/volatile).
/// \warning
- /// Setting ``QualifierAlignment`` to something other than `Leave`, COULD
+ /// Setting ``QualifierAlignment`` to something other than ``Leave``, COULD
/// lead to incorrect code formatting due to incorrect decisions made due to
/// clang-formats lack of complete semantic information.
/// As such extra care should be taken to review code changes made by the use
@@ -3106,10 +3308,13 @@ struct FormatStyle {
/// * restrict
/// * type
///
- /// Note: it MUST contain 'type'.
+ /// \note
+ /// it MUST contain 'type'.
+ /// \endnote
+ ///
/// Items to the left of 'type' will be placed to the left of the type and
- /// aligned in the order supplied. Items to the right of 'type' will be placed
- /// to the right of the type and aligned in the order supplied.
+ /// aligned in the order supplied. Items to the right of 'type' will be
+ /// placed to the right of the type and aligned in the order supplied.
///
/// \code{.yaml}
/// QualifierOrder: ['inline', 'static', 'type', 'const', 'volatile' ]
@@ -3228,9 +3433,9 @@ struct FormatStyle {
/// This option will be renamed and expanded to support other styles.
/// \endwarning
/// \warning
- /// Setting this option to `true` could lead to incorrect code formatting due
- /// to clang-format's lack of complete semantic information. As such, extra
- /// care should be taken to review code changes made by this option.
+ /// Setting this option to ``true`` could lead to incorrect code formatting
+ /// due to clang-format's lack of complete semantic information. As such,
+ /// extra care should be taken to review code changes made by this option.
/// \endwarning
/// \code
/// false: true:
@@ -3276,11 +3481,47 @@ struct FormatStyle {
/// \version 14
bool RemoveBracesLLVM;
+ /// Types of redundant parentheses to remove.
+ enum RemoveParenthesesStyle : int8_t {
+ /// Do not remove parentheses.
+ /// \code
+ /// class __declspec((dllimport)) X {};
+ /// co_return (((0)));
+ /// return ((a + b) - ((c + d)));
+ /// \endcode
+ RPS_Leave,
+ /// Replace multiple parentheses with single parentheses.
+ /// \code
+ /// class __declspec(dllimport) X {};
+ /// co_return (0);
+ /// return ((a + b) - (c + d));
+ /// \endcode
+ RPS_MultipleParentheses,
+ /// Also remove parentheses enclosing the expression in a
+ /// ``return``/``co_return`` statement.
+ /// \code
+ /// class __declspec(dllimport) X {};
+ /// co_return 0;
+ /// return (a + b) - (c + d);
+ /// \endcode
+ RPS_ReturnStatement,
+ };
+
+ /// Remove redundant parentheses.
+ /// \warning
+ /// Setting this option to any value other than ``Leave`` could lead to
+ /// incorrect code formatting due to clang-format's lack of complete semantic
+ /// information. As such, extra care should be taken to review code changes
+ /// made by this option.
+ /// \endwarning
+ /// \version 17
+ RemoveParenthesesStyle RemoveParentheses;
+
/// Remove semicolons after the closing brace of a non-empty function.
/// \warning
- /// Setting this option to `true` could lead to incorrect code formatting due
- /// to clang-format's lack of complete semantic information. As such, extra
- /// care should be taken to review code changes made by this option.
+ /// Setting this option to ``true`` could lead to incorrect code formatting
+ /// due to clang-format's lack of complete semantic information. As such,
+ /// extra care should be taken to review code changes made by this option.
/// \endwarning
/// \code
/// false: true:
@@ -3386,7 +3627,7 @@ struct FormatStyle {
/// }
/// \endcode
REI_OuterScope,
- /// Align requires expression body relative to the `requires` keyword.
+ /// Align requires expression body relative to the ``requires`` keyword.
/// \code
/// template <typename T>
/// concept C = requires(T t) {
@@ -3512,11 +3753,6 @@ struct FormatStyle {
};
/// Controls if and how clang-format will sort ``#includes``.
- /// If ``Never``, includes are never sorted.
- /// If ``CaseInsensitive``, includes are sorted in an ASCIIbetical or case
- /// insensitive fashion.
- /// If ``CaseSensitive``, includes are sorted in an alphabetical or case
- /// sensitive fashion.
/// \version 3.8
SortIncludesOptions SortIncludes;
@@ -3693,6 +3929,17 @@ struct FormatStyle {
/// \version 7
bool SpaceBeforeInheritanceColon;
+ /// If ``true``, a space will be added before a JSON colon. For other
+ /// languages, e.g. JavaScript, use ``SpacesInContainerLiterals`` instead.
+ /// \code
+ /// true: false:
+ /// { {
+ /// "key" : "value" vs. "key": "value"
+ /// } }
+ /// \endcode
+ /// \version 17
+ bool SpaceBeforeJsonColon;
+
/// Different ways to put a space before opening parentheses.
enum SpaceBeforeParensStyle : int8_t {
/// Never put a space before opening parentheses.
@@ -3751,7 +3998,7 @@ struct FormatStyle {
/// \endcode
SBPO_Always,
/// Configure each individual space before parentheses in
- /// `SpaceBeforeParensOptions`.
+ /// ``SpaceBeforeParensOptions``.
SBPO_Custom,
};
@@ -3904,24 +4151,20 @@ struct FormatStyle {
bool SpaceInEmptyBlock;
/// If ``true``, spaces may be inserted into ``()``.
- /// \code
- /// true: false:
- /// void f( ) { vs. void f() {
- /// int x[] = {foo( ), bar( )}; int x[] = {foo(), bar()};
- /// if (true) { if (true) {
- /// f( ); f();
- /// } }
- /// } }
- /// \endcode
+ /// This option is **deprecated**. See ``InEmptyParentheses`` of
+ /// ``SpacesInParensOptions``.
/// \version 3.7
- bool SpaceInEmptyParentheses;
+ // bool SpaceInEmptyParentheses;
/// The number of spaces before trailing line comments
/// (``//`` - comments).
///
- /// This does not affect trailing block comments (``/*`` - comments) as
- /// those commonly have different usage patterns and a number of special
- /// cases.
+ /// This does not affect trailing block comments (``/*`` - comments) as those
+ /// commonly have different usage patterns and a number of special cases. In
+ /// the case of Verilog, it doesn't affect a comment right after the opening
+ /// parenthesis in the port or parameter list in a module header, because it
+ /// is probably for the port on the following line instead of the parenthesis
+ /// it follows.
/// \code
/// SpacesBeforeTrailingComments: 3
/// void f() {
@@ -3933,7 +4176,7 @@ struct FormatStyle {
/// \version 3.7
unsigned SpacesBeforeTrailingComments;
- /// Styles for adding spacing after ``<`` and before ``>`
+ /// Styles for adding spacing after ``<`` and before ``>``
/// in template argument lists.
enum SpacesInAnglesStyle : int8_t {
/// Remove spaces after ``<`` and before ``>``.
@@ -3958,16 +4201,14 @@ struct FormatStyle {
/// If ``true``, spaces will be inserted around if/for/switch/while
/// conditions.
- /// \code
- /// true: false:
- /// if ( a ) { ... } vs. if (a) { ... }
- /// while ( i < 5 ) { ... } while (i < 5) { ... }
- /// \endcode
+ /// This option is **deprecated**. See ``InConditionalStatements`` of
+ /// ``SpacesInParensOptions``.
/// \version 10
- bool SpacesInConditionalStatement;
+ // bool SpacesInConditionalStatement;
- /// If ``true``, spaces are inserted inside container literals (e.g.
- /// ObjC and Javascript array and dict literals).
+ /// If ``true``, spaces are inserted inside container literals (e.g. ObjC and
+ /// Javascript array and dict literals). For JSON, use
+ /// ``SpaceBeforeJsonColon`` instead.
/// \code{.js}
/// true: false:
/// var arr = [ 1, 2, 3 ]; vs. var arr = [1, 2, 3];
@@ -3977,12 +4218,10 @@ struct FormatStyle {
bool SpacesInContainerLiterals;
/// If ``true``, spaces may be inserted into C style casts.
- /// \code
- /// true: false:
- /// x = ( int32 )y vs. x = (int32)y
- /// \endcode
+ /// This option is **deprecated**. See ``InCStyleCasts`` of
+ /// ``SpacesInParensOptions``.
/// \version 3.7
- bool SpacesInCStyleCastParentheses;
+ // bool SpacesInCStyleCastParentheses;
/// Control of spaces within a single line comment.
struct SpacesInLineComment {
@@ -4026,13 +4265,112 @@ struct FormatStyle {
/// \version 13
SpacesInLineComment SpacesInLineCommentPrefix;
- /// If ``true``, spaces will be inserted after ``(`` and before ``)``.
+ /// Different ways to put a space before opening and closing parentheses.
+ enum SpacesInParensStyle : int8_t {
+ /// Never put a space in parentheses.
+ /// \code
+ /// void f() {
+ /// if(true) {
+ /// f();
+ /// }
+ /// }
+ /// \endcode
+ SIPO_Never,
+ /// Configure each individual space in parentheses in
+    /// ``SpacesInParensOptions``.
+ SIPO_Custom,
+ };
+
+  /// If ``true``, spaces will be inserted after ``(`` and before ``)``.
+ /// This option is **deprecated**. The previous behavior is preserved by using
+ /// ``SpacesInParens`` with ``Custom`` and by setting all
+ /// ``SpacesInParensOptions`` to ``true`` except for ``InCStyleCasts`` and
+ /// ``InEmptyParentheses``.
+ /// \version 3.7
+ // bool SpacesInParentheses;
+
+ /// Defines in which cases spaces will be inserted after ``(`` and before
+ /// ``)``.
+ /// \version 17
+ SpacesInParensStyle SpacesInParens;
+
+ /// Precise control over the spacing in parentheses.
/// \code
- /// true: false:
- /// t f( Deleted & ) & = delete; vs. t f(Deleted &) & = delete;
+ /// # Should be declared this way:
+ /// SpacesInParens: Custom
+ /// SpacesInParensOptions:
+ /// InConditionalStatements: true
+ /// Other: true
/// \endcode
- /// \version 3.7
- bool SpacesInParentheses;
+ struct SpacesInParensCustom {
+ /// Put a space in parentheses only inside conditional statements
+ /// (``for/if/while/switch...``).
+ /// \code
+ /// true: false:
+ /// if ( a ) { ... } vs. if (a) { ... }
+ /// while ( i < 5 ) { ... } while (i < 5) { ... }
+ /// \endcode
+ bool InConditionalStatements;
+ /// Put a space in C style casts.
+ /// \code
+ /// true: false:
+ /// x = ( int32 )y vs. x = (int32)y
+ /// \endcode
+ bool InCStyleCasts;
+ /// Put a space in parentheses only if the parentheses are empty i.e. '()'
+ /// \code
+ /// true: false:
+ /// void f( ) { vs. void f() {
+ /// int x[] = {foo( ), bar( )}; int x[] = {foo(), bar()};
+ /// if (true) { if (true) {
+ /// f( ); f();
+ /// } }
+ /// } }
+ /// \endcode
+ bool InEmptyParentheses;
+ /// Put a space in parentheses not covered by preceding options.
+ /// \code
+ /// true: false:
+ /// t f( Deleted & ) & = delete; vs. t f(Deleted &) & = delete;
+ /// \endcode
+ bool Other;
+
+ SpacesInParensCustom()
+ : InConditionalStatements(false), InCStyleCasts(false),
+ InEmptyParentheses(false), Other(false) {}
+
+ SpacesInParensCustom(bool InConditionalStatements, bool InCStyleCasts,
+ bool InEmptyParentheses, bool Other)
+ : InConditionalStatements(InConditionalStatements),
+ InCStyleCasts(InCStyleCasts),
+ InEmptyParentheses(InEmptyParentheses),
+ Other(Other) {}
+
+ bool operator==(const SpacesInParensCustom &R) const {
+ return InConditionalStatements == R.InConditionalStatements &&
+ InCStyleCasts == R.InCStyleCasts &&
+ InEmptyParentheses == R.InEmptyParentheses &&
+ Other == R.Other;
+ }
+ bool operator!=(const SpacesInParensCustom &R) const {
+ return !(*this == R);
+ }
+ };
+
+ /// Control of individual spaces in parentheses.
+ ///
+ /// If ``SpacesInParens`` is set to ``Custom``, use this to specify
+ /// how each individual space in parentheses case should be handled.
+ /// Otherwise, this is ignored.
+ /// \code{.yaml}
+ /// # Example of usage:
+ /// SpacesInParens: Custom
+ /// SpacesInParensOptions:
+ /// InConditionalStatements: true
+ /// InEmptyParentheses: true
+ /// \endcode
+ /// \version 17
+ SpacesInParensCustom SpacesInParensOptions;
/// If ``true``, spaces will be inserted after ``[`` and before ``]``.
/// Lambdas without arguments or unspecified size array declarations will not
@@ -4112,6 +4450,16 @@ struct FormatStyle {
/// \version 3.7
unsigned TabWidth;
+ /// A vector of non-keyword identifiers that should be interpreted as type
+ /// names.
+ ///
+ /// A ``*``, ``&``, or ``&&`` between a type name and another non-keyword
+ /// identifier is annotated as a pointer or reference token instead of a
+ /// binary operator.
+ ///
+ /// \version 17
+ std::vector<std::string> TypeNames;
+
/// \brief A vector of macros that should be interpreted as type declarations
/// instead of as function calls.
///
@@ -4154,6 +4502,20 @@ struct FormatStyle {
/// \version 3.7
UseTabStyle UseTab;
+ /// For Verilog, put each port on its own line in module instantiations.
+ /// \code
+ /// true:
+ /// ffnand ff1(.q(),
+ /// .qbar(out1),
+ /// .clear(in1),
+ /// .preset(in2));
+ ///
+ /// false:
+ /// ffnand ff1(.q(), .qbar(out1), .clear(in1), .preset(in2));
+ /// \endcode
+ /// \version 17
+ bool VerilogBreakBetweenInstancePorts;
+
/// A vector of macros which are whitespace-sensitive and should not
/// be touched.
///
@@ -4179,6 +4541,8 @@ struct FormatStyle {
AlignConsecutiveBitFields == R.AlignConsecutiveBitFields &&
AlignConsecutiveDeclarations == R.AlignConsecutiveDeclarations &&
AlignConsecutiveMacros == R.AlignConsecutiveMacros &&
+ AlignConsecutiveShortCaseStatements ==
+ R.AlignConsecutiveShortCaseStatements &&
AlignEscapedNewlines == R.AlignEscapedNewlines &&
AlignOperands == R.AlignOperands &&
AlignTrailingComments == R.AlignTrailingComments &&
@@ -4204,6 +4568,7 @@ struct FormatStyle {
BinPackArguments == R.BinPackArguments &&
BinPackParameters == R.BinPackParameters &&
BitFieldColonSpacing == R.BitFieldColonSpacing &&
+ BracedInitializerIndentWidth == R.BracedInitializerIndentWidth &&
BreakAfterAttributes == R.BreakAfterAttributes &&
BreakAfterJavaFieldAnnotations == R.BreakAfterJavaFieldAnnotations &&
BreakArrays == R.BreakArrays &&
@@ -4250,12 +4615,13 @@ struct FormatStyle {
JavaImportGroups == R.JavaImportGroups &&
JavaScriptQuotes == R.JavaScriptQuotes &&
JavaScriptWrapImports == R.JavaScriptWrapImports &&
+ KeepEmptyLinesAtEOF == R.KeepEmptyLinesAtEOF &&
KeepEmptyLinesAtTheStartOfBlocks ==
R.KeepEmptyLinesAtTheStartOfBlocks &&
Language == R.Language &&
LambdaBodyIndentation == R.LambdaBodyIndentation &&
LineEnding == R.LineEnding && MacroBlockBegin == R.MacroBlockBegin &&
- MacroBlockEnd == R.MacroBlockEnd &&
+ MacroBlockEnd == R.MacroBlockEnd && Macros == R.Macros &&
MaxEmptyLinesToKeep == R.MaxEmptyLinesToKeep &&
NamespaceIndentation == R.NamespaceIndentation &&
NamespaceMacros == R.NamespaceMacros &&
@@ -4283,6 +4649,7 @@ struct FormatStyle {
RawStringFormats == R.RawStringFormats &&
ReferenceAlignment == R.ReferenceAlignment &&
RemoveBracesLLVM == R.RemoveBracesLLVM &&
+ RemoveParentheses == R.RemoveParentheses &&
RemoveSemicolon == R.RemoveSemicolon &&
RequiresClausePosition == R.RequiresClausePosition &&
RequiresExpressionIndentation == R.RequiresExpressionIndentation &&
@@ -4299,6 +4666,7 @@ struct FormatStyle {
SpaceBeforeCtorInitializerColon ==
R.SpaceBeforeCtorInitializerColon &&
SpaceBeforeInheritanceColon == R.SpaceBeforeInheritanceColon &&
+ SpaceBeforeJsonColon == R.SpaceBeforeJsonColon &&
SpaceBeforeParens == R.SpaceBeforeParens &&
SpaceBeforeParensOptions == R.SpaceBeforeParensOptions &&
SpaceAroundPointerQualifiers == R.SpaceAroundPointerQualifiers &&
@@ -4306,22 +4674,23 @@ struct FormatStyle {
R.SpaceBeforeRangeBasedForLoopColon &&
SpaceBeforeSquareBrackets == R.SpaceBeforeSquareBrackets &&
SpaceInEmptyBlock == R.SpaceInEmptyBlock &&
- SpaceInEmptyParentheses == R.SpaceInEmptyParentheses &&
SpacesBeforeTrailingComments == R.SpacesBeforeTrailingComments &&
SpacesInAngles == R.SpacesInAngles &&
- SpacesInConditionalStatement == R.SpacesInConditionalStatement &&
SpacesInContainerLiterals == R.SpacesInContainerLiterals &&
- SpacesInCStyleCastParentheses == R.SpacesInCStyleCastParentheses &&
SpacesInLineCommentPrefix.Minimum ==
R.SpacesInLineCommentPrefix.Minimum &&
SpacesInLineCommentPrefix.Maximum ==
R.SpacesInLineCommentPrefix.Maximum &&
- SpacesInParentheses == R.SpacesInParentheses &&
+ SpacesInParens == R.SpacesInParens &&
+ SpacesInParensOptions == R.SpacesInParensOptions &&
SpacesInSquareBrackets == R.SpacesInSquareBrackets &&
Standard == R.Standard &&
StatementAttributeLikeMacros == R.StatementAttributeLikeMacros &&
StatementMacros == R.StatementMacros && TabWidth == R.TabWidth &&
- TypenameMacros == R.TypenameMacros && UseTab == R.UseTab &&
+ TypeNames == R.TypeNames && TypenameMacros == R.TypenameMacros &&
+ UseTab == R.UseTab &&
+ VerilogBreakBetweenInstancePorts ==
+ R.VerilogBreakBetweenInstancePorts &&
WhitespaceSensitiveMacros == R.WhitespaceSensitiveMacros;
}
@@ -4464,7 +4833,7 @@ formatReplacements(StringRef Code, const tooling::Replacements &Replaces,
/// - If a replacement has offset UINT_MAX, length 1, and a replacement text
/// that is the name of the header to be removed, the header will be removed
/// from \p Code if it exists.
-/// The include manipulation is done via `tooling::HeaderInclude`, see its
+/// The include manipulation is done via ``tooling::HeaderInclude``, see its
/// documentation for more details on how include insertion points are found and
/// what edits are produced.
llvm::Expected<tooling::Replacements>
@@ -4555,11 +4924,11 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style = getLLVMStyle());
extern const char *StyleOptionHelpDescription;
/// The suggested format style to use by default. This allows tools using
-/// `getStyle` to have a consistent default style.
+/// ``getStyle`` to have a consistent default style.
/// Different builds can modify the value to the preferred styles.
extern const char *DefaultFormatStyle;
-/// The suggested predefined style to use as the fallback style in `getStyle`.
+/// The suggested predefined style to use as the fallback style in ``getStyle``.
/// Different builds can modify the value to the preferred styles.
extern const char *DefaultFallbackStyle;
@@ -4630,6 +4999,9 @@ inline StringRef getLanguageName(FormatStyle::LanguageKind Language) {
}
}
+bool isClangFormatOn(StringRef Comment);
+bool isClangFormatOff(StringRef Comment);
+
} // end namespace format
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h b/contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h
index c79202890272..b762be1c9b1d 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h
@@ -119,11 +119,13 @@ private:
std::shared_ptr<PreprocessorOptions> PPOpts;
IntrusiveRefCntPtr<ASTReader> Reader;
bool HadModuleLoaderFatalFailure = false;
+ bool StorePreamblesInMemory = false;
struct ASTWriterData;
std::unique_ptr<ASTWriterData> WriterData;
FileSystemOptions FileSystemOpts;
+ std::string PreambleStoragePath;
/// The AST consumer that received information about the translation
/// unit as it was parsed or loaded.
@@ -641,7 +643,7 @@ public:
bool visitLocalTopLevelDecls(void *context, DeclVisitorFn Fn);
/// Get the PCH file if one was included.
- const FileEntry *getPCHFile();
+ OptionalFileEntryRef getPCHFile();
/// Returns true if the ASTUnit was constructed from a serialized
/// module file.
@@ -692,6 +694,7 @@ public:
const PCHContainerReader &PCHContainerRdr, WhatToLoad ToLoad,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
const FileSystemOptions &FileSystemOpts,
+ std::shared_ptr<HeaderSearchOptions> HSOpts,
bool UseDebugInfo = false, bool OnlyLocalDecls = false,
CaptureDiagsKind CaptureDiagnostics = CaptureDiagsKind::None,
bool AllowASTWithCompilerErrors = false,
@@ -802,6 +805,13 @@ public:
///
/// \param ResourceFilesPath - The path to the compiler resource files.
///
+ /// \param StorePreamblesInMemory - Whether to store PCH in memory. If false,
+ /// PCH are stored in temporary files.
+ ///
+ /// \param PreambleStoragePath - The path to a directory, in which to create
+ /// temporary PCH files. If empty, the default system temporary directory is
+ /// used. This parameter is ignored if \p StorePreamblesInMemory is true.
+ ///
/// \param ModuleFormat - If provided, uses the specific module format.
///
/// \param ErrAST - If non-null and parsing failed without any AST to return
@@ -816,11 +826,12 @@ public:
///
// FIXME: Move OnlyLocalDecls, UseBumpAllocator to setters on the ASTUnit, we
// shouldn't need to specify them at construction time.
- static ASTUnit *LoadFromCommandLine(
+ static std::unique_ptr<ASTUnit> LoadFromCommandLine(
const char **ArgBegin, const char **ArgEnd,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags, StringRef ResourceFilesPath,
- bool OnlyLocalDecls = false,
+ bool StorePreamblesInMemory = false,
+ StringRef PreambleStoragePath = StringRef(), bool OnlyLocalDecls = false,
CaptureDiagsKind CaptureDiagnostics = CaptureDiagsKind::None,
ArrayRef<RemappedFile> RemappedFiles = std::nullopt,
bool RemappedFilesKeepOriginalName = true,
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h
index f132c961c8a2..c6af1fd5dd01 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h
@@ -12,6 +12,7 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Frontend/Utils.h"
@@ -233,6 +234,8 @@ public:
return *Invocation;
}
+ std::shared_ptr<CompilerInvocation> getInvocationPtr() { return Invocation; }
+
/// setInvocation - Replace the current invocation.
void setInvocation(std::shared_ptr<CompilerInvocation> Value);
@@ -338,6 +341,11 @@ public:
return *Diagnostics;
}
+ IntrusiveRefCntPtr<DiagnosticsEngine> getDiagnosticsPtr() const {
+ assert(Diagnostics && "Compiler instance has no diagnostics!");
+ return Diagnostics;
+ }
+
/// setDiagnostics - Replace the current diagnostics engine.
void setDiagnostics(DiagnosticsEngine *Value);
@@ -373,6 +381,11 @@ public:
return *Target;
}
+ IntrusiveRefCntPtr<TargetInfo> getTargetPtr() const {
+ assert(Target && "Compiler instance has no target!");
+ return Target;
+ }
+
/// Replace the current Target.
void setTarget(TargetInfo *Value);
@@ -406,6 +419,11 @@ public:
return *FileMgr;
}
+ IntrusiveRefCntPtr<FileManager> getFileManagerPtr() const {
+ assert(FileMgr && "Compiler instance has no file manager!");
+ return FileMgr;
+ }
+
void resetAndLeakFileManager() {
llvm::BuryPointer(FileMgr.get());
FileMgr.resetWithoutRelease();
@@ -426,6 +444,11 @@ public:
return *SourceMgr;
}
+ IntrusiveRefCntPtr<SourceManager> getSourceManagerPtr() const {
+ assert(SourceMgr && "Compiler instance has no source manager!");
+ return SourceMgr;
+ }
+
void resetAndLeakSourceManager() {
llvm::BuryPointer(SourceMgr.get());
SourceMgr.resetWithoutRelease();
@@ -466,6 +489,11 @@ public:
return *Context;
}
+ IntrusiveRefCntPtr<ASTContext> getASTContextPtr() const {
+ assert(Context && "Compiler instance has no AST context!");
+ return Context;
+ }
+
void resetAndLeakASTContext() {
llvm::BuryPointer(Context.get());
Context.resetWithoutRelease();
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h
index 254f048ed3c7..1dbd1eda62b3 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h
@@ -241,6 +241,17 @@ public:
/// This is a (less-efficient) wrapper over generateCC1CommandLine().
std::vector<std::string> getCC1CommandLine() const;
+ /// Check that \p Args can be parsed and re-serialized without change,
+ /// emiting diagnostics for any differences.
+ ///
+ /// This check is only suitable for command-lines that are expected to already
+ /// be canonical.
+ ///
+ /// \return false if there are any errors.
+ static bool checkCC1RoundTrip(ArrayRef<const char *> Args,
+ DiagnosticsEngine &Diags,
+ const char *Argv0 = nullptr);
+
/// Reset all of the options that are not considered when building a
/// module.
void resetNonModularOptions();
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/DependencyOutputOptions.h b/contrib/llvm-project/clang/include/clang/Frontend/DependencyOutputOptions.h
index e0f445bb5970..e4b26d92647d 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/DependencyOutputOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/DependencyOutputOptions.h
@@ -34,6 +34,8 @@ enum ExtraDepKind {
class DependencyOutputOptions {
public:
unsigned IncludeSystemHeaders : 1; ///< Include system header dependencies.
+ unsigned
+ CanonicalSystemHeaders : 1; ///< canonicalize system header dependencies.
unsigned ShowHeaderIncludes : 1; ///< Show header inclusions (-H).
unsigned UsePhonyTargets : 1; ///< Include phony targets for each
/// dependency, which can avoid some 'make'
@@ -74,9 +76,6 @@ public:
/// target.
std::vector<std::pair<std::string, ExtraDepKind>> ExtraDeps;
- /// In /showIncludes mode, pretend the main TU is a header with this name.
- std::string ShowIncludesPretendHeader;
-
/// The file to write GraphViz-formatted header dependencies to.
std::string DOTOutputFile;
@@ -85,10 +84,11 @@ public:
public:
DependencyOutputOptions()
- : IncludeSystemHeaders(0), ShowHeaderIncludes(0), UsePhonyTargets(0),
- AddMissingHeaderDeps(0), IncludeModuleFiles(0),
- ShowSkippedHeaderIncludes(0), HeaderIncludeFormat(HIFMT_Textual),
- HeaderIncludeFiltering(HIFIL_None) {}
+ : IncludeSystemHeaders(0), CanonicalSystemHeaders(0),
+ ShowHeaderIncludes(0), UsePhonyTargets(0), AddMissingHeaderDeps(0),
+ IncludeModuleFiles(0), ShowSkippedHeaderIncludes(0),
+ HeaderIncludeFormat(HIFMT_Textual), HeaderIncludeFiltering(HIFIL_None) {
+ }
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h b/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h
index 9e6ed1ace190..3940e00eeb8d 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h
@@ -177,9 +177,8 @@ public:
/// Dump information about the given module file, to be used for
/// basic debugging and discovery.
class DumpModuleInfoAction : public ASTFrontendAction {
-public:
// Allow other tools (ex lldb) to direct output for their use.
- llvm::raw_ostream *OutputStream = nullptr;
+ std::shared_ptr<llvm::raw_ostream> OutputStream;
protected:
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
@@ -188,6 +187,9 @@ protected:
void ExecuteAction() override;
public:
+ DumpModuleInfoAction() = default;
+ explicit DumpModuleInfoAction(std::shared_ptr<llvm::raw_ostream> Out)
+ : OutputStream(Out) {}
bool hasPCHSupport() const override { return false; }
bool hasASTFileSupport() const override { return true; }
bool hasIRSupport() const override { return false; }
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h b/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h
index 6efe3cdd5802..3132c11705d3 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h
@@ -278,12 +278,11 @@ public:
/// Show frontend performance metrics and statistics.
unsigned ShowStats : 1;
+ unsigned AppendStats : 1;
+
/// print the supported cpus for the current target
unsigned PrintSupportedCPUs : 1;
- /// Output time trace profile.
- unsigned TimeTrace : 1;
-
/// Show the -version text.
unsigned ShowVersion : 1;
@@ -453,8 +452,15 @@ public:
std::string ProductName;
// Currently this is only used as part of the `-extract-api` action.
- /// The file providing a list of APIs to ignore when extracting documentation
- std::string ExtractAPIIgnoresFile;
+ // A comma seperated list of files providing a list of APIs to
+ // ignore when extracting documentation.
+ std::vector<std::string> ExtractAPIIgnoresFileList;
+
+ // Currently this is only used as part of the `-emit-symbol-graph`
+ // action.
+ // Location of output directory where symbol graph information would
+ // be dumped
+ std::string SymbolGraphOutputDir;
/// Args to pass to the plugins
std::map<std::string, std::vector<std::string>> PluginArgs;
@@ -510,7 +516,7 @@ public:
public:
FrontendOptions()
: DisableFree(false), RelocatablePCH(false), ShowHelp(false),
- ShowStats(false), TimeTrace(false), ShowVersion(false),
+ ShowStats(false), AppendStats(false), ShowVersion(false),
FixWhatYouCan(false), FixOnlyWarnings(false), FixAndRecompile(false),
FixToTemporaries(false), ARCMTMigrateEmitARCErrors(false),
SkipFunctionBodies(false), UseGlobalModuleIndex(true),
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/LayoutOverrideSource.h b/contrib/llvm-project/clang/include/clang/Frontend/LayoutOverrideSource.h
index ea1611470a76..c6e2d7311183 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/LayoutOverrideSource.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/LayoutOverrideSource.h
@@ -30,6 +30,12 @@ namespace clang {
/// The alignment of the record.
uint64_t Align;
+ /// The offsets of non-virtual base classes in the record.
+ SmallVector<CharUnits, 8> BaseOffsets;
+
+ /// The offsets of virtual base classes in the record.
+ SmallVector<CharUnits, 8> VBaseOffsets;
+
/// The offsets of the fields, in source order.
SmallVector<uint64_t, 8> FieldOffsets;
};
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h b/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h
index db9f33ae5961..798870bf24fe 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h
@@ -75,6 +75,10 @@ public:
/// \param StoreInMemory Store PCH in memory. If false, PCH will be stored in
/// a temporary file.
///
+ /// \param StoragePath The path to a directory, in which to create a temporary
+ /// file to store PCH in. If empty, the default system temporary directory is
+ /// used. This parameter is ignored if \p StoreInMemory is true.
+ ///
/// \param Callbacks A set of callbacks to be executed when building
/// the preamble.
static llvm::ErrorOr<PrecompiledPreamble>
@@ -83,7 +87,8 @@ public:
DiagnosticsEngine &Diagnostics,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- bool StoreInMemory, PreambleCallbacks &Callbacks);
+ bool StoreInMemory, StringRef StoragePath,
+ PreambleCallbacks &Callbacks);
PrecompiledPreamble(PrecompiledPreamble &&);
PrecompiledPreamble &operator=(PrecompiledPreamble &&);
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/TextDiagnostic.h b/contrib/llvm-project/clang/include/clang/Frontend/TextDiagnostic.h
index a2eec46beccd..7eb0ab0cdc9b 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/TextDiagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/TextDiagnostic.h
@@ -103,7 +103,8 @@ private:
SmallVectorImpl<CharSourceRange> &Ranges,
ArrayRef<FixItHint> Hints);
- void emitSnippet(StringRef SourceLine);
+ void emitSnippet(StringRef SourceLine, unsigned MaxLineNoDisplayWidth,
+ unsigned LineNo);
void emitParseableFixits(ArrayRef<FixItHint> Hints, const SourceManager &SM);
};
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/Utils.h b/contrib/llvm-project/clang/include/clang/Frontend/Utils.h
index 143cf4359f00..8300e45d15fe 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/Utils.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/Utils.h
@@ -41,6 +41,7 @@ class ExternalSemaSource;
class FrontendOptions;
class PCHContainerReader;
class Preprocessor;
+class FileManager;
class PreprocessorOptions;
class PreprocessorOutputOptions;
@@ -79,11 +80,14 @@ public:
/// Return true if system files should be passed to sawDependency().
virtual bool needSystemDependencies() { return false; }
+ /// Return true if system files should be canonicalized.
+ virtual bool shouldCanonicalizeSystemDependencies() { return false; }
+
/// Add a dependency \p Filename if it has not been seen before and
/// sawDependency() returns true.
virtual void maybeAddDependency(StringRef Filename, bool FromModule,
bool IsSystem, bool IsModuleFile,
- bool IsMissing);
+ FileManager *FileMgr, bool IsMissing);
protected:
/// Return true if the filename was added to the list of dependencies, false
@@ -112,6 +116,10 @@ public:
bool sawDependency(StringRef Filename, bool FromModule, bool IsSystem,
bool IsModuleFile, bool IsMissing) final;
+ bool shouldCanonicalizeSystemDependencies() override {
+ return CanonicalSystemHeaders;
+ }
+
protected:
void outputDependencyFile(llvm::raw_ostream &OS);
@@ -121,6 +129,7 @@ private:
std::string OutputFile;
std::vector<std::string> Targets;
bool IncludeSystemHeaders;
+ bool CanonicalSystemHeaders;
bool PhonyTarget;
bool AddMissingHeaderDeps;
bool SeenMissingHeader;
diff --git a/contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h b/contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h
index fd22af976613..43573fb1a4b8 100644
--- a/contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h
+++ b/contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h
@@ -14,13 +14,15 @@
#ifndef LLVM_CLANG_INTERPRETER_INTERPRETER_H
#define LLVM_CLANG_INTERPRETER_INTERPRETER_H
-#include "clang/Interpreter/PartialTranslationUnit.h"
-
+#include "clang/AST/Decl.h"
#include "clang/AST/GlobalDecl.h"
+#include "clang/Interpreter/PartialTranslationUnit.h"
+#include "clang/Interpreter/Value.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/Support/Error.h"
-
#include <memory>
#include <vector>
@@ -28,7 +30,7 @@ namespace llvm {
namespace orc {
class LLJIT;
class ThreadSafeContext;
-}
+} // namespace orc
} // namespace llvm
namespace clang {
@@ -40,8 +42,34 @@ class IncrementalParser;
/// Create a pre-configured \c CompilerInstance for incremental processing.
class IncrementalCompilerBuilder {
public:
+ IncrementalCompilerBuilder() {}
+
+ void SetCompilerArgs(const std::vector<const char *> &Args) {
+ UserArgs = Args;
+ }
+
+ // General C++
+ llvm::Expected<std::unique_ptr<CompilerInstance>> CreateCpp();
+
+ // Offload options
+ void SetOffloadArch(llvm::StringRef Arch) { OffloadArch = Arch; };
+
+ // CUDA specific
+ void SetCudaSDK(llvm::StringRef path) { CudaSDKPath = path; };
+
+ llvm::Expected<std::unique_ptr<CompilerInstance>> CreateCudaHost();
+ llvm::Expected<std::unique_ptr<CompilerInstance>> CreateCudaDevice();
+
+private:
static llvm::Expected<std::unique_ptr<CompilerInstance>>
create(std::vector<const char *> &ClangArgv);
+
+ llvm::Expected<std::unique_ptr<CompilerInstance>> createCuda(bool device);
+
+ std::vector<const char *> UserArgs;
+
+ llvm::StringRef OffloadArch;
+ llvm::StringRef CudaSDKPath;
};
/// Provides top-level interfaces for incremental compilation and execution.
@@ -50,41 +78,72 @@ class Interpreter {
std::unique_ptr<IncrementalParser> IncrParser;
std::unique_ptr<IncrementalExecutor> IncrExecutor;
+ // An optional parser for CUDA offloading
+ std::unique_ptr<IncrementalParser> DeviceParser;
+
Interpreter(std::unique_ptr<CompilerInstance> CI, llvm::Error &Err);
+ llvm::Error CreateExecutor();
+ unsigned InitPTUSize = 0;
+
+ // This member holds the last result of the value printing. It's a class
+ // member because we might want to access it after more inputs. If no value
+ // printing happens, it's in an invalid state.
+ Value LastValue;
+
public:
~Interpreter();
static llvm::Expected<std::unique_ptr<Interpreter>>
create(std::unique_ptr<CompilerInstance> CI);
+ static llvm::Expected<std::unique_ptr<Interpreter>>
+ createWithCUDA(std::unique_ptr<CompilerInstance> CI,
+ std::unique_ptr<CompilerInstance> DCI);
+ const ASTContext &getASTContext() const;
+ ASTContext &getASTContext();
const CompilerInstance *getCompilerInstance() const;
- const llvm::orc::LLJIT *getExecutionEngine() const;
+ llvm::Expected<llvm::orc::LLJIT &> getExecutionEngine();
+
llvm::Expected<PartialTranslationUnit &> Parse(llvm::StringRef Code);
llvm::Error Execute(PartialTranslationUnit &T);
- llvm::Error ParseAndExecute(llvm::StringRef Code) {
- auto PTU = Parse(Code);
- if (!PTU)
- return PTU.takeError();
- if (PTU->TheModule)
- return Execute(*PTU);
- return llvm::Error::success();
- }
+ llvm::Error ParseAndExecute(llvm::StringRef Code, Value *V = nullptr);
+ llvm::Expected<llvm::orc::ExecutorAddr> CompileDtorCall(CXXRecordDecl *CXXRD);
/// Undo N previous incremental inputs.
llvm::Error Undo(unsigned N = 1);
- /// \returns the \c JITTargetAddress of a \c GlobalDecl. This interface uses
+ /// Link a dynamic library
+ llvm::Error LoadDynamicLibrary(const char *name);
+
+ /// \returns the \c ExecutorAddr of a \c GlobalDecl. This interface uses
/// the CodeGenModule's internal mangling cache to avoid recomputing the
/// mangled name.
- llvm::Expected<llvm::JITTargetAddress> getSymbolAddress(GlobalDecl GD) const;
+ llvm::Expected<llvm::orc::ExecutorAddr> getSymbolAddress(GlobalDecl GD) const;
- /// \returns the \c JITTargetAddress of a given name as written in the IR.
- llvm::Expected<llvm::JITTargetAddress>
+ /// \returns the \c ExecutorAddr of a given name as written in the IR.
+ llvm::Expected<llvm::orc::ExecutorAddr>
getSymbolAddress(llvm::StringRef IRName) const;
- /// \returns the \c JITTargetAddress of a given name as written in the object
+ /// \returns the \c ExecutorAddr of a given name as written in the object
/// file.
- llvm::Expected<llvm::JITTargetAddress>
+ llvm::Expected<llvm::orc::ExecutorAddr>
getSymbolAddressFromLinkerName(llvm::StringRef LinkerName) const;
+
+ enum InterfaceKind { NoAlloc, WithAlloc, CopyArray };
+
+ const llvm::SmallVectorImpl<Expr *> &getValuePrintingInfo() const {
+ return ValuePrintingInfo;
+ }
+
+ Expr *SynthesizeExpr(Expr *E);
+
+private:
+ size_t getEffectivePTUSize() const;
+
+ bool FindRuntimeInterface();
+
+ llvm::DenseMap<CXXRecordDecl *, llvm::orc::ExecutorAddr> Dtors;
+
+ llvm::SmallVector<Expr *, 3> ValuePrintingInfo;
};
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Interpreter/Value.h b/contrib/llvm-project/clang/include/clang/Interpreter/Value.h
new file mode 100644
index 000000000000..c380cd91550d
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Interpreter/Value.h
@@ -0,0 +1,208 @@
+//===--- Value.h - Definition of interpreter value --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Value is a lightweight struct that is used for carrying execution results in
+// clang-repl. It's a special runtime that acts like a messager between compiled
+// code and interpreted code. This makes it possible to exchange interesting
+// information between the compiled & interpreted world.
+//
+// A typical usage is like the below:
+//
+// Value V;
+// Interp.ParseAndExecute("int x = 42;");
+// Interp.ParseAndExecute("x", &V);
+// V.getType(); // <-- Yields a clang::QualType.
+// V.getInt(); // <-- Yields 42.
+//
+// The current design is still highly experimental and nobody should rely on the
+// API being stable because we're hopefully going to make significant changes to
+// it in the relatively near future. For example, Value also intends to be used
+// as an exchange token for JIT support enabling remote execution on the embed
+// devices where the JIT infrastructure cannot fit. To support that we will need
+// to split the memory storage in a different place and perhaps add a resource
+// header is similar to intrinsics headers which have stricter performance
+// constraints.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INTERPRETER_VALUE_H
+#define LLVM_CLANG_INTERPRETER_VALUE_H
+
+#include "llvm/Support/Compiler.h"
+#include <cstdint>
+
+// NOTE: Since the REPL itself could also include this runtime, extreme caution
+// should be taken when MAKING CHANGES to this file, especially when INCLUDE NEW
+// HEADERS, like <string>, <memory> and etc. (That pulls a large number of
+// tokens and will impact the runtime performance of the REPL)
+
+namespace llvm {
+class raw_ostream;
+
+} // namespace llvm
+
+namespace clang {
+
+class ASTContext;
+class Interpreter;
+class QualType;
+
+#if defined(_WIN32)
+// REPL_EXTERNAL_VISIBILITY are symbols that we need to be able to locate
+// at runtime. On Windows, this requires them to be exported from any of the
+// modules loaded at runtime. Marking them as dllexport achieves this; both
+// for DLLs (that normally export symbols as part of their interface) and for
+// EXEs (that normally don't export anything).
+// For a build with libclang-cpp.dll, this doesn't make any difference - the
+// functions would have been exported anyway. But for cases when these are
+// statically linked into an EXE, it makes sure that they're exported.
+#define REPL_EXTERNAL_VISIBILITY __declspec(dllexport)
+#elif __has_attribute(visibility)
+#if defined(LLVM_BUILD_LLVM_DYLIB) || defined(LLVM_BUILD_SHARED_LIBS)
+#define REPL_EXTERNAL_VISIBILITY __attribute__((visibility("default")))
+#else
+#define REPL_EXTERNAL_VISIBILITY
+#endif
+#else
+#define REPL_EXTERNAL_VISIBILITY
+#endif
+
+#define REPL_BUILTIN_TYPES \
+ X(bool, Bool) \
+ X(char, Char_S) \
+ X(signed char, SChar) \
+ X(unsigned char, UChar) \
+ X(short, Short) \
+ X(unsigned short, UShort) \
+ X(int, Int) \
+ X(unsigned int, UInt) \
+ X(long, Long) \
+ X(unsigned long, ULong) \
+ X(long long, LongLong) \
+ X(unsigned long long, ULongLong) \
+ X(float, Float) \
+ X(double, Double) \
+ X(long double, LongDouble)
+
+class REPL_EXTERNAL_VISIBILITY Value {
+ union Storage {
+#define X(type, name) type m_##name;
+ REPL_BUILTIN_TYPES
+#undef X
+ void *m_Ptr;
+ };
+
+public:
+ enum Kind {
+#define X(type, name) K_##name,
+ REPL_BUILTIN_TYPES
+#undef X
+
+ K_Void,
+ K_PtrOrObj,
+ K_Unspecified
+ };
+
+ Value() = default;
+ Value(Interpreter *In, void *Ty);
+ Value(const Value &RHS);
+ Value(Value &&RHS) noexcept;
+ Value &operator=(const Value &RHS);
+ Value &operator=(Value &&RHS) noexcept;
+ ~Value();
+
+ void printType(llvm::raw_ostream &Out) const;
+ void printData(llvm::raw_ostream &Out) const;
+ void print(llvm::raw_ostream &Out) const;
+ void dump() const;
+ void clear();
+
+ ASTContext &getASTContext();
+ const ASTContext &getASTContext() const;
+ Interpreter &getInterpreter();
+ const Interpreter &getInterpreter() const;
+ QualType getType() const;
+
+ bool isValid() const { return ValueKind != K_Unspecified; }
+ bool isVoid() const { return ValueKind == K_Void; }
+ bool hasValue() const { return isValid() && !isVoid(); }
+ bool isManuallyAlloc() const { return IsManuallyAlloc; }
+ Kind getKind() const { return ValueKind; }
+ void setKind(Kind K) { ValueKind = K; }
+ void setOpaqueType(void *Ty) { OpaqueType = Ty; }
+
+ void *getPtr() const;
+ void setPtr(void *Ptr) { Data.m_Ptr = Ptr; }
+
+#define X(type, name) \
+ void set##name(type Val) { Data.m_##name = Val; } \
+ type get##name() const { return Data.m_##name; }
+ REPL_BUILTIN_TYPES
+#undef X
+
+ /// \brief Get the value with cast.
+ //
+ /// Get the value cast to T. This is similar to reinterpret_cast<T>(value),
+ /// casting the value of builtins (except void), enums and pointers.
+ /// Values referencing an object are treated as pointers to the object.
+ template <typename T> T convertTo() const {
+ return convertFwd<T>::cast(*this);
+ }
+
+protected:
+ bool isPointerOrObjectType() const { return ValueKind == K_PtrOrObj; }
+
+ /// \brief Get to the value with type checking casting the underlying
+ /// stored value to T.
+ template <typename T> T as() const {
+ switch (ValueKind) {
+ default:
+ return T();
+#define X(type, name) \
+ case Value::K_##name: \
+ return (T)Data.m_##name;
+ REPL_BUILTIN_TYPES
+#undef X
+ }
+ }
+
+ // Allow convertTo to be partially specialized.
+ template <typename T> struct convertFwd {
+ static T cast(const Value &V) {
+ if (V.isPointerOrObjectType())
+ return (T)(uintptr_t)V.as<void *>();
+ if (!V.isValid() || V.isVoid()) {
+ return T();
+ }
+ return V.as<T>();
+ }
+ };
+
+ template <typename T> struct convertFwd<T *> {
+ static T *cast(const Value &V) {
+ if (V.isPointerOrObjectType())
+ return (T *)(uintptr_t)V.as<void *>();
+ return nullptr;
+ }
+ };
+
+ Interpreter *Interp = nullptr;
+ void *OpaqueType = nullptr;
+ Storage Data;
+ Kind ValueKind = K_Unspecified;
+ bool IsManuallyAlloc = false;
+};
+
+template <> inline void *Value::as() const {
+ if (isPointerOrObjectType())
+ return Data.m_Ptr;
+ return (void *)as<uintptr_t>();
+}
+
+} // namespace clang
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesScanner.h b/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesScanner.h
index 529b93aa0ffb..0e115906fbfe 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesScanner.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesScanner.h
@@ -68,6 +68,7 @@ enum DirectiveKind : uint8_t {
pp_pragma_push_macro,
pp_pragma_pop_macro,
pp_pragma_include_alias,
+ pp_pragma_system_header,
pp_include_next,
pp_if,
pp_ifdef,
diff --git a/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h b/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h
index 76e3e786ff07..1bd14283dc53 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h
@@ -277,6 +277,9 @@ class HeaderSearch {
/// Keeps track of each lookup performed by LookupFile.
struct LookupFileCacheInfo {
+ // The requesting module for the lookup we cached.
+ const Module *RequestingModule = nullptr;
+
/// Starting search directory iterator that the cached search was performed
/// from. If there is a hit and this value doesn't match the current query,
/// the cache has to be ignored.
@@ -292,7 +295,9 @@ class HeaderSearch {
/// Default constructor -- Initialize all members with zero.
LookupFileCacheInfo() = default;
- void reset(ConstSearchDirIterator NewStartIt) {
+ void reset(const Module *NewRequestingModule,
+ ConstSearchDirIterator NewStartIt) {
+ RequestingModule = NewRequestingModule;
StartIt = NewStartIt;
MappedName = nullptr;
}
@@ -482,7 +487,7 @@ public:
OptionalFileEntryRef LookupFile(
StringRef Filename, SourceLocation IncludeLoc, bool isAngled,
ConstSearchDirIterator FromDir, ConstSearchDirIterator *CurDir,
- ArrayRef<std::pair<const FileEntry *, const DirectoryEntry *>> Includers,
+ ArrayRef<std::pair<const FileEntry *, DirectoryEntryRef>> Includers,
SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule,
bool *IsMapped, bool *IsFrameworkFound, bool SkipCache = false,
@@ -553,10 +558,10 @@ public:
/// macro.
///
/// This routine does not consider the effect of \#import
- bool isFileMultipleIncludeGuarded(const FileEntry *File);
+ bool isFileMultipleIncludeGuarded(const FileEntry *File) const;
/// Determine whether the given file is known to have ever been \#imported.
- bool hasFileBeenImported(const FileEntry *File) {
+ bool hasFileBeenImported(const FileEntry *File) const {
const HeaderFileInfo *FI = getExistingFileInfo(File);
return FI && FI->isImport;
}
@@ -637,9 +642,9 @@ public:
bool AllowExtraModuleMapSearch = false);
/// Try to find a module map file in the given directory, returning
- /// \c nullptr if none is found.
- const FileEntry *lookupModuleMapFile(const DirectoryEntry *Dir,
- bool IsFramework);
+ /// \c nullopt if none is found.
+ OptionalFileEntryRef lookupModuleMapFile(DirectoryEntryRef Dir,
+ bool IsFramework);
/// Determine whether there is a module map that may map the header
/// with the given file name to a (sub)module.
@@ -659,15 +664,23 @@ public:
///
/// \param File The header that we wish to map to a module.
/// \param AllowTextual Whether we want to find textual headers too.
- ModuleMap::KnownHeader findModuleForHeader(const FileEntry *File,
+ ModuleMap::KnownHeader findModuleForHeader(FileEntryRef File,
bool AllowTextual = false,
bool AllowExcluded = false) const;
/// Retrieve all the modules corresponding to the given file.
///
+ /// \param AllowCreation Whether to allow inference of a new submodule, or to
+ /// only return existing known modules.
+ ///
/// \ref findModuleForHeader should typically be used instead of this.
ArrayRef<ModuleMap::KnownHeader>
- findAllModulesForHeader(const FileEntry *File) const;
+ findAllModulesForHeader(FileEntryRef File) const;
+
+ /// Like \ref findAllModulesForHeader, but do not attempt to infer module
+ /// ownership from umbrella headers if we've not already done so.
+ ArrayRef<ModuleMap::KnownHeader>
+ findResolvedModulesForHeader(const FileEntry *File) const;
/// Read the contents of the given module map file.
///
@@ -682,8 +695,8 @@ public:
/// used to resolve paths within the module (this is required when
/// building the module from preprocessed source).
/// \returns true if an error occurred, false otherwise.
- bool loadModuleMapFile(const FileEntry *File, bool IsSystem,
- FileID ID = FileID(), unsigned *Offset = nullptr,
+ bool loadModuleMapFile(FileEntryRef File, bool IsSystem, FileID ID = FileID(),
+ unsigned *Offset = nullptr,
StringRef OriginalModuleMapFile = StringRef());
/// Collect the set of all known, top-level modules.
@@ -752,8 +765,7 @@ private:
///
/// \return \c true if the file can be used, \c false if we are not permitted to
/// find this file due to requirements from \p RequestingModule.
- bool findUsableModuleForHeader(const FileEntry *File,
- const DirectoryEntry *Root,
+ bool findUsableModuleForHeader(FileEntryRef File, const DirectoryEntry *Root,
Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule,
bool IsSystemHeaderDir);
@@ -764,7 +776,7 @@ private:
/// \return \c true if the file can be used, \c false if we are not permitted to
/// find this file due to requirements from \p RequestingModule.
bool findUsableModuleForFrameworkHeader(
- const FileEntry *File, StringRef FrameworkName, Module *RequestingModule,
+ FileEntryRef File, StringRef FrameworkName, Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule, bool IsSystemFramework);
/// Look up the file with the specified name and determine its owning
@@ -863,7 +875,7 @@ public:
/// path is relative to a system header directory.
std::string suggestPathToFileForDiagnostics(const FileEntry *File,
llvm::StringRef MainFile,
- bool *IsSystem = nullptr);
+ bool *IsSystem = nullptr) const;
/// Suggest a path by which the specified file could be found, for use in
/// diagnostics to suggest a #include. Returned path will only contain forward
@@ -877,7 +889,7 @@ public:
std::string suggestPathToFileForDiagnostics(llvm::StringRef File,
llvm::StringRef WorkingDir,
llvm::StringRef MainFile,
- bool *IsSystem = nullptr);
+ bool *IsSystem = nullptr) const;
void PrintStats();
@@ -900,8 +912,7 @@ private:
LMM_InvalidModuleMap
};
- LoadModuleMapResult loadModuleMapFileImpl(const FileEntry *File,
- bool IsSystem,
+ LoadModuleMapResult loadModuleMapFileImpl(FileEntryRef File, bool IsSystem,
DirectoryEntryRef Dir,
FileID ID = FileID(),
unsigned *Offset = nullptr);
diff --git a/contrib/llvm-project/clang/include/clang/Lex/Lexer.h b/contrib/llvm-project/clang/include/clang/Lex/Lexer.h
index 8c2923b0150a..98d34b783f08 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/Lexer.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/Lexer.h
@@ -551,7 +551,7 @@ public:
/// Finds the token that comes right after the given location.
///
- /// Returns the next token, or none if the location is inside a macro.
+ /// Returns the next token, or std::nullopt if the location is inside a macro.
static std::optional<Token> findNextToken(SourceLocation Loc,
const SourceManager &SM,
const LangOptions &LangOpts);
diff --git a/contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h b/contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h
index fd237c2c9cd8..0a45f32326f4 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h
@@ -63,7 +63,7 @@ public:
bool isUnsigned : 1;
bool isLong : 1; // This is *not* set for long long.
bool isLongLong : 1;
- bool isSizeT : 1; // 1z, 1uz (C++2b)
+ bool isSizeT : 1; // 1z, 1uz (C++23)
bool isHalf : 1; // 1.0h
bool isFloat : 1; // 1.0f
bool isImaginary : 1; // 1.0i
@@ -212,6 +212,11 @@ public:
}
};
+enum class StringLiteralEvalMethod {
+ Evaluated,
+ Unevaluated,
+};
+
/// StringLiteralParser - This decodes string escape characters and performs
/// wide string analysis and Translation Phase #6 (concatenation of string
/// literals) (C99 5.1.1.2p1).
@@ -230,20 +235,23 @@ class StringLiteralParser {
SmallString<32> UDSuffixBuf;
unsigned UDSuffixToken;
unsigned UDSuffixOffset;
+ StringLiteralEvalMethod EvalMethod;
+
public:
- StringLiteralParser(ArrayRef<Token> StringToks,
- Preprocessor &PP);
- StringLiteralParser(ArrayRef<Token> StringToks,
- const SourceManager &sm, const LangOptions &features,
- const TargetInfo &target,
+ StringLiteralParser(ArrayRef<Token> StringToks, Preprocessor &PP,
+ StringLiteralEvalMethod StringMethod =
+ StringLiteralEvalMethod::Evaluated);
+ StringLiteralParser(ArrayRef<Token> StringToks, const SourceManager &sm,
+ const LangOptions &features, const TargetInfo &target,
DiagnosticsEngine *diags = nullptr)
- : SM(sm), Features(features), Target(target), Diags(diags),
- MaxTokenLength(0), SizeBound(0), CharByteWidth(0), Kind(tok::unknown),
- ResultPtr(ResultBuf.data()), hadError(false), Pascal(false) {
+ : SM(sm), Features(features), Target(target), Diags(diags),
+ MaxTokenLength(0), SizeBound(0), CharByteWidth(0), Kind(tok::unknown),
+ ResultPtr(ResultBuf.data()),
+ EvalMethod(StringLiteralEvalMethod::Evaluated), hadError(false),
+ Pascal(false) {
init(StringToks);
}
-
bool hadError;
bool Pascal;
@@ -269,6 +277,9 @@ public:
bool isUTF16() const { return Kind == tok::utf16_string_literal; }
bool isUTF32() const { return Kind == tok::utf32_string_literal; }
bool isPascal() const { return Pascal; }
+ bool isUnevaluated() const {
+ return EvalMethod == StringLiteralEvalMethod::Unevaluated;
+ }
StringRef getUDSuffix() const { return UDSuffixBuf; }
diff --git a/contrib/llvm-project/clang/include/clang/Lex/MacroInfo.h b/contrib/llvm-project/clang/include/clang/Lex/MacroInfo.h
index 75c9ca70dfbb..00c1c3866bbd 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/MacroInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/MacroInfo.h
@@ -206,7 +206,7 @@ public:
void setIsGNUVarargs() { IsGNUVarargs = true; }
bool isC99Varargs() const { return IsC99Varargs; }
bool isGNUVarargs() const { return IsGNUVarargs; }
- bool isVariadic() const { return IsC99Varargs | IsGNUVarargs; }
+ bool isVariadic() const { return IsC99Varargs || IsGNUVarargs; }
/// Return true if this macro requires processing before expansion.
///
diff --git a/contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h b/contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h
index f9eb0be538c0..8f3f234036d2 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h
@@ -67,10 +67,8 @@ public:
/// Called when an umbrella header is added during module map parsing.
///
- /// \param FileMgr FileManager instance
/// \param Header The umbrella header to collect.
- virtual void moduleMapAddUmbrellaHeader(FileManager *FileMgr,
- const FileEntry *Header) {}
+ virtual void moduleMapAddUmbrellaHeader(FileEntryRef Header) {}
};
class ModuleMap {
@@ -84,7 +82,7 @@ class ModuleMap {
/// The directory used for Clang-supplied, builtin include headers,
/// such as "stdint.h".
- const DirectoryEntry *BuiltinIncludeDir = nullptr;
+ OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr BuiltinIncludeDir;
/// Language options used to parse the module map itself.
///
@@ -366,22 +364,22 @@ private:
///
/// \param IntermediateDirs On success, contains the set of directories
/// searched before finding \p File.
- KnownHeader findHeaderInUmbrellaDirs(const FileEntry *File,
- SmallVectorImpl<const DirectoryEntry *> &IntermediateDirs);
+ KnownHeader findHeaderInUmbrellaDirs(
+ FileEntryRef File, SmallVectorImpl<DirectoryEntryRef> &IntermediateDirs);
/// Given that \p File is not in the Headers map, look it up within
/// umbrella directories and find or create a module for it.
- KnownHeader findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File);
+ KnownHeader findOrCreateModuleForHeaderInUmbrellaDir(FileEntryRef File);
/// A convenience method to determine if \p File is (possibly nested)
/// in an umbrella directory.
- bool isHeaderInUmbrellaDirs(const FileEntry *File) {
- SmallVector<const DirectoryEntry *, 2> IntermediateDirs;
+ bool isHeaderInUmbrellaDirs(FileEntryRef File) {
+ SmallVector<DirectoryEntryRef, 2> IntermediateDirs;
return static_cast<bool>(findHeaderInUmbrellaDirs(File, IntermediateDirs));
}
- Module *inferFrameworkModule(const DirectoryEntry *FrameworkDir,
- Attributes Attrs, Module *Parent);
+ Module *inferFrameworkModule(DirectoryEntryRef FrameworkDir, Attributes Attrs,
+ Module *Parent);
public:
/// Construct a new module map.
@@ -407,7 +405,7 @@ public:
/// Set the directory that contains Clang-supplied include
/// files, such as our stdarg.h or tgmath.h.
- void setBuiltinIncludeDir(const DirectoryEntry *Dir) {
+ void setBuiltinIncludeDir(DirectoryEntryRef Dir) {
BuiltinIncludeDir = Dir;
}
@@ -439,8 +437,7 @@ public:
/// \returns The module KnownHeader, which provides the module that owns the
/// given header file. The KnownHeader is default constructed to indicate
/// that no module owns this header file.
- KnownHeader findModuleForHeader(const FileEntry *File,
- bool AllowTextual = false,
+ KnownHeader findModuleForHeader(FileEntryRef File, bool AllowTextual = false,
bool AllowExcluded = false);
/// Retrieve all the modules that contain the given header file. Note that
@@ -450,7 +447,7 @@ public:
///
/// Typically, \ref findModuleForHeader should be used instead, as it picks
/// the preferred module for the header.
- ArrayRef<KnownHeader> findAllModulesForHeader(const FileEntry *File);
+ ArrayRef<KnownHeader> findAllModulesForHeader(FileEntryRef File);
/// Like \ref findAllModulesForHeader, but do not attempt to infer module
/// ownership from umbrella headers if we've not already done so.
@@ -490,11 +487,11 @@ public:
/// Determine whether the given header is part of a module
/// marked 'unavailable'.
- bool isHeaderInUnavailableModule(const FileEntry *Header) const;
+ bool isHeaderInUnavailableModule(FileEntryRef Header) const;
/// Determine whether the given header is unavailable as part
/// of the specified module.
- bool isHeaderUnavailableInModule(const FileEntry *Header,
+ bool isHeaderUnavailableInModule(FileEntryRef Header,
const Module *RequestingModule) const;
/// Retrieve a module with the given name.
@@ -553,11 +550,18 @@ public:
/// parent.
Module *createGlobalModuleFragmentForModuleUnit(SourceLocation Loc,
Module *Parent = nullptr);
+ Module *createImplicitGlobalModuleFragmentForModuleUnit(
+ SourceLocation Loc, bool IsExported, Module *Parent = nullptr);
/// Create a global module fragment for a C++ module interface unit.
Module *createPrivateModuleFragmentForInterfaceUnit(Module *Parent,
SourceLocation Loc);
+ /// Create a new C++ module with the specified kind, and reparent any pending
+ /// global module fragment(s) to it.
+ Module *createModuleUnitWithKind(SourceLocation Loc, StringRef Name,
+ Module::ModuleKind Kind);
+
/// Create a new module for a C++ module interface unit.
/// The module must not already exist, and will be configured for the current
/// compilation.
@@ -567,14 +571,21 @@ public:
/// \returns The newly-created module.
Module *createModuleForInterfaceUnit(SourceLocation Loc, StringRef Name);
+ /// Create a new module for a C++ module implementation unit.
+ /// The interface module for this implementation (implicitly imported) must
+ /// exist and be loaded and present in the modules map.
+ ///
+ /// \returns The newly-created module.
+ Module *createModuleForImplementationUnit(SourceLocation Loc, StringRef Name);
+
/// Create a C++20 header unit.
Module *createHeaderUnit(SourceLocation Loc, StringRef Name,
Module::Header H);
/// Infer the contents of a framework module map from the given
/// framework directory.
- Module *inferFrameworkModule(const DirectoryEntry *FrameworkDir,
- bool IsSystem, Module *Parent);
+ Module *inferFrameworkModule(DirectoryEntryRef FrameworkDir, bool IsSystem,
+ Module *Parent);
/// Create a new top-level module that is shadowed by
/// \p ShadowingModule.
@@ -674,17 +685,16 @@ public:
/// false otherwise.
bool resolveConflicts(Module *Mod, bool Complain);
- /// Sets the umbrella header of the given module to the given
- /// header.
- void setUmbrellaHeader(Module *Mod, FileEntryRef UmbrellaHeader,
- const Twine &NameAsWritten,
- const Twine &PathRelativeToRootModuleDirectory);
+ /// Sets the umbrella header of the given module to the given header.
+ void
+ setUmbrellaHeaderAsWritten(Module *Mod, FileEntryRef UmbrellaHeader,
+ const Twine &NameAsWritten,
+ const Twine &PathRelativeToRootModuleDirectory);
- /// Sets the umbrella directory of the given module to the given
- /// directory.
- void setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir,
- const Twine &NameAsWritten,
- const Twine &PathRelativeToRootModuleDirectory);
+ /// Sets the umbrella directory of the given module to the given directory.
+ void setUmbrellaDirAsWritten(Module *Mod, DirectoryEntryRef UmbrellaDir,
+ const Twine &NameAsWritten,
+ const Twine &PathRelativeToRootModuleDirectory);
/// Adds this header to the given module.
/// \param Role The role of the header wrt the module.
@@ -712,8 +722,8 @@ public:
///
/// \returns true if an error occurred, false otherwise.
bool parseModuleMapFile(const FileEntry *File, bool IsSystem,
- const DirectoryEntry *HomeDir,
- FileID ID = FileID(), unsigned *Offset = nullptr,
+ DirectoryEntryRef HomeDir, FileID ID = FileID(),
+ unsigned *Offset = nullptr,
SourceLocation ExternModuleLoc = SourceLocation());
/// Dump the contents of the module map, for debugging purposes.
diff --git a/contrib/llvm-project/clang/include/clang/Lex/MultipleIncludeOpt.h b/contrib/llvm-project/clang/include/clang/Lex/MultipleIncludeOpt.h
index 7ceb7e53c75d..8e570226c4b2 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/MultipleIncludeOpt.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/MultipleIncludeOpt.h
@@ -108,6 +108,12 @@ public:
ImmediatelyAfterTopLevelIfndef = false;
}
+  /// SetReadToken - Set the value of 'ReadAnyTokens'. Called to
+  /// override when encountering tokens outside of the include guard that have
+  /// no effect if the file in question is included multiple times (e.g. the
+  /// null directive).
+ void SetReadToken(bool Value) { ReadAnyTokens = Value; }
+
/// ExpandedMacro - When a macro is expanded with this lexer as the current
/// buffer, this method is called to disable the MIOpt if needed.
void ExpandedMacro() { DidMacroExpansion = true; }
diff --git a/contrib/llvm-project/clang/include/clang/Lex/Pragma.h b/contrib/llvm-project/clang/include/clang/Lex/Pragma.h
index cf8cca5414ea..67eca618f6c4 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/Pragma.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/Pragma.h
@@ -123,6 +123,13 @@ public:
PragmaNamespace *getIfNamespace() override { return this; }
};
+/// Destringize a \c _Pragma("") string according to C11 6.10.9.1:
+/// "The string literal is destringized by deleting any encoding prefix,
+/// deleting the leading and trailing double-quotes, replacing each escape
+/// sequence \" by a double-quote, and replacing each escape sequence \\ by a
+/// single backslash."
+void prepare_PragmaString(SmallVectorImpl<char> &StrVal);
+
} // namespace clang
#endif // LLVM_CLANG_LEX_PRAGMA_H
diff --git a/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h b/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
index 322626802eab..9efe439bc5f2 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
@@ -499,7 +499,7 @@ private:
};
public:
- ModuleDeclSeq() : State(NotAModuleDecl) {}
+ ModuleDeclSeq() = default;
void handleExport() {
if (State == NotAModuleDecl)
@@ -586,7 +586,7 @@ private:
}
private:
- ModuleDeclState State;
+ ModuleDeclState State = NotAModuleDecl;
std::string Name;
};
@@ -625,7 +625,7 @@ private:
/// The directory that the main file should be considered to occupy,
/// if it does not correspond to a real file (as happens when building a
/// module).
- const DirectoryEntry *MainFileDir = nullptr;
+ OptionalDirectoryEntryRef MainFileDir;
/// The number of bytes that we will initially skip when entering the
/// main file, along with a flag that indicates whether skipping this number
@@ -729,7 +729,7 @@ private:
/// Only one of CurLexer, or CurTokenLexer will be non-null.
std::unique_ptr<Lexer> CurLexer;
- /// The current top of the stack what we're lexing from
+ /// The current top of the stack that we're lexing from
/// if not expanding a macro.
///
/// This is an alias for CurLexer.
@@ -1486,6 +1486,7 @@ public:
/// Return true if this header has already been included.
bool alreadyIncluded(const FileEntry *File) const {
+ HeaderInfo.getFileInfo(File);
return IncludedFiles.count(File);
}
@@ -2012,9 +2013,7 @@ public:
/// Set the directory in which the main file should be considered
/// to have been found, if it is not a real file.
- void setMainFileDir(const DirectoryEntry *Dir) {
- MainFileDir = Dir;
- }
+ void setMainFileDir(DirectoryEntryRef Dir) { MainFileDir = Dir; }
/// Instruct the preprocessor to skip part of the main source file.
///
@@ -2842,10 +2841,60 @@ public:
const LangOptions &LangOpts,
const TargetInfo &TI);
+ static void processPathToFileName(SmallVectorImpl<char> &FileName,
+ const PresumedLoc &PLoc,
+ const LangOptions &LangOpts,
+ const TargetInfo &TI);
+
private:
void emitMacroDeprecationWarning(const Token &Identifier) const;
void emitRestrictExpansionWarning(const Token &Identifier) const;
void emitFinalMacroWarning(const Token &Identifier, bool IsUndef) const;
+
+ /// This boolean state keeps track if the current scanned token (by this PP)
+ /// is in an "-Wunsafe-buffer-usage" opt-out region. Assuming PP scans a
+ /// translation unit in a linear order.
+ bool InSafeBufferOptOutRegion = false;
+
+  /// Holds the start location of the current "-Wunsafe-buffer-usage" opt-out
+  /// region if PP is currently in such a region. Holds an undefined value
+  /// otherwise.
+  SourceLocation CurrentSafeBufferOptOutStart; // It is used to report the start location of a never-closed region.
+
+ // An ordered sequence of "-Wunsafe-buffer-usage" opt-out regions in one
+ // translation unit. Each region is represented by a pair of start and end
+  // locations. A region is "open" if its start and end locations are
+ // identical.
+ SmallVector<std::pair<SourceLocation, SourceLocation>, 8> SafeBufferOptOutMap;
+
+public:
+ /// \return true iff the given `Loc` is in a "-Wunsafe-buffer-usage" opt-out
+ /// region. This `Loc` must be a source location that has been pre-processed.
+ bool isSafeBufferOptOut(const SourceManager&SourceMgr, const SourceLocation &Loc) const;
+
+ /// Alter the state of whether this PP currently is in a
+ /// "-Wunsafe-buffer-usage" opt-out region.
+ ///
+ /// \param isEnter: true if this PP is entering a region; otherwise, this PP
+ /// is exiting a region
+ /// \param Loc: the location of the entry or exit of a
+ /// region
+ /// \return true iff it is INVALID to enter or exit a region, i.e.,
+ /// attempt to enter a region before exiting a previous region, or exiting a
+ /// region that PP is not currently in.
+ bool enterOrExitSafeBufferOptOutRegion(bool isEnter,
+ const SourceLocation &Loc);
+
+ /// \return true iff this PP is currently in a "-Wunsafe-buffer-usage"
+ /// opt-out region
+ bool isPPInSafeBufferOptOutRegion();
+
+ /// \param StartLoc: output argument. It will be set to the start location of
+ /// the current "-Wunsafe-buffer-usage" opt-out region iff this function
+ /// returns true.
+ /// \return true iff this PP is currently in a "-Wunsafe-buffer-usage"
+ /// opt-out region
+ bool isPPInSafeBufferOptOutRegion(SourceLocation &StartLoc);
};
/// Abstract base class that describes a handler that will receive
diff --git a/contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h b/contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h
index 432f5dfa9bce..058194bcde72 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h
@@ -69,6 +69,9 @@ public:
std::vector<std::string> Includes;
std::vector<std::string> MacroIncludes;
+ /// Perform extra checks when loading PCM files for mutable file systems.
+ bool ModulesCheckRelocated = true;
+
/// Initialize the preprocessor with the compiler and target specific
/// predefines.
bool UsePredefines = true;
diff --git a/contrib/llvm-project/clang/include/clang/Lex/Token.h b/contrib/llvm-project/clang/include/clang/Lex/Token.h
index 7fd48b1b4391..1409e2c58b55 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/Token.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/Token.h
@@ -117,8 +117,13 @@ public:
}
/// Return true if this is any of tok::annot_* kind tokens.
- bool isAnnotation() const {
- return tok::isAnnotation(getKind());
+ bool isAnnotation() const { return tok::isAnnotation(getKind()); }
+
+ /// Return true if the token is a keyword that is parsed in the same
+ /// position as a standard attribute, but that has semantic meaning
+ /// and so cannot be a true attribute.
+ bool isRegularKeywordAttribute() const {
+ return tok::isRegularKeywordAttribute(getKind());
}
/// Return a source location identifier for the specified
diff --git a/contrib/llvm-project/clang/include/clang/Parse/LoopHint.h b/contrib/llvm-project/clang/include/clang/Parse/LoopHint.h
index 6e363f72b658..75705fcd4c75 100644
--- a/contrib/llvm-project/clang/include/clang/Parse/LoopHint.h
+++ b/contrib/llvm-project/clang/include/clang/Parse/LoopHint.h
@@ -23,20 +23,18 @@ struct LoopHint {
// Identifier corresponding to the name of the pragma. "loop" for
// "#pragma clang loop" directives and "unroll" for "#pragma unroll"
// hints.
- IdentifierLoc *PragmaNameLoc;
+ IdentifierLoc *PragmaNameLoc = nullptr;
// Name of the loop hint. Examples: "unroll", "vectorize". In the
// "#pragma unroll" and "#pragma nounroll" cases, this is identical to
// PragmaNameLoc.
- IdentifierLoc *OptionLoc;
+ IdentifierLoc *OptionLoc = nullptr;
// Identifier for the hint state argument. If null, then the state is
// default value such as for "#pragma unroll".
- IdentifierLoc *StateLoc;
+ IdentifierLoc *StateLoc = nullptr;
// Expression for the hint argument if it exists, null otherwise.
- Expr *ValueExpr;
+ Expr *ValueExpr = nullptr;
- LoopHint()
- : PragmaNameLoc(nullptr), OptionLoc(nullptr), StateLoc(nullptr),
- ValueExpr(nullptr) {}
+ LoopHint() = default;
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Parse/Parser.h b/contrib/llvm-project/clang/include/clang/Parse/Parser.h
index 6f9581b9ea1f..475dfe845528 100644
--- a/contrib/llvm-project/clang/include/clang/Parse/Parser.h
+++ b/contrib/llvm-project/clang/include/clang/Parse/Parser.h
@@ -18,6 +18,7 @@
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
@@ -157,7 +158,7 @@ class Parser : public CodeCompletionHandler {
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
- *Ident_generated_declaration;
+ *Ident_generated_declaration, *Ident_USR;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
@@ -692,7 +693,8 @@ private:
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
- Kind == tok::annot_module_end || Kind == tok::annot_module_include;
+ Kind == tok::annot_module_end || Kind == tok::annot_module_include ||
+ Kind == tok::annot_repl_input_end;
}
/// Checks if the \p Level is valid for use in a fold expression.
@@ -1186,7 +1188,7 @@ private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
- unsigned OldFlags;
+ unsigned OldFlags = 0;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
@@ -1786,8 +1788,12 @@ public:
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
+ ExprResult ParseUnevaluatedStringLiteralExpression();
private:
+ ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral,
+ bool Unevaluated);
+
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
@@ -2215,7 +2221,8 @@ private:
DSC_objc_method_result, // ObjC method result context, enables
// 'instancetype'
DSC_condition, // condition declaration context
- DSC_association // A _Generic selection expression's type association
+ DSC_association, // A _Generic selection expression's type association
+ DSC_new, // C++ new expression
};
/// Is this a context in which we are parsing just a type-specifier (or
@@ -2237,6 +2244,7 @@ private:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
case DeclSpecContext::DSC_association:
+ case DeclSpecContext::DSC_new:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
@@ -2285,6 +2293,7 @@ private:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_conv_operator:
case DeclSpecContext::DSC_template_arg:
+ case DeclSpecContext::DSC_new:
return AllowDefiningTypeSpec::No;
}
llvm_unreachable("Missing DeclSpecContext case");
@@ -2308,6 +2317,7 @@ private:
case DeclSpecContext::DSC_association:
case DeclSpecContext::DSC_conv_operator:
case DeclSpecContext::DSC_template_arg:
+ case DeclSpecContext::DSC_new:
return false;
}
@@ -2327,6 +2337,7 @@ private:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_association:
case DeclSpecContext::DSC_conv_operator:
+ case DeclSpecContext::DSC_new:
return true;
case DeclSpecContext::DSC_objc_method_result:
@@ -2349,6 +2360,7 @@ private:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
case DeclSpecContext::DSC_template_param:
+ case DeclSpecContext::DSC_new:
return ImplicitTypenameContext::Yes;
case DeclSpecContext::DSC_normal:
@@ -2513,17 +2525,19 @@ private:
/// this is a constructor declarator.
bool isConstructorDeclarator(
bool Unqualified, bool DeductionGuide = false,
- DeclSpec::FriendSpecified IsFriend = DeclSpec::FriendSpecified::No);
+ DeclSpec::FriendSpecified IsFriend = DeclSpec::FriendSpecified::No,
+ const ParsedTemplateInfo *TemplateInfo = nullptr);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
- TypeIdAsTemplateArgument
+ TypeIdAsTemplateArgument,
+ TypeIdInTrailingReturnType,
+ TypeIdAsGenericSelectionArgument,
};
-
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
@@ -2538,13 +2552,29 @@ private:
return isTypeIdInParens(isAmbiguous);
}
+ /// Checks whether the current tokens form a type-id or an expression for the
+ /// purposes of use as the initial operand to a generic selection expression.
+ /// This requires special handling in C++ because it accepts either a type or
+ /// an expression, and we need to disambiguate which is which. However, we
+ /// cannot use the same logic as we've used for sizeof expressions, because
+ /// that logic relies on the operator only accepting a single argument,
+ /// whereas _Generic accepts a list of arguments.
+ bool isTypeIdForGenericSelection() {
+ if (getLangOpts().CPlusPlus) {
+ bool isAmbiguous;
+ return isCXXTypeId(TypeIdAsGenericSelectionArgument, isAmbiguous);
+ }
+ return isTypeSpecifierQualifier();
+ }
+
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
- bool IsAmbiguous;
- if (getLangOpts().CPlusPlus)
- return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
+ if (getLangOpts().CPlusPlus) {
+ bool isAmbiguous;
+ return isCXXTypeId(TypeIdUnambiguous, isAmbiguous);
+ }
return isTypeSpecifierQualifier();
}
@@ -2651,14 +2681,16 @@ private:
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
- TPResult TryParseInitDeclaratorList();
+ TPResult TryParseInitDeclaratorList(bool MayHaveTrailingReturnType = false);
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
- bool mayHaveDirectInit = false);
+ bool mayHaveDirectInit = false,
+ bool mayHaveTrailingReturnType = false);
TPResult TryParseParameterDeclarationClause(
bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false,
ImplicitTypenameContext AllowImplicitTypename =
ImplicitTypenameContext::No);
- TPResult TryParseFunctionDeclarator();
+ TPResult TryParseFunctionDeclarator(bool MayHaveTrailingReturnType = false);
+ bool NameAfterArrowIsNonType();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
@@ -2680,17 +2712,22 @@ public:
private:
void ParseBlockId(SourceLocation CaretLoc);
- /// Are [[]] attributes enabled?
- bool standardAttributesAllowed() const {
- const LangOptions &LO = getLangOpts();
- return LO.DoubleSquareBracketAttributes;
+ /// Return true if the next token should be treated as a [[]] attribute,
+ /// or as a keyword that behaves like one. The former is only true if
+ /// [[]] attributes are enabled, whereas the latter is true whenever
+ /// such a keyword appears. The arguments are as for
+ /// isCXX11AttributeSpecifier.
+ bool isAllowedCXX11AttributeSpecifier(bool Disambiguate = false,
+ bool OuterMightBeMessageSend = false) {
+ return (Tok.isRegularKeywordAttribute() ||
+ isCXX11AttributeSpecifier(Disambiguate, OuterMightBeMessageSend));
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
- if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
+ if (NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
@@ -2698,9 +2735,8 @@ private:
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributes &Attrs,
SourceLocation CorrectLocation) {
- if (!standardAttributesAllowed())
- return;
- if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
+ if (!Tok.isRegularKeywordAttribute() &&
+ (Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
@@ -2716,7 +2752,7 @@ private:
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
- DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
+ DiagnoseProhibitedAttributes(Attrs, FixItLoc);
Attrs.clear();
}
@@ -2724,10 +2760,10 @@ private:
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
- DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
+ DiagnoseProhibitedAttributes(Attrs, FixItLoc);
Attrs.clearListOnly();
}
- void DiagnoseProhibitedAttributes(const SourceRange &Range,
+ void DiagnoseProhibitedAttributes(const ParsedAttributesView &Attrs,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear on certain syntactic locations
@@ -2736,7 +2772,8 @@ private:
// For the most cases we don't want to warn on unknown type attributes, but
// left them to later diagnoses. However, for a few cases like module
// declarations and module import declarations, we should do it.
- void ProhibitCXX11Attributes(ParsedAttributes &Attrs, unsigned DiagID,
+ void ProhibitCXX11Attributes(ParsedAttributes &Attrs, unsigned AttrDiagID,
+ unsigned KeywordDiagId,
bool DiagnoseEmptyAttrs = false,
bool WarnOnUnknownAttrs = false);
@@ -2761,7 +2798,7 @@ private:
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
enum ParseAttrKindMask {
PAKM_GNU = 1 << 0,
@@ -2790,7 +2827,7 @@ private:
bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
- (standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
+ isAllowedCXX11AttributeSpecifier()) {
ParseAttributes(WhichAttrKinds, Attrs, LateAttrs);
return true;
}
@@ -2822,14 +2859,14 @@ private:
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax, Declarator *D);
+ ParsedAttr::Form Form, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
void ReplayOpenMPAttributeTokens(CachedTokens &OpenMPTokens) {
// If parsing the attributes found an OpenMP directive, emit those tokens
@@ -2842,7 +2879,7 @@ private:
}
}
void MaybeParseCXX11Attributes(Declarator &D) {
- if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
+ if (isAllowedCXX11AttributeSpecifier()) {
ParsedAttributes Attrs(AttrFactory);
ParseCXX11Attributes(Attrs);
D.takeAttributes(Attrs);
@@ -2851,8 +2888,7 @@ private:
bool MaybeParseCXX11Attributes(ParsedAttributes &Attrs,
bool OuterMightBeMessageSend = false) {
- if (standardAttributesAllowed() &&
- isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) {
+ if (isAllowedCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) {
ParseCXX11Attributes(Attrs);
return true;
}
@@ -2929,6 +2965,7 @@ private:
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
+ void ParseWebAssemblyFuncrefTypeAttribute(ParsedAttributes &Attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
@@ -2947,7 +2984,7 @@ private:
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
std::optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
@@ -2958,7 +2995,7 @@ private:
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
@@ -2966,7 +3003,7 @@ private:
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
@@ -2974,7 +3011,7 @@ private:
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
@@ -2982,14 +3019,14 @@ private:
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
@@ -2998,8 +3035,9 @@ private:
SourceLocation EndLoc);
void ParseAtomicSpecifier(DeclSpec &DS);
- ExprResult ParseAlignArgument(SourceLocation Start,
- SourceLocation &EllipsisLoc);
+ ExprResult ParseAlignArgument(StringRef KWName, SourceLocation Start,
+ SourceLocation &EllipsisLoc, bool &IsType,
+ ParsedType &Ty);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();
diff --git a/contrib/llvm-project/clang/include/clang/Rewrite/Core/RewriteRope.h b/contrib/llvm-project/clang/include/clang/Rewrite/Core/RewriteRope.h
index 8fa7af245eb8..73e66e111f57 100644
--- a/contrib/llvm-project/clang/include/clang/Rewrite/Core/RewriteRope.h
+++ b/contrib/llvm-project/clang/include/clang/Rewrite/Core/RewriteRope.h
@@ -181,6 +181,10 @@ public:
RewriteRope() = default;
RewriteRope(const RewriteRope &RHS) : Chunks(RHS.Chunks) {}
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ RewriteRope &operator=(const RewriteRope &) = delete;
+
using iterator = RopePieceBTree::iterator;
using const_iterator = RopePieceBTree::iterator;
diff --git a/contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h b/contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h
index 13a88bb9f896..020ddd36cf73 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h
@@ -13,6 +13,7 @@
#ifndef LLVM_CLANG_SEMA_ANALYSISBASEDWARNINGS_H
#define LLVM_CLANG_SEMA_ANALYSISBASEDWARNINGS_H
+#include "clang/AST/Decl.h"
#include "llvm/ADT/DenseMap.h"
#include <memory>
@@ -95,6 +96,9 @@ public:
void IssueWarnings(Policy P, FunctionScopeInfo *fscope,
const Decl *D, QualType BlockType);
+ // Issue warnings that require whole-translation-unit analysis.
+ void IssueWarnings(TranslationUnitDecl *D);
+
Policy getDefaultPolicy() { return DefaultPolicy; }
void PrintStats() const;
diff --git a/contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h b/contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h
index a2d523cd3e92..bb4b63805038 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h
@@ -333,7 +333,10 @@ public:
/// An unknown context, in which we are recovering from a parsing
/// error and don't know which completions we should give.
- CCC_Recovery
+ CCC_Recovery,
+
+ /// Code completion in a @class forward declaration.
+ CCC_ObjCClassForwardDecl
};
using VisitedContextSet = llvm::SmallPtrSet<DeclContext *, 8>;
diff --git a/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h b/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
index 69fe2c541607..c63378c73290 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
@@ -62,9 +62,18 @@ namespace clang {
/// often used as if it meant "present".
///
/// The actual scope is described by getScopeRep().
+///
+/// If the kind of getScopeRep() is TypeSpec then TemplateParamLists may be empty
+/// or contain the template parameter lists attached to the current declaration.
+/// Consider the following example:
+/// template <class T> void SomeType<T>::some_method() {}
+/// If CXXScopeSpec refers to SomeType<T> then TemplateParamLists will contain
+/// a single element referring to template <class T>.
+
class CXXScopeSpec {
SourceRange Range;
NestedNameSpecifierLocBuilder Builder;
+ ArrayRef<TemplateParameterList *> TemplateParamLists;
public:
SourceRange getRange() const { return Range; }
@@ -74,6 +83,13 @@ public:
SourceLocation getBeginLoc() const { return Range.getBegin(); }
SourceLocation getEndLoc() const { return Range.getEnd(); }
+ void setTemplateParamLists(ArrayRef<TemplateParameterList *> L) {
+ TemplateParamLists = L;
+ }
+ ArrayRef<TemplateParameterList *> getTemplateParamLists() const {
+ return TemplateParamLists;
+ }
+
/// Retrieve the representation of the nested-name-specifier.
NestedNameSpecifier *getScopeRep() const {
return Builder.getRepresentation();
@@ -1956,9 +1972,10 @@ public:
InventedTemplateParameterList(nullptr) {
assert(llvm::all_of(DeclarationAttrs,
[](const ParsedAttr &AL) {
- return AL.isStandardAttributeSyntax();
+ return (AL.isStandardAttributeSyntax() ||
+ AL.isRegularKeywordAttribute());
}) &&
- "DeclarationAttrs may only contain [[]] attributes");
+ "DeclarationAttrs may only contain [[]] and keyword attributes");
}
~Declarator() {
@@ -2603,14 +2620,6 @@ public:
return false;
}
- /// Return a source range list of C++11 attributes associated
- /// with the declarator.
- void getCXX11AttributeRanges(SmallVectorImpl<SourceRange> &Ranges) {
- for (const ParsedAttr &AL : Attrs)
- if (AL.isCXX11Attribute())
- Ranges.push_back(AL.getRange());
- }
-
void setAsmLabel(Expr *E) { AsmLabel = E; }
Expr *getAsmLabel() const { return AsmLabel; }
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Designator.h b/contrib/llvm-project/clang/include/clang/Sema/Designator.h
index 84837bfeba5b..244535978d4b 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Designator.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Designator.h
@@ -21,60 +21,108 @@ namespace clang {
class Expr;
class IdentifierInfo;
-class Sema;
/// Designator - A designator in a C99 designated initializer.
///
/// This class is a discriminated union which holds the various
-/// different sorts of designators possible. A Designation is an array of
+/// different sorts of designators possible. A Designation is an array of
/// these. An example of a designator are things like this:
-/// [8] .field [47] // C99 designation: 3 designators
-/// [8 ... 47] field: // GNU extensions: 2 designators
+///
+/// [8] .field [47] // C99 designation: 3 designators
+/// [8 ... 47] field: // GNU extensions: 2 designators
+///
/// These occur in initializers, e.g.:
-/// int a[10] = {2, 4, [8]=9, 10};
+///
+/// int a[10] = {2, 4, [8]=9, 10};
///
class Designator {
-public:
- enum DesignatorKind {
- FieldDesignator, ArrayDesignator, ArrayRangeDesignator
- };
-private:
- Designator() {};
-
- DesignatorKind Kind;
-
+ /// A field designator, e.g., ".x = 42".
struct FieldDesignatorInfo {
- const IdentifierInfo *II;
+ /// Refers to the field being initialized.
+ const IdentifierInfo *FieldName;
+
+ /// The location of the '.' in the designated initializer.
SourceLocation DotLoc;
- SourceLocation NameLoc;
+
+ /// The location of the field name in the designated initializer.
+ SourceLocation FieldLoc;
+
+ FieldDesignatorInfo(const IdentifierInfo *FieldName, SourceLocation DotLoc,
+ SourceLocation FieldLoc)
+ : FieldName(FieldName), DotLoc(DotLoc), FieldLoc(FieldLoc) {}
};
+
+ /// An array designator, e.g., "[42] = 0".
struct ArrayDesignatorInfo {
Expr *Index;
+
+ // The location of the '[' in the designated initializer.
SourceLocation LBracketLoc;
+
+ // The location of the ']' in the designated initializer.
mutable SourceLocation RBracketLoc;
+
+ ArrayDesignatorInfo(Expr *Index, SourceLocation LBracketLoc)
+ : Index(Index), LBracketLoc(LBracketLoc) {}
};
+
+ /// An array range designator, e.g. "[42 ... 50] = 1".
struct ArrayRangeDesignatorInfo {
- Expr *Start, *End;
- SourceLocation LBracketLoc, EllipsisLoc;
+ Expr *Start;
+ Expr *End;
+
+ // The location of the '[' in the designated initializer.
+ SourceLocation LBracketLoc;
+
+ // The location of the '...' in the designated initializer.
+ SourceLocation EllipsisLoc;
+
+ // The location of the ']' in the designated initializer.
mutable SourceLocation RBracketLoc;
+
+ ArrayRangeDesignatorInfo(Expr *Start, Expr *End, SourceLocation LBracketLoc,
+ SourceLocation EllipsisLoc)
+ : Start(Start), End(End), LBracketLoc(LBracketLoc),
+ EllipsisLoc(EllipsisLoc) {}
};
+ /// The kind of designator this describes.
+ enum DesignatorKind {
+ FieldDesignator,
+ ArrayDesignator,
+ ArrayRangeDesignator
+ };
+
+ DesignatorKind Kind;
+
union {
FieldDesignatorInfo FieldInfo;
ArrayDesignatorInfo ArrayInfo;
ArrayRangeDesignatorInfo ArrayRangeInfo;
};
-public:
+ Designator(DesignatorKind Kind) : Kind(Kind) {}
- DesignatorKind getKind() const { return Kind; }
+public:
bool isFieldDesignator() const { return Kind == FieldDesignator; }
bool isArrayDesignator() const { return Kind == ArrayDesignator; }
bool isArrayRangeDesignator() const { return Kind == ArrayRangeDesignator; }
- const IdentifierInfo *getField() const {
+ //===--------------------------------------------------------------------===//
+ // FieldDesignatorInfo
+
+ /// Creates a field designator.
+ static Designator CreateFieldDesignator(const IdentifierInfo *FieldName,
+ SourceLocation DotLoc,
+ SourceLocation FieldLoc) {
+ Designator D(FieldDesignator);
+ new (&D.FieldInfo) FieldDesignatorInfo(FieldName, DotLoc, FieldLoc);
+ return D;
+ }
+
+ const IdentifierInfo *getFieldDecl() const {
assert(isFieldDesignator() && "Invalid accessor");
- return FieldInfo.II;
+ return FieldInfo.FieldName;
}
SourceLocation getDotLoc() const {
@@ -84,7 +132,18 @@ public:
SourceLocation getFieldLoc() const {
assert(isFieldDesignator() && "Invalid accessor");
- return FieldInfo.NameLoc;
+ return FieldInfo.FieldLoc;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // ArrayDesignatorInfo:
+
+ /// Creates an array designator.
+ static Designator CreateArrayDesignator(Expr *Index,
+ SourceLocation LBracketLoc) {
+ Designator D(ArrayDesignator);
+ new (&D.ArrayInfo) ArrayDesignatorInfo(Index, LBracketLoc);
+ return D;
}
Expr *getArrayIndex() const {
@@ -92,73 +151,46 @@ public:
return ArrayInfo.Index;
}
- Expr *getArrayRangeStart() const {
- assert(isArrayRangeDesignator() && "Invalid accessor");
- return ArrayRangeInfo.Start;
- }
- Expr *getArrayRangeEnd() const {
- assert(isArrayRangeDesignator() && "Invalid accessor");
- return ArrayRangeInfo.End;
- }
-
SourceLocation getLBracketLoc() const {
assert((isArrayDesignator() || isArrayRangeDesignator()) &&
"Invalid accessor");
- if (isArrayDesignator())
- return ArrayInfo.LBracketLoc;
- else
- return ArrayRangeInfo.LBracketLoc;
+ return isArrayDesignator() ? ArrayInfo.LBracketLoc
+ : ArrayRangeInfo.LBracketLoc;
}
SourceLocation getRBracketLoc() const {
assert((isArrayDesignator() || isArrayRangeDesignator()) &&
"Invalid accessor");
- if (isArrayDesignator())
- return ArrayInfo.RBracketLoc;
- else
- return ArrayRangeInfo.RBracketLoc;
+ return isArrayDesignator() ? ArrayInfo.RBracketLoc
+ : ArrayRangeInfo.RBracketLoc;
}
- SourceLocation getEllipsisLoc() const {
- assert(isArrayRangeDesignator() && "Invalid accessor");
- return ArrayRangeInfo.EllipsisLoc;
- }
+ //===--------------------------------------------------------------------===//
+ // ArrayRangeDesignatorInfo:
- static Designator getField(const IdentifierInfo *II, SourceLocation DotLoc,
- SourceLocation NameLoc) {
- Designator D;
- D.Kind = FieldDesignator;
- new (&D.FieldInfo) FieldDesignatorInfo;
- D.FieldInfo.II = II;
- D.FieldInfo.DotLoc = DotLoc;
- D.FieldInfo.NameLoc = NameLoc;
+ /// Creates a GNU array-range designator.
+ static Designator CreateArrayRangeDesignator(Expr *Start, Expr *End,
+ SourceLocation LBracketLoc,
+ SourceLocation EllipsisLoc) {
+ Designator D(ArrayRangeDesignator);
+ new (&D.ArrayRangeInfo)
+ ArrayRangeDesignatorInfo(Start, End, LBracketLoc, EllipsisLoc);
return D;
}
- static Designator getArray(Expr *Index,
- SourceLocation LBracketLoc) {
- Designator D;
- D.Kind = ArrayDesignator;
- new (&D.ArrayInfo) ArrayDesignatorInfo;
- D.ArrayInfo.Index = Index;
- D.ArrayInfo.LBracketLoc = LBracketLoc;
- D.ArrayInfo.RBracketLoc = SourceLocation();
- return D;
+ Expr *getArrayRangeStart() const {
+ assert(isArrayRangeDesignator() && "Invalid accessor");
+ return ArrayRangeInfo.Start;
}
- static Designator getArrayRange(Expr *Start,
- Expr *End,
- SourceLocation LBracketLoc,
- SourceLocation EllipsisLoc) {
- Designator D;
- D.Kind = ArrayRangeDesignator;
- new (&D.ArrayRangeInfo) ArrayRangeDesignatorInfo;
- D.ArrayRangeInfo.Start = Start;
- D.ArrayRangeInfo.End = End;
- D.ArrayRangeInfo.LBracketLoc = LBracketLoc;
- D.ArrayRangeInfo.EllipsisLoc = EllipsisLoc;
- D.ArrayRangeInfo.RBracketLoc = SourceLocation();
- return D;
+ Expr *getArrayRangeEnd() const {
+ assert(isArrayRangeDesignator() && "Invalid accessor");
+ return ArrayRangeInfo.End;
+ }
+
+ SourceLocation getEllipsisLoc() const {
+ assert(isArrayRangeDesignator() && "Invalid accessor");
+ return ArrayRangeInfo.EllipsisLoc;
}
void setRBracketLoc(SourceLocation RBracketLoc) const {
@@ -169,17 +201,8 @@ public:
else
ArrayRangeInfo.RBracketLoc = RBracketLoc;
}
-
- /// ClearExprs - Null out any expression references, which prevents
- /// them from being 'delete'd later.
- void ClearExprs(Sema &Actions) {}
-
- /// FreeExprs - Release any unclaimed memory for the expressions in
- /// this designator.
- void FreeExprs(Sema &Actions) {}
};
-
/// Designation - Represent a full designation, which is a sequence of
/// designators. This class is mostly a helper for InitListDesignations.
class Designation {
@@ -188,9 +211,7 @@ class Designation {
public:
/// AddDesignator - Add a designator to the end of this list.
- void AddDesignator(Designator D) {
- Designators.push_back(D);
- }
+ void AddDesignator(Designator D) { Designators.push_back(D); }
bool empty() const { return Designators.empty(); }
@@ -199,14 +220,6 @@ public:
assert(Idx < Designators.size());
return Designators[Idx];
}
-
- /// ClearExprs - Null out any expression references, which prevents them from
- /// being 'delete'd later.
- void ClearExprs(Sema &Actions) {}
-
- /// FreeExprs - Release any unclaimed memory for the expressions in this
- /// designation.
- void FreeExprs(Sema &Actions) {}
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Sema/EnterExpressionEvaluationContext.h b/contrib/llvm-project/clang/include/clang/Sema/EnterExpressionEvaluationContext.h
new file mode 100644
index 000000000000..5eca797b8842
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Sema/EnterExpressionEvaluationContext.h
@@ -0,0 +1,69 @@
+//===--- EnterExpressionEvaluationContext.h ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_ENTEREXPRESSIONEVALUATIONCONTEXT_H
+#define LLVM_CLANG_SEMA_ENTEREXPRESSIONEVALUATIONCONTEXT_H
+
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+
+class Decl;
+
+/// RAII object that enters a new expression evaluation context.
+class EnterExpressionEvaluationContext {
+ Sema &Actions;
+ bool Entered = true;
+
+public:
+ EnterExpressionEvaluationContext(
+ Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
+ Decl *LambdaContextDecl = nullptr,
+ Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
+ Sema::ExpressionEvaluationContextRecord::EK_Other,
+ bool ShouldEnter = true)
+ : Actions(Actions), Entered(ShouldEnter) {
+ if (Entered)
+ Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
+ ExprContext);
+ }
+ EnterExpressionEvaluationContext(
+ Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
+ Sema::ReuseLambdaContextDecl_t,
+ Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
+ Sema::ExpressionEvaluationContextRecord::EK_Other)
+ : Actions(Actions) {
+ Actions.PushExpressionEvaluationContext(
+ NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
+ }
+
+ enum InitListTag { InitList };
+ EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
+ bool ShouldEnter = true)
+ : Actions(Actions), Entered(false) {
+ // In C++11 onwards, narrowing checks are performed on the contents of
+ // braced-init-lists, even when they occur within unevaluated operands.
+ // Therefore we still need to instantiate constexpr functions used in such
+ // a context.
+ if (ShouldEnter && Actions.isUnevaluatedContext() &&
+ Actions.getLangOpts().CPlusPlus11) {
+ Actions.PushExpressionEvaluationContext(
+ Sema::ExpressionEvaluationContext::UnevaluatedList);
+ Entered = true;
+ }
+ }
+
+ ~EnterExpressionEvaluationContext() {
+ if (Entered)
+ Actions.PopExpressionEvaluationContext();
+ }
+};
+
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h b/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h
index 17a7ffd3bb68..22d1ee2df115 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h
@@ -230,6 +230,11 @@ public:
return false;
}
+ /// Notify the external source that a lambda was assigned a mangling number.
+ /// This enables the external source to track the correspondence between
+ /// lambdas and mangling numbers if necessary.
+ virtual void AssignedLambdaNumbering(const CXXRecordDecl *Lambda) {}
+
/// LLVM-style RTTI.
/// \{
bool isA(const void *ClassID) const override {
diff --git a/contrib/llvm-project/clang/include/clang/Sema/HLSLExternalSemaSource.h b/contrib/llvm-project/clang/include/clang/Sema/HLSLExternalSemaSource.h
index 8531609bb9e0..4b6bc96f72e2 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/HLSLExternalSemaSource.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/HLSLExternalSemaSource.h
@@ -23,7 +23,7 @@ class Sema;
class HLSLExternalSemaSource : public ExternalSemaSource {
Sema *SemaPtr = nullptr;
NamespaceDecl *HLSLNamespace = nullptr;
- CXXRecordDecl *ResourceDecl;
+ CXXRecordDecl *ResourceDecl = nullptr;
using CompletionFunction = std::function<void(CXXRecordDecl *)>;
llvm::DenseMap<CXXRecordDecl *, CompletionFunction> Completions;
diff --git a/contrib/llvm-project/clang/include/clang/Sema/IdentifierResolver.h b/contrib/llvm-project/clang/include/clang/Sema/IdentifierResolver.h
index 7c8dc46307d4..1fbd6c48e518 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/IdentifierResolver.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/IdentifierResolver.h
@@ -134,13 +134,14 @@ public:
explicit IdentifierResolver(Preprocessor &PP);
~IdentifierResolver();
- /// begin - Returns an iterator for decls with the name 'Name'.
+ /// Returns a range of decls with the name 'Name'.
+ llvm::iterator_range<iterator> decls(DeclarationName Name);
+
+ /// Returns an iterator over decls with the name 'Name'.
iterator begin(DeclarationName Name);
- /// end - Returns an iterator that has 'finished'.
- iterator end() {
- return iterator();
- }
+ /// Returns the end iterator.
+ iterator end() { return iterator(); }
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Initialization.h b/contrib/llvm-project/clang/include/clang/Sema/Initialization.h
index e1bbea0d118d..2072cd8d1c3e 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Initialization.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Initialization.h
@@ -1121,6 +1121,9 @@ public:
/// Parenthesized list initialization failed at some point.
/// This is a C++20 feature.
FK_ParenthesizedListInitFailed,
+
+ // A designated initializer was provided for a non-aggregate type.
+ FK_DesignatedInitForNonAggregate,
};
private:
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Lookup.h b/contrib/llvm-project/clang/include/clang/Sema/Lookup.h
index 39d980a857b3..351fa0c6ca0c 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Lookup.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Lookup.h
@@ -657,6 +657,15 @@ public:
F.CalledDone = true;
}
+ // The move assignment operator is defined as deleted pending
+ // further motivation.
+ Filter &operator=(Filter &&) = delete;
+
+ // The copy constrcutor and copy assignment operator is defined as deleted
+ // pending further motivation.
+ Filter(const Filter &) = delete;
+ Filter &operator=(const Filter &) = delete;
+
~Filter() {
assert(CalledDone &&
"LookupResult::Filter destroyed without done() call");
diff --git a/contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h b/contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h
index b0bb15eccee1..2bf91cb5212c 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h
@@ -360,6 +360,9 @@ public:
bool MaybeDiagnoseMissingCompleteType(SourceLocation Loc,
QualType T) override;
+ // Inform all attached sources that a mangling number was assigned.
+ void AssignedLambdaNumbering(const CXXRecordDecl *Lambda) override;
+
/// LLVM-style RTTI.
/// \{
bool isA(const void *ClassID) const override {
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Overload.h b/contrib/llvm-project/clang/include/clang/Sema/Overload.h
index 1827ea5d1e54..a97968dc7b20 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Overload.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Overload.h
@@ -162,6 +162,9 @@ class Sema;
/// Arm SVE Vector conversions
ICK_SVE_Vector_Conversion,
+ /// RISC-V RVV Vector conversions
+ ICK_RVV_Vector_Conversion,
+
/// A vector splat from an arithmetic type
ICK_Vector_Splat,
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h b/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h
index f060564e6719..592580bccd23 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h
@@ -17,12 +17,12 @@
#include "clang/Basic/AttrSubjectMatchRules.h"
#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/ParsedAttrInfo.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Sema/Ownership.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/Registry.h"
#include "llvm/Support/VersionTuple.h"
#include <cassert>
#include <cstddef>
@@ -36,124 +36,10 @@ class Decl;
class Expr;
class IdentifierInfo;
class LangOptions;
-class ParsedAttr;
class Sema;
class Stmt;
class TargetInfo;
-struct ParsedAttrInfo {
- /// Corresponds to the Kind enum.
- unsigned AttrKind : 16;
- /// The number of required arguments of this attribute.
- unsigned NumArgs : 4;
- /// The number of optional arguments of this attributes.
- unsigned OptArgs : 4;
- /// The number of non-fake arguments specified in the attribute definition.
- unsigned NumArgMembers : 4;
- /// True if the parsing does not match the semantic content.
- unsigned HasCustomParsing : 1;
- // True if this attribute accepts expression parameter pack expansions.
- unsigned AcceptsExprPack : 1;
- /// True if this attribute is only available for certain targets.
- unsigned IsTargetSpecific : 1;
- /// True if this attribute applies to types.
- unsigned IsType : 1;
- /// True if this attribute applies to statements.
- unsigned IsStmt : 1;
- /// True if this attribute has any spellings that are known to gcc.
- unsigned IsKnownToGCC : 1;
- /// True if this attribute is supported by #pragma clang attribute.
- unsigned IsSupportedByPragmaAttribute : 1;
- /// The syntaxes supported by this attribute and how they're spelled.
- struct Spelling {
- AttributeCommonInfo::Syntax Syntax;
- const char *NormalizedFullName;
- };
- ArrayRef<Spelling> Spellings;
- // The names of the known arguments of this attribute.
- ArrayRef<const char *> ArgNames;
-
-protected:
- constexpr ParsedAttrInfo(AttributeCommonInfo::Kind AttrKind =
- AttributeCommonInfo::NoSemaHandlerAttribute)
- : AttrKind(AttrKind), NumArgs(0), OptArgs(0), NumArgMembers(0),
- HasCustomParsing(0), AcceptsExprPack(0), IsTargetSpecific(0), IsType(0),
- IsStmt(0), IsKnownToGCC(0), IsSupportedByPragmaAttribute(0) {}
-
- constexpr ParsedAttrInfo(AttributeCommonInfo::Kind AttrKind, unsigned NumArgs,
- unsigned OptArgs, unsigned NumArgMembers,
- unsigned HasCustomParsing, unsigned AcceptsExprPack,
- unsigned IsTargetSpecific, unsigned IsType,
- unsigned IsStmt, unsigned IsKnownToGCC,
- unsigned IsSupportedByPragmaAttribute,
- ArrayRef<Spelling> Spellings,
- ArrayRef<const char *> ArgNames)
- : AttrKind(AttrKind), NumArgs(NumArgs), OptArgs(OptArgs),
- NumArgMembers(NumArgMembers), HasCustomParsing(HasCustomParsing),
- AcceptsExprPack(AcceptsExprPack), IsTargetSpecific(IsTargetSpecific),
- IsType(IsType), IsStmt(IsStmt), IsKnownToGCC(IsKnownToGCC),
- IsSupportedByPragmaAttribute(IsSupportedByPragmaAttribute),
- Spellings(Spellings), ArgNames(ArgNames) {}
-
-public:
- virtual ~ParsedAttrInfo() = default;
-
- /// Check if this attribute appertains to D, and issue a diagnostic if not.
- virtual bool diagAppertainsToDecl(Sema &S, const ParsedAttr &Attr,
- const Decl *D) const {
- return true;
- }
- /// Check if this attribute appertains to St, and issue a diagnostic if not.
- virtual bool diagAppertainsToStmt(Sema &S, const ParsedAttr &Attr,
- const Stmt *St) const {
- return true;
- }
- /// Check if the given attribute is mutually exclusive with other attributes
- /// already applied to the given declaration.
- virtual bool diagMutualExclusion(Sema &S, const ParsedAttr &A,
- const Decl *D) const {
- return true;
- }
- /// Check if this attribute is allowed by the language we are compiling.
- virtual bool acceptsLangOpts(const LangOptions &LO) const { return true; }
-
- /// Check if this attribute is allowed when compiling for the given target.
- virtual bool existsInTarget(const TargetInfo &Target) const {
- return true;
- }
- /// Convert the spelling index of Attr to a semantic spelling enum value.
- virtual unsigned
- spellingIndexToSemanticSpelling(const ParsedAttr &Attr) const {
- return UINT_MAX;
- }
- /// Returns true if the specified parameter index for this attribute in
- /// Attr.td is an ExprArgument or VariadicExprArgument, or a subclass thereof;
- /// returns false otherwise.
- virtual bool isParamExpr(size_t N) const { return false; }
- /// Populate Rules with the match rules of this attribute.
- virtual void getPragmaAttributeMatchRules(
- llvm::SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>> &Rules,
- const LangOptions &LangOpts) const {
- }
- enum AttrHandling {
- NotHandled,
- AttributeApplied,
- AttributeNotApplied
- };
- /// If this ParsedAttrInfo knows how to handle this ParsedAttr applied to this
- /// Decl then do so and return either AttributeApplied if it was applied or
- /// AttributeNotApplied if it wasn't. Otherwise return NotHandled.
- virtual AttrHandling handleDeclAttribute(Sema &S, Decl *D,
- const ParsedAttr &Attr) const {
- return NotHandled;
- }
-
- static const ParsedAttrInfo &get(const AttributeCommonInfo &A);
- static ArrayRef<const ParsedAttrInfo *> getAllBuiltin();
-};
-
-typedef llvm::Registry<ParsedAttrInfo> ParsedAttrInfoRegistry;
-
/// Represents information about a change in availability for
/// an entity, which is part of the encoding of the 'availability'
/// attribute.
@@ -318,10 +204,9 @@ private:
/// Constructor for attributes with expression arguments.
ParsedAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
- ArgsUnion *args, unsigned numArgs, Syntax syntaxUsed,
+ ArgsUnion *args, unsigned numArgs, Form formUsed,
SourceLocation ellipsisLoc)
- : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc,
- syntaxUsed),
+ : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc, formUsed),
EllipsisLoc(ellipsisLoc), NumArgs(numArgs), Invalid(false),
UsedAsTypeAttr(false), IsAvailability(false),
IsTypeTagForDatatype(false), IsProperty(false), HasParsedType(false),
@@ -337,10 +222,9 @@ private:
IdentifierLoc *Parm, const AvailabilityChange &introduced,
const AvailabilityChange &deprecated,
const AvailabilityChange &obsoleted, SourceLocation unavailable,
- const Expr *messageExpr, Syntax syntaxUsed, SourceLocation strict,
+ const Expr *messageExpr, Form formUsed, SourceLocation strict,
const Expr *replacementExpr)
- : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc,
- syntaxUsed),
+ : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc, formUsed),
NumArgs(1), Invalid(false), UsedAsTypeAttr(false), IsAvailability(true),
IsTypeTagForDatatype(false), IsProperty(false), HasParsedType(false),
HasProcessingCache(false), IsPragmaClangAttribute(false),
@@ -356,9 +240,8 @@ private:
ParsedAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierLoc *Parm1, IdentifierLoc *Parm2, IdentifierLoc *Parm3,
- Syntax syntaxUsed)
- : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc,
- syntaxUsed),
+ Form formUsed)
+ : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc, formUsed),
NumArgs(3), Invalid(false), UsedAsTypeAttr(false),
IsAvailability(false), IsTypeTagForDatatype(false), IsProperty(false),
HasParsedType(false), HasProcessingCache(false),
@@ -373,9 +256,8 @@ private:
ParsedAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierLoc *ArgKind, ParsedType matchingCType,
- bool layoutCompatible, bool mustBeNull, Syntax syntaxUsed)
- : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc,
- syntaxUsed),
+ bool layoutCompatible, bool mustBeNull, Form formUsed)
+ : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc, formUsed),
NumArgs(1), Invalid(false), UsedAsTypeAttr(false),
IsAvailability(false), IsTypeTagForDatatype(true), IsProperty(false),
HasParsedType(false), HasProcessingCache(false),
@@ -391,23 +273,21 @@ private:
/// Constructor for attributes with a single type argument.
ParsedAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
- ParsedType typeArg, Syntax syntaxUsed)
- : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc,
- syntaxUsed),
- NumArgs(0), Invalid(false), UsedAsTypeAttr(false),
- IsAvailability(false), IsTypeTagForDatatype(false), IsProperty(false),
- HasParsedType(true), HasProcessingCache(false),
- IsPragmaClangAttribute(false), Info(ParsedAttrInfo::get(*this)) {
+ ParsedType typeArg, Form formUsed, SourceLocation ellipsisLoc)
+ : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc, formUsed),
+ EllipsisLoc(ellipsisLoc), NumArgs(0), Invalid(false),
+ UsedAsTypeAttr(false), IsAvailability(false),
+ IsTypeTagForDatatype(false), IsProperty(false), HasParsedType(true),
+ HasProcessingCache(false), IsPragmaClangAttribute(false),
+ Info(ParsedAttrInfo::get(*this)) {
new (&getTypeBuffer()) ParsedType(typeArg);
}
/// Constructor for microsoft __declspec(property) attribute.
ParsedAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
- IdentifierInfo *getterId, IdentifierInfo *setterId,
- Syntax syntaxUsed)
- : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc,
- syntaxUsed),
+ IdentifierInfo *getterId, IdentifierInfo *setterId, Form formUsed)
+ : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc, formUsed),
NumArgs(0), Invalid(false), UsedAsTypeAttr(false),
IsAvailability(false), IsTypeTagForDatatype(false), IsProperty(true),
HasParsedType(false), HasProcessingCache(false),
@@ -817,12 +697,19 @@ public:
AttributePool(AttributeFactory &factory) : Factory(factory) {}
AttributePool(const AttributePool &) = delete;
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ AttributePool &operator=(const AttributePool &) = delete;
~AttributePool() { Factory.reclaimPool(*this); }
/// Move the given pool's allocations to this pool.
AttributePool(AttributePool &&pool) = default;
+ // The move assignment operator is defined as deleted pending further
+ // motivation.
+ AttributePool &operator=(AttributePool &&pool) = delete;
+
AttributeFactory &getFactory() const { return Factory; }
void clear() {
@@ -838,8 +725,7 @@ public:
ParsedAttr *create(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
- ArgsUnion *args, unsigned numArgs,
- ParsedAttr::Syntax syntax,
+ ArgsUnion *args, unsigned numArgs, ParsedAttr::Form form,
SourceLocation ellipsisLoc = SourceLocation()) {
size_t temp =
ParsedAttr::totalSizeToAlloc<ArgsUnion, detail::AvailabilityData,
@@ -852,7 +738,7 @@ public:
detail::PropertyData>(numArgs, 0, 0, 0,
0));
return add(new (memory) ParsedAttr(attrName, attrRange, scopeName, scopeLoc,
- args, numArgs, syntax, ellipsisLoc));
+ args, numArgs, form, ellipsisLoc));
}
ParsedAttr *create(IdentifierInfo *attrName, SourceRange attrRange,
@@ -861,24 +747,24 @@ public:
const AvailabilityChange &deprecated,
const AvailabilityChange &obsoleted,
SourceLocation unavailable, const Expr *MessageExpr,
- ParsedAttr::Syntax syntax, SourceLocation strict,
+ ParsedAttr::Form form, SourceLocation strict,
const Expr *ReplacementExpr) {
void *memory = allocate(AttributeFactory::AvailabilityAllocSize);
return add(new (memory) ParsedAttr(
attrName, attrRange, scopeName, scopeLoc, Param, introduced, deprecated,
- obsoleted, unavailable, MessageExpr, syntax, strict, ReplacementExpr));
+ obsoleted, unavailable, MessageExpr, form, strict, ReplacementExpr));
}
ParsedAttr *create(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierLoc *Param1, IdentifierLoc *Param2,
- IdentifierLoc *Param3, ParsedAttr::Syntax syntax) {
+ IdentifierLoc *Param3, ParsedAttr::Form form) {
void *memory = allocate(
ParsedAttr::totalSizeToAlloc<ArgsUnion, detail::AvailabilityData,
detail::TypeTagForDatatypeData, ParsedType,
detail::PropertyData>(3, 0, 0, 0, 0));
return add(new (memory) ParsedAttr(attrName, attrRange, scopeName, scopeLoc,
- Param1, Param2, Param3, syntax));
+ Param1, Param2, Param3, form));
}
ParsedAttr *
@@ -886,34 +772,35 @@ public:
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierLoc *argumentKind,
ParsedType matchingCType, bool layoutCompatible,
- bool mustBeNull, ParsedAttr::Syntax syntax) {
+ bool mustBeNull, ParsedAttr::Form form) {
void *memory = allocate(AttributeFactory::TypeTagForDatatypeAllocSize);
return add(new (memory) ParsedAttr(attrName, attrRange, scopeName, scopeLoc,
argumentKind, matchingCType,
- layoutCompatible, mustBeNull, syntax));
+ layoutCompatible, mustBeNull, form));
}
ParsedAttr *createTypeAttribute(IdentifierInfo *attrName,
SourceRange attrRange,
IdentifierInfo *scopeName,
SourceLocation scopeLoc, ParsedType typeArg,
- ParsedAttr::Syntax syntaxUsed) {
+ ParsedAttr::Form formUsed,
+ SourceLocation ellipsisLoc) {
void *memory = allocate(
ParsedAttr::totalSizeToAlloc<ArgsUnion, detail::AvailabilityData,
detail::TypeTagForDatatypeData, ParsedType,
detail::PropertyData>(0, 0, 0, 1, 0));
return add(new (memory) ParsedAttr(attrName, attrRange, scopeName, scopeLoc,
- typeArg, syntaxUsed));
+ typeArg, formUsed, ellipsisLoc));
}
ParsedAttr *
createPropertyAttribute(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierInfo *getterId, IdentifierInfo *setterId,
- ParsedAttr::Syntax syntaxUsed) {
+ ParsedAttr::Form formUsed) {
void *memory = allocate(AttributeFactory::PropertyAllocSize);
return add(new (memory) ParsedAttr(attrName, attrRange, scopeName, scopeLoc,
- getterId, setterId, syntaxUsed));
+ getterId, setterId, formUsed));
}
};
@@ -1010,6 +897,16 @@ public:
});
}
+ const ParsedAttr *getMSPropertyAttr() const {
+ auto It = llvm::find_if(AttrList, [](const ParsedAttr *AL) {
+ return AL->isDeclspecPropertyAttribute();
+ });
+ if (It != AttrList.end())
+ return *It;
+ return nullptr;
+ }
+ bool hasMSPropertyAttr() const { return getMSPropertyAttr(); }
+
private:
VecTy AttrList;
};
@@ -1024,6 +921,7 @@ class ParsedAttributes : public ParsedAttributesView {
public:
ParsedAttributes(AttributeFactory &factory) : pool(factory) {}
ParsedAttributes(const ParsedAttributes &) = delete;
+ ParsedAttributes &operator=(const ParsedAttributes &) = delete;
AttributePool &getPool() const { return pool; }
@@ -1053,11 +951,10 @@ public:
/// Add attribute with expression arguments.
ParsedAttr *addNew(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
- ArgsUnion *args, unsigned numArgs,
- ParsedAttr::Syntax syntax,
+ ArgsUnion *args, unsigned numArgs, ParsedAttr::Form form,
SourceLocation ellipsisLoc = SourceLocation()) {
ParsedAttr *attr = pool.create(attrName, attrRange, scopeName, scopeLoc,
- args, numArgs, syntax, ellipsisLoc);
+ args, numArgs, form, ellipsisLoc);
addAtEnd(attr);
return attr;
}
@@ -1069,11 +966,11 @@ public:
const AvailabilityChange &deprecated,
const AvailabilityChange &obsoleted,
SourceLocation unavailable, const Expr *MessageExpr,
- ParsedAttr::Syntax syntax, SourceLocation strict,
+ ParsedAttr::Form form, SourceLocation strict,
const Expr *ReplacementExpr) {
ParsedAttr *attr = pool.create(
attrName, attrRange, scopeName, scopeLoc, Param, introduced, deprecated,
- obsoleted, unavailable, MessageExpr, syntax, strict, ReplacementExpr);
+ obsoleted, unavailable, MessageExpr, form, strict, ReplacementExpr);
addAtEnd(attr);
return attr;
}
@@ -1082,9 +979,9 @@ public:
ParsedAttr *addNew(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierLoc *Param1, IdentifierLoc *Param2,
- IdentifierLoc *Param3, ParsedAttr::Syntax syntax) {
+ IdentifierLoc *Param3, ParsedAttr::Form form) {
ParsedAttr *attr = pool.create(attrName, attrRange, scopeName, scopeLoc,
- Param1, Param2, Param3, syntax);
+ Param1, Param2, Param3, form);
addAtEnd(attr);
return attr;
}
@@ -1095,10 +992,10 @@ public:
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierLoc *argumentKind,
ParsedType matchingCType, bool layoutCompatible,
- bool mustBeNull, ParsedAttr::Syntax syntax) {
+ bool mustBeNull, ParsedAttr::Form form) {
ParsedAttr *attr = pool.createTypeTagForDatatype(
attrName, attrRange, scopeName, scopeLoc, argumentKind, matchingCType,
- layoutCompatible, mustBeNull, syntax);
+ layoutCompatible, mustBeNull, form);
addAtEnd(attr);
return attr;
}
@@ -1106,10 +1003,11 @@ public:
/// Add an attribute with a single type argument.
ParsedAttr *addNewTypeAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
- ParsedType typeArg,
- ParsedAttr::Syntax syntaxUsed) {
- ParsedAttr *attr = pool.createTypeAttribute(attrName, attrRange, scopeName,
- scopeLoc, typeArg, syntaxUsed);
+ ParsedType typeArg, ParsedAttr::Form formUsed,
+ SourceLocation ellipsisLoc = SourceLocation()) {
+ ParsedAttr *attr =
+ pool.createTypeAttribute(attrName, attrRange, scopeName, scopeLoc,
+ typeArg, formUsed, ellipsisLoc);
addAtEnd(attr);
return attr;
}
@@ -1119,10 +1017,9 @@ public:
addNewPropertyAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierInfo *getterId, IdentifierInfo *setterId,
- ParsedAttr::Syntax syntaxUsed) {
- ParsedAttr *attr =
- pool.createPropertyAttribute(attrName, attrRange, scopeName, scopeLoc,
- getterId, setterId, syntaxUsed);
+ ParsedAttr::Form formUsed) {
+ ParsedAttr *attr = pool.createPropertyAttribute(
+ attrName, attrRange, scopeName, scopeLoc, getterId, setterId, formUsed);
addAtEnd(attr);
return attr;
}
diff --git a/contrib/llvm-project/clang/include/clang/Sema/RISCVIntrinsicManager.h b/contrib/llvm-project/clang/include/clang/Sema/RISCVIntrinsicManager.h
index 128858bb4301..2a3dd1e7c469 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/RISCVIntrinsicManager.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/RISCVIntrinsicManager.h
@@ -14,6 +14,8 @@
#ifndef LLVM_CLANG_SEMA_RISCVINTRINSICMANAGER_H
#define LLVM_CLANG_SEMA_RISCVINTRINSICMANAGER_H
+#include <cstdint>
+
namespace clang {
class LookupResult;
class IdentifierInfo;
@@ -22,8 +24,12 @@ class Preprocessor;
namespace sema {
class RISCVIntrinsicManager {
public:
+ enum class IntrinsicKind : uint8_t { RVV, SIFIVE_VECTOR };
+
virtual ~RISCVIntrinsicManager() = default;
+ virtual void InitIntrinsicList() = 0;
+
// Create RISC-V intrinsic and insert into symbol table and return true if
// found, otherwise return false.
virtual bool CreateIntrinsicIfFound(LookupResult &LR, IdentifierInfo *II,
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Scope.h b/contrib/llvm-project/clang/include/clang/Sema/Scope.h
index be5cdb62045b..9e81706cd2aa 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Scope.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Scope.h
@@ -145,6 +145,11 @@ public:
/// This is a scope of some OpenMP directive with
/// order clause which specifies concurrent
OpenMPOrderClauseScope = 0x4000000,
+ /// This is the scope for a lambda, after the lambda introducer.
+ /// Lambdas need two FunctionPrototypeScope scopes (because there is a
+ /// template scope in between), the outer scope does not increase the
+ /// depth of recursion.
+ LambdaScope = 0x8000000,
};
private:
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h b/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h
index 65fa18fbb290..26c0387dfc44 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h
@@ -172,6 +172,9 @@ public:
/// in the function. One of co_return, co_await, or co_yield.
unsigned char FirstCoroutineStmtKind : 2;
+ /// Whether we found an immediate-escalating expression.
+ bool FoundImmediateEscalatingExpression : 1;
+
/// First coroutine statement in the current function.
/// (ex co_return, co_await, co_yield)
SourceLocation FirstCoroutineStmtLoc;
@@ -388,7 +391,8 @@ public:
HasPotentialAvailabilityViolations(false), ObjCShouldCallSuper(false),
ObjCIsDesignatedInit(false), ObjCWarnForNoDesignatedInitChain(false),
ObjCIsSecondaryInit(false), ObjCWarnForNoInitDelegation(false),
- NeedsCoroutineSuspends(true), ErrorTrap(Diag) {}
+ NeedsCoroutineSuspends(true), FoundImmediateEscalatingExpression(false),
+ ErrorTrap(Diag) {}
virtual ~FunctionScopeInfo();
@@ -838,6 +842,11 @@ public:
/// The lambda's compiler-generated \c operator().
CXXMethodDecl *CallOperator = nullptr;
+ /// Indicate that we parsed the parameter list
+ /// at which point the mutability of the lambda
+ /// is known.
+ bool AfterParameterList = true;
+
/// Source range covering the lambda introducer [...].
SourceRange IntroducerRange;
@@ -849,8 +858,9 @@ public:
/// explicit captures.
unsigned NumExplicitCaptures = 0;
- /// Whether this is a mutable lambda.
- bool Mutable = false;
+ /// Whether this is a mutable lambda. Until the mutable keyword is parsed,
+ /// we assume the lambda is mutable.
+ bool Mutable = true;
/// Whether the (empty) parameter list is explicit.
bool ExplicitParams = false;
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Sema.h b/contrib/llvm-project/clang/include/clang/Sema/Sema.h
index e57955f16bdd..3418a37b3077 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Sema.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Sema.h
@@ -481,6 +481,12 @@ public:
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
+ struct PragmaPackInfo {
+ PragmaMsStackAction Action;
+ StringRef SlotLabel;
+ Token Alignment;
+ };
+
// #pragma pack and align.
class AlignPackInfo {
public:
@@ -704,6 +710,22 @@ public:
return result;
}
+ class FpPragmaStackSaveRAII {
+ public:
+ FpPragmaStackSaveRAII(Sema &S) : S(S), SavedStack(S.FpPragmaStack) {}
+ ~FpPragmaStackSaveRAII() { S.FpPragmaStack = std::move(SavedStack); }
+
+ private:
+ Sema &S;
+ PragmaStack<FPOptionsOverride> SavedStack;
+ };
+
+ void resetFPOptions(FPOptions FPO) {
+ CurFPFeatures = FPO;
+ FpPragmaStack.Stack.clear();
+ FpPragmaStack.CurrentValue = FPO.getChangesFrom(FPOptions(LangOpts));
+ }
+
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
@@ -786,8 +808,7 @@ public:
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
- using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
- llvm::SmallPtrSet<Expr *, 4>>;
+ using MaybeODRUseExprSet = llvm::SmallSetVector<Expr *, 4>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
@@ -800,6 +821,9 @@ public:
/// context.
unsigned FunctionScopesStart = 0;
+ /// Track the number of currently active capturing scopes.
+ unsigned CapturingFunctionScopes = 0;
+
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::ArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
@@ -834,7 +858,7 @@ public:
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
- typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
+ typedef llvm::SmallSetVector<const NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
@@ -929,7 +953,7 @@ public:
class DelayedDiagnostics;
class DelayedDiagnosticsState {
- sema::DelayedDiagnosticPool *SavedPool;
+ sema::DelayedDiagnosticPool *SavedPool = nullptr;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
@@ -940,10 +964,10 @@ public:
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
- sema::DelayedDiagnosticPool *CurPool;
+ sema::DelayedDiagnosticPool *CurPool = nullptr;
public:
- DelayedDiagnostics() : CurPool(nullptr) {}
+ DelayedDiagnostics() = default;
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
@@ -1039,7 +1063,7 @@ public:
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
- bool isConstantEvaluated() {
+ bool isConstantEvaluated() const {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
@@ -1054,12 +1078,19 @@ public:
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
+ auto *FD = dyn_cast<FunctionDecl>(DC);
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
- Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
- if (auto *FD = dyn_cast<FunctionDecl>(DC))
+ (FD && FD->isConsteval())
+ ? ExpressionEvaluationContext::ImmediateFunctionContext
+ : ExpressionEvaluationContext::PotentiallyEvaluated);
+ if (FD) {
FD->setWillHaveBody(true);
- else
+ S.ExprEvalContexts.back().InImmediateFunctionContext =
+ FD->isImmediateFunction();
+ S.ExprEvalContexts.back().InImmediateEscalatingFunctionContext =
+ S.getLangOpts().CPlusPlus20 && FD->isImmediateEscalating();
+ } else
assert(isa<ObjCMethodDecl>(DC));
}
@@ -1078,8 +1109,10 @@ public:
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
- if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
+ if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) {
FD->setWillHaveBody(false);
+ S.CheckImmediateEscalatingFunctionDefinition(FD, S.getCurFunction());
+ }
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
@@ -1135,10 +1168,6 @@ public:
/// standard library.
LazyDeclPtr StdAlignValT;
- /// The C++ "std::experimental" namespace, where the experimental parts
- /// of the standard library resides.
- NamespaceDecl *StdExperimentalNamespaceCache;
-
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
@@ -1146,17 +1175,10 @@ public:
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
- /// The namespace where coroutine components are defined. In standard,
- /// they are defined in std namespace. And in the previous implementation,
- /// they are defined in std::experimental namespace.
- NamespaceDecl *CoroTraitsNamespaceCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
- /// The MSVC "_GUID" struct, which is defined in MSVC header files.
- RecordDecl *MSVCGuidDecl;
-
/// The C++ "std::source_location::__impl" struct, defined in
/// \<source_location>.
RecordDecl *StdSourceLocationImplDecl;
@@ -1246,7 +1268,7 @@ public:
/// In addition of being constant evaluated, the current expression
/// occurs in an immediate function context - either a consteval function
- /// or a consteval if function.
+ /// or a consteval if statement.
ImmediateFunctionContext,
/// The current expression is potentially evaluated at run time,
@@ -1327,6 +1349,7 @@ public:
// an immediate function context, so they need to be tracked independently.
bool InDiscardedStatement;
bool InImmediateFunctionContext;
+ bool InImmediateEscalatingFunctionContext;
bool IsCurrentlyCheckingDefaultArgumentOrInitializer = false;
@@ -1355,7 +1378,8 @@ public:
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext),
- InDiscardedStatement(false), InImmediateFunctionContext(false) {}
+ InDiscardedStatement(false), InImmediateFunctionContext(false),
+ InImmediateEscalatingFunctionContext(false) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
@@ -1372,7 +1396,7 @@ public:
return Context == ExpressionEvaluationContext::ImmediateFunctionContext ||
(Context == ExpressionEvaluationContext::DiscardedStatement &&
InImmediateFunctionContext) ||
- // C++2b [expr.const]p14:
+ // C++23 [expr.const]p14:
// An expression or conversion is in an immediate function
// context if it is potentially evaluated and either:
// * its innermost enclosing non-block scope is a function
@@ -1491,7 +1515,7 @@ public:
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
- bool isExternalWithNoLinkageType(ValueDecl *VD);
+ bool isExternalWithNoLinkageType(const ValueDecl *VD) const;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
@@ -1623,6 +1647,9 @@ public:
/// Indicate RISC-V vector builtin functions enabled or not.
bool DeclareRISCVVBuiltins = false;
+ /// Indicate RISC-V SiFive vector builtin functions enabled or not.
+ bool DeclareRISCVSiFiveVectorBuiltins = false;
+
private:
std::unique_ptr<sema::RISCVIntrinsicManager> RVIntrinsicManager;
@@ -1780,9 +1807,15 @@ public:
};
SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
- FunctionDecl *Fn, Sema &S);
+ const FunctionDecl *Fn, Sema &S);
SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
+
+ // The copy and move assignment operator is defined as deleted pending
+ // further motivation.
+ SemaDiagnosticBuilder &operator=(const SemaDiagnosticBuilder &) = delete;
+ SemaDiagnosticBuilder &operator=(SemaDiagnosticBuilder &&) = delete;
+
~SemaDiagnosticBuilder();
bool isImmediate() const { return ImmediateDiag.has_value(); }
@@ -1855,7 +1888,7 @@ public:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
- FunctionDecl *Fn;
+ const FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
@@ -2275,17 +2308,28 @@ private:
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
- bool IsPartition = false;
- bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
- /// The global module fragment of the current translation unit.
- clang::Module *GlobalModuleFragment = nullptr;
- /// The modules we imported directly.
- llvm::SmallPtrSet<clang::Module *, 8> DirectModuleImports;
+ /// For an interface unit, this is the implicitly imported interface unit.
+ clang::Module *ThePrimaryInterface = nullptr;
+
+ /// The explicit global module fragment of the current translation unit.
+ /// The explicit Global Module Fragment, as specified in C++
+ /// [module.global.frag].
+ clang::Module *TheGlobalModuleFragment = nullptr;
+
+ /// The implicit global module fragments of the current translation unit.
+ /// We would only create at most two implicit global module fragments to
+ /// avoid performance penalties when there are many language linkage
+ /// exports.
+ ///
+ /// The contents in the implicit global module fragment can't be discarded
+ /// no matter if it is exported or not.
+ clang::Module *TheImplicitGlobalModuleFragment = nullptr;
+ clang::Module *TheExportedImplicitGlobalModuleFragment = nullptr;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
@@ -2301,11 +2345,17 @@ private:
return getCurrentModule() ? getCurrentModule()->isModulePurview() : false;
}
- /// Enter the scope of the global module.
- Module *PushGlobalModuleFragment(SourceLocation BeginLoc, bool IsImplicit);
- /// Leave the scope of the global module.
+ /// Enter the scope of the explicit global module fragment.
+ Module *PushGlobalModuleFragment(SourceLocation BeginLoc);
+ /// Leave the scope of the explicit global module fragment.
void PopGlobalModuleFragment();
+ /// Enter the scope of an implicit global module fragment.
+ Module *PushImplicitGlobalModuleFragment(SourceLocation BeginLoc,
+ bool IsExported);
+ /// Leave the scope of an implicit global module fragment.
+ void PopImplicitGlobalModuleFragment();
+
VisibleModuleSet VisibleModules;
/// Cache for module units which is usable for current module.
@@ -2337,13 +2387,6 @@ public:
return Entity->getOwningModule();
}
- bool isModuleDirectlyImported(const Module *M) {
- return DirectModuleImports.contains(M);
- }
-
- // Determine whether the module M belongs to the current TU.
- bool isModuleUnitOfCurrentTU(const Module *M) const;
-
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
@@ -2392,8 +2435,8 @@ public:
bool hasReachableDeclarationSlow(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
- bool hasVisibleMergedDefinition(NamedDecl *Def);
- bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
+ bool hasVisibleMergedDefinition(const NamedDecl *Def);
+ bool hasMergedDefinitionInCurrentModule(const NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
@@ -2577,13 +2620,11 @@ public:
//
struct SkipBodyInfo {
- SkipBodyInfo()
- : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
- New(nullptr) {}
- bool ShouldSkip;
- bool CheckSameAsPrevious;
- NamedDecl *Previous;
- NamedDecl *New;
+ SkipBodyInfo() = default;
+ bool ShouldSkip = false;
+ bool CheckSameAsPrevious = false;
+ NamedDecl *Previous = nullptr;
+ NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
@@ -3231,9 +3272,9 @@ public:
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
- void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
+ void diagnoseMissingImport(SourceLocation Loc, const NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
- void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
+ void diagnoseMissingImport(SourceLocation Loc, const NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
@@ -3544,13 +3585,13 @@ public:
void ActOnExitFunctionContext();
/// If \p AllowLambda is true, treat lambda as function.
- DeclContext *getFunctionLevelDeclContext(bool AllowLambda = false);
+ DeclContext *getFunctionLevelDeclContext(bool AllowLambda = false) const;
/// Returns a pointer to the innermost enclosing function, or nullptr if the
/// current context is not inside a function. If \p AllowLambda is true,
/// this can return the call operator of an enclosing lambda, otherwise
/// lambdas are skipped when looking for an enclosing function.
- FunctionDecl *getCurFunctionDecl(bool AllowLambda = false);
+ FunctionDecl *getCurFunctionDecl(bool AllowLambda = false) const;
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
@@ -3560,7 +3601,7 @@ public:
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
- NamedDecl *getCurFunctionOrMethodDecl();
+ NamedDecl *getCurFunctionOrMethodDecl() const;
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
@@ -3573,7 +3614,7 @@ public:
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
- bool AllowInlineNamespace = false);
+ bool AllowInlineNamespace = false) const;
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
@@ -3851,8 +3892,17 @@ public:
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier.
- CCEK_Noexcept ///< Condition in a noexcept(bool) specifier.
+ CCEK_Noexcept, ///< Condition in a noexcept(bool) specifier.
+ CCEK_StaticAssertMessageSize, ///< Call to size() in a static assert
+ ///< message.
+ CCEK_StaticAssertMessageData, ///< Call to data() in a static assert
+ ///< message.
};
+
+ ExprResult BuildConvertedConstantExpression(Expr *From, QualType T,
+ CCEKind CCE,
+ NamedDecl *Dest = nullptr);
+
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
@@ -3976,7 +4026,8 @@ public:
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = std::nullopt,
- OverloadCandidateParamOrder PO = {});
+ OverloadCandidateParamOrder PO = {},
+ bool AggregateCandidateDeduction = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
@@ -4017,7 +4068,8 @@ public:
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
- OverloadCandidateParamOrder PO = {});
+ OverloadCandidateParamOrder PO = {},
+ bool AggregateCandidateDeduction = false);
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
@@ -4065,7 +4117,7 @@ public:
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
- NamedDecl *Found, FunctionDecl *Fn,
+ const NamedDecl *Found, const FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
@@ -4135,10 +4187,9 @@ public:
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
- FunctionDecl *
- ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
- bool Complain = false,
- DeclAccessPair *Found = nullptr);
+ FunctionDecl *ResolveSingleFunctionTemplateSpecialization(
+ OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr,
+ TemplateSpecCandidateSet *FailedTSC = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false,
@@ -4345,7 +4396,7 @@ public:
ForExternalRedeclaration
};
- RedeclarationKind forRedeclarationInCurContext() {
+ RedeclarationKind forRedeclarationInCurContext() const {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
@@ -4499,7 +4550,7 @@ public:
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
- FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
+ FunctionEmissionStatus getEmissionStatus(const FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
@@ -4711,6 +4762,11 @@ public:
void CheckAlignasUnderalignment(Decl *D);
+ bool CheckNoInlineAttr(const Stmt *OrigSt, const Stmt *CurSt,
+ const AttributeCommonInfo &A);
+ bool CheckAlwaysInlineAttr(const Stmt *OrigSt, const Stmt *CurSt,
+ const AttributeCommonInfo &A);
+
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
@@ -5547,7 +5603,7 @@ public:
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
- bool DiagnoseDependentMemberLookup(LookupResult &R);
+ bool DiagnoseDependentMemberLookup(const LookupResult &R);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
@@ -5671,16 +5727,29 @@ public:
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
+ ExprResult ActOnUnevaluatedStringLiteral(ArrayRef<Token> StringToks);
+
+ /// ControllingExprOrType is either an opaque pointer coming out of a
+ /// ParsedType or an Expr *. FIXME: it'd be better to split this interface
+ /// into two so we don't take a void *, but that's awkward because one of
+ /// the operands is either a ParsedType or an Expr *, which doesn't lend
+ /// itself to generic code very well.
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
- Expr *ControllingExpr,
+ bool PredicateIsExpr,
+ void *ControllingExprOrType,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
+ /// ControllingExprOrType is either a TypeSourceInfo * or an Expr *. FIXME:
+ /// it'd be better to split this interface into two so we don't take a
+ /// void *, but see the FIXME on ActOnGenericSelectionExpr as to why that
+ /// isn't a trivial change.
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
- Expr *ControllingExpr,
+ bool PredicateIsExpr,
+ void *ControllingExprOrType,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
@@ -5697,6 +5766,11 @@ public:
bool CheckTypeTraitArity(unsigned Arity, SourceLocation Loc, size_t N);
+ bool ActOnAlignasTypeArgument(StringRef KWName, ParsedType Ty,
+ SourceLocation OpLoc, SourceRange R);
+ bool CheckAlignasTypeArgument(StringRef KWName, TypeSourceInfo *TInfo,
+ SourceLocation OpLoc, SourceRange R);
+
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
@@ -5715,7 +5789,8 @@ public:
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
- UnaryExprOrTypeTrait ExprKind);
+ UnaryExprOrTypeTrait ExprKind,
+ StringRef KWName);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
@@ -5980,8 +6055,8 @@ public:
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
- // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
- // __builtin_COLUMN(), __builtin_source_location()
+ // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FUNCSIG(),
+ // __builtin_FILE(), __builtin_COLUMN(), __builtin_source_location()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
@@ -6089,9 +6164,6 @@ public:
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
- NamespaceDecl *lookupStdExperimentalNamespace();
- NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; }
-
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
@@ -6510,6 +6582,13 @@ public:
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
+ bool CheckImmediateEscalatingFunctionDefinition(
+ FunctionDecl *FD, const sema::FunctionScopeInfo *FSI);
+
+ void MarkExpressionAsImmediateEscalating(Expr *E);
+
+ void DiagnoseImmediateEscalatingReason(FunctionDecl *FD);
+
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
QualType DeclInitType, MultiExprArg ArgsPtr,
SourceLocation Loc,
@@ -6946,9 +7025,6 @@ public:
}
};
- bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
- NestedNameSpecInfo &IdInfo);
-
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
@@ -7089,30 +7165,27 @@ public:
unsigned LambdaDependencyKind,
LambdaCaptureDefault CaptureDefault);
- /// Start the definition of a lambda expression.
- CXXMethodDecl *
- startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange,
- TypeSourceInfo *MethodType, SourceLocation EndLoc,
- ArrayRef<ParmVarDecl *> Params,
- ConstexprSpecKind ConstexprKind, StorageClass SC,
- Expr *TrailingRequiresClause);
-
/// Number lambda for linkage purposes if necessary.
- void handleLambdaNumbering(
- CXXRecordDecl *Class, CXXMethodDecl *Method,
- std::optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling =
- std::nullopt);
+ void handleLambdaNumbering(CXXRecordDecl *Class, CXXMethodDecl *Method,
+ std::optional<CXXRecordDecl::LambdaNumbering>
+ NumberingOverride = std::nullopt);
/// Endow the lambda scope info with the relevant properties.
- void buildLambdaScope(sema::LambdaScopeInfo *LSI,
- CXXMethodDecl *CallOperator,
+ void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
- SourceLocation CaptureDefaultLoc,
- bool ExplicitParams,
- bool ExplicitResultType,
+ SourceLocation CaptureDefaultLoc, bool ExplicitParams,
bool Mutable);
+ CXXMethodDecl *CreateLambdaCallOperator(SourceRange IntroducerRange,
+ CXXRecordDecl *Class);
+ void CompleteLambdaCallOperator(
+ CXXMethodDecl *Method, SourceLocation LambdaLoc,
+ SourceLocation CallOperatorLoc, Expr *TrailingRequiresClause,
+ TypeSourceInfo *MethodTyInfo, ConstexprSpecKind ConstexprKind,
+ StorageClass SC, ArrayRef<ParmVarDecl *> Params,
+ bool HasExplicitResultType);
+
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
@@ -7133,42 +7206,49 @@ public:
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
- VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
- QualType InitCaptureType,
- SourceLocation EllipsisLoc,
- IdentifierInfo *Id,
- unsigned InitStyle, Expr *Init);
+ VarDecl *createLambdaInitCaptureVarDecl(
+ SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc,
+ IdentifierInfo *Id, unsigned InitStyle, Expr *Init, DeclContext *DeclCtx);
/// Add an init-capture to a lambda scope.
- void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var,
- bool isReferenceType);
+ void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var, bool ByRef);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
- /// \brief This is called after parsing the explicit template parameter list
+ /// Deduce a block or lambda's return type based on the return
+ /// statements present in the body.
+ void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
+
+ /// Once the Lambdas capture are known, we can start to create the closure,
+ /// call operator method, and keep track of the captures.
+ /// We do the capture lookup here, but they are not actually captured until
+ /// after we know what the qualifiers of the call operator are.
+ void ActOnLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro,
+ Scope *CurContext);
+
+ /// This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
- void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
+ void ActOnLambdaExplicitTemplateParameterList(LambdaIntroducer &Intro,
+ SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc,
ExprResult RequiresClause);
- /// Introduce the lambda parameters into scope.
- void addLambdaParameters(
- ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
- CXXMethodDecl *CallOperator, Scope *CurScope);
+ void ActOnLambdaClosureQualifiers(LambdaIntroducer &Intro,
+ SourceLocation MutableLoc);
- /// Deduce a block or lambda's return type based on the return
- /// statements present in the body.
- void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
+ void ActOnLambdaClosureParameters(
+ Scope *LambdaScope,
+ MutableArrayRef<DeclaratorChunk::ParamInfo> ParamInfo);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
- Declarator &ParamInfo, Scope *CurScope);
+ Declarator &ParamInfo, const DeclSpec &DS);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
@@ -7263,6 +7343,13 @@ private:
LocalInstantiationScope &Scope,
const MultiLevelTemplateArgumentList &TemplateArgs);
+ /// Introduce the instantiated captures of the lambda into the local
+ /// instantiation scope.
+ bool addInstantiatedCapturesToScope(
+ FunctionDecl *Function, const FunctionDecl *PatternDecl,
+ LocalInstantiationScope &Scope,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
/// used by SetupConstraintCheckingTemplateArgumentsAndScope to recursively(in
/// the case of lambdas) set up the LocalInstantiationScope of the current
/// function.
@@ -7722,15 +7809,16 @@ public:
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
+ bool EvaluateStaticAssertMessageAsString(Expr *Message, std::string &Result,
+ ASTContext &Ctx,
+ bool ErrorOnInvalidMessage);
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
- Expr *AssertExpr,
- StringLiteral *AssertMessageExpr,
- SourceLocation RParenLoc,
- bool Failed);
+ Expr *AssertExpr, Expr *AssertMessageExpr,
+ SourceLocation RParenLoc, bool Failed);
void DiagnoseStaticAssertDetails(const Expr *E);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
@@ -7750,7 +7838,7 @@ public:
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
- void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
+ bool CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
@@ -8044,7 +8132,7 @@ public:
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
- SourceLocation NameLoc,
+ SourceLocation NameLoc, CXXScopeSpec &SS,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
@@ -8074,6 +8162,8 @@ public:
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
+ bool CheckTypeConstraint(TemplateIdAnnotation *TypeConstraint);
+
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
@@ -8092,7 +8182,8 @@ public:
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
- NonTypeTemplateParmDecl *ConstrainedParameter,
+ NonTypeTemplateParmDecl *NewConstrainedParm,
+ NonTypeTemplateParmDecl *OrigConstrainedParm,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
@@ -8426,24 +8517,31 @@ public:
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
- TPL_TemplateTemplateArgumentMatch
+ TPL_TemplateTemplateArgumentMatch,
+
+ /// We are determining whether the template-parameters are equivalent
+ /// according to C++ [temp.over.link]/6. This comparison does not consider
+ /// constraints.
+ ///
+ /// \code
+ /// template<C1 T> void f(T);
+ /// template<C2 T> void f(T);
+ /// \endcode
+ TPL_TemplateParamsEquivalent,
};
bool TemplateParameterListsAreEqual(
const NamedDecl *NewInstFrom, TemplateParameterList *New,
const NamedDecl *OldInstFrom, TemplateParameterList *Old, bool Complain,
TemplateParameterListEqualKind Kind,
- SourceLocation TemplateArgLoc = SourceLocation(),
- bool PartialOrdering = false);
+ SourceLocation TemplateArgLoc = SourceLocation());
bool TemplateParameterListsAreEqual(
TemplateParameterList *New, TemplateParameterList *Old, bool Complain,
TemplateParameterListEqualKind Kind,
- SourceLocation TemplateArgLoc = SourceLocation(),
- bool PartialOrdering = false) {
+ SourceLocation TemplateArgLoc = SourceLocation()) {
return TemplateParameterListsAreEqual(nullptr, New, nullptr, Old, Complain,
- Kind, TemplateArgLoc,
- PartialOrdering);
+ Kind, TemplateArgLoc);
}
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
@@ -9049,7 +9147,7 @@ public:
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
- bool PartialOverloading,
+ bool PartialOverloading, bool AggregateDeductionCandidate,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
@@ -9092,23 +9190,33 @@ public:
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
- TemplateDeductionResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *Initializer,
- QualType &Result,
- sema::TemplateDeductionInfo &Info,
- bool DependentDeduction = false,
- bool IgnoreConstraints = false);
+ TemplateDeductionResult
+ DeduceAutoType(TypeLoc AutoTypeLoc, Expr *Initializer, QualType &Result,
+ sema::TemplateDeductionInfo &Info,
+ bool DependentDeduction = false,
+ bool IgnoreConstraints = false,
+ TemplateSpecCandidateSet *FailedTSC = nullptr);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
+ bool CheckIfFunctionSpecializationIsImmediate(FunctionDecl *FD,
+ SourceLocation Loc);
+
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
+ FunctionTemplateDecl *DeclareImplicitDeductionGuideFromInitList(
+ TemplateDecl *Template, MutableArrayRef<QualType> ParamTypes,
+ SourceLocation Loc);
+ llvm::DenseMap<unsigned, CXXDeductionGuideDecl *>
+ AggregateDeductionCandidates;
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
- const InitializationKind &Kind, MultiExprArg Init);
+ const InitializationKind &Kind, MultiExprArg Init,
+ ParenListExpr *PL = nullptr);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
@@ -9214,6 +9322,9 @@ public:
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
+ /// We are substituting into a lambda expression.
+ LambdaExpressionSubstitution,
+
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
@@ -9285,7 +9396,10 @@ public:
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
- Memoization
+ Memoization,
+
+ /// We are building deduction guides for a class.
+ BuildingDeductionGuides,
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
@@ -9598,6 +9712,13 @@ public:
const RequiresExpr *E,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
+
+ struct BuildingDeductionGuidesTag {};
+ /// \brief Note that we are building deduction guides.
+ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ TemplateDecl *Entity, BuildingDeductionGuidesTag,
+ SourceRange InstantiationRange = SourceRange());
+
/// Note that we have finished instantiating this template.
void Clear();
@@ -9823,14 +9944,21 @@ public:
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
+ SmallVector<SmallVector<VTableUse, 16>, 8> SavedVTableUses;
+ SmallVector<std::deque<PendingImplicitInstantiation>, 8>
+ SavedPendingInstantiations;
+
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
- SavedPendingInstantiations.swap(S.PendingInstantiations);
- SavedVTableUses.swap(S.VTableUses);
+ S.SavedPendingInstantiations.emplace_back();
+ S.SavedPendingInstantiations.back().swap(S.PendingInstantiations);
+
+ S.SavedVTableUses.emplace_back();
+ S.SavedVTableUses.back().swap(S.VTableUses);
}
void perform() {
@@ -9846,26 +9974,28 @@ public:
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
- S.VTableUses.swap(SavedVTableUses);
+ S.VTableUses.swap(S.SavedVTableUses.back());
+ S.SavedVTableUses.pop_back();
// Restore the set of pending implicit instantiations.
if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
- S.PendingInstantiations.swap(SavedPendingInstantiations);
+ S.PendingInstantiations.swap(S.SavedPendingInstantiations.back());
+ S.SavedPendingInstantiations.pop_back();
} else {
// Template instantiations in the PCH may be delayed until the TU.
- S.PendingInstantiations.swap(SavedPendingInstantiations);
- S.PendingInstantiations.insert(S.PendingInstantiations.end(),
- SavedPendingInstantiations.begin(),
- SavedPendingInstantiations.end());
+ S.PendingInstantiations.swap(S.SavedPendingInstantiations.back());
+ S.PendingInstantiations.insert(
+ S.PendingInstantiations.end(),
+ S.SavedPendingInstantiations.back().begin(),
+ S.SavedPendingInstantiations.back().end());
+ S.SavedPendingInstantiations.pop_back();
}
}
private:
Sema &S;
- SmallVector<VTableUse, 16> SavedVTableUses;
- std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
@@ -10067,6 +10197,7 @@ public:
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
+ void updateAttrsForLateParsedTemplate(const Decl *Pattern, Decl *Inst);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
@@ -10662,6 +10793,9 @@ public:
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
+ /// Called on #pragma clang __debug dump E
+ void ActOnPragmaDump(Expr *E);
+
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
@@ -10883,7 +11017,7 @@ public:
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
- // C++ Coroutines TS
+ // C++ Coroutines
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
@@ -10908,8 +11042,7 @@ public:
/// Lookup 'coroutine_traits' in std namespace and std::experimental
/// namespace. The namespace found is recorded in Namespace.
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
- SourceLocation FuncLoc,
- NamespaceDecl *&Namespace);
+ SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() shall not be
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
@@ -10950,10 +11083,7 @@ private:
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
- ExprResult
- VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
- bool StrictlyPositive = true,
- bool SuppressExprDiags = false);
+
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
@@ -11039,6 +11169,11 @@ public:
return !OMPDeclareVariantScopes.empty();
}
+ ExprResult
+ VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
+ bool StrictlyPositive = true,
+ bool SuppressExprDiags = false);
+
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
@@ -11261,6 +11396,11 @@ public:
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
+
+ /// Adds OMPDeclareTargetDeclAttr to referenced variables in declare target
+ /// directive.
+ void ActOnOpenMPDeclareTargetInitializer(Decl *D);
+
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
@@ -12194,6 +12334,13 @@ public:
SourceLocation LParenLoc,
SourceLocation EndLoc);
+ /// Called on well-formed 'doacross' clause.
+ OMPClause *
+ ActOnOpenMPDoacrossClause(OpenMPDoacrossClauseModifier DepType,
+ SourceLocation DepLoc, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VarList, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc);
+
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
@@ -12547,6 +12694,8 @@ public:
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
+ void diagnoseLogicalInsteadOfBitwise(Expr *Op1, Expr *Op2, SourceLocation Loc,
+ BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
@@ -12630,13 +12779,13 @@ public:
SourceLocation Loc, bool IsCompAssign);
bool isValidSveBitcast(QualType srcType, QualType destType);
+ bool isValidRVVBitcast(QualType srcType, QualType destType);
bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
bool areVectorTypesSameSize(QualType srcType, QualType destType);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
- bool areSameVectorElemTypes(QualType srcType, QualType destType);
bool anyAltivecTypes(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
@@ -12789,20 +12938,22 @@ public:
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
- bool HasKnownValue;
- bool KnownValue;
+ std::optional<bool> KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
- : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
- HasKnownValue(IsConstexpr && Condition.get() &&
- !Condition.get()->isValueDependent()),
- KnownValue(HasKnownValue &&
- !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
+ : ConditionVar(ConditionVar), Condition(Condition), Invalid(false) {
+ if (IsConstexpr && Condition.get()) {
+ if (std::optional<llvm::APSInt> Val =
+ Condition.get()->getIntegerConstantExpr(S.Context)) {
+ KnownValue = !!(*Val);
+ }
+ }
+ }
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
- HasKnownValue(false), KnownValue(false) {}
+ KnownValue(std::nullopt) {}
public:
ConditionResult() : ConditionResult(false) {}
@@ -12811,11 +12962,7 @@ public:
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
- std::optional<bool> getKnownValue() const {
- if (!HasKnownValue)
- return std::nullopt;
- return KnownValue;
- }
+ std::optional<bool> getKnownValue() const { return KnownValue; }
};
static ConditionResult ConditionError() { return ConditionResult(true); }
@@ -12872,13 +13019,6 @@ public:
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
- /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
- /// the specified width and sign. If an overflow occurs, detect it and emit
- /// the specified diagnostic.
- void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
- unsigned NewWidth, bool NewSign,
- SourceLocation Loc, unsigned DiagID);
-
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
@@ -12945,14 +13085,14 @@ public:
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
- llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
+ llvm::DenseMap<CanonicalDeclPtr<const FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
- CanonicalDeclPtr<FunctionDecl> FD;
+ CanonicalDeclPtr<const FunctionDecl> FD;
SourceLocation Loc;
};
@@ -12966,7 +13106,7 @@ public:
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
- llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
+ llvm::DenseMap</* Callee = */ CanonicalDeclPtr<const FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
@@ -13011,8 +13151,9 @@ public:
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
- SemaDiagnosticBuilder
- diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD);
+ SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc,
+ unsigned DiagID,
+ const FunctionDecl *FD);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
@@ -13028,13 +13169,14 @@ public:
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
- unsigned DiagID, FunctionDecl *FD);
+ unsigned DiagID,
+ const FunctionDecl *FD);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
- FunctionDecl *FD = nullptr);
+ const FunctionDecl *FD = nullptr);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
const PartialDiagnostic &PD,
- FunctionDecl *FD = nullptr) {
+ const FunctionDecl *FD = nullptr) {
return targetDiag(Loc, PD.getDiagID(), FD) << PD;
}
@@ -13349,6 +13491,7 @@ public:
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
+ void CodeCompleteObjCClassForwardDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
@@ -13491,8 +13634,14 @@ private:
bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
+ void checkRVVTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D);
bool CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall);
+ bool CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
@@ -13545,9 +13694,12 @@ private:
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
bool SemaBuiltinElementwiseMath(CallExpr *TheCall);
+ bool SemaBuiltinElementwiseTernaryMath(CallExpr *TheCall);
bool PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall);
bool PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall);
+ bool SemaBuiltinNonDeterministicValue(CallExpr *TheCall);
+
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
@@ -13556,6 +13708,16 @@ private:
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
+ // WebAssembly builtin handling.
+ bool BuiltinWasmRefNullExtern(CallExpr *TheCall);
+ bool BuiltinWasmRefNullFunc(CallExpr *TheCall);
+ bool BuiltinWasmTableGet(CallExpr *TheCall);
+ bool BuiltinWasmTableSet(CallExpr *TheCall);
+ bool BuiltinWasmTableSize(CallExpr *TheCall);
+ bool BuiltinWasmTableGrow(CallExpr *TheCall);
+ bool BuiltinWasmTableFill(CallExpr *TheCall);
+ bool BuiltinWasmTableCopy(CallExpr *TheCall);
+
public:
enum FormatStringType {
FST_Scanf,
@@ -13618,7 +13780,7 @@ public:
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
- void CheckForIntOverflow(Expr *E);
+ void CheckForIntOverflow(const Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
@@ -13696,7 +13858,6 @@ private:
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
- mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
@@ -13745,7 +13906,6 @@ public:
}
IdentifierInfo *getSuperIdentifier() const;
- IdentifierInfo *getFloat128Identifier() const;
ObjCContainerDecl *getObjCDeclContext() const;
@@ -13888,74 +14048,11 @@ public:
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
- /// Check whether we're allowed to call Callee from the current context.
- ///
- /// - If the call is never allowed in a semantically-correct program
- /// emits an error and returns false.
- ///
- /// - If the call is allowed in semantically-correct programs, but only if
- /// it's never codegen'ed, creates a deferred diagnostic to be emitted if
- /// and when the caller is codegen'ed, and returns true.
- ///
- /// - Otherwise, returns true without emitting any diagnostics.
- ///
- /// Adds Callee to DeviceCallGraph if we don't know if its caller will be
- /// codegen'ed yet.
- bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
void deepTypeCheckForSYCLDevice(SourceLocation UsedAt,
llvm::DenseSet<QualType> Visited,
ValueDecl *DeclToCheck);
};
-/// RAII object that enters a new expression evaluation context.
-class EnterExpressionEvaluationContext {
- Sema &Actions;
- bool Entered = true;
-
-public:
- EnterExpressionEvaluationContext(
- Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
- Decl *LambdaContextDecl = nullptr,
- Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
- Sema::ExpressionEvaluationContextRecord::EK_Other,
- bool ShouldEnter = true)
- : Actions(Actions), Entered(ShouldEnter) {
- if (Entered)
- Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
- ExprContext);
- }
- EnterExpressionEvaluationContext(
- Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
- Sema::ReuseLambdaContextDecl_t,
- Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
- Sema::ExpressionEvaluationContextRecord::EK_Other)
- : Actions(Actions) {
- Actions.PushExpressionEvaluationContext(
- NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
- }
-
- enum InitListTag { InitList };
- EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
- bool ShouldEnter = true)
- : Actions(Actions), Entered(false) {
- // In C++11 onwards, narrowing checks are performed on the contents of
- // braced-init-lists, even when they occur within unevaluated operands.
- // Therefore we still need to instantiate constexpr functions used in such
- // a context.
- if (ShouldEnter && Actions.isUnevaluatedContext() &&
- Actions.getLangOpts().CPlusPlus11) {
- Actions.PushExpressionEvaluationContext(
- Sema::ExpressionEvaluationContext::UnevaluatedList);
- Entered = true;
- }
- }
-
- ~EnterExpressionEvaluationContext() {
- if (Entered)
- Actions.PopExpressionEvaluationContext();
- }
-};
-
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
@@ -13966,6 +14063,8 @@ struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
+ /// Floating-point options in the point of definition.
+ FPOptions FPO;
};
template <>
@@ -13983,7 +14082,8 @@ namespace llvm {
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
- using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
+ using FDBaseInfo =
+ DenseMapInfo<clang::CanonicalDeclPtr<const clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Template.h b/contrib/llvm-project/clang/include/clang/Sema/Template.h
index 908389438389..1de2cc6917b4 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Template.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Template.h
@@ -232,9 +232,21 @@ enum class TemplateSubstitutionKind : char {
/// Replaces the current 'innermost' level with the provided argument list.
/// This is useful for type deduction cases where we need to get the entire
/// list from the AST, but then add the deduced innermost list.
- void replaceInnermostTemplateArguments(ArgList Args) {
- assert(TemplateArgumentLists.size() > 0 && "Replacing in an empty list?");
- TemplateArgumentLists[0].Args = Args;
+ void replaceInnermostTemplateArguments(Decl *AssociatedDecl, ArgList Args) {
+ assert((!TemplateArgumentLists.empty() || NumRetainedOuterLevels) &&
+ "Replacing in an empty list?");
+
+ if (!TemplateArgumentLists.empty()) {
+ assert((TemplateArgumentLists[0].AssociatedDeclAndFinal.getPointer() ||
+ TemplateArgumentLists[0].AssociatedDeclAndFinal.getPointer() ==
+ AssociatedDecl) &&
+ "Trying to change incorrect declaration?");
+ TemplateArgumentLists[0].Args = Args;
+ } else {
+ --NumRetainedOuterLevels;
+ TemplateArgumentLists.push_back(
+ {{AssociatedDecl, /*Final=*/false}, Args});
+ }
}
/// Add an outermost level that we are not substituting. We have no
@@ -261,6 +273,23 @@ enum class TemplateSubstitutionKind : char {
}
ArgListsIterator end() { return TemplateArgumentLists.end(); }
ConstArgListsIterator end() const { return TemplateArgumentLists.end(); }
+
+ LLVM_DUMP_METHOD void dump() const {
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ PrintingPolicy PP(LO);
+ llvm::errs() << "NumRetainedOuterLevels: " << NumRetainedOuterLevels
+ << "\n";
+ for (unsigned Depth = NumRetainedOuterLevels; Depth < getNumLevels();
+ ++Depth) {
+ llvm::errs() << Depth << ": ";
+ printTemplateArgumentList(
+ llvm::errs(),
+ TemplateArgumentLists[getNumLevels() - Depth - 1].Args, PP);
+ llvm::errs() << "\n";
+ }
+ }
};
/// The context in which partial ordering of function templates occurs.
diff --git a/contrib/llvm-project/clang/include/clang/Sema/TemplateDeduction.h b/contrib/llvm-project/clang/include/clang/Sema/TemplateDeduction.h
index 9d860a8949d7..85691c66a044 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/TemplateDeduction.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/TemplateDeduction.h
@@ -234,6 +234,13 @@ public:
/// different argument type from its substituted parameter type.
unsigned CallArgIndex = 0;
+ // C++20 [over.match.class.deduct]p5.2:
+ // During template argument deduction for the aggregate deduction
+ // candidate, the number of elements in a trailing parameter pack is only
+ // deduced from the number of remaining function arguments if it is not
+ // otherwise deduced.
+ bool AggregateDeductionCandidateHasMismatchedArity = false;
+
/// Information on packs that we're currently expanding.
///
/// FIXME: This should be kept internal to SemaTemplateDeduction.
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h
index 9ba94da03720..2ae9e09998c4 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h
@@ -41,7 +41,7 @@ namespace serialization {
/// Version 4 of AST files also requires that the version control branch and
/// revision match exactly, since there is no backward compatibility of
/// AST files at this time.
-const unsigned VERSION_MAJOR = 25;
+const unsigned VERSION_MAJOR = 27;
/// AST file minor version number supported by this version of
/// Clang.
@@ -696,8 +696,7 @@ enum ASTRecordTypes {
/// Record code for \#pragma float_control options.
FLOAT_CONTROL_PRAGMA_OPTIONS = 65,
- /// Record code for included files.
- PP_INCLUDED_FILES = 66,
+ /// ID 66 used to be the list of included files.
/// Record code for an unterminated \#pragma clang assume_nonnull begin
/// recorded in a preamble.
@@ -1096,6 +1095,11 @@ enum PredefinedTypeIDs {
// \brief RISC-V V types with auto numeration
#define RVV_TYPE(Name, Id, SingletonId) PREDEF_TYPE_##Id##_ID,
#include "clang/Basic/RISCVVTypes.def"
+// \brief WebAssembly reference types with auto numeration
+#define WASM_TYPE(Name, Id, SingletonId) PREDEF_TYPE_##Id##_ID,
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+ // Sentinel value. Considered a predefined type but not useable as one.
+ PREDEF_TYPE_LAST_ID
};
/// The number of predefined type IDs that are reserved for
@@ -1103,7 +1107,13 @@ enum PredefinedTypeIDs {
///
/// Type IDs for non-predefined types will start at
/// NUM_PREDEF_TYPE_IDs.
-const unsigned NUM_PREDEF_TYPE_IDS = 300;
+const unsigned NUM_PREDEF_TYPE_IDS = 500;
+
+// Ensure we do not overrun the predefined types we reserved
+// in the enum PredefinedTypeIDs above.
+static_assert(PREDEF_TYPE_LAST_ID < NUM_PREDEF_TYPE_IDS,
+ "Too many enumerators in PredefinedTypeIDs. Review the value of "
+ "NUM_PREDEF_TYPE_IDS");
/// Record codes for each kind of type.
///
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h
index 5cdbdfe4e38d..d56e2117a53f 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h
@@ -560,6 +560,10 @@ private:
llvm::DenseMap<Decl*, llvm::SmallVector<NamedDecl*, 2>>
AnonymousDeclarationsForMerging;
+ /// Map from numbering information for lambdas to the corresponding lambdas.
+ llvm::DenseMap<std::pair<const Decl *, unsigned>, NamedDecl *>
+ LambdaDeclarationsForMerging;
+
/// Key used to identify LifetimeExtendedTemporaryDecl for merging,
/// containing the lifetime-extending declaration and the mangling number.
using LETemporaryKey = std::pair<Decl *, unsigned>;
@@ -944,8 +948,14 @@ public:
private:
/// A list of modules that were imported by precompiled headers or
- /// any other non-module AST file.
- SmallVector<ImportedSubmodule, 2> ImportedModules;
+ /// any other non-module AST file and have not yet been made visible. If a
+ /// module is made visible in the ASTReader, it will be transfered to
+ /// \c PendingImportedModulesSema.
+ SmallVector<ImportedSubmodule, 2> PendingImportedModules;
+
+ /// A list of modules that were imported by precompiled headers or
+ /// any other non-module AST file and have not yet been made visible for Sema.
+ SmallVector<ImportedSubmodule, 2> PendingImportedModulesSema;
//@}
/// The system include root to be used when loading the
@@ -1101,7 +1111,13 @@ private:
/// they might contain a deduced return type that refers to a local type
/// declared within the function.
SmallVector<std::pair<FunctionDecl *, serialization::TypeID>, 16>
- PendingFunctionTypes;
+ PendingDeducedFunctionTypes;
+
+ /// The list of deduced variable types that we have not yet read, because
+ /// they might contain a deduced type that refers to a local type declared
+ /// within the variable.
+ SmallVector<std::pair<VarDecl *, serialization::TypeID>, 16>
+ PendingDeducedVarTypes;
/// The list of redeclaration chains that still need to be
/// reconstructed, and the local offset to the corresponding list
@@ -1139,6 +1155,11 @@ private:
2>
PendingObjCExtensionIvarRedeclarations;
+ /// Members that have been added to classes, for which the class has not yet
+ /// been notified. CXXRecordDecl::addedMember will be called for each of
+ /// these once recursive deserialization is complete.
+ SmallVector<std::pair<CXXRecordDecl*, Decl*>, 4> PendingAddedClassMembers;
+
/// The set of NamedDecls that have been loaded, but are members of a
/// context that has been merged into another context where the corresponding
/// declaration is either missing or has not yet been loaded.
@@ -1369,9 +1390,7 @@ private:
void ReadModuleOffsetMap(ModuleFile &F) const;
void ParseLineTable(ModuleFile &F, const RecordData &Record);
llvm::Error ReadSourceManagerBlock(ModuleFile &F);
- llvm::BitstreamCursor &SLocCursorForID(int ID);
SourceLocation getImportLocation(ModuleFile *F);
- void readIncludedFiles(ModuleFile &F, StringRef Blob, Preprocessor &PP);
ASTReadResult ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
const ModuleFile *ImportedBy,
unsigned ClientLoadCapabilities);
@@ -2082,6 +2101,8 @@ public:
llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>>
&LPTMap) override;
+ void AssignedLambdaNumbering(const CXXRecordDecl *Lambda) override;
+
/// Load a selector from disk, registering its ID if it exists.
void LoadSelector(Selector Sel);
@@ -2236,7 +2257,7 @@ public:
unsigned &Idx, LocSeq *Seq = nullptr);
// Read a string
- static std::string ReadString(const RecordData &Record, unsigned &Idx);
+ static std::string ReadString(const RecordDataImpl &Record, unsigned &Idx);
// Skip a string
static void SkipString(const RecordData &Record, unsigned &Idx) {
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
index 09ee1744e894..e328dd0cd557 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
@@ -500,7 +500,6 @@ private:
void WriteInputFiles(SourceManager &SourceMgr, HeaderSearchOptions &HSOpts);
void WriteSourceManagerBlock(SourceManager &SourceMgr,
const Preprocessor &PP);
- void writeIncludedFiles(raw_ostream &Out, const Preprocessor &PP);
void WritePreprocessor(const Preprocessor &PP, bool IsModule);
void WriteHeaderSearch(const HeaderSearch &HS);
void WritePreprocessorDetail(PreprocessingRecord &PPRec,
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h b/contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h
index 9d6b52a97f52..d82e0dd294b9 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h
@@ -136,12 +136,6 @@ public:
/// The caller accepts ownership of the returned object.
IdentifierIterator *createIdentifierIterator() const;
- /// Retrieve the set of modules that have up-to-date indexes.
- ///
- /// \param ModuleFiles Will be populated with the set of module files that
- /// have been indexed.
- void getKnownModules(llvm::SmallVectorImpl<ModuleFile *> &ModuleFiles);
-
/// Retrieve the set of module files on which the given module file
/// directly depends.
void getModuleDependencies(ModuleFile *File,
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h b/contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h
index 871fdd0a4838..b632b4e3e7a7 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h
@@ -164,6 +164,9 @@ public:
/// Whether this precompiled header is a relocatable PCH file.
bool RelocatablePCH = false;
+ /// Whether this module file is a standard C++ module.
+ bool StandardCXXModule = false;
+
/// Whether timestamps are included in this module file.
bool HasTimestamps = false;
@@ -193,7 +196,7 @@ public:
/// The memory buffer that stores the data associated with
/// this AST file, owned by the InMemoryModuleCache.
- llvm::MemoryBuffer *Buffer;
+ llvm::MemoryBuffer *Buffer = nullptr;
/// The size of this file, in bits.
uint64_t SizeInBits = 0;
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h b/contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h
index 9f9700a418a9..be10feb5e351 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h
@@ -56,7 +56,7 @@ class PCHContainerReader {
public:
virtual ~PCHContainerReader() = 0;
/// Equivalent to the format passed to -fmodule-format=
- virtual llvm::StringRef getFormat() const = 0;
+ virtual llvm::ArrayRef<llvm::StringRef> getFormats() const = 0;
/// Returns the serialized AST inside the PCH container Buffer.
virtual llvm::StringRef ExtractPCH(llvm::MemoryBufferRef Buffer) const = 0;
@@ -78,8 +78,7 @@ class RawPCHContainerWriter : public PCHContainerWriter {
/// Implements read operations for a raw pass-through PCH container.
class RawPCHContainerReader : public PCHContainerReader {
- llvm::StringRef getFormat() const override { return "raw"; }
-
+ llvm::ArrayRef<llvm::StringRef> getFormats() const override;
/// Simply returns the buffer contained in Buffer.
llvm::StringRef ExtractPCH(llvm::MemoryBufferRef Buffer) const override;
};
@@ -87,7 +86,9 @@ class RawPCHContainerReader : public PCHContainerReader {
/// A registry of PCHContainerWriter and -Reader objects for different formats.
class PCHContainerOperations {
llvm::StringMap<std::unique_ptr<PCHContainerWriter>> Writers;
- llvm::StringMap<std::unique_ptr<PCHContainerReader>> Readers;
+ llvm::StringMap<PCHContainerReader *> Readers;
+ llvm::SmallVector<std::unique_ptr<PCHContainerReader>> OwnedReaders;
+
public:
/// Automatically registers a RawPCHContainerWriter and
/// RawPCHContainerReader.
@@ -96,13 +97,17 @@ public:
Writers[Writer->getFormat()] = std::move(Writer);
}
void registerReader(std::unique_ptr<PCHContainerReader> Reader) {
- Readers[Reader->getFormat()] = std::move(Reader);
+ assert(!Reader->getFormats().empty() &&
+ "PCHContainerReader must handle >=1 format");
+ for (llvm::StringRef Fmt : Reader->getFormats())
+ Readers[Fmt] = Reader.get();
+ OwnedReaders.push_back(std::move(Reader));
}
const PCHContainerWriter *getWriterOrNull(llvm::StringRef Format) {
return Writers[Format].get();
}
const PCHContainerReader *getReaderOrNull(llvm::StringRef Format) {
- return Readers[Format].get();
+ return Readers[Format];
}
const PCHContainerReader &getRawReader() {
return *getReaderOrNull("raw");
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
index 094b3a69c230..885b1b68de96 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
@@ -359,29 +359,6 @@ def ErrnoModeling : Checker<"Errno">,
HelpText<"Make the special value 'errno' available to other checkers.">,
Documentation<NotDocumented>;
-def StdCLibraryFunctionsChecker : Checker<"StdCLibraryFunctions">,
- HelpText<"Improve modeling of the C standard library functions">,
- // Uninitialized value check is a mandatory dependency. This Checker asserts
- // that arguments are always initialized.
- Dependencies<[CallAndMessageModeling]>,
- CheckerOptions<[
- CmdLineOption<Boolean,
- "DisplayLoadedSummaries",
- "If set to true, the checker displays the found summaries "
- "for the given translation unit.",
- "false",
- Released,
- Hide>,
- CmdLineOption<Boolean,
- "ModelPOSIX",
- "If set to true, the checker models functions from the "
- "POSIX standard.",
- "false",
- InAlpha>
- ]>,
- Documentation<NotDocumented>,
- Hidden;
-
def TrustNonnullChecker : Checker<"TrustNonnull">,
HelpText<"Trust that returns from framework methods annotated with _Nonnull "
"are not null">,
@@ -486,12 +463,12 @@ def CStringNotNullTerm : Checker<"NotNullTerminated">,
HelpText<"Check for arguments which are not null-terminating strings">,
Dependencies<[CStringModeling]>,
Documentation<HasDocumentation>;
-
+
def CStringUninitializedRead : Checker<"UninitializedRead">,
HelpText<"Checks if the string manipulation function would read uninitialized bytes">,
Dependencies<[CStringModeling]>,
Documentation<HasDocumentation>;
-
+
} // end "alpha.unix.cstring"
let ParentPackage = Unix in {
@@ -573,6 +550,7 @@ def PthreadLockChecker : Checker<"PthreadLock">,
def StreamChecker : Checker<"Stream">,
HelpText<"Check stream handling functions">,
+ WeakDependencies<[NonNullParamChecker]>,
Documentation<HasDocumentation>;
def SimpleStreamChecker : Checker<"SimpleStream">,
@@ -583,12 +561,25 @@ def BlockInCriticalSectionChecker : Checker<"BlockInCriticalSection">,
HelpText<"Check for calls to blocking functions inside a critical section">,
Documentation<HasDocumentation>;
-def StdCLibraryFunctionArgsChecker : Checker<"StdCLibraryFunctionArgs">,
- HelpText<"Check constraints of arguments of C standard library functions, "
- "such as whether the parameter of isalpha is in the range [0, 255] "
- "or is EOF.">,
- Dependencies<[StdCLibraryFunctionsChecker]>,
- WeakDependencies<[CallAndMessageChecker, NonNullParamChecker]>,
+def StdCLibraryFunctionsChecker : Checker<"StdCLibraryFunctions">,
+ HelpText<"Check for invalid arguments of C standard library functions, "
+ "and apply relations between arguments and return value">,
+ CheckerOptions<[
+ CmdLineOption<Boolean,
+ "DisplayLoadedSummaries",
+ "If set to true, the checker displays the found summaries "
+ "for the given translation unit.",
+ "false",
+ Released,
+ Hide>,
+ CmdLineOption<Boolean,
+ "ModelPOSIX",
+ "If set to true, the checker models additional functions "
+ "from the POSIX standard.",
+ "false",
+ InAlpha>
+ ]>,
+ WeakDependencies<[CallAndMessageChecker, NonNullParamChecker, StreamChecker]>,
Documentation<HasDocumentation>;
} // end "alpha.unix"
@@ -1618,7 +1609,6 @@ def DebugIteratorModeling : Checker<"DebugIteratorModeling">,
def StdCLibraryFunctionsTesterChecker : Checker<"StdCLibraryFunctionsTester">,
HelpText<"Add test functions to the summary map, so testing of individual "
"summary constituents becomes possible.">,
- Dependencies<[StdCLibraryFunctionsChecker]>,
Documentation<NotDocumented>;
} // end "debug"
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Taint.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Taint.h
index df863a249541..3ec8dbfb09ee 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Taint.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Taint.h
@@ -79,26 +79,48 @@ bool isTainted(ProgramStateRef State, SymbolRef Sym,
bool isTainted(ProgramStateRef State, const MemRegion *Reg,
TaintTagType Kind = TaintTagGeneric);
+/// Returns the tainted Symbols for a given Statement and state.
+std::vector<SymbolRef> getTaintedSymbols(ProgramStateRef State, const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Returns the tainted Symbols for a given SVal and state.
+std::vector<SymbolRef> getTaintedSymbols(ProgramStateRef State, SVal V,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Returns the tainted Symbols for a SymbolRef and state.
+std::vector<SymbolRef> getTaintedSymbols(ProgramStateRef State, SymbolRef Sym,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Returns the tainted (index, super/sub region, symbolic region) symbols
+/// for a given memory region.
+std::vector<SymbolRef> getTaintedSymbols(ProgramStateRef State,
+ const MemRegion *Reg,
+ TaintTagType Kind = TaintTagGeneric);
+
+std::vector<SymbolRef> getTaintedSymbolsImpl(ProgramStateRef State,
+ const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind,
+ bool returnFirstOnly);
+
+std::vector<SymbolRef> getTaintedSymbolsImpl(ProgramStateRef State, SVal V,
+ TaintTagType Kind,
+ bool returnFirstOnly);
+
+std::vector<SymbolRef> getTaintedSymbolsImpl(ProgramStateRef State,
+ SymbolRef Sym, TaintTagType Kind,
+ bool returnFirstOnly);
+
+std::vector<SymbolRef> getTaintedSymbolsImpl(ProgramStateRef State,
+ const MemRegion *Reg,
+ TaintTagType Kind,
+ bool returnFirstOnly);
+
void printTaint(ProgramStateRef State, raw_ostream &Out, const char *nl = "\n",
const char *sep = "");
LLVM_DUMP_METHOD void dumpTaint(ProgramStateRef State);
-
-/// The bug visitor prints a diagnostic message at the location where a given
-/// variable was tainted.
-class TaintBugVisitor final : public BugReporterVisitor {
-private:
- const SVal V;
-
-public:
- TaintBugVisitor(const SVal V) : V(V) {}
- void Profile(llvm::FoldingSetNodeID &ID) const override { ID.Add(V); }
-
- PathDiagnosticPieceRef VisitNode(const ExplodedNode *N,
- BugReporterContext &BRC,
- PathSensitiveBugReport &BR) override;
-};
-
} // namespace taint
} // namespace ento
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
index acfbcf67b1b9..2fc825c2af9c 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
@@ -327,15 +327,6 @@ ANALYZER_OPTION(bool, ShouldSupportSymbolicIntegerCasts,
false)
ANALYZER_OPTION(
- bool, ShouldConsiderSingleElementArraysAsFlexibleArrayMembers,
- "consider-single-element-arrays-as-flexible-array-members",
- "Consider single element arrays as flexible array member candidates. "
- "This will prevent the analyzer from assuming that a single element array "
- "holds a single element. [DEPRECATED, removing in clang-17; "
- "use '-fstrict-flex-arrays=<N>' instead]",
- true)
-
-ANALYZER_OPTION(
bool, ShouldAssumeControlledEnvironment, "assume-controlled-environment",
"Whether the analyzed application runs in a controlled environment. "
"We will assume that environment variables exist in queries and they hold "
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
index e81d7bbb8823..a947bd086702 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
@@ -260,9 +260,10 @@ public:
#undef ANALYZER_OPTION
#undef ANALYZER_OPTION_DEPENDS_ON_USER_MODE
- // Create an array of all -analyzer-config command line options. Sort it in
- // the constructor.
- std::vector<llvm::StringLiteral> AnalyzerConfigCmdFlags = {
+ bool isUnknownAnalyzerConfig(llvm::StringRef Name) {
+ static std::vector<llvm::StringLiteral> AnalyzerConfigCmdFlags = []() {
+ // Create an array of all -analyzer-config command line options.
+ std::vector<llvm::StringLiteral> AnalyzerConfigCmdFlags = {
#define ANALYZER_OPTION_DEPENDS_ON_USER_MODE(TYPE, NAME, CMDFLAG, DESC, \
SHALLOW_VAL, DEEP_VAL) \
ANALYZER_OPTION(TYPE, NAME, CMDFLAG, DESC, SHALLOW_VAL)
@@ -273,10 +274,11 @@ public:
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.def"
#undef ANALYZER_OPTION
#undef ANALYZER_OPTION_DEPENDS_ON_USER_MODE
- };
-
- bool isUnknownAnalyzerConfig(StringRef Name) const {
- assert(llvm::is_sorted(AnalyzerConfigCmdFlags));
+ };
+ // FIXME: Sort this at compile-time when we get constexpr sort (C++20).
+ llvm::sort(AnalyzerConfigCmdFlags);
+ return AnalyzerConfigCmdFlags;
+ }();
return !std::binary_search(AnalyzerConfigCmdFlags.begin(),
AnalyzerConfigCmdFlags.end(), Name);
@@ -292,9 +294,7 @@ public:
AnalyzerDisplayProgress(false), eagerlyAssumeBinOpBifurcation(false),
TrimGraph(false), visualizeExplodedGraphWithGraphViz(false),
UnoptimizedCFG(false), PrintStats(false), NoRetryExhausted(false),
- AnalyzerWerror(false) {
- llvm::sort(AnalyzerConfigCmdFlags);
- }
+ AnalyzerWerror(false) {}
/// Interprets an option's string value as a boolean. The "true" string is
/// interpreted as true and the "false" string is interpreted as false.
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
index e5ebb04d67b2..826370c2648c 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
@@ -608,8 +608,9 @@ public:
/// Iterator over the set of BugReports tracked by the BugReporter.
using EQClasses_iterator = llvm::FoldingSet<BugReportEquivClass>::iterator;
- EQClasses_iterator EQClasses_begin() { return EQClasses.begin(); }
- EQClasses_iterator EQClasses_end() { return EQClasses.end(); }
+ llvm::iterator_range<EQClasses_iterator> equivalenceClasses() {
+ return EQClasses;
+ }
ASTContext &getContext() { return D.getASTContext(); }
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
index c479daea6783..d9b3d9352d32 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
@@ -51,6 +51,12 @@ public:
BugReporterVisitor() = default;
BugReporterVisitor(const BugReporterVisitor &) = default;
BugReporterVisitor(BugReporterVisitor &&) {}
+
+ // The copy and move assignment operator is defined as deleted pending further
+ // motivation.
+ BugReporterVisitor &operator=(const BugReporterVisitor &) = delete;
+ BugReporterVisitor &operator=(BugReporterVisitor &&) = delete;
+
virtual ~BugReporterVisitor();
/// Return a diagnostic piece which should be associated with the
@@ -386,19 +392,19 @@ const Expr *getDerefExpr(const Stmt *S);
} // namespace bugreporter
class TrackConstraintBRVisitor final : public BugReporterVisitor {
- DefinedSVal Constraint;
- bool Assumption;
+ const SmallString<64> Message;
+ const DefinedSVal Constraint;
+ const bool Assumption;
bool IsSatisfied = false;
- bool IsZeroCheck;
/// We should start tracking from the last node along the path in which the
/// value is constrained.
bool IsTrackingTurnedOn = false;
public:
- TrackConstraintBRVisitor(DefinedSVal constraint, bool assumption)
- : Constraint(constraint), Assumption(assumption),
- IsZeroCheck(!Assumption && isa<Loc>(Constraint)) {}
+ TrackConstraintBRVisitor(DefinedSVal constraint, bool assumption,
+ StringRef Message)
+ : Message(Message), Constraint(constraint), Assumption(assumption) {}
void Profile(llvm::FoldingSetNodeID &ID) const override;
@@ -411,6 +417,9 @@ public:
PathSensitiveBugReport &BR) override;
private:
+ /// Checks if the constraint refers to a null-location.
+ bool isZeroCheck() const;
+
/// Checks if the constraint is valid in the current state.
bool isUnderconstrained(const ExplodedNode *N) const;
};
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h
index 392bc484bf62..5d2c96e5bc9d 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h
@@ -22,6 +22,7 @@ extern const char *const CXXObjectLifecycle;
extern const char *const CXXMoveSemantics;
extern const char *const SecurityError;
extern const char *const UnusedCode;
+extern const char *const TaintedData;
} // namespace categories
} // namespace ento
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h
index 36a8bcb26bd2..b92f0e1e9f0f 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h
@@ -370,13 +370,12 @@ class PointerEscape {
Kind);
InvalidatedSymbols RegularEscape;
- for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
- E = Escaped.end(); I != E; ++I)
- if (!ETraits->hasTrait(*I,
- RegionAndSymbolInvalidationTraits::TK_PreserveContents) &&
- !ETraits->hasTrait(*I,
- RegionAndSymbolInvalidationTraits::TK_SuppressEscape))
- RegularEscape.insert(*I);
+ for (SymbolRef Sym : Escaped)
+ if (!ETraits->hasTrait(
+ Sym, RegionAndSymbolInvalidationTraits::TK_PreserveContents) &&
+ !ETraits->hasTrait(
+ Sym, RegionAndSymbolInvalidationTraits::TK_SuppressEscape))
+ RegularEscape.insert(Sym);
if (RegularEscape.empty())
return State;
@@ -410,13 +409,13 @@ class ConstPointerEscape {
return State;
InvalidatedSymbols ConstEscape;
- for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
- E = Escaped.end(); I != E; ++I)
- if (ETraits->hasTrait(*I,
- RegionAndSymbolInvalidationTraits::TK_PreserveContents) &&
- !ETraits->hasTrait(*I,
- RegionAndSymbolInvalidationTraits::TK_SuppressEscape))
- ConstEscape.insert(*I);
+ for (SymbolRef Sym : Escaped) {
+ if (ETraits->hasTrait(
+ Sym, RegionAndSymbolInvalidationTraits::TK_PreserveContents) &&
+ !ETraits->hasTrait(
+ Sym, RegionAndSymbolInvalidationTraits::TK_SuppressEscape))
+ ConstEscape.insert(Sym);
+ }
if (ConstEscape.empty())
return State;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
index 710bc8c33849..8129ebc8fdc6 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
@@ -84,6 +84,10 @@ public:
CallEventRef(const T *Call) : IntrusiveRefCntPtr<const T>(Call) {}
CallEventRef(const CallEventRef &Orig) : IntrusiveRefCntPtr<const T>(Orig) {}
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ CallEventRef &operator=(const CallEventRef &) = delete;
+
CallEventRef<T> cloneWithState(ProgramStateRef State) const {
return this->get()->template cloneWithState<T>(State);
}
@@ -154,6 +158,7 @@ private:
ProgramStateRef State;
const LocationContext *LCtx;
llvm::PointerUnion<const Expr *, const Decl *> Origin;
+ CFGBlock::ConstCFGElementRef ElemRef = {nullptr, 0};
mutable std::optional<bool> Foreign; // Set by CTU analysis.
protected:
@@ -176,16 +181,19 @@ private:
protected:
friend class CallEventManager;
- CallEvent(const Expr *E, ProgramStateRef state, const LocationContext *lctx)
- : State(std::move(state)), LCtx(lctx), Origin(E) {}
+ CallEvent(const Expr *E, ProgramStateRef state, const LocationContext *lctx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : State(std::move(state)), LCtx(lctx), Origin(E), ElemRef(ElemRef) {}
- CallEvent(const Decl *D, ProgramStateRef state, const LocationContext *lctx)
- : State(std::move(state)), LCtx(lctx), Origin(D) {}
+ CallEvent(const Decl *D, ProgramStateRef state, const LocationContext *lctx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : State(std::move(state)), LCtx(lctx), Origin(D), ElemRef(ElemRef) {}
// DO NOT MAKE PUBLIC
CallEvent(const CallEvent &Original)
: State(Original.State), LCtx(Original.LCtx), Origin(Original.Origin),
- Data(Original.Data), Location(Original.Location) {}
+ ElemRef(Original.ElemRef), Data(Original.Data),
+ Location(Original.Location) {}
/// Copies this CallEvent, with vtable intact, into a new block of memory.
virtual void cloneTo(void *Dest) const = 0;
@@ -232,6 +240,10 @@ public:
return LCtx;
}
+ const CFGBlock::ConstCFGElementRef &getCFGElementRef() const {
+ return ElemRef;
+ }
+
/// Returns the definition of the function or method that will be
/// called.
virtual RuntimeDefinition getRuntimeDefinition() const = 0;
@@ -484,11 +496,13 @@ public:
class AnyFunctionCall : public CallEvent {
protected:
AnyFunctionCall(const Expr *E, ProgramStateRef St,
- const LocationContext *LCtx)
- : CallEvent(E, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CallEvent(E, St, LCtx, ElemRef) {}
AnyFunctionCall(const Decl *D, ProgramStateRef St,
- const LocationContext *LCtx)
- : CallEvent(D, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CallEvent(D, St, LCtx, ElemRef) {}
AnyFunctionCall(const AnyFunctionCall &Other) = default;
public:
@@ -521,8 +535,9 @@ class SimpleFunctionCall : public AnyFunctionCall {
protected:
SimpleFunctionCall(const CallExpr *CE, ProgramStateRef St,
- const LocationContext *LCtx)
- : AnyFunctionCall(CE, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyFunctionCall(CE, St, LCtx, ElemRef) {}
SimpleFunctionCall(const SimpleFunctionCall &Other) = default;
void cloneTo(void *Dest) const override {
@@ -557,9 +572,9 @@ class BlockCall : public CallEvent {
friend class CallEventManager;
protected:
- BlockCall(const CallExpr *CE, ProgramStateRef St,
- const LocationContext *LCtx)
- : CallEvent(CE, St, LCtx) {}
+ BlockCall(const CallExpr *CE, ProgramStateRef St, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CallEvent(CE, St, LCtx, ElemRef) {}
BlockCall(const BlockCall &Other) = default;
void cloneTo(void *Dest) const override { new (Dest) BlockCall(*this); }
@@ -605,10 +620,9 @@ public:
const BlockDataRegion *BR = getBlockRegion();
assert(BR && "Block converted from lambda must have a block region");
- auto I = BR->referenced_vars_begin();
- assert(I != BR->referenced_vars_end());
-
- return I.getCapturedRegion();
+ auto ReferencedVars = BR->referenced_vars();
+ assert(!ReferencedVars.empty());
+ return ReferencedVars.begin().getCapturedRegion();
}
RuntimeDefinition getRuntimeDefinition() const override {
@@ -661,11 +675,13 @@ public:
class CXXInstanceCall : public AnyFunctionCall {
protected:
CXXInstanceCall(const CallExpr *CE, ProgramStateRef St,
- const LocationContext *LCtx)
- : AnyFunctionCall(CE, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyFunctionCall(CE, St, LCtx, ElemRef) {}
CXXInstanceCall(const FunctionDecl *D, ProgramStateRef St,
- const LocationContext *LCtx)
- : AnyFunctionCall(D, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyFunctionCall(D, St, LCtx, ElemRef) {}
CXXInstanceCall(const CXXInstanceCall &Other) = default;
void getExtraInvalidatedValues(ValueList &Values,
@@ -699,8 +715,9 @@ class CXXMemberCall : public CXXInstanceCall {
protected:
CXXMemberCall(const CXXMemberCallExpr *CE, ProgramStateRef St,
- const LocationContext *LCtx)
- : CXXInstanceCall(CE, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CXXInstanceCall(CE, St, LCtx, ElemRef) {}
CXXMemberCall(const CXXMemberCall &Other) = default;
void cloneTo(void *Dest) const override { new (Dest) CXXMemberCall(*this); }
@@ -741,8 +758,9 @@ class CXXMemberOperatorCall : public CXXInstanceCall {
protected:
CXXMemberOperatorCall(const CXXOperatorCallExpr *CE, ProgramStateRef St,
- const LocationContext *LCtx)
- : CXXInstanceCall(CE, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CXXInstanceCall(CE, St, LCtx, ElemRef) {}
CXXMemberOperatorCall(const CXXMemberOperatorCall &Other) = default;
void cloneTo(void *Dest) const override {
@@ -808,10 +826,17 @@ protected:
/// \param Target The object region to be destructed.
/// \param St The path-sensitive state at this point in the program.
/// \param LCtx The location context at this point in the program.
+ /// \param ElemRef The reference to this destructor in the CFG.
+ ///
+ /// FIXME: Eventually we want to drop \param Target and deduce it from
+ /// \param ElemRef. To do that we need to migrate the logic for target
+ /// region lookup from ExprEngine::ProcessImplicitDtor() and make it
+ /// independent from ExprEngine.
CXXDestructorCall(const CXXDestructorDecl *DD, const Stmt *Trigger,
const MemRegion *Target, bool IsBaseDestructor,
- ProgramStateRef St, const LocationContext *LCtx)
- : CXXInstanceCall(DD, St, LCtx) {
+ ProgramStateRef St, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CXXInstanceCall(DD, St, LCtx, ElemRef) {
Data = DtorDataTy(Target, IsBaseDestructor).getOpaqueValue();
Location = Trigger->getEndLoc();
}
@@ -847,8 +872,9 @@ public:
class AnyCXXConstructorCall : public AnyFunctionCall {
protected:
AnyCXXConstructorCall(const Expr *E, const MemRegion *Target,
- ProgramStateRef St, const LocationContext *LCtx)
- : AnyFunctionCall(E, St, LCtx) {
+ ProgramStateRef St, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyFunctionCall(E, St, LCtx, ElemRef) {
assert(E && (isa<CXXConstructExpr>(E) || isa<CXXInheritedCtorInitExpr>(E)));
// Target may be null when the region is unknown.
Data = Target;
@@ -884,9 +910,14 @@ protected:
/// a new symbolic region will be used.
/// \param St The path-sensitive state at this point in the program.
/// \param LCtx The location context at this point in the program.
+ /// \param ElemRef The reference to this constructor in the CFG.
+ ///
+ /// FIXME: Eventually we want to drop \param Target and deduce it from
+ /// \param ElemRef.
CXXConstructorCall(const CXXConstructExpr *CE, const MemRegion *Target,
- ProgramStateRef St, const LocationContext *LCtx)
- : AnyCXXConstructorCall(CE, Target, St, LCtx) {}
+ ProgramStateRef St, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyCXXConstructorCall(CE, Target, St, LCtx, ElemRef) {}
CXXConstructorCall(const CXXConstructorCall &Other) = default;
@@ -941,8 +972,9 @@ class CXXInheritedConstructorCall : public AnyCXXConstructorCall {
protected:
CXXInheritedConstructorCall(const CXXInheritedCtorInitExpr *CE,
const MemRegion *Target, ProgramStateRef St,
- const LocationContext *LCtx)
- : AnyCXXConstructorCall(CE, Target, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyCXXConstructorCall(CE, Target, St, LCtx, ElemRef) {}
CXXInheritedConstructorCall(const CXXInheritedConstructorCall &Other) =
default;
@@ -1003,8 +1035,9 @@ class CXXAllocatorCall : public AnyFunctionCall {
protected:
CXXAllocatorCall(const CXXNewExpr *E, ProgramStateRef St,
- const LocationContext *LCtx)
- : AnyFunctionCall(E, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyFunctionCall(E, St, LCtx, ElemRef) {}
CXXAllocatorCall(const CXXAllocatorCall &Other) = default;
void cloneTo(void *Dest) const override { new (Dest) CXXAllocatorCall(*this); }
@@ -1084,8 +1117,9 @@ class CXXDeallocatorCall : public AnyFunctionCall {
protected:
CXXDeallocatorCall(const CXXDeleteExpr *E, ProgramStateRef St,
- const LocationContext *LCtx)
- : AnyFunctionCall(E, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyFunctionCall(E, St, LCtx, ElemRef) {}
CXXDeallocatorCall(const CXXDeallocatorCall &Other) = default;
void cloneTo(void *Dest) const override {
@@ -1136,8 +1170,9 @@ class ObjCMethodCall : public CallEvent {
protected:
ObjCMethodCall(const ObjCMessageExpr *Msg, ProgramStateRef St,
- const LocationContext *LCtx)
- : CallEvent(Msg, St, LCtx) {
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CallEvent(Msg, St, LCtx, ElemRef) {
Data = nullptr;
}
@@ -1265,34 +1300,36 @@ class CallEventManager {
}
template <typename T, typename Arg>
- T *create(Arg A, ProgramStateRef St, const LocationContext *LCtx) {
+ T *create(Arg A, ProgramStateRef St, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
static_assert(sizeof(T) == sizeof(CallEventTemplateTy),
"CallEvent subclasses are not all the same size");
- return new (allocate()) T(A, St, LCtx);
+ return new (allocate()) T(A, St, LCtx, ElemRef);
}
template <typename T, typename Arg1, typename Arg2>
- T *create(Arg1 A1, Arg2 A2, ProgramStateRef St, const LocationContext *LCtx) {
+ T *create(Arg1 A1, Arg2 A2, ProgramStateRef St, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
static_assert(sizeof(T) == sizeof(CallEventTemplateTy),
"CallEvent subclasses are not all the same size");
- return new (allocate()) T(A1, A2, St, LCtx);
+ return new (allocate()) T(A1, A2, St, LCtx, ElemRef);
}
template <typename T, typename Arg1, typename Arg2, typename Arg3>
T *create(Arg1 A1, Arg2 A2, Arg3 A3, ProgramStateRef St,
- const LocationContext *LCtx) {
+ const LocationContext *LCtx, CFGBlock::ConstCFGElementRef ElemRef) {
static_assert(sizeof(T) == sizeof(CallEventTemplateTy),
"CallEvent subclasses are not all the same size");
- return new (allocate()) T(A1, A2, A3, St, LCtx);
+ return new (allocate()) T(A1, A2, A3, St, LCtx, ElemRef);
}
template <typename T, typename Arg1, typename Arg2, typename Arg3,
typename Arg4>
T *create(Arg1 A1, Arg2 A2, Arg3 A3, Arg4 A4, ProgramStateRef St,
- const LocationContext *LCtx) {
+ const LocationContext *LCtx, CFGBlock::ConstCFGElementRef ElemRef) {
static_assert(sizeof(T) == sizeof(CallEventTemplateTy),
"CallEvent subclasses are not all the same size");
- return new (allocate()) T(A1, A2, A3, A4, St, LCtx);
+ return new (allocate()) T(A1, A2, A3, A4, St, LCtx, ElemRef);
}
public:
@@ -1304,50 +1341,57 @@ public:
/// Gets a call event for a function call, Objective-C method call,
/// a 'new', or a 'delete' call.
- CallEventRef<>
- getCall(const Stmt *S, ProgramStateRef State,
- const LocationContext *LC);
+ CallEventRef<> getCall(const Stmt *S, ProgramStateRef State,
+ const LocationContext *LC,
+ CFGBlock::ConstCFGElementRef ElemRef);
- CallEventRef<>
- getSimpleCall(const CallExpr *E, ProgramStateRef State,
- const LocationContext *LCtx);
+ CallEventRef<> getSimpleCall(const CallExpr *E, ProgramStateRef State,
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef);
CallEventRef<ObjCMethodCall>
getObjCMethodCall(const ObjCMessageExpr *E, ProgramStateRef State,
- const LocationContext *LCtx) {
- return create<ObjCMethodCall>(E, State, LCtx);
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
+ return create<ObjCMethodCall>(E, State, LCtx, ElemRef);
}
CallEventRef<CXXConstructorCall>
getCXXConstructorCall(const CXXConstructExpr *E, const MemRegion *Target,
- ProgramStateRef State, const LocationContext *LCtx) {
- return create<CXXConstructorCall>(E, Target, State, LCtx);
+ ProgramStateRef State, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
+ return create<CXXConstructorCall>(E, Target, State, LCtx, ElemRef);
}
CallEventRef<CXXInheritedConstructorCall>
getCXXInheritedConstructorCall(const CXXInheritedCtorInitExpr *E,
const MemRegion *Target, ProgramStateRef State,
- const LocationContext *LCtx) {
- return create<CXXInheritedConstructorCall>(E, Target, State, LCtx);
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
+ return create<CXXInheritedConstructorCall>(E, Target, State, LCtx, ElemRef);
}
CallEventRef<CXXDestructorCall>
getCXXDestructorCall(const CXXDestructorDecl *DD, const Stmt *Trigger,
const MemRegion *Target, bool IsBase,
- ProgramStateRef State, const LocationContext *LCtx) {
- return create<CXXDestructorCall>(DD, Trigger, Target, IsBase, State, LCtx);
+ ProgramStateRef State, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
+ return create<CXXDestructorCall>(DD, Trigger, Target, IsBase, State, LCtx,
+ ElemRef);
}
CallEventRef<CXXAllocatorCall>
getCXXAllocatorCall(const CXXNewExpr *E, ProgramStateRef State,
- const LocationContext *LCtx) {
- return create<CXXAllocatorCall>(E, State, LCtx);
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
+ return create<CXXAllocatorCall>(E, State, LCtx, ElemRef);
}
CallEventRef<CXXDeallocatorCall>
getCXXDeallocatorCall(const CXXDeleteExpr *E, ProgramStateRef State,
- const LocationContext *LCtx) {
- return create<CXXDeallocatorCall>(E, State, LCtx);
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
+ return create<CXXDeallocatorCall>(E, State, LCtx, ElemRef);
}
};
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
index 9bd5a802d5d6..9923c41e6ad2 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
@@ -316,8 +316,8 @@ public:
/// bug path significantly shorter.
const NoteTag *getNoteTag(StringRef Note, bool IsPrunable = false) {
return getNoteTag(
- [Note](BugReporterContext &,
- PathSensitiveBugReport &) { return std::string(Note); },
+ [Note = std::string(Note)](BugReporterContext &,
+ PathSensitiveBugReport &) { return Note; },
IsPrunable);
}
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
index a595d517cd27..8dbe767cef9d 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
@@ -25,6 +25,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/WorkList.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <memory>
@@ -175,21 +176,11 @@ public:
WorkList *getWorkList() const { return WList.get(); }
WorkList *getCTUWorkList() const { return CTUWList.get(); }
- BlocksExhausted::const_iterator blocks_exhausted_begin() const {
- return blocksExhausted.begin();
+ auto exhausted_blocks() const {
+ return llvm::iterator_range(blocksExhausted);
}
- BlocksExhausted::const_iterator blocks_exhausted_end() const {
- return blocksExhausted.end();
- }
-
- BlocksAborted::const_iterator blocks_aborted_begin() const {
- return blocksAborted.begin();
- }
-
- BlocksAborted::const_iterator blocks_aborted_end() const {
- return blocksAborted.end();
- }
+ auto aborted_blocks() const { return llvm::iterator_range(blocksAborted); }
/// Enqueue the given set of nodes onto the work list.
void enqueue(ExplodedNodeSet &Set);
@@ -507,6 +498,11 @@ public:
iterator(CFGBlock::const_succ_iterator i) : I(i) {}
public:
+ // This isn't really a conventional iterator.
+ // We just implement the deref as a no-op for now to make range-based for
+ // loops work.
+ const iterator &operator*() const { return *this; }
+
iterator &operator++() { ++I; return *this; }
bool operator!=(const iterator &X) const { return I != X.I; }
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
index cb424ba5f3e7..2fb05ac46e8f 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
@@ -32,6 +32,7 @@
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
@@ -395,13 +396,9 @@ public:
using node_iterator = AllNodesTy::iterator;
using const_node_iterator = AllNodesTy::const_iterator;
- node_iterator nodes_begin() { return Nodes.begin(); }
+ llvm::iterator_range<node_iterator> nodes() { return Nodes; }
- node_iterator nodes_end() { return Nodes.end(); }
-
- const_node_iterator nodes_begin() const { return Nodes.begin(); }
-
- const_node_iterator nodes_end() const { return Nodes.end(); }
+ llvm::iterator_range<const_node_iterator> nodes() const { return Nodes; }
roots_iterator roots_begin() { return Roots.begin(); }
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index d73661545535..ed5c4adb5e3d 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -234,10 +234,10 @@ public:
return (*G.roots_begin())->getLocation().getLocationContext();
}
- void GenerateAutoTransition(ExplodedNode *N);
- void enqueueEndOfPath(ExplodedNodeSet &S);
- void GenerateCallExitNode(ExplodedNode *N);
-
+ CFGBlock::ConstCFGElementRef getCFGElementRef() const {
+ const CFGBlock *blockPtr = currBldrCtx ? currBldrCtx->getBlock() : nullptr;
+ return {blockPtr, currStmtIdx};
+ }
/// Dump graph to the specified filename.
/// If filename is empty, generate a temporary one.
@@ -602,14 +602,7 @@ public:
StmtNodeBuilder &Bldr,
ExplodedNode *Pred);
- ProgramStateRef handleLVectorSplat(ProgramStateRef state,
- const LocationContext *LCtx,
- const CastExpr *CastE,
- StmtNodeBuilder &Bldr,
- ExplodedNode *Pred);
-
- void handleUOExtension(ExplodedNodeSet::iterator I,
- const UnaryOperator* U,
+ void handleUOExtension(ExplodedNode *N, const UnaryOperator *U,
StmtNodeBuilder &Bldr);
public:
@@ -768,15 +761,6 @@ private:
void finishArgumentConstruction(ExplodedNodeSet &Dst, ExplodedNode *Pred,
const CallEvent &Call);
- void evalLoadCommon(ExplodedNodeSet &Dst,
- const Expr *NodeEx, /* Eventually will be a CFGStmt */
- const Expr *BoundEx,
- ExplodedNode *Pred,
- ProgramStateRef St,
- SVal location,
- const ProgramPointTag *tag,
- QualType LoadTy);
-
void evalLocation(ExplodedNodeSet &Dst,
const Stmt *NodeEx, /* This will eventually be a CFGStmt */
const Stmt *BoundEx,
@@ -911,13 +895,6 @@ private:
static SVal makeElementRegion(ProgramStateRef State, SVal LValue,
QualType &Ty, bool &IsArray, unsigned Idx = 0);
- /// For a DeclStmt or CXXInitCtorInitializer, walk backward in the current CFG
- /// block to find the constructor expression that directly constructed into
- /// the storage for this statement. Returns null if the constructor for this
- /// statement created a temporary object region rather than directly
- /// constructing into an existing region.
- const CXXConstructExpr *findDirectConstructorForCurrentCFGElement();
-
/// Common code that handles either a CXXConstructExpr or a
/// CXXInheritedCtorInitExpr.
void handleConstructor(const Expr *E, ExplodedNode *Pred,
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
index e982384b3fb0..151d3e57c1cb 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
@@ -31,6 +31,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include <cassert>
@@ -136,8 +137,6 @@ public:
/// It might return null.
const SymbolicRegion *getSymbolicBase() const;
- bool hasGlobalsOrParametersStorage() const;
-
bool hasStackStorage() const;
bool hasStackNonParametersStorage() const;
@@ -739,6 +738,11 @@ public:
++OriginalR;
return *this;
}
+
+ // This isn't really a conventional iterator.
+ // We just implement the deref as a no-op for now to make range-based for
+ // loops work.
+ const referenced_vars_iterator &operator*() const { return *this; }
};
/// Return the original region for a captured region, if
@@ -747,6 +751,7 @@ public:
referenced_vars_iterator referenced_vars_begin() const;
referenced_vars_iterator referenced_vars_end() const;
+ llvm::iterator_range<referenced_vars_iterator> referenced_vars() const;
void dumpToStream(raw_ostream &os) const override;
@@ -1233,8 +1238,7 @@ class CXXTempObjectRegion : public TypedValueRegion {
CXXTempObjectRegion(Expr const *E, MemSpaceRegion const *sReg)
: TypedValueRegion(sReg, CXXTempObjectRegionKind), Ex(E) {
assert(E);
- assert(isa<StackLocalsSpaceRegion>(sReg) ||
- isa<GlobalInternalSpaceRegion>(sReg));
+ assert(isa<StackLocalsSpaceRegion>(sReg));
}
static void ProfileRegion(llvm::FoldingSetNodeID &ID,
@@ -1244,6 +1248,9 @@ public:
LLVM_ATTRIBUTE_RETURNS_NONNULL
const Expr *getExpr() const { return Ex; }
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
+ const StackFrameContext *getStackFrame() const;
+
QualType getValueType() const override { return Ex->getType(); }
void dumpToStream(raw_ostream &os) const override;
@@ -1255,6 +1262,45 @@ public:
}
};
+// C++ temporary objects that have their lifetime extended to the lifetime of
+// a variable. Usually they represent temporaries bound to reference variables.
+class CXXLifetimeExtendedObjectRegion : public TypedValueRegion {
+ friend class MemRegionManager;
+
+ Expr const *Ex;
+ ValueDecl const *ExD;
+
+ CXXLifetimeExtendedObjectRegion(Expr const *E, ValueDecl const *D,
+ MemSpaceRegion const *sReg)
+ : TypedValueRegion(sReg, CXXLifetimeExtendedObjectRegionKind), Ex(E),
+ ExD(D) {
+ assert(E);
+ assert(D);
+ assert((isa<StackLocalsSpaceRegion, GlobalInternalSpaceRegion>(sReg)));
+ }
+
+ static void ProfileRegion(llvm::FoldingSetNodeID &ID, Expr const *E,
+ ValueDecl const *D, const MemRegion *sReg);
+
+public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
+ const Expr *getExpr() const { return Ex; }
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
+ const ValueDecl *getExtendingDecl() const { return ExD; }
+ /// It might return null.
+ const StackFrameContext *getStackFrame() const;
+
+ QualType getValueType() const override { return Ex->getType(); }
+
+ void dumpToStream(raw_ostream &os) const override;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override;
+
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == CXXLifetimeExtendedObjectRegionKind;
+ }
+};
+
// CXXBaseObjectRegion represents a base object within a C++ object. It is
// identified by the base class declaration and the region of its parent object.
class CXXBaseObjectRegion : public TypedValueRegion {
@@ -1487,6 +1533,19 @@ public:
const CXXTempObjectRegion *getCXXTempObjectRegion(Expr const *Ex,
LocationContext const *LC);
+ /// Create a CXXLifetimeExtendedObjectRegion for temporaries which are
+ /// lifetime-extended by local references.
+ const CXXLifetimeExtendedObjectRegion *
+ getCXXLifetimeExtendedObjectRegion(Expr const *Ex, ValueDecl const *VD,
+ LocationContext const *LC);
+
+ /// Create a CXXLifetimeExtendedObjectRegion for temporaries which are
+ /// lifetime-extended by *static* references.
+ /// This differs from \ref getCXXLifetimeExtendedObjectRegion(Expr const *,
+ /// ValueDecl const *, LocationContext const *) in the super-region used.
+ const CXXLifetimeExtendedObjectRegion *
+ getCXXStaticLifetimeExtendedObjectRegion(const Expr *Ex, ValueDecl const *VD);
+
/// Create a CXXBaseObjectRegion with the given base class for region
/// \p Super.
///
@@ -1525,11 +1584,6 @@ public:
const LocationContext *lc,
unsigned blockCount);
- /// Create a CXXTempObjectRegion for temporaries which are lifetime-extended
- /// by static references. This differs from getCXXTempObjectRegion in the
- /// super-region used.
- const CXXTempObjectRegion *getCXXStaticTempObjectRegion(const Expr *Ex);
-
private:
template <typename RegionTy, typename SuperTy,
typename Arg1Ty>
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def
index 44ab31fc9f2e..245828a2fcc0 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def
@@ -69,6 +69,7 @@ ABSTRACT_REGION(SubRegion, MemRegion)
REGION(CXXBaseObjectRegion, TypedValueRegion)
REGION(CXXDerivedObjectRegion, TypedValueRegion)
REGION(CXXTempObjectRegion, TypedValueRegion)
+ REGION(CXXLifetimeExtendedObjectRegion, TypedValueRegion)
REGION(CXXThisRegion, TypedValueRegion)
ABSTRACT_REGION(DeclRegion, TypedValueRegion)
REGION(FieldRegion, DeclRegion)
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
index d5c2dc617243..5116a4c06850 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
@@ -203,9 +203,9 @@ public:
auto CZ = State->get<ConstraintSMT>();
auto &CZFactory = State->get_context<ConstraintSMT>();
- for (auto I = CZ.begin(), E = CZ.end(); I != E; ++I) {
- if (SymReaper.isDead(I->first))
- CZ = CZFactory.remove(CZ, *I);
+ for (const auto &Entry : CZ) {
+ if (SymReaper.isDead(Entry.first))
+ CZ = CZFactory.remove(CZ, Entry);
}
return State->set<ConstraintSMT>(CZ);
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
index 32a1125ce4d8..00cce21151a7 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
@@ -21,6 +21,7 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableList.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstdint>
@@ -180,16 +181,11 @@ public:
void dumpToStream(raw_ostream &OS) const;
void dump() const;
- SymExpr::symbol_iterator symbol_begin() const {
- const SymExpr *SE = getAsSymbol(/*IncludeBaseRegions=*/true);
- if (SE)
- return SE->symbol_begin();
- else
- return SymExpr::symbol_iterator();
- }
-
- SymExpr::symbol_iterator symbol_end() const {
- return SymExpr::symbol_end();
+ llvm::iterator_range<SymExpr::symbol_iterator> symbols() const {
+ if (const SymExpr *SE = getAsSymbol(/*IncludeBaseRegions=*/true))
+ return SE->symbols();
+ SymExpr::symbol_iterator end{};
+ return llvm::make_range(end, end);
}
/// Try to get a reasonable type for the given value.
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h
index abd05fe34f54..862a30c0e736 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h
@@ -17,6 +17,7 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
#include <cassert>
namespace clang {
@@ -83,8 +84,9 @@ public:
bool operator!=(const symbol_iterator &X) const;
};
- symbol_iterator symbol_begin() const { return symbol_iterator(this); }
- static symbol_iterator symbol_end() { return symbol_iterator(); }
+ llvm::iterator_range<symbol_iterator> symbols() const {
+ return llvm::make_range(symbol_iterator(this), symbol_iterator());
+ }
virtual unsigned computeComplexity() const = 0;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
index b7ce6ebe9878..3b64d38ee2b2 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
@@ -24,6 +24,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
#include <cassert>
@@ -631,10 +632,9 @@ public:
/// symbol marking has occurred, i.e. in the MarkLiveSymbols callback.
void markInUse(SymbolRef sym);
- using region_iterator = RegionSetTy::const_iterator;
-
- region_iterator region_begin() const { return LiveRegionRoots.begin(); }
- region_iterator region_end() const { return LiveRegionRoots.end(); }
+ llvm::iterator_range<RegionSetTy::const_iterator> regions() const {
+ return LiveRegionRoots;
+ }
/// Returns whether or not a symbol has been confirmed dead.
///
@@ -672,6 +672,11 @@ public:
SymbolVisitor(const SymbolVisitor &) = default;
SymbolVisitor(SymbolVisitor &&) {}
+ // The copy and move assignment operator is defined as deleted pending further
+ // motivation.
+ SymbolVisitor &operator=(const SymbolVisitor &) = delete;
+ SymbolVisitor &operator=(SymbolVisitor &&) = delete;
+
/// A visitor method invoked by ProgramStateManager::scanReachableSymbols.
///
/// The method returns \c true if symbols should continue be scanned and \c
diff --git a/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index bf31dced98b2..804b1518c06b 100644
--- a/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -58,6 +58,13 @@ enum class VectorTypeModifier : uint8_t {
SFixedLog2LMUL1,
SFixedLog2LMUL2,
SFixedLog2LMUL3,
+ Tuple2,
+ Tuple3,
+ Tuple4,
+ Tuple5,
+ Tuple6,
+ Tuple7,
+ Tuple8,
};
// Similar to basic type but used to describe what's kind of type related to
@@ -243,6 +250,8 @@ class RVVType {
unsigned ElementBitwidth = 0;
VScaleVal Scale = 0;
bool Valid;
+ bool IsTuple = false;
+ unsigned NF = 0;
std::string BuiltinStr;
std::string ClangBuiltinStr;
@@ -293,10 +302,15 @@ public:
}
bool isConstant() const { return IsConstant; }
bool isPointer() const { return IsPointer; }
+ bool isTuple() const { return IsTuple; }
unsigned getElementBitwidth() const { return ElementBitwidth; }
ScalarTypeKind getScalarType() const { return ScalarType; }
VScaleVal getScale() const { return Scale; }
+ unsigned getNF() const {
+ assert(NF > 1 && NF <= 8 && "Only legal NF should be fetched");
+ return NF;
+ }
private:
// Verify RVV vector type and set Valid.
@@ -383,7 +397,7 @@ public:
const RVVTypes &Types,
const std::vector<int64_t> &IntrinsicTypes,
const std::vector<llvm::StringRef> &RequiredFeatures,
- unsigned NF, Policy PolicyAttrs);
+ unsigned NF, Policy PolicyAttrs, bool HasFRMRoundModeOp);
~RVVIntrinsic() = default;
RVVTypePtr getOutputType() const { return OutputType; }
@@ -444,7 +458,7 @@ public:
computeBuiltinTypes(llvm::ArrayRef<PrototypeDescriptor> Prototype,
bool IsMasked, bool HasMaskedOffOperand, bool HasVL,
unsigned NF, PolicyScheme DefaultScheme,
- Policy PolicyAttrs);
+ Policy PolicyAttrs, bool IsTuple);
static llvm::SmallVector<Policy> getSupportedUnMaskedPolicies();
static llvm::SmallVector<Policy>
@@ -453,7 +467,7 @@ public:
static void updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
std::string &Name, std::string &BuiltinName,
std::string &OverloadedName,
- Policy &PolicyAttrs);
+ Policy &PolicyAttrs, bool HasFRMRoundModeOp);
};
// RVVRequire should be sync'ed with target features, but only
@@ -461,9 +475,9 @@ public:
enum RVVRequire : uint8_t {
RVV_REQ_None = 0,
RVV_REQ_RV64 = 1 << 0,
- RVV_REQ_FullMultiply = 1 << 1,
+ RVV_REQ_Xsfvcp = 1 << 1,
- LLVM_MARK_AS_BITMASK_ENUM(RVV_REQ_FullMultiply)
+ LLVM_MARK_AS_BITMASK_ENUM(RVV_REQ_Xsfvcp)
};
// Raw RVV intrinsic info, used to expand later.
@@ -511,6 +525,8 @@ struct RVVIntrinsicRecord {
bool HasMaskedOffOperand : 1;
bool HasTailPolicy : 1;
bool HasMaskPolicy : 1;
+ bool HasFRMRoundModeOp : 1;
+ bool IsTuple : 1;
uint8_t UnMaskedPolicyScheme : 2;
uint8_t MaskedPolicyScheme : 2;
};
diff --git a/contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h b/contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h
index e668781ee2ce..4dd28718dfa6 100644
--- a/contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h
+++ b/contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h
@@ -38,6 +38,11 @@ std::vector<std::string> getCC1ArgsForTesting(TestLanguage Lang);
StringRef getFilenameForTesting(TestLanguage Lang);
+/// Find a target name such that looking for it in TargetRegistry by that name
+/// returns the same target. We expect that there is at least one target
+/// configured with this property.
+std::string getAnyTargetForTesting();
+
} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Testing/TestAST.h b/contrib/llvm-project/clang/include/clang/Testing/TestAST.h
index 7ba2ca882b91..845e31f65438 100644
--- a/contrib/llvm-project/clang/include/clang/Testing/TestAST.h
+++ b/contrib/llvm-project/clang/include/clang/Testing/TestAST.h
@@ -49,6 +49,9 @@ struct TestInputs {
/// Keys are plain filenames ("foo.h"), values are file content.
llvm::StringMap<std::string> ExtraFiles = {};
+ /// Filename to use for translation unit. A default will be used when empty.
+ std::string FileName;
+
/// By default, error diagnostics during parsing are reported as gtest errors.
/// To suppress this, set ErrorOK or include "error-ok" in a comment in Code.
/// In either case, all diagnostics appear in TestAST::diagnostics().
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
index b1a4df141edc..87a4299c7f1b 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
@@ -28,8 +28,14 @@ namespace dependencies {
using LookupModuleOutputCallback =
llvm::function_ref<std::string(const ModuleID &, ModuleOutputKind)>;
+/// Graph of modular dependencies.
+using ModuleDepsGraph = std::vector<ModuleDeps>;
+
/// The full dependencies and module graph for a specific input.
-struct FullDependencies {
+struct TranslationUnitDeps {
+ /// The graph of direct and transitive modular dependencies.
+ ModuleDepsGraph ModuleGraph;
+
/// The identifier of the C++20 module this translation unit exports.
///
/// If the translation unit is not a module then \c ID.ModuleName is empty.
@@ -62,11 +68,6 @@ struct FullDependencies {
std::vector<std::string> DriverCommandLine;
};
-struct FullDependenciesResult {
- FullDependencies FullDeps;
- std::vector<ModuleDeps> DiscoveredModules;
-};
-
struct P1689Rule {
std::string PrimaryOutput;
std::optional<P1689ModuleInfo> Provides;
@@ -84,14 +85,12 @@ public:
/// Print out the dependency information into a string using the dependency
/// file format that is specified in the options (-MD is the default) and
- /// return it. If \p ModuleName isn't empty, this function returns the
- /// dependency information of module \p ModuleName.
+ /// return it.
///
/// \returns A \c StringError with the diagnostic output if clang errors
/// occurred, dependency file contents otherwise.
llvm::Expected<std::string>
- getDependencyFile(const std::vector<std::string> &CommandLine, StringRef CWD,
- std::optional<StringRef> ModuleName = std::nullopt);
+ getDependencyFile(const std::vector<std::string> &CommandLine, StringRef CWD);
/// Collect the module dependency in P1689 format for C++20 named modules.
///
@@ -105,9 +104,9 @@ public:
/// \returns A \c StringError with the diagnostic output if clang errors
/// occurred, P1689 dependency format rules otherwise.
llvm::Expected<P1689Rule>
- getP1689ModuleDependencyFile(
- const clang::tooling::CompileCommand &Command, StringRef CWD,
- std::string &MakeformatOutput, std::string &MakeformatOutputPath);
+ getP1689ModuleDependencyFile(const clang::tooling::CompileCommand &Command,
+ StringRef CWD, std::string &MakeformatOutput,
+ std::string &MakeformatOutputPath);
/// Given a Clang driver command-line for a translation unit, gather the
/// modular dependencies and return the information needed for explicit build.
@@ -122,18 +121,21 @@ public:
/// arguments for dependencies.
///
/// \returns a \c StringError with the diagnostic output if clang errors
- /// occurred, \c FullDependencies otherwise.
- llvm::Expected<FullDependenciesResult>
- getFullDependencies(const std::vector<std::string> &CommandLine,
- StringRef CWD, const llvm::StringSet<> &AlreadySeen,
- LookupModuleOutputCallback LookupModuleOutput,
- std::optional<StringRef> ModuleName = std::nullopt);
-
- llvm::Expected<FullDependenciesResult> getFullDependenciesLegacyDriverCommand(
- const std::vector<std::string> &CommandLine, StringRef CWD,
- const llvm::StringSet<> &AlreadySeen,
- LookupModuleOutputCallback LookupModuleOutput,
- std::optional<StringRef> ModuleName = std::nullopt);
+ /// occurred, \c TranslationUnitDeps otherwise.
+ llvm::Expected<TranslationUnitDeps>
+ getTranslationUnitDependencies(const std::vector<std::string> &CommandLine,
+ StringRef CWD,
+ const llvm::StringSet<> &AlreadySeen,
+ LookupModuleOutputCallback LookupModuleOutput);
+
+ /// Given a compilation context specified via the Clang driver command-line,
+ /// gather modular dependencies of module with the given name, and return the
+ /// information needed for explicit build.
+ llvm::Expected<ModuleDepsGraph>
+ getModuleDependencies(StringRef ModuleName,
+ const std::vector<std::string> &CommandLine,
+ StringRef CWD, const llvm::StringSet<> &AlreadySeen,
+ LookupModuleOutputCallback LookupModuleOutput);
private:
DependencyScanningWorker Worker;
@@ -141,11 +143,8 @@ private:
class FullDependencyConsumer : public DependencyConsumer {
public:
- FullDependencyConsumer(const llvm::StringSet<> &AlreadySeen,
- LookupModuleOutputCallback LookupModuleOutput,
- bool EagerLoadModules)
- : AlreadySeen(AlreadySeen), LookupModuleOutput(LookupModuleOutput),
- EagerLoadModules(EagerLoadModules) {}
+ FullDependencyConsumer(const llvm::StringSet<> &AlreadySeen)
+ : AlreadySeen(AlreadySeen) {}
void handleBuildCommand(Command Cmd) override {
Commands.push_back(std::move(Cmd));
@@ -169,15 +168,8 @@ public:
ContextHash = std::move(Hash);
}
- std::string lookupModuleOutput(const ModuleID &ID,
- ModuleOutputKind Kind) override {
- return LookupModuleOutput(ID, Kind);
- }
-
- FullDependenciesResult getFullDependenciesLegacyDriverCommand(
- const std::vector<std::string> &OriginalCommandLine) const;
-
- FullDependenciesResult takeFullDependencies();
+ TranslationUnitDeps takeTranslationUnitDeps();
+ ModuleDepsGraph takeModuleGraphDeps();
private:
std::vector<std::string> Dependencies;
@@ -188,8 +180,31 @@ private:
std::string ContextHash;
std::vector<std::string> OutputPaths;
const llvm::StringSet<> &AlreadySeen;
+};
+
+/// A simple dependency action controller that uses a callback. If no callback
+/// is provided, it is assumed that looking up module outputs is unreachable.
+class CallbackActionController : public DependencyActionController {
+public:
+ virtual ~CallbackActionController();
+
+ CallbackActionController(LookupModuleOutputCallback LMO)
+ : LookupModuleOutput(std::move(LMO)) {
+ if (!LookupModuleOutput) {
+ LookupModuleOutput = [](const ModuleID &,
+ ModuleOutputKind) -> std::string {
+ llvm::report_fatal_error("unexpected call to lookupModuleOutput");
+ };
+ }
+ }
+
+ std::string lookupModuleOutput(const ModuleID &ID,
+ ModuleOutputKind Kind) override {
+ return LookupModuleOutput(ID, Kind);
+ }
+
+private:
LookupModuleOutputCallback LookupModuleOutput;
- bool EagerLoadModules;
};
} // end namespace dependencies
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
index 6edf2cbe6b53..350acb8f8a79 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
@@ -31,7 +31,7 @@ class DependencyScanningWorkerFilesystem;
/// A command-line tool invocation that is part of building a TU.
///
-/// \see FullDependencies::Commands.
+/// \see TranslationUnitDeps::Commands.
struct Command {
std::string Executable;
std::vector<std::string> Arguments;
@@ -57,6 +57,13 @@ public:
virtual void handleModuleDependency(ModuleDeps MD) = 0;
virtual void handleContextHash(std::string Hash) = 0;
+};
+
+/// Dependency scanner callbacks that are used during scanning to influence the
+/// behaviour of the scan - for example, to customize the scanned invocations.
+class DependencyActionController {
+public:
+ virtual ~DependencyActionController();
virtual std::string lookupModuleOutput(const ModuleID &ID,
ModuleOutputKind Kind) = 0;
@@ -83,15 +90,15 @@ public:
bool computeDependencies(StringRef WorkingDirectory,
const std::vector<std::string> &CommandLine,
DependencyConsumer &DepConsumer,
+ DependencyActionController &Controller,
DiagnosticConsumer &DiagConsumer,
std::optional<StringRef> ModuleName = std::nullopt);
/// \returns A \c StringError with the diagnostic output if clang errors
/// occurred, success otherwise.
- llvm::Error
- computeDependencies(StringRef WorkingDirectory,
- const std::vector<std::string> &CommandLine,
- DependencyConsumer &Consumer,
- std::optional<StringRef> ModuleName = std::nullopt);
+ llvm::Error computeDependencies(
+ StringRef WorkingDirectory, const std::vector<std::string> &CommandLine,
+ DependencyConsumer &Consumer, DependencyActionController &Controller,
+ std::optional<StringRef> ModuleName = std::nullopt);
bool shouldEagerLoadModules() const { return EagerLoadModules; }
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
index bce3e066372f..0a5cbc4f046a 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
@@ -27,6 +27,7 @@ namespace clang {
namespace tooling {
namespace dependencies {
+class DependencyActionController;
class DependencyConsumer;
/// Modular dependency that has already been built prior to the dependency scan.
@@ -58,7 +59,13 @@ struct ModuleID {
std::string ContextHash;
bool operator==(const ModuleID &Other) const {
- return ModuleName == Other.ModuleName && ContextHash == Other.ContextHash;
+ return std::tie(ModuleName, ContextHash) ==
+ std::tie(Other.ModuleName, Other.ContextHash);
+ }
+
+ bool operator<(const ModuleID& Other) const {
+ return std::tie(ModuleName, ContextHash) <
+ std::tie(Other.ModuleName, Other.ContextHash);
}
};
@@ -147,9 +154,9 @@ class ModuleDepCollectorPP final : public PPCallbacks {
public:
ModuleDepCollectorPP(ModuleDepCollector &MDC) : MDC(MDC) {}
- void FileChanged(SourceLocation Loc, FileChangeReason Reason,
- SrcMgr::CharacteristicKind FileType,
- FileID PrevFID) override;
+ void LexedFileChanged(FileID FID, LexedFileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType, FileID PrevFID,
+ SourceLocation Loc) override;
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange,
@@ -201,6 +208,7 @@ class ModuleDepCollector final : public DependencyCollector {
public:
ModuleDepCollector(std::unique_ptr<DependencyOutputOptions> Opts,
CompilerInstance &ScanInstance, DependencyConsumer &C,
+ DependencyActionController &Controller,
CompilerInvocation OriginalCI, bool OptimizeArgs,
bool EagerLoadModules, bool IsStdModuleP1689Format);
@@ -218,6 +226,8 @@ private:
CompilerInstance &ScanInstance;
/// The consumer of collected dependency information.
DependencyConsumer &Consumer;
+ /// Callbacks for computing dependency information.
+ DependencyActionController &Controller;
/// Path to the main source file.
std::string MainFile;
/// Hash identifying the compilation conditions of the current TU.
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderAnalysis.h b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderAnalysis.h
index 760b8dd0879c..84d90c44de07 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderAnalysis.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderAnalysis.h
@@ -28,7 +28,7 @@ namespace tooling {
/// This function can be expensive as it may scan the source code to find out
/// dont-include-me pattern heuristically.
bool isSelfContainedHeader(const FileEntry *FE, const SourceManager &SM,
- HeaderSearch &HeaderInfo);
+ const HeaderSearch &HeaderInfo);
/// This scans the given source code to see if it contains #import(s).
bool codeContainsImports(llvm::StringRef Code);
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderIncludes.h b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderIncludes.h
index 9e08046d2e33..d5439dd2c84e 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderIncludes.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderIncludes.h
@@ -128,6 +128,8 @@ private:
// inserting new #includes into the actual code section (e.g. after a
// declaration).
unsigned MaxInsertOffset;
+ // True if we find the main-file header in the Code.
+ bool MainIncludeFound;
IncludeCategoryManager Categories;
// Record the offset of the end of the last include in each category.
std::unordered_map<int, int> CategoryEndOffsets;
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/StandardLibrary.h b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/StandardLibrary.h
index beb2f496f51f..a39ceb520dcf 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/StandardLibrary.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/StandardLibrary.h
@@ -30,6 +30,7 @@ namespace tooling {
namespace stdlib {
class Symbol;
+enum class Lang { C = 0, CXX, LastValue = CXX };
// A standard library header, such as <iostream>
// Lightweight class, in fact just an index into a table.
@@ -37,8 +38,10 @@ class Symbol;
// "<cstdio>" and "<stdio.h>" (and their symbols) are treated differently.
class Header {
public:
+ static std::vector<Header> all(Lang L = Lang::CXX);
// Name should contain the angle brackets, e.g. "<vector>".
- static std::optional<Header> named(llvm::StringRef Name);
+ static std::optional<Header> named(llvm::StringRef Name,
+ Lang Language = Lang::CXX);
friend llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Header &H) {
return OS << H.name();
@@ -46,8 +49,10 @@ public:
llvm::StringRef name() const;
private:
- Header(unsigned ID) : ID(ID) {}
+ Header(unsigned ID, Lang Language) : ID(ID), Language(Language) {}
unsigned ID;
+ Lang Language;
+
friend Symbol;
friend llvm::DenseMapInfo<Header>;
friend bool operator==(const Header &L, const Header &R) {
@@ -63,24 +68,28 @@ private:
// for them.
class Symbol {
public:
+ static std::vector<Symbol> all(Lang L = Lang::CXX);
/// \p Scope should have the trailing "::", for example:
/// named("std::chrono::", "system_clock")
- static std::optional<Symbol> named(llvm::StringRef Scope,
- llvm::StringRef Name);
+ static std::optional<Symbol>
+ named(llvm::StringRef Scope, llvm::StringRef Name, Lang Language = Lang::CXX);
friend llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Symbol &S) {
- return OS << S.scope() << S.name();
+ return OS << S.qualifiedName();
}
llvm::StringRef scope() const;
llvm::StringRef name() const;
+ llvm::StringRef qualifiedName() const;
// The preferred header for this symbol (e.g. the suggested insertion).
- Header header() const;
+ std::optional<Header> header() const;
// Some symbols may be provided by multiple headers.
llvm::SmallVector<Header> headers() const;
private:
- Symbol(unsigned ID) : ID(ID) {}
+ Symbol(unsigned ID, Lang Language) : ID(ID), Language(Language) {}
unsigned ID;
+ Lang Language;
+
friend class Recognizer;
friend llvm::DenseMapInfo<Symbol>;
friend bool operator==(const Symbol &L, const Symbol &R) {
@@ -99,7 +108,7 @@ public:
private:
using NSSymbolMap = llvm::DenseMap<llvm::StringRef, unsigned>;
- NSSymbolMap *namespaceSymbols(const NamespaceDecl *D);
+ NSSymbolMap *namespaceSymbols(const DeclContext *DC, Lang L);
llvm::DenseMap<const DeclContext *, NSSymbolMap *> NamespaceCache;
};
@@ -111,10 +120,12 @@ namespace llvm {
template <> struct DenseMapInfo<clang::tooling::stdlib::Header> {
static inline clang::tooling::stdlib::Header getEmptyKey() {
- return clang::tooling::stdlib::Header(-1);
+ return clang::tooling::stdlib::Header(-1,
+ clang::tooling::stdlib::Lang::CXX);
}
static inline clang::tooling::stdlib::Header getTombstoneKey() {
- return clang::tooling::stdlib::Header(-2);
+ return clang::tooling::stdlib::Header(-2,
+ clang::tooling::stdlib::Lang::CXX);
}
static unsigned getHashValue(const clang::tooling::stdlib::Header &H) {
return hash_value(H.ID);
@@ -127,10 +138,12 @@ template <> struct DenseMapInfo<clang::tooling::stdlib::Header> {
template <> struct DenseMapInfo<clang::tooling::stdlib::Symbol> {
static inline clang::tooling::stdlib::Symbol getEmptyKey() {
- return clang::tooling::stdlib::Symbol(-1);
+ return clang::tooling::stdlib::Symbol(-1,
+ clang::tooling::stdlib::Lang::CXX);
}
static inline clang::tooling::stdlib::Symbol getTombstoneKey() {
- return clang::tooling::stdlib::Symbol(-2);
+ return clang::tooling::stdlib::Symbol(-2,
+ clang::tooling::stdlib::Lang::CXX);
}
static unsigned getHashValue(const clang::tooling::stdlib::Symbol &S) {
return hash_value(S.ID);
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/StdSymbolMap.inc b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/StdSymbolMap.inc
deleted file mode 100644
index e5e9065731e9..000000000000
--- a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/StdSymbolMap.inc
+++ /dev/null
@@ -1,1538 +0,0 @@
-//===-- gen_std.py generated file -------------------------------*- C++ -*-===//
-//
-// Used to build a lookup table (qualified names => include headers) for CPP
-// Standard Library symbols.
-//
-// This file was generated automatically by
-// clang/tools/include-mapping/gen_std.py, DO NOT EDIT!
-//
-// Generated from cppreference offline HTML book (modified on 2018-10-28).
-//===----------------------------------------------------------------------===//
-
-SYMBOL(Assignable, std::, <concepts>)
-SYMBOL(Boolean, std::, <concepts>)
-SYMBOL(Common, std::, <concepts>)
-SYMBOL(CommonReference, std::, <concepts>)
-SYMBOL(Constructible, std::, <concepts>)
-SYMBOL(ConvertibleTo, std::, <concepts>)
-SYMBOL(CopyConstructible, std::, <concepts>)
-SYMBOL(Copyable, std::, <concepts>)
-SYMBOL(DefaultConstructible, std::, <concepts>)
-SYMBOL(DerivedFrom, std::, <concepts>)
-SYMBOL(Destructible, std::, <concepts>)
-SYMBOL(EqualityComparable, std::, <concepts>)
-SYMBOL(EqualityComparableWith, std::, <concepts>)
-SYMBOL(FILE, std::, <cstdio>)
-SYMBOL(Integral, std::, <concepts>)
-SYMBOL(Invocable, std::, <concepts>)
-SYMBOL(Movable, std::, <concepts>)
-SYMBOL(MoveConstructible, std::, <concepts>)
-SYMBOL(Predicate, std::, <concepts>)
-SYMBOL(Regular, std::, <concepts>)
-SYMBOL(RegularInvocable, std::, <concepts>)
-SYMBOL(Relation, std::, <concepts>)
-SYMBOL(Same, std::, <concepts>)
-SYMBOL(Semiregular, std::, <concepts>)
-SYMBOL(SignedIntegral, std::, <concepts>)
-SYMBOL(StrictTotallyOrdered, std::, <concepts>)
-SYMBOL(StrictTotallyOrderedWith, std::, <concepts>)
-SYMBOL(StrictWeakOrder, std::, <concepts>)
-SYMBOL(Swappable, std::, <concepts>)
-SYMBOL(SwappableWith, std::, <concepts>)
-SYMBOL(UniformRandomBitGenerator, std::, <random>)
-SYMBOL(UnsignedIntegral, std::, <concepts>)
-SYMBOL(_Exit, std::, <cstdlib>)
-SYMBOL(accumulate, std::, <numeric>)
-SYMBOL(acos, std::, <cmath>)
-SYMBOL(acosh, std::, <cmath>)
-SYMBOL(add_const, std::, <type_traits>)
-SYMBOL(add_const_t, std::, <type_traits>)
-SYMBOL(add_cv, std::, <type_traits>)
-SYMBOL(add_cv_t, std::, <type_traits>)
-SYMBOL(add_lvalue_reference, std::, <type_traits>)
-SYMBOL(add_lvalue_reference_t, std::, <type_traits>)
-SYMBOL(add_pointer, std::, <type_traits>)
-SYMBOL(add_pointer_t, std::, <type_traits>)
-SYMBOL(add_rvalue_reference, std::, <type_traits>)
-SYMBOL(add_rvalue_reference_t, std::, <type_traits>)
-SYMBOL(add_volatile, std::, <type_traits>)
-SYMBOL(add_volatile_t, std::, <type_traits>)
-SYMBOL(addressof, std::, <memory>)
-SYMBOL(adjacent_difference, std::, <numeric>)
-SYMBOL(adjacent_find, std::, <algorithm>)
-SYMBOL(adopt_lock, std::, <mutex>)
-SYMBOL(adopt_lock_t, std::, <mutex>)
-SYMBOL(advance, std::, <iterator>)
-SYMBOL(align, std::, <memory>)
-SYMBOL(align_val_t, std::, <new>)
-SYMBOL(aligned_alloc, std::, <cstdlib>)
-SYMBOL(aligned_storage, std::, <type_traits>)
-SYMBOL(aligned_storage_t, std::, <type_traits>)
-SYMBOL(aligned_union, std::, <type_traits>)
-SYMBOL(aligned_union_t, std::, <type_traits>)
-SYMBOL(alignment_of, std::, <type_traits>)
-SYMBOL(alignment_of_v, std::, <type_traits>)
-SYMBOL(all_of, std::, <algorithm>)
-SYMBOL(allocate_shared, std::, <memory>)
-SYMBOL(allocator, std::, <memory>)
-SYMBOL(allocator_arg, std::, <memory>)
-SYMBOL(allocator_arg_t, std::, <memory>)
-SYMBOL(allocator_traits, std::, <memory>)
-SYMBOL(any, std::, <any>)
-SYMBOL(any_of, std::, <algorithm>)
-SYMBOL(apply, std::, <tuple>)
-SYMBOL(arg, std::, <complex>)
-SYMBOL(array, std::, <array>)
-SYMBOL(as_const, std::, <utility>)
-SYMBOL(asctime, std::, <ctime>)
-SYMBOL(asin, std::, <cmath>)
-SYMBOL(asinh, std::, <cmath>)
-SYMBOL(async, std::, <future>)
-SYMBOL(at_quick_exit, std::, <cstdlib>)
-SYMBOL(atan, std::, <cmath>)
-SYMBOL(atan2, std::, <cmath>)
-SYMBOL(atanh, std::, <cmath>)
-SYMBOL(atexit, std::, <cstdlib>)
-SYMBOL(atof, std::, <cstdlib>)
-SYMBOL(atoi, std::, <cstdlib>)
-SYMBOL(atol, std::, <cstdlib>)
-SYMBOL(atoll, std::, <cstdlib>)
-SYMBOL(atomic_compare_exchange_strong, std::, <atomic>)
-SYMBOL(atomic_compare_exchange_strong_explicit, std::, <atomic>)
-SYMBOL(atomic_compare_exchange_weak, std::, <atomic>)
-SYMBOL(atomic_compare_exchange_weak_explicit, std::, <atomic>)
-SYMBOL(atomic_exchange, std::, <atomic>)
-SYMBOL(atomic_exchange_explicit, std::, <atomic>)
-SYMBOL(atomic_fetch_add, std::, <atomic>)
-SYMBOL(atomic_fetch_add_explicit, std::, <atomic>)
-SYMBOL(atomic_fetch_and, std::, <atomic>)
-SYMBOL(atomic_fetch_and_explicit, std::, <atomic>)
-SYMBOL(atomic_fetch_or, std::, <atomic>)
-SYMBOL(atomic_fetch_or_explicit, std::, <atomic>)
-SYMBOL(atomic_fetch_sub, std::, <atomic>)
-SYMBOL(atomic_fetch_sub_explicit, std::, <atomic>)
-SYMBOL(atomic_fetch_xor, std::, <atomic>)
-SYMBOL(atomic_fetch_xor_explicit, std::, <atomic>)
-SYMBOL(atomic_flag, std::, <atomic>)
-SYMBOL(atomic_flag_clear, std::, <atomic>)
-SYMBOL(atomic_flag_clear_explicit, std::, <atomic>)
-SYMBOL(atomic_flag_test_and_set, std::, <atomic>)
-SYMBOL(atomic_flag_test_and_set_explicit, std::, <atomic>)
-SYMBOL(atomic_init, std::, <atomic>)
-SYMBOL(atomic_is_lockfree, std::, <atomic>)
-SYMBOL(atomic_load, std::, <atomic>)
-SYMBOL(atomic_load_explicit, std::, <atomic>)
-SYMBOL(atomic_ref, std::, <atomic>)
-SYMBOL(atomic_signal_fence, std::, <atomic>)
-SYMBOL(atomic_store, std::, <atomic>)
-SYMBOL(atomic_store_explicit, std::, <atomic>)
-SYMBOL(atomic_thread_fence, std::, <atomic>)
-SYMBOL(atto, std::, <ratio>)
-SYMBOL(auto_ptr, std::, <memory>)
-SYMBOL(back_insert_iterator, std::, <iterator>)
-SYMBOL(back_inserter, std::, <iterator>)
-SYMBOL(bad_alloc, std::, <new>)
-SYMBOL(bad_any_cast, std::, <any>)
-SYMBOL(bad_array_new_length, std::, <new>)
-SYMBOL(bad_cast, std::, <typeinfo>)
-SYMBOL(bad_exception, std::, <exception>)
-SYMBOL(bad_function_call, std::, <functional>)
-SYMBOL(bad_optional_access, std::, <optional>)
-SYMBOL(bad_typeid, std::, <typeinfo>)
-SYMBOL(bad_variant_access, std::, <variant>)
-SYMBOL(bad_weak_ptr, std::, <memory>)
-SYMBOL(basic_common_reference, std::, <type_traits>)
-SYMBOL(basic_filebuf, std::, <fstream>)
-SYMBOL(basic_fstream, std::, <fstream>)
-SYMBOL(basic_ifstream, std::, <fstream>)
-SYMBOL(basic_ios, std::, <ios>)
-SYMBOL(basic_iostream, std::, <istream>)
-SYMBOL(basic_istream, std::, <istream>)
-SYMBOL(basic_istringstream, std::, <sstream>)
-SYMBOL(basic_ofstream, std::, <fstream>)
-SYMBOL(basic_ostream, std::, <ostream>)
-SYMBOL(basic_ostringstream, std::, <sstream>)
-SYMBOL(basic_osyncstream, std::, <syncstream>)
-SYMBOL(basic_regex, std::, <regex>)
-SYMBOL(basic_streambuf, std::, <streambuf>)
-SYMBOL(basic_string, std::, <string>)
-SYMBOL(basic_string_view, std::, <string_view>)
-SYMBOL(basic_stringbuf, std::, <sstream>)
-SYMBOL(basic_stringstream, std::, <sstream>)
-SYMBOL(basic_syncbuf, std::, <syncstream>)
-SYMBOL(begin, std::, <iterator>)
-SYMBOL(bernoulli_distribution, std::, <random>)
-SYMBOL(bidirectional_iterator_tag, std::, <iterator>)
-SYMBOL(binary_search, std::, <algorithm>)
-SYMBOL(bind, std::, <functional>)
-SYMBOL(binomial_distribution, std::, <random>)
-SYMBOL(bit_and, std::, <functional>)
-SYMBOL(bit_cast, std::, <bit>)
-SYMBOL(bit_not, std::, <functional>)
-SYMBOL(bit_or, std::, <functional>)
-SYMBOL(bit_xor, std::, <functional>)
-SYMBOL(bitset, std::, <bitset>)
-SYMBOL(bool_constant, std::, <type_traits>)
-SYMBOL(boolalpha, std::, <ios>)
-SYMBOL(boyer_moore_horspool_searcher, std::, <functional>)
-SYMBOL(boyer_moore_searcher, std::, <functional>)
-SYMBOL(bsearch, std::, <cstdlib>)
-SYMBOL(btowc, std::, <cwchar>)
-SYMBOL(byte, std::, <cstddef>)
-SYMBOL(c16rtomb, std::, <cuchar>)
-SYMBOL(c32rtomb, std::, <cuchar>)
-SYMBOL(call_once, std::, <mutex>)
-SYMBOL(calloc, std::, <cstdlib>)
-SYMBOL(cauchy_distribution, std::, <random>)
-SYMBOL(cbegin, std::, <iterator>)
-SYMBOL(cbrt, std::, <cmath>)
-SYMBOL(ceil, std::, <cmath>)
-SYMBOL(ceil2, std::, <bit>)
-SYMBOL(cend, std::, <iterator>)
-SYMBOL(centi, std::, <ratio>)
-SYMBOL(cerr, std::, <iostream>)
-SYMBOL(char_traits, std::, <string>)
-SYMBOL(chars_format, std::, <charconv>)
-SYMBOL(chi_squared_distribution, std::, <random>)
-SYMBOL(cin, std::, <iostream>)
-SYMBOL(clamp, std::, <algorithm>)
-SYMBOL(clearerr, std::, <cstdio>)
-SYMBOL(clock, std::, <ctime>)
-SYMBOL(clock_t, std::, <ctime>)
-SYMBOL(clog, std::, <iostream>)
-SYMBOL(cmatch, std::, <regex>)
-SYMBOL(codecvt, std::, <locale>)
-SYMBOL(codecvt_base, std::, <locale>)
-SYMBOL(codecvt_byname, std::, <locale>)
-SYMBOL(codecvt_mode, std::, <codecvt>)
-SYMBOL(codecvt_utf16, std::, <codecvt>)
-SYMBOL(codecvt_utf8, std::, <codecvt>)
-SYMBOL(codecvt_utf8_utf16, std::, <codecvt>)
-SYMBOL(collate, std::, <locale>)
-SYMBOL(collate_byname, std::, <locale>)
-SYMBOL(common_comparison_category, std::, <compare>)
-SYMBOL(common_comparison_category_t, std::, <compare>)
-SYMBOL(common_reference, std::, <type_traits>)
-SYMBOL(common_reference_t, std::, <type_traits>)
-SYMBOL(common_type, std::, <type_traits>)
-SYMBOL(common_type_t, std::, <type_traits>)
-SYMBOL(compare_3way, std::, <algorithm>)
-SYMBOL(complex, std::, <complex>)
-SYMBOL(condition_variable, std::, <condition_variable>)
-SYMBOL(condition_variable_any, std::, <condition_variable>)
-SYMBOL(conditional, std::, <type_traits>)
-SYMBOL(conditional_t, std::, <type_traits>)
-SYMBOL(conj, std::, <complex>)
-SYMBOL(conjunction, std::, <type_traits>)
-SYMBOL(conjunction_v, std::, <type_traits>)
-SYMBOL(const_pointer_cast, std::, <memory>)
-SYMBOL(contract_violation, std::, <contract>)
-SYMBOL(copy, std::, <algorithm>)
-SYMBOL(copy_backward, std::, <algorithm>)
-SYMBOL(copy_if, std::, <algorithm>)
-SYMBOL(copy_n, std::, <algorithm>)
-SYMBOL(copysign, std::, <cmath>)
-SYMBOL(cos, std::, <cmath>)
-SYMBOL(cosh, std::, <cmath>)
-SYMBOL(count, std::, <algorithm>)
-SYMBOL(count_if, std::, <algorithm>)
-SYMBOL(cout, std::, <iostream>)
-SYMBOL(crbegin, std::, <iterator>)
-SYMBOL(cref, std::, <functional>)
-SYMBOL(cregex_iterator, std::, <regex>)
-SYMBOL(cregex_token_iterator, std::, <regex>)
-SYMBOL(crend, std::, <iterator>)
-SYMBOL(csub_match, std::, <regex>)
-SYMBOL(ctime, std::, <ctime>)
-SYMBOL(ctype, std::, <locale>)
-SYMBOL(ctype_base, std::, <locale>)
-SYMBOL(ctype_byname, std::, <locale>)
-SYMBOL(current_exception, std::, <exception>)
-SYMBOL(cv_status, std::, <condition_variable>)
-SYMBOL(data, std::, <iterator>)
-SYMBOL(dec, std::, <ios>)
-SYMBOL(deca, std::, <ratio>)
-SYMBOL(decay, std::, <type_traits>)
-SYMBOL(decay_t, std::, <type_traits>)
-SYMBOL(deci, std::, <ratio>)
-SYMBOL(declare_no_pointers, std::, <memory>)
-SYMBOL(declare_reachable, std::, <memory>)
-SYMBOL(declval, std::, <utility>)
-SYMBOL(default_delete, std::, <memory>)
-SYMBOL(default_random_engine, std::, <random>)
-SYMBOL(default_searcher, std::, <functional>)
-SYMBOL(defaultfloat, std::, <ios>)
-SYMBOL(defer_lock, std::, <mutex>)
-SYMBOL(defer_lock_t, std::, <mutex>)
-SYMBOL(denorm_absent, std::, <limits>)
-SYMBOL(denorm_indeterminate, std::, <limits>)
-SYMBOL(denorm_present, std::, <limits>)
-SYMBOL(deque, std::, <deque>)
-SYMBOL(destroy, std::, <memory>)
-SYMBOL(destroy_at, std::, <memory>)
-SYMBOL(destroy_n, std::, <memory>)
-SYMBOL(destroying_delete, std::, <new>)
-SYMBOL(destroying_delete_t, std::, <new>)
-SYMBOL(difftime, std::, <ctime>)
-SYMBOL(discard_block_engine, std::, <random>)
-SYMBOL(discrete_distribution, std::, <random>)
-SYMBOL(disjunction, std::, <type_traits>)
-SYMBOL(disjunction_v, std::, <type_traits>)
-SYMBOL(distance, std::, <iterator>)
-SYMBOL(div_t, std::, <cstdlib>)
-SYMBOL(divides, std::, <functional>)
-SYMBOL(domain_error, std::, <stdexcept>)
-SYMBOL(double_t, std::, <cmath>)
-SYMBOL(dynamic_extent, std::, <span>)
-SYMBOL(dynamic_pointer_cast, std::, <memory>)
-SYMBOL(emit_on_flush, std::, <ostream>)
-SYMBOL(empty, std::, <iterator>)
-SYMBOL(enable_if, std::, <type_traits>)
-SYMBOL(enable_if_t, std::, <type_traits>)
-SYMBOL(enable_shared_from_this, std::, <memory>)
-SYMBOL(end, std::, <iterator>)
-SYMBOL(endian, std::, <type_traits>)
-SYMBOL(endl, std::, <ostream>)
-SYMBOL(ends, std::, <ostream>)
-SYMBOL(equal, std::, <algorithm>)
-SYMBOL(equal_range, std::, <algorithm>)
-SYMBOL(equal_to, std::, <functional>)
-SYMBOL(erf, std::, <cmath>)
-SYMBOL(erfc, std::, <cmath>)
-SYMBOL(errc, std::, <system_error>)
-SYMBOL(error_category, std::, <system_error>)
-SYMBOL(error_code, std::, <system_error>)
-SYMBOL(error_condition, std::, <system_error>)
-SYMBOL(exa, std::, <ratio>)
-SYMBOL(exception, std::, <exception>)
-SYMBOL(exception_ptr, std::, <exception>)
-SYMBOL(exchange, std::, <utility>)
-SYMBOL(exclusive_scan, std::, <numeric>)
-SYMBOL(exit, std::, <cstdlib>)
-SYMBOL(exp, std::, <cmath>)
-SYMBOL(exp2, std::, <cmath>)
-SYMBOL(expm1, std::, <cmath>)
-SYMBOL(exponential_distribution, std::, <random>)
-SYMBOL(extent, std::, <type_traits>)
-SYMBOL(extent_v, std::, <type_traits>)
-SYMBOL(extreme_value_distribution, std::, <random>)
-SYMBOL(false_type, std::, <type_traits>)
-SYMBOL(fclose, std::, <cstdio>)
-SYMBOL(fdim, std::, <cmath>)
-SYMBOL(feclearexcept, std::, <cfenv>)
-SYMBOL(fegetenv, std::, <cfenv>)
-SYMBOL(fegetexceptflag, std::, <cfenv>)
-SYMBOL(fegetround, std::, <cfenv>)
-SYMBOL(feholdexcept, std::, <cfenv>)
-SYMBOL(femto, std::, <ratio>)
-SYMBOL(fenv_t, std::, <cfenv>)
-SYMBOL(feof, std::, <cstdio>)
-SYMBOL(feraiseexcept, std::, <cfenv>)
-SYMBOL(ferror, std::, <cstdio>)
-SYMBOL(fesetenv, std::, <cfenv>)
-SYMBOL(fesetexceptflag, std::, <cfenv>)
-SYMBOL(fesetround, std::, <cfenv>)
-SYMBOL(fetestexcept, std::, <cfenv>)
-SYMBOL(feupdateenv, std::, <cfenv>)
-SYMBOL(fexcept_t, std::, <cfenv>)
-SYMBOL(fflush, std::, <cstdio>)
-SYMBOL(fgetc, std::, <cstdio>)
-SYMBOL(fgetpos, std::, <cstdio>)
-SYMBOL(fgets, std::, <cstdio>)
-SYMBOL(fgetwc, std::, <cwchar>)
-SYMBOL(fgetws, std::, <cwchar>)
-SYMBOL(filebuf, std::, <streambuf>)
-SYMBOL(fill, std::, <algorithm>)
-SYMBOL(fill_n, std::, <algorithm>)
-SYMBOL(find, std::, <algorithm>)
-SYMBOL(find_end, std::, <algorithm>)
-SYMBOL(find_first_of, std::, <algorithm>)
-SYMBOL(find_if, std::, <algorithm>)
-SYMBOL(find_if_not, std::, <algorithm>)
-SYMBOL(fisher_f_distribution, std::, <random>)
-SYMBOL(fixed, std::, <ios>)
-SYMBOL(float_denorm_style, std::, <limits>)
-SYMBOL(float_round_style, std::, <limits>)
-SYMBOL(float_t, std::, <cmath>)
-SYMBOL(floor, std::, <cmath>)
-SYMBOL(floor2, std::, <bit>)
-SYMBOL(flush, std::, <ostream>)
-SYMBOL(flush_emit, std::, <ostream>)
-SYMBOL(fma, std::, <cmath>)
-SYMBOL(fmax, std::, <cmath>)
-SYMBOL(fmin, std::, <cmath>)
-SYMBOL(fmod, std::, <cmath>)
-SYMBOL(fopen, std::, <cstdio>)
-SYMBOL(for_each, std::, <algorithm>)
-SYMBOL(for_each_n, std::, <algorithm>)
-SYMBOL(forward, std::, <utility>)
-SYMBOL(forward_as_tuple, std::, <tuple>)
-SYMBOL(forward_iterator_tag, std::, <iterator>)
-SYMBOL(forward_list, std::, <forward_list>)
-SYMBOL(fpclassify, std::, <cmath>)
-SYMBOL(fpos, std::, <ios>)
-SYMBOL(fpos_t, std::, <cstdio>)
-SYMBOL(fprintf, std::, <cstdio>)
-SYMBOL(fputc, std::, <cstdio>)
-SYMBOL(fputs, std::, <cstdio>)
-SYMBOL(fputwc, std::, <cwchar>)
-SYMBOL(fputws, std::, <cwchar>)
-SYMBOL(fread, std::, <cstdio>)
-SYMBOL(free, std::, <cstdlib>)
-SYMBOL(freopen, std::, <cstdio>)
-SYMBOL(frexp, std::, <cmath>)
-SYMBOL(from_chars, std::, <charconv>)
-SYMBOL(front_insert_iterator, std::, <iterator>)
-SYMBOL(front_inserter, std::, <iterator>)
-SYMBOL(fscanf, std::, <cstdio>)
-SYMBOL(fseek, std::, <cstdio>)
-SYMBOL(fsetpos, std::, <cstdio>)
-SYMBOL(fstream, std::, <fstream>)
-SYMBOL(ftell, std::, <cstdio>)
-SYMBOL(function, std::, <functional>)
-SYMBOL(future, std::, <future>)
-SYMBOL(future_category, std::, <future>)
-SYMBOL(future_errc, std::, <future>)
-SYMBOL(future_error, std::, <future>)
-SYMBOL(future_status, std::, <future>)
-SYMBOL(fwide, std::, <cwchar>)
-SYMBOL(fwprintf, std::, <cwchar>)
-SYMBOL(fwrite, std::, <cstdio>)
-SYMBOL(fwscanf, std::, <cwchar>)
-SYMBOL(gamma_distribution, std::, <random>)
-SYMBOL(gcd, std::, <numeric>)
-SYMBOL(generate, std::, <algorithm>)
-SYMBOL(generate_canonical, std::, <random>)
-SYMBOL(generate_n, std::, <algorithm>)
-SYMBOL(generic_category, std::, <system_error>)
-SYMBOL(geometric_distribution, std::, <random>)
-SYMBOL(get_if, std::, <variant>)
-SYMBOL(get_money, std::, <iomanip>)
-SYMBOL(get_new_handler, std::, <new>)
-SYMBOL(get_pointer_safety, std::, <memory>)
-SYMBOL(get_terminate, std::, <exception>)
-SYMBOL(get_time, std::, <iomanip>)
-SYMBOL(getc, std::, <cstdio>)
-SYMBOL(getchar, std::, <cstdio>)
-SYMBOL(getenv, std::, <cstdlib>)
-SYMBOL(gets, std::, <cstdio>)
-SYMBOL(getwc, std::, <cwchar>)
-SYMBOL(getwchar, std::, <cwchar>)
-SYMBOL(giga, std::, <ratio>)
-SYMBOL(gmtime, std::, <ctime>)
-SYMBOL(greater, std::, <functional>)
-SYMBOL(greater_equal, std::, <functional>)
-SYMBOL(gslice, std::, <valarray>)
-SYMBOL(gslice_array, std::, <valarray>)
-SYMBOL(hardware_constructive_interference_size, std::, <new>)
-SYMBOL(hardware_destructive_interference_size, std::, <new>)
-SYMBOL(has_facet, std::, <locale>)
-SYMBOL(has_unique_object_representations, std::, <type_traits>)
-SYMBOL(has_unique_object_representations_v, std::, <type_traits>)
-SYMBOL(has_virtual_destructor, std::, <type_traits>)
-SYMBOL(has_virtual_destructor_v, std::, <type_traits>)
-SYMBOL(hash, std::, <functional>)
-SYMBOL(hecto, std::, <ratio>)
-SYMBOL(hex, std::, <ios>)
-SYMBOL(hexfloat, std::, <ios>)
-SYMBOL(holds_alternative, std::, <variant>)
-SYMBOL(hypot, std::, <cmath>)
-SYMBOL(identity, std::, <functional>)
-SYMBOL(ifstream, std::, <fstream>)
-SYMBOL(ignore, std::, <tuple>)
-SYMBOL(ilogb, std::, <cmath>)
-SYMBOL(imag, std::, <complex>)
-SYMBOL(imaxabs, std::, <cinttypes>)
-SYMBOL(imaxdiv, std::, <cinttypes>)
-SYMBOL(imaxdiv_t, std::, <cinttypes>)
-SYMBOL(in_place, std::, <utility>)
-SYMBOL(in_place_index, std::, <utility>)
-SYMBOL(in_place_index_t, std::, <utility>)
-SYMBOL(in_place_t, std::, <utility>)
-SYMBOL(in_place_type, std::, <utility>)
-SYMBOL(in_place_type_t, std::, <utility>)
-SYMBOL(includes, std::, <algorithm>)
-SYMBOL(inclusive_scan, std::, <numeric>)
-SYMBOL(independent_bits_engine, std::, <random>)
-SYMBOL(indirect_array, std::, <valarray>)
-SYMBOL(initializer_list, std::, <initializer_list>)
-SYMBOL(inner_product, std::, <numeric>)
-SYMBOL(inplace_merge, std::, <algorithm>)
-SYMBOL(input_iterator_tag, std::, <iterator>)
-SYMBOL(insert_iterator, std::, <iterator>)
-SYMBOL(inserter, std::, <iterator>)
-SYMBOL(int16_t, std::, <cstdint>)
-SYMBOL(int32_t, std::, <cstdint>)
-SYMBOL(int64_t, std::, <cstdint>)
-SYMBOL(int8_t, std::, <cstdint>)
-SYMBOL(int_fast16_t, std::, <cstdint>)
-SYMBOL(int_fast32_t, std::, <cstdint>)
-SYMBOL(int_fast64_t, std::, <cstdint>)
-SYMBOL(int_fast8_t, std::, <cstdint>)
-SYMBOL(int_least16_t, std::, <cstdint>)
-SYMBOL(int_least32_t, std::, <cstdint>)
-SYMBOL(int_least64_t, std::, <cstdint>)
-SYMBOL(int_least8_t, std::, <cstdint>)
-SYMBOL(integer_sequence, std::, <utility>)
-SYMBOL(integral_constant, std::, <type_traits>)
-SYMBOL(internal, std::, <ios>)
-SYMBOL(intmax_t, std::, <cstdint>)
-SYMBOL(intptr_t, std::, <cstdint>)
-SYMBOL(invalid_argument, std::, <stdexcept>)
-SYMBOL(invoke, std::, <functional>)
-SYMBOL(invoke_result, std::, <type_traits>)
-SYMBOL(invoke_result_t, std::, <type_traits>)
-SYMBOL(io_errc, std::, <ios>)
-SYMBOL(ios, std::, <ios>)
-SYMBOL(ios_base, std::, <ios>)
-SYMBOL(iostream, std::, <istream>)
-SYMBOL(iostream_category, std::, <ios>)
-SYMBOL(iota, std::, <numeric>)
-SYMBOL(is_abstract, std::, <type_traits>)
-SYMBOL(is_abstract_v, std::, <type_traits>)
-SYMBOL(is_aggregate, std::, <type_traits>)
-SYMBOL(is_aggregate_v, std::, <type_traits>)
-SYMBOL(is_arithmetic, std::, <type_traits>)
-SYMBOL(is_arithmetic_v, std::, <type_traits>)
-SYMBOL(is_array, std::, <type_traits>)
-SYMBOL(is_array_v, std::, <type_traits>)
-SYMBOL(is_assignable, std::, <type_traits>)
-SYMBOL(is_assignable_v, std::, <type_traits>)
-SYMBOL(is_base_of, std::, <type_traits>)
-SYMBOL(is_base_of_v, std::, <type_traits>)
-SYMBOL(is_bind_expression, std::, <functional>)
-SYMBOL(is_bind_expression_v, std::, <functional>)
-SYMBOL(is_class, std::, <type_traits>)
-SYMBOL(is_class_v, std::, <type_traits>)
-SYMBOL(is_compound, std::, <type_traits>)
-SYMBOL(is_compound_v, std::, <type_traits>)
-SYMBOL(is_const, std::, <type_traits>)
-SYMBOL(is_const_v, std::, <type_traits>)
-SYMBOL(is_constructible, std::, <type_traits>)
-SYMBOL(is_constructible_v, std::, <type_traits>)
-SYMBOL(is_convertible, std::, <type_traits>)
-SYMBOL(is_convertible_v, std::, <type_traits>)
-SYMBOL(is_copy_assignable, std::, <type_traits>)
-SYMBOL(is_copy_assignable_v, std::, <type_traits>)
-SYMBOL(is_copy_constructible, std::, <type_traits>)
-SYMBOL(is_copy_constructible_v, std::, <type_traits>)
-SYMBOL(is_default_constructible, std::, <type_traits>)
-SYMBOL(is_default_constructible_v, std::, <type_traits>)
-SYMBOL(is_destructible, std::, <type_traits>)
-SYMBOL(is_destructible_v, std::, <type_traits>)
-SYMBOL(is_empty, std::, <type_traits>)
-SYMBOL(is_empty_v, std::, <type_traits>)
-SYMBOL(is_enum, std::, <type_traits>)
-SYMBOL(is_enum_v, std::, <type_traits>)
-SYMBOL(is_eq, std::, <compare>)
-SYMBOL(is_error_code_enum, std::, <system_error>)
-SYMBOL(is_error_condition_enum, std::, <system_error>)
-SYMBOL(is_error_condition_enum_v, std::, <system_error>)
-SYMBOL(is_execution_policy, std::, <execution>)
-SYMBOL(is_execution_policy_v, std::, <execution>)
-SYMBOL(is_final, std::, <type_traits>)
-SYMBOL(is_final_v, std::, <type_traits>)
-SYMBOL(is_floating_point, std::, <type_traits>)
-SYMBOL(is_floating_point_v, std::, <type_traits>)
-SYMBOL(is_function, std::, <type_traits>)
-SYMBOL(is_function_v, std::, <type_traits>)
-SYMBOL(is_fundamental, std::, <type_traits>)
-SYMBOL(is_fundamental_v, std::, <type_traits>)
-SYMBOL(is_gt, std::, <compare>)
-SYMBOL(is_gteq, std::, <compare>)
-SYMBOL(is_heap, std::, <algorithm>)
-SYMBOL(is_heap_until, std::, <algorithm>)
-SYMBOL(is_integral, std::, <type_traits>)
-SYMBOL(is_integral_v, std::, <type_traits>)
-SYMBOL(is_invocable, std::, <type_traits>)
-SYMBOL(is_invocable_r, std::, <type_traits>)
-SYMBOL(is_invocable_r_v, std::, <type_traits>)
-SYMBOL(is_invocable_v, std::, <type_traits>)
-SYMBOL(is_lt, std::, <compare>)
-SYMBOL(is_lteq, std::, <compare>)
-SYMBOL(is_lvalue_reference, std::, <type_traits>)
-SYMBOL(is_lvalue_reference_v, std::, <type_traits>)
-SYMBOL(is_member_function_pointer, std::, <type_traits>)
-SYMBOL(is_member_function_pointer_v, std::, <type_traits>)
-SYMBOL(is_member_object_pointer, std::, <type_traits>)
-SYMBOL(is_member_object_pointer_v, std::, <type_traits>)
-SYMBOL(is_member_pointer, std::, <type_traits>)
-SYMBOL(is_member_pointer_v, std::, <type_traits>)
-SYMBOL(is_move_assignable, std::, <type_traits>)
-SYMBOL(is_move_assignable_v, std::, <type_traits>)
-SYMBOL(is_move_constructible, std::, <type_traits>)
-SYMBOL(is_move_constructible_v, std::, <type_traits>)
-SYMBOL(is_neq, std::, <compare>)
-SYMBOL(is_nothrow_assignable, std::, <type_traits>)
-SYMBOL(is_nothrow_assignable_v, std::, <type_traits>)
-SYMBOL(is_nothrow_constructible, std::, <type_traits>)
-SYMBOL(is_nothrow_constructible_v, std::, <type_traits>)
-SYMBOL(is_nothrow_copy_assignable, std::, <type_traits>)
-SYMBOL(is_nothrow_copy_assignable_v, std::, <type_traits>)
-SYMBOL(is_nothrow_copy_constructible, std::, <type_traits>)
-SYMBOL(is_nothrow_copy_constructible_v, std::, <type_traits>)
-SYMBOL(is_nothrow_default_constructible, std::, <type_traits>)
-SYMBOL(is_nothrow_default_constructible_v, std::, <type_traits>)
-SYMBOL(is_nothrow_destructible, std::, <type_traits>)
-SYMBOL(is_nothrow_destructible_v, std::, <type_traits>)
-SYMBOL(is_nothrow_invocable, std::, <type_traits>)
-SYMBOL(is_nothrow_invocable_r, std::, <type_traits>)
-SYMBOL(is_nothrow_invocable_r_v, std::, <type_traits>)
-SYMBOL(is_nothrow_invocable_v, std::, <type_traits>)
-SYMBOL(is_nothrow_move_assignable, std::, <type_traits>)
-SYMBOL(is_nothrow_move_assignable_v, std::, <type_traits>)
-SYMBOL(is_nothrow_move_constructible, std::, <type_traits>)
-SYMBOL(is_nothrow_move_constructible_v, std::, <type_traits>)
-SYMBOL(is_nothrow_swappable, std::, <type_traits>)
-SYMBOL(is_nothrow_swappable_v, std::, <type_traits>)
-SYMBOL(is_nothrow_swappable_with, std::, <type_traits>)
-SYMBOL(is_nothrow_swappable_with_v, std::, <type_traits>)
-SYMBOL(is_null_pointer, std::, <type_traits>)
-SYMBOL(is_null_pointer_v, std::, <type_traits>)
-SYMBOL(is_object, std::, <type_traits>)
-SYMBOL(is_object_v, std::, <type_traits>)
-SYMBOL(is_partitioned, std::, <algorithm>)
-SYMBOL(is_permutation, std::, <algorithm>)
-SYMBOL(is_placeholder, std::, <functional>)
-SYMBOL(is_placeholder_v, std::, <functional>)
-SYMBOL(is_pod, std::, <type_traits>)
-SYMBOL(is_pod_v, std::, <type_traits>)
-SYMBOL(is_pointer, std::, <type_traits>)
-SYMBOL(is_pointer_v, std::, <type_traits>)
-SYMBOL(is_polymorphic, std::, <type_traits>)
-SYMBOL(is_polymorphic_v, std::, <type_traits>)
-SYMBOL(is_reference, std::, <type_traits>)
-SYMBOL(is_reference_v, std::, <type_traits>)
-SYMBOL(is_rvalue_reference, std::, <type_traits>)
-SYMBOL(is_rvalue_reference_v, std::, <type_traits>)
-SYMBOL(is_same, std::, <type_traits>)
-SYMBOL(is_same_v, std::, <type_traits>)
-SYMBOL(is_scalar, std::, <type_traits>)
-SYMBOL(is_scalar_v, std::, <type_traits>)
-SYMBOL(is_signed, std::, <type_traits>)
-SYMBOL(is_signed_v, std::, <type_traits>)
-SYMBOL(is_sorted, std::, <algorithm>)
-SYMBOL(is_sorted_until, std::, <algorithm>)
-SYMBOL(is_standard_layout, std::, <type_traits>)
-SYMBOL(is_standard_layout_v, std::, <type_traits>)
-SYMBOL(is_swappable, std::, <type_traits>)
-SYMBOL(is_swappable_v, std::, <type_traits>)
-SYMBOL(is_swappable_with, std::, <type_traits>)
-SYMBOL(is_swappable_with_v, std::, <type_traits>)
-SYMBOL(is_trivial, std::, <type_traits>)
-SYMBOL(is_trivial_v, std::, <type_traits>)
-SYMBOL(is_trivially_assignable, std::, <type_traits>)
-SYMBOL(is_trivially_assignable_v, std::, <type_traits>)
-SYMBOL(is_trivially_constructible, std::, <type_traits>)
-SYMBOL(is_trivially_constructible_v, std::, <type_traits>)
-SYMBOL(is_trivially_copy_assignable, std::, <type_traits>)
-SYMBOL(is_trivially_copy_assignable_v, std::, <type_traits>)
-SYMBOL(is_trivially_copy_constructible, std::, <type_traits>)
-SYMBOL(is_trivially_copy_constructible_v, std::, <type_traits>)
-SYMBOL(is_trivially_copyable, std::, <type_traits>)
-SYMBOL(is_trivially_copyable_v, std::, <type_traits>)
-SYMBOL(is_trivially_default_constructible, std::, <type_traits>)
-SYMBOL(is_trivially_default_constructible_v, std::, <type_traits>)
-SYMBOL(is_trivially_destructible, std::, <type_traits>)
-SYMBOL(is_trivially_destructible_v, std::, <type_traits>)
-SYMBOL(is_trivially_move_assignable, std::, <type_traits>)
-SYMBOL(is_trivially_move_assignable_v, std::, <type_traits>)
-SYMBOL(is_trivially_move_constructible, std::, <type_traits>)
-SYMBOL(is_trivially_move_constructible_v, std::, <type_traits>)
-SYMBOL(is_union, std::, <type_traits>)
-SYMBOL(is_union_v, std::, <type_traits>)
-SYMBOL(is_unsigned, std::, <type_traits>)
-SYMBOL(is_unsigned_v, std::, <type_traits>)
-SYMBOL(is_void, std::, <type_traits>)
-SYMBOL(is_void_v, std::, <type_traits>)
-SYMBOL(is_volatile, std::, <type_traits>)
-SYMBOL(is_volatile_v, std::, <type_traits>)
-SYMBOL(isalnum, std::, <cctype>)
-SYMBOL(isalpha, std::, <cctype>)
-SYMBOL(isblank, std::, <cctype>)
-SYMBOL(iscntrl, std::, <cctype>)
-SYMBOL(isdigit, std::, <cctype>)
-SYMBOL(isfinite, std::, <cmath>)
-SYMBOL(isgraph, std::, <cctype>)
-SYMBOL(isgreater, std::, <cmath>)
-SYMBOL(isgreaterequal, std::, <cmath>)
-SYMBOL(isinf, std::, <cmath>)
-SYMBOL(isless, std::, <cmath>)
-SYMBOL(islessequal, std::, <cmath>)
-SYMBOL(islessgreater, std::, <cmath>)
-SYMBOL(islower, std::, <cctype>)
-SYMBOL(isnan, std::, <cmath>)
-SYMBOL(isnormal, std::, <cmath>)
-SYMBOL(ispow2, std::, <bit>)
-SYMBOL(isprint, std::, <cctype>)
-SYMBOL(ispunct, std::, <cctype>)
-SYMBOL(isspace, std::, <cctype>)
-SYMBOL(istream, std::, <istream>)
-SYMBOL(istream_iterator, std::, <iterator>)
-SYMBOL(istreambuf_iterator, std::, <iterator>)
-SYMBOL(istringstream, std::, <sstream>)
-SYMBOL(isunordered, std::, <cmath>)
-SYMBOL(isupper, std::, <cctype>)
-SYMBOL(iswalnum, std::, <cwctype>)
-SYMBOL(iswalpha, std::, <cwctype>)
-SYMBOL(iswblank, std::, <cwctype>)
-SYMBOL(iswcntrl, std::, <cwctype>)
-SYMBOL(iswctype, std::, <cwctype>)
-SYMBOL(iswdigit, std::, <cwctype>)
-SYMBOL(iswgraph, std::, <cwctype>)
-SYMBOL(iswlower, std::, <cwctype>)
-SYMBOL(iswprint, std::, <cwctype>)
-SYMBOL(iswpunct, std::, <cwctype>)
-SYMBOL(iswspace, std::, <cwctype>)
-SYMBOL(iswupper, std::, <cwctype>)
-SYMBOL(iswxdigit, std::, <cwctype>)
-SYMBOL(isxdigit, std::, <cctype>)
-SYMBOL(iter_swap, std::, <algorithm>)
-SYMBOL(iterator, std::, <iterator>)
-SYMBOL(iterator_traits, std::, <iterator>)
-SYMBOL(jmp_buf, std::, <csetjmp>)
-SYMBOL(kill_dependency, std::, <atomic>)
-SYMBOL(kilo, std::, <ratio>)
-SYMBOL(knuth_b, std::, <random>)
-SYMBOL(labs, std::, <cstdlib>)
-SYMBOL(launch, std::, <future>)
-SYMBOL(launder, std::, <new>)
-SYMBOL(lcm, std::, <numeric>)
-SYMBOL(lconv, std::, <clocale>)
-SYMBOL(ldexp, std::, <cmath>)
-SYMBOL(ldiv, std::, <cstdlib>)
-SYMBOL(ldiv_t, std::, <cstdlib>)
-SYMBOL(left, std::, <ios>)
-SYMBOL(length_error, std::, <stdexcept>)
-SYMBOL(less, std::, <functional>)
-SYMBOL(less_equal, std::, <functional>)
-SYMBOL(lexicographical_compare, std::, <algorithm>)
-SYMBOL(lexicographical_compare_3way, std::, <algorithm>)
-SYMBOL(lgamma, std::, <cmath>)
-SYMBOL(linear_congruential_engine, std::, <random>)
-SYMBOL(list, std::, <list>)
-SYMBOL(llabs, std::, <cstdlib>)
-SYMBOL(lldiv, std::, <cstdlib>)
-SYMBOL(lldiv_t, std::, <cstdlib>)
-SYMBOL(llrint, std::, <cmath>)
-SYMBOL(llround, std::, <cmath>)
-SYMBOL(locale, std::, <locale>)
-SYMBOL(localeconv, std::, <clocale>)
-SYMBOL(localtime, std::, <ctime>)
-SYMBOL(lock, std::, <mutex>)
-SYMBOL(lock_guard, std::, <mutex>)
-SYMBOL(log, std::, <cmath>)
-SYMBOL(log10, std::, <cmath>)
-SYMBOL(log1p, std::, <cmath>)
-SYMBOL(log2, std::, <cmath>)
-SYMBOL(log2p1, std::, <bit>)
-SYMBOL(logb, std::, <cmath>)
-SYMBOL(logic_error, std::, <stdexcept>)
-SYMBOL(logical_and, std::, <functional>)
-SYMBOL(logical_not, std::, <functional>)
-SYMBOL(logical_or, std::, <functional>)
-SYMBOL(lognormal_distribution, std::, <random>)
-SYMBOL(longjmp, std::, <csetjmp>)
-SYMBOL(lower_bound, std::, <algorithm>)
-SYMBOL(lrint, std::, <cmath>)
-SYMBOL(lround, std::, <cmath>)
-SYMBOL(make_exception_ptr, std::, <exception>)
-SYMBOL(make_from_tuple, std::, <tuple>)
-SYMBOL(make_heap, std::, <algorithm>)
-SYMBOL(make_move_iterator, std::, <iterator>)
-SYMBOL(make_optional, std::, <optional>)
-SYMBOL(make_pair, std::, <utility>)
-SYMBOL(make_reverse_iterator, std::, <iterator>)
-SYMBOL(make_shared, std::, <memory>)
-SYMBOL(make_signed, std::, <type_traits>)
-SYMBOL(make_signed_t, std::, <type_traits>)
-SYMBOL(make_tuple, std::, <tuple>)
-SYMBOL(make_unique, std::, <memory>)
-SYMBOL(make_unsigned, std::, <type_traits>)
-SYMBOL(make_unsigned_t, std::, <type_traits>)
-SYMBOL(malloc, std::, <cstdlib>)
-SYMBOL(map, std::, <map>)
-SYMBOL(mask_array, std::, <valarray>)
-SYMBOL(match_results, std::, <regex>)
-SYMBOL(max, std::, <algorithm>)
-SYMBOL(max_align_t, std::, <cstddef>)
-SYMBOL(max_element, std::, <algorithm>)
-SYMBOL(mblen, std::, <cstdlib>)
-SYMBOL(mbrlen, std::, <cwchar>)
-SYMBOL(mbrtoc16, std::, <cuchar>)
-SYMBOL(mbrtoc32, std::, <cuchar>)
-SYMBOL(mbrtowc, std::, <cwchar>)
-SYMBOL(mbsinit, std::, <cwchar>)
-SYMBOL(mbsrtowcs, std::, <cwchar>)
-SYMBOL(mbstowcs, std::, <cstdlib>)
-SYMBOL(mbtowc, std::, <cstdlib>)
-SYMBOL(mega, std::, <ratio>)
-SYMBOL(mem_fn, std::, <functional>)
-SYMBOL(memchr, std::, <cstring>)
-SYMBOL(memcmp, std::, <cstring>)
-SYMBOL(memcpy, std::, <cstring>)
-SYMBOL(memmove, std::, <cstring>)
-SYMBOL(memory_order, std::, <atomic>)
-SYMBOL(memory_order_acq_rel, std::, <atomic>)
-SYMBOL(memory_order_acquire, std::, <atomic>)
-SYMBOL(memory_order_consume, std::, <atomic>)
-SYMBOL(memory_order_relaxed, std::, <atomic>)
-SYMBOL(memory_order_release, std::, <atomic>)
-SYMBOL(memory_order_seq_cst, std::, <atomic>)
-SYMBOL(memset, std::, <cstring>)
-SYMBOL(merge, std::, <algorithm>)
-SYMBOL(mersenne_twister_engine, std::, <random>)
-SYMBOL(messages, std::, <locale>)
-SYMBOL(messages_base, std::, <locale>)
-SYMBOL(messages_byname, std::, <locale>)
-SYMBOL(micro, std::, <ratio>)
-SYMBOL(milli, std::, <ratio>)
-SYMBOL(min, std::, <algorithm>)
-SYMBOL(min_element, std::, <algorithm>)
-SYMBOL(minmax, std::, <algorithm>)
-SYMBOL(minmax_element, std::, <algorithm>)
-SYMBOL(minstd_rand, std::, <random>)
-SYMBOL(minstd_rand0, std::, <random>)
-SYMBOL(minus, std::, <functional>)
-SYMBOL(mismatch, std::, <algorithm>)
-SYMBOL(mktime, std::, <ctime>)
-SYMBOL(modf, std::, <cmath>)
-SYMBOL(modulus, std::, <functional>)
-SYMBOL(money_base, std::, <locale>)
-SYMBOL(money_get, std::, <locale>)
-SYMBOL(money_put, std::, <locale>)
-SYMBOL(moneypunct, std::, <locale>)
-SYMBOL(moneypunct_byname, std::, <locale>)
-SYMBOL(monostate, std::, <variant>)
-SYMBOL(move_backward, std::, <algorithm>)
-SYMBOL(move_if_noexcept, std::, <utility>)
-SYMBOL(move_iterator, std::, <iterator>)
-SYMBOL(mt19937, std::, <random>)
-SYMBOL(mt19937_64, std::, <random>)
-SYMBOL(multimap, std::, <map>)
-SYMBOL(multiplies, std::, <functional>)
-SYMBOL(multiset, std::, <set>)
-SYMBOL(mutex, std::, <mutex>)
-SYMBOL(nan, std::, <cmath>)
-SYMBOL(nanf, std::, <cmath>)
-SYMBOL(nanl, std::, <cmath>)
-SYMBOL(nano, std::, <ratio>)
-SYMBOL(nearbyint, std::, <cmath>)
-SYMBOL(negate, std::, <functional>)
-SYMBOL(negation, std::, <type_traits>)
-SYMBOL(negation_v, std::, <type_traits>)
-SYMBOL(negative_binomial_distribution, std::, <random>)
-SYMBOL(nested_exception, std::, <exception>)
-SYMBOL(new_handler, std::, <new>)
-SYMBOL(next, std::, <iterator>)
-SYMBOL(next_permutation, std::, <algorithm>)
-SYMBOL(nextafter, std::, <cmath>)
-SYMBOL(nexttoward, std::, <cmath>)
-SYMBOL(no_emit_on_flush, std::, <ostream>)
-SYMBOL(noboolalpha, std::, <ios>)
-SYMBOL(none_of, std::, <algorithm>)
-SYMBOL(norm, std::, <complex>)
-SYMBOL(normal_distribution, std::, <random>)
-SYMBOL(noshowbase, std::, <ios>)
-SYMBOL(noshowpoint, std::, <ios>)
-SYMBOL(noshowpos, std::, <ios>)
-SYMBOL(noskipws, std::, <ios>)
-SYMBOL(not_equal_to, std::, <functional>)
-SYMBOL(not_fn, std::, <functional>)
-SYMBOL(nothrow, std::, <new>)
-SYMBOL(nothrow_t, std::, <new>)
-SYMBOL(notify_all_at_thread_exit, std::, <condition_variable>)
-SYMBOL(nounitbuf, std::, <ios>)
-SYMBOL(nouppercase, std::, <ios>)
-SYMBOL(nth_element, std::, <algorithm>)
-SYMBOL(nullopt, std::, <optional>)
-SYMBOL(nullopt_t, std::, <optional>)
-SYMBOL(nullptr_t, std::, <cstddef>)
-SYMBOL(num_get, std::, <locale>)
-SYMBOL(num_put, std::, <locale>)
-SYMBOL(numeric_limits, std::, <limits>)
-SYMBOL(numpunct, std::, <locale>)
-SYMBOL(numpunct_byname, std::, <locale>)
-SYMBOL(oct, std::, <ios>)
-SYMBOL(ofstream, std::, <fstream>)
-SYMBOL(once_flag, std::, <mutex>)
-SYMBOL(optional, std::, <optional>)
-SYMBOL(ostream, std::, <ostream>)
-SYMBOL(ostream_iterator, std::, <iterator>)
-SYMBOL(ostreambuf_iterator, std::, <iterator>)
-SYMBOL(ostringstream, std::, <sstream>)
-SYMBOL(osyncstream, std::, <syncstream>)
-SYMBOL(out_of_range, std::, <stdexcept>)
-SYMBOL(output_iterator_tag, std::, <iterator>)
-SYMBOL(overflow_error, std::, <stdexcept>)
-SYMBOL(owner_less, std::, <memory>)
-SYMBOL(packaged_task, std::, <future>)
-SYMBOL(pair, std::, <utility>)
-SYMBOL(partial_order, std::, <compare>)
-SYMBOL(partial_ordering, std::, <compare>)
-SYMBOL(partial_sort, std::, <algorithm>)
-SYMBOL(partial_sort_copy, std::, <algorithm>)
-SYMBOL(partial_sum, std::, <numeric>)
-SYMBOL(partition, std::, <algorithm>)
-SYMBOL(partition_copy, std::, <algorithm>)
-SYMBOL(partition_point, std::, <algorithm>)
-SYMBOL(perror, std::, <cstdio>)
-SYMBOL(peta, std::, <ratio>)
-SYMBOL(pico, std::, <ratio>)
-SYMBOL(piecewise_constant_distribution, std::, <random>)
-SYMBOL(piecewise_construct_t, std::, <utility>)
-SYMBOL(piecewise_linear_distribution, std::, <random>)
-SYMBOL(plus, std::, <functional>)
-SYMBOL(pointer_safety, std::, <memory>)
-SYMBOL(pointer_traits, std::, <memory>)
-SYMBOL(poisson_distribution, std::, <random>)
-SYMBOL(polar, std::, <complex>)
-SYMBOL(polymorphic_allocator, std::, <memory_resource>)
-SYMBOL(pop_heap, std::, <algorithm>)
-SYMBOL(pow, std::, <cmath>)
-SYMBOL(prev, std::, <iterator>)
-SYMBOL(prev_permutation, std::, <algorithm>)
-SYMBOL(printf, std::, <cstdio>)
-SYMBOL(priority_queue, std::, <queue>)
-SYMBOL(proj, std::, <complex>)
-SYMBOL(promise, std::, <future>)
-SYMBOL(ptrdiff_t, std::, <cstddef>)
-SYMBOL(push_heap, std::, <algorithm>)
-SYMBOL(put_money, std::, <iomanip>)
-SYMBOL(put_time, std::, <iomanip>)
-SYMBOL(putc, std::, <cstdio>)
-SYMBOL(putchar, std::, <cstdio>)
-SYMBOL(puts, std::, <cstdio>)
-SYMBOL(putwc, std::, <cwchar>)
-SYMBOL(putwchar, std::, <cwchar>)
-SYMBOL(qsort, std::, <cstdlib>)
-SYMBOL(queue, std::, <queue>)
-SYMBOL(quick_exit, std::, <cstdlib>)
-SYMBOL(quoted, std::, <iomanip>)
-SYMBOL(raise, std::, <csignal>)
-SYMBOL(rand, std::, <cstdlib>)
-SYMBOL(random_access_iterator_tag, std::, <iterator>)
-SYMBOL(random_device, std::, <random>)
-SYMBOL(random_shuffle, std::, <algorithm>)
-SYMBOL(range_error, std::, <stdexcept>)
-SYMBOL(rank, std::, <type_traits>)
-SYMBOL(rank_v, std::, <type_traits>)
-SYMBOL(ranlux24, std::, <random>)
-SYMBOL(ranlux24_base, std::, <random>)
-SYMBOL(ranlux48, std::, <random>)
-SYMBOL(ranlux48_base, std::, <random>)
-SYMBOL(ratio, std::, <ratio>)
-SYMBOL(ratio_add, std::, <ratio>)
-SYMBOL(ratio_divide, std::, <ratio>)
-SYMBOL(ratio_equal, std::, <ratio>)
-SYMBOL(ratio_equal_v, std::, <ratio>)
-SYMBOL(ratio_greater, std::, <ratio>)
-SYMBOL(ratio_greater_equal, std::, <ratio>)
-SYMBOL(ratio_greater_equal_v, std::, <ratio>)
-SYMBOL(ratio_greater_v, std::, <ratio>)
-SYMBOL(ratio_less, std::, <ratio>)
-SYMBOL(ratio_less_equal, std::, <ratio>)
-SYMBOL(ratio_less_equal_v, std::, <ratio>)
-SYMBOL(ratio_less_v, std::, <ratio>)
-SYMBOL(ratio_multiply, std::, <ratio>)
-SYMBOL(ratio_not_equal, std::, <ratio>)
-SYMBOL(ratio_not_equal_v, std::, <ratio>)
-SYMBOL(ratio_subtract, std::, <ratio>)
-SYMBOL(rbegin, std::, <iterator>)
-SYMBOL(real, std::, <complex>)
-SYMBOL(realloc, std::, <cstdlib>)
-SYMBOL(recursive_mutex, std::, <mutex>)
-SYMBOL(recursive_timed_mutex, std::, <mutex>)
-SYMBOL(reduce, std::, <numeric>)
-SYMBOL(ref, std::, <functional>)
-SYMBOL(reference_wrapper, std::, <functional>)
-SYMBOL(regex, std::, <regex>)
-SYMBOL(regex_error, std::, <regex>)
-SYMBOL(regex_iterator, std::, <regex>)
-SYMBOL(regex_match, std::, <regex>)
-SYMBOL(regex_replace, std::, <regex>)
-SYMBOL(regex_search, std::, <regex>)
-SYMBOL(regex_token_iterator, std::, <regex>)
-SYMBOL(regex_traits, std::, <regex>)
-SYMBOL(reinterpret_pointer_cast, std::, <memory>)
-SYMBOL(remainder, std::, <cmath>)
-SYMBOL(remove_all_extents, std::, <type_traits>)
-SYMBOL(remove_all_extents_t, std::, <type_traits>)
-SYMBOL(remove_const, std::, <type_traits>)
-SYMBOL(remove_const_t, std::, <type_traits>)
-SYMBOL(remove_copy, std::, <algorithm>)
-SYMBOL(remove_copy_if, std::, <algorithm>)
-SYMBOL(remove_cv, std::, <type_traits>)
-SYMBOL(remove_cv_t, std::, <type_traits>)
-SYMBOL(remove_cvref, std::, <type_traits>)
-SYMBOL(remove_cvref_t, std::, <type_traits>)
-SYMBOL(remove_extent, std::, <type_traits>)
-SYMBOL(remove_extent_t, std::, <type_traits>)
-SYMBOL(remove_pointer, std::, <type_traits>)
-SYMBOL(remove_pointer_t, std::, <type_traits>)
-SYMBOL(remove_reference, std::, <type_traits>)
-SYMBOL(remove_reference_t, std::, <type_traits>)
-SYMBOL(remove_volatile, std::, <type_traits>)
-SYMBOL(remove_volatile_t, std::, <type_traits>)
-SYMBOL(remquo, std::, <cmath>)
-SYMBOL(rename, std::, <cstdio>)
-SYMBOL(rend, std::, <iterator>)
-SYMBOL(replace, std::, <algorithm>)
-SYMBOL(replace_copy, std::, <algorithm>)
-SYMBOL(replace_copy_if, std::, <algorithm>)
-SYMBOL(replace_if, std::, <algorithm>)
-SYMBOL(resetiosflags, std::, <iomanip>)
-SYMBOL(result_of, std::, <type_traits>)
-SYMBOL(result_of_t, std::, <type_traits>)
-SYMBOL(rethrow_exception, std::, <exception>)
-SYMBOL(rethrow_if_nested, std::, <exception>)
-SYMBOL(reverse, std::, <algorithm>)
-SYMBOL(reverse_copy, std::, <algorithm>)
-SYMBOL(reverse_iterator, std::, <iterator>)
-SYMBOL(rewind, std::, <cstdio>)
-SYMBOL(right, std::, <ios>)
-SYMBOL(rint, std::, <cmath>)
-SYMBOL(rotate, std::, <algorithm>)
-SYMBOL(rotate_copy, std::, <algorithm>)
-SYMBOL(round, std::, <cmath>)
-SYMBOL(round_indeterminate, std::, <limits>)
-SYMBOL(round_to_nearest, std::, <limits>)
-SYMBOL(round_toward_infinity, std::, <limits>)
-SYMBOL(round_toward_neg_infinity, std::, <limits>)
-SYMBOL(round_toward_zero, std::, <limits>)
-SYMBOL(runtime_error, std::, <stdexcept>)
-SYMBOL(sample, std::, <algorithm>)
-SYMBOL(scalbln, std::, <cmath>)
-SYMBOL(scalbn, std::, <cmath>)
-SYMBOL(scanf, std::, <cstdio>)
-SYMBOL(scientific, std::, <ios>)
-SYMBOL(scoped_allocator_adaptor, std::, <scoped_allocator>)
-SYMBOL(search, std::, <algorithm>)
-SYMBOL(search_n, std::, <algorithm>)
-SYMBOL(seed_seq, std::, <random>)
-SYMBOL(set, std::, <set>)
-SYMBOL(set_difference, std::, <algorithm>)
-SYMBOL(set_intersection, std::, <algorithm>)
-SYMBOL(set_new_handler, std::, <new>)
-SYMBOL(set_symmetric_difference, std::, <algorithm>)
-SYMBOL(set_terminate, std::, <exception>)
-SYMBOL(set_union, std::, <algorithm>)
-SYMBOL(setbase, std::, <iomanip>)
-SYMBOL(setbuf, std::, <cstdio>)
-SYMBOL(setfill, std::, <iomanip>)
-SYMBOL(setiosflags, std::, <iomanip>)
-SYMBOL(setlocale, std::, <clocale>)
-SYMBOL(setprecision, std::, <iomanip>)
-SYMBOL(setvbuf, std::, <cstdio>)
-SYMBOL(setw, std::, <iomanip>)
-SYMBOL(shared_future, std::, <future>)
-SYMBOL(shared_lock, std::, <shared_mutex>)
-SYMBOL(shared_mutex, std::, <shared_mutex>)
-SYMBOL(shared_ptr, std::, <memory>)
-SYMBOL(shared_timed_mutex, std::, <shared_mutex>)
-SYMBOL(shift_left, std::, <algorithm>)
-SYMBOL(shift_right, std::, <algorithm>)
-SYMBOL(showbase, std::, <ios>)
-SYMBOL(showpoint, std::, <ios>)
-SYMBOL(showpos, std::, <ios>)
-SYMBOL(shuffle, std::, <algorithm>)
-SYMBOL(shuffle_order_engine, std::, <random>)
-SYMBOL(sig_atomic_t, std::, <csignal>)
-SYMBOL(signal, std::, <csignal>)
-SYMBOL(signbit, std::, <cmath>)
-SYMBOL(sin, std::, <cmath>)
-SYMBOL(sinh, std::, <cmath>)
-SYMBOL(size, std::, <iterator>)
-SYMBOL(skipws, std::, <ios>)
-SYMBOL(slice, std::, <valarray>)
-SYMBOL(slice_array, std::, <valarray>)
-SYMBOL(smatch, std::, <regex>)
-SYMBOL(snprintf, std::, <cstdio>)
-SYMBOL(sort, std::, <algorithm>)
-SYMBOL(sort_heap, std::, <algorithm>)
-SYMBOL(span, std::, <span>)
-SYMBOL(sprintf, std::, <cstdio>)
-SYMBOL(sqrt, std::, <cmath>)
-SYMBOL(srand, std::, <cstdlib>)
-SYMBOL(sregex_iterator, std::, <regex>)
-SYMBOL(sregex_token_iterator, std::, <regex>)
-SYMBOL(sscanf, std::, <cstdio>)
-SYMBOL(ssub_match, std::, <regex>)
-SYMBOL(stable_partition, std::, <algorithm>)
-SYMBOL(stable_sort, std::, <algorithm>)
-SYMBOL(stack, std::, <stack>)
-SYMBOL(static_pointer_cast, std::, <memory>)
-SYMBOL(strcat, std::, <cstring>)
-SYMBOL(strchr, std::, <cstring>)
-SYMBOL(strcmp, std::, <cstring>)
-SYMBOL(strcoll, std::, <cstring>)
-SYMBOL(strcpy, std::, <cstring>)
-SYMBOL(strcspn, std::, <cstring>)
-SYMBOL(streambuf, std::, <streambuf>)
-SYMBOL(streamoff, std::, <ios>)
-SYMBOL(streampos, std::, <ios>)
-SYMBOL(streamsize, std::, <ios>)
-SYMBOL(strerror, std::, <cstring>)
-SYMBOL(strftime, std::, <ctime>)
-SYMBOL(string, std::, <string>)
-SYMBOL(string_view, std::, <string_view>)
-SYMBOL(stringbuf, std::, <sstream>)
-SYMBOL(stringstream, std::, <sstream>)
-SYMBOL(strlen, std::, <cstring>)
-SYMBOL(strncat, std::, <cstring>)
-SYMBOL(strncmp, std::, <cstring>)
-SYMBOL(strncpy, std::, <cstring>)
-SYMBOL(strong_equal, std::, <compare>)
-SYMBOL(strong_equality, std::, <compare>)
-SYMBOL(strong_order, std::, <compare>)
-SYMBOL(strong_ordering, std::, <compare>)
-SYMBOL(strpbrk, std::, <cstring>)
-SYMBOL(strrchr, std::, <cstring>)
-SYMBOL(strspn, std::, <cstring>)
-SYMBOL(strstr, std::, <cstring>)
-SYMBOL(strtod, std::, <cstdlib>)
-SYMBOL(strtof, std::, <cstdlib>)
-SYMBOL(strtoimax, std::, <cinttypes>)
-SYMBOL(strtok, std::, <cstring>)
-SYMBOL(strtol, std::, <cstdlib>)
-SYMBOL(strtold, std::, <cstdlib>)
-SYMBOL(strtoll, std::, <cstdlib>)
-SYMBOL(strtoul, std::, <cstdlib>)
-SYMBOL(strtoull, std::, <cstdlib>)
-SYMBOL(strtoumax, std::, <cinttypes>)
-SYMBOL(strxfrm, std::, <cstring>)
-SYMBOL(student_t_distribution, std::, <random>)
-SYMBOL(sub_match, std::, <regex>)
-SYMBOL(subtract_with_carry_engine, std::, <random>)
-SYMBOL(swap_ranges, std::, <algorithm>)
-SYMBOL(swprintf, std::, <cwchar>)
-SYMBOL(swscanf, std::, <cwchar>)
-SYMBOL(syncbuf, std::, <syncstream>)
-SYMBOL(system, std::, <cstdlib>)
-SYMBOL(system_category, std::, <system_error>)
-SYMBOL(system_error, std::, <system_error>)
-SYMBOL(tan, std::, <cmath>)
-SYMBOL(tanh, std::, <cmath>)
-SYMBOL(tera, std::, <ratio>)
-SYMBOL(terminate, std::, <exception>)
-SYMBOL(terminate_handler, std::, <exception>)
-SYMBOL(tgamma, std::, <cmath>)
-SYMBOL(thread, std::, <thread>)
-SYMBOL(throw_with_nested, std::, <exception>)
-SYMBOL(tie, std::, <tuple>)
-SYMBOL(time, std::, <ctime>)
-SYMBOL(time_base, std::, <locale>)
-SYMBOL(time_get, std::, <locale>)
-SYMBOL(time_get_byname, std::, <locale>)
-SYMBOL(time_put, std::, <locale>)
-SYMBOL(time_put_byname, std::, <locale>)
-SYMBOL(time_t, std::, <ctime>)
-SYMBOL(timed_mutex, std::, <mutex>)
-SYMBOL(timespec, std::, <ctime>)
-SYMBOL(timespec_get, std::, <ctime>)
-SYMBOL(tm, std::, <ctime>)
-SYMBOL(tmpfile, std::, <cstdio>)
-SYMBOL(tmpnam, std::, <cstdio>)
-SYMBOL(to_address, std::, <memory>)
-SYMBOL(to_chars, std::, <charconv>)
-SYMBOL(to_integer, std::, <cstddef>)
-SYMBOL(to_string, std::, <string>)
-SYMBOL(tolower, std::, <cctype>)
-SYMBOL(toupper, std::, <cctype>)
-SYMBOL(towctrans, std::, <cwctype>)
-SYMBOL(towlower, std::, <cwctype>)
-SYMBOL(towupper, std::, <cwctype>)
-SYMBOL(transform, std::, <algorithm>)
-SYMBOL(transform_exclusive_scan, std::, <numeric>)
-SYMBOL(transform_inclusive_scan, std::, <numeric>)
-SYMBOL(transform_reduce, std::, <numeric>)
-SYMBOL(true_type, std::, <type_traits>)
-SYMBOL(trunc, std::, <cmath>)
-SYMBOL(try_lock, std::, <mutex>)
-SYMBOL(try_to_lock, std::, <mutex>)
-SYMBOL(try_to_lock_t, std::, <mutex>)
-SYMBOL(tuple, std::, <tuple>)
-SYMBOL(tuple_cat, std::, <tuple>)
-SYMBOL(type_identity, std::, <type_traits>)
-SYMBOL(type_identity_t, std::, <type_traits>)
-SYMBOL(type_index, std::, <typeindex>)
-SYMBOL(type_info, std::, <typeinfo>)
-SYMBOL(u16streampos, std::, <ios>)
-SYMBOL(u16string, std::, <string>)
-SYMBOL(u16string_view, std::, <string_view>)
-SYMBOL(u32streampos, std::, <ios>)
-SYMBOL(u32string, std::, <string>)
-SYMBOL(u32string_view, std::, <string_view>)
-SYMBOL(uint16_t, std::, <cstdint>)
-SYMBOL(uint32_t, std::, <cstdint>)
-SYMBOL(uint64_t, std::, <cstdint>)
-SYMBOL(uint8_t, std::, <cstdint>)
-SYMBOL(uint_fast16_t, std::, <cstdint>)
-SYMBOL(uint_fast32_t, std::, <cstdint>)
-SYMBOL(uint_fast64_t, std::, <cstdint>)
-SYMBOL(uint_fast8_t, std::, <cstdint>)
-SYMBOL(uint_least16_t, std::, <cstdint>)
-SYMBOL(uint_least32_t, std::, <cstdint>)
-SYMBOL(uint_least64_t, std::, <cstdint>)
-SYMBOL(uint_least8_t, std::, <cstdint>)
-SYMBOL(uintmax_t, std::, <cstdint>)
-SYMBOL(uintptr_t, std::, <cstdint>)
-SYMBOL(uncaught_exceptions, std::, <exception>)
-SYMBOL(undeclare_no_pointers, std::, <memory>)
-SYMBOL(undeclare_reachable, std::, <memory>)
-SYMBOL(underflow_error, std::, <stdexcept>)
-SYMBOL(underlying_type, std::, <type_traits>)
-SYMBOL(underlying_type_t, std::, <type_traits>)
-SYMBOL(ungetc, std::, <cstdio>)
-SYMBOL(ungetwc, std::, <cwchar>)
-SYMBOL(uniform_int_distribution, std::, <random>)
-SYMBOL(uniform_real_distribution, std::, <random>)
-SYMBOL(uninitialized_copy, std::, <memory>)
-SYMBOL(uninitialized_copy_n, std::, <memory>)
-SYMBOL(uninitialized_default_construct, std::, <memory>)
-SYMBOL(uninitialized_default_construct_n, std::, <memory>)
-SYMBOL(uninitialized_fill, std::, <memory>)
-SYMBOL(uninitialized_fill_n, std::, <memory>)
-SYMBOL(uninitialized_move, std::, <memory>)
-SYMBOL(uninitialized_move_n, std::, <memory>)
-SYMBOL(uninitialized_value_construct, std::, <memory>)
-SYMBOL(uninitialized_value_construct_n, std::, <memory>)
-SYMBOL(unique, std::, <algorithm>)
-SYMBOL(unique_copy, std::, <algorithm>)
-SYMBOL(unique_lock, std::, <mutex>)
-SYMBOL(unique_ptr, std::, <memory>)
-SYMBOL(unitbuf, std::, <ios>)
-SYMBOL(unordered_map, std::, <unordered_map>)
-SYMBOL(unordered_multimap, std::, <unordered_map>)
-SYMBOL(unordered_multiset, std::, <unordered_set>)
-SYMBOL(unordered_set, std::, <unordered_set>)
-SYMBOL(upper_bound, std::, <algorithm>)
-SYMBOL(uppercase, std::, <ios>)
-SYMBOL(use_facet, std::, <locale>)
-SYMBOL(uses_allocator, std::, <memory>)
-SYMBOL(uses_allocator_v, std::, <memory>)
-SYMBOL(va_list, std::, <cstdarg>)
-SYMBOL(valarray, std::, <valarray>)
-SYMBOL(variant, std::, <variant>)
-SYMBOL(variant_alternative, std::, <variant>)
-SYMBOL(variant_alternative_t, std::, <variant>)
-SYMBOL(variant_npos, std::, <variant>)
-SYMBOL(variant_size, std::, <variant>)
-SYMBOL(variant_size_v, std::, <variant>)
-SYMBOL(vector, std::, <vector>)
-SYMBOL(vfprintf, std::, <cstdio>)
-SYMBOL(vfscanf, std::, <cstdio>)
-SYMBOL(vfwprintf, std::, <cwchar>)
-SYMBOL(vfwscanf, std::, <cwchar>)
-SYMBOL(visit, std::, <variant>)
-SYMBOL(void_t, std::, <type_traits>)
-SYMBOL(vprintf, std::, <cstdio>)
-SYMBOL(vscanf, std::, <cstdio>)
-SYMBOL(vsnprintf, std::, <cstdio>)
-SYMBOL(vsprintf, std::, <cstdio>)
-SYMBOL(vsscanf, std::, <cstdio>)
-SYMBOL(vswprintf, std::, <cwchar>)
-SYMBOL(vswscanf, std::, <cwchar>)
-SYMBOL(vwprintf, std::, <cwchar>)
-SYMBOL(vwscanf, std::, <cwchar>)
-SYMBOL(wbuffer_convert, std::, <locale>)
-SYMBOL(wcerr, std::, <iostream>)
-SYMBOL(wcin, std::, <iostream>)
-SYMBOL(wclog, std::, <iostream>)
-SYMBOL(wcmatch, std::, <regex>)
-SYMBOL(wcout, std::, <iostream>)
-SYMBOL(wcregex_iterator, std::, <regex>)
-SYMBOL(wcregex_token_iterator, std::, <regex>)
-SYMBOL(wcrtomb, std::, <cwchar>)
-SYMBOL(wcscat, std::, <cwchar>)
-SYMBOL(wcschr, std::, <cwchar>)
-SYMBOL(wcscmp, std::, <cwchar>)
-SYMBOL(wcscoll, std::, <cwchar>)
-SYMBOL(wcscpy, std::, <cwchar>)
-SYMBOL(wcscspn, std::, <cwchar>)
-SYMBOL(wcsftime, std::, <cwchar>)
-SYMBOL(wcslen, std::, <cwchar>)
-SYMBOL(wcsncat, std::, <cwchar>)
-SYMBOL(wcsncmp, std::, <cwchar>)
-SYMBOL(wcsncpy, std::, <cwchar>)
-SYMBOL(wcspbrk, std::, <cwchar>)
-SYMBOL(wcsrchr, std::, <cwchar>)
-SYMBOL(wcsrtombs, std::, <cwchar>)
-SYMBOL(wcsspn, std::, <cwchar>)
-SYMBOL(wcsstr, std::, <cwchar>)
-SYMBOL(wcstod, std::, <cwchar>)
-SYMBOL(wcstof, std::, <cwchar>)
-SYMBOL(wcstoimax, std::, <cinttypes>)
-SYMBOL(wcstok, std::, <cwchar>)
-SYMBOL(wcstol, std::, <cwchar>)
-SYMBOL(wcstold, std::, <cwchar>)
-SYMBOL(wcstoll, std::, <cwchar>)
-SYMBOL(wcstombs, std::, <cstdlib>)
-SYMBOL(wcstoul, std::, <cwchar>)
-SYMBOL(wcstoull, std::, <cwchar>)
-SYMBOL(wcstoumax, std::, <cinttypes>)
-SYMBOL(wcsub_match, std::, <regex>)
-SYMBOL(wcsxfrm, std::, <cwchar>)
-SYMBOL(wctob, std::, <cwchar>)
-SYMBOL(wctomb, std::, <cstdlib>)
-SYMBOL(wctrans, std::, <cwctype>)
-SYMBOL(wctrans_t, std::, <cwctype>)
-SYMBOL(wctype, std::, <cwctype>)
-SYMBOL(wctype_t, std::, <cwctype>)
-SYMBOL(weak_equal, std::, <compare>)
-SYMBOL(weak_equality, std::, <compare>)
-SYMBOL(weak_order, std::, <compare>)
-SYMBOL(weak_ordering, std::, <compare>)
-SYMBOL(weak_ptr, std::, <memory>)
-SYMBOL(weibull_distribution, std::, <random>)
-SYMBOL(wfilebuf, std::, <streambuf>)
-SYMBOL(wfstream, std::, <fstream>)
-SYMBOL(wifstream, std::, <fstream>)
-SYMBOL(wios, std::, <ios>)
-SYMBOL(wiostream, std::, <istream>)
-SYMBOL(wistream, std::, <istream>)
-SYMBOL(wistringstream, std::, <sstream>)
-SYMBOL(wmemchr, std::, <cwchar>)
-SYMBOL(wmemcmp, std::, <cwchar>)
-SYMBOL(wmemcpy, std::, <cwchar>)
-SYMBOL(wmemmove, std::, <cwchar>)
-SYMBOL(wmemset, std::, <cwchar>)
-SYMBOL(wofstream, std::, <fstream>)
-SYMBOL(wostream, std::, <ostream>)
-SYMBOL(wostringstream, std::, <sstream>)
-SYMBOL(wosyncstream, std::, <syncstream>)
-SYMBOL(wprintf, std::, <cwchar>)
-SYMBOL(wregex, std::, <regex>)
-SYMBOL(ws, std::, <istream>)
-SYMBOL(wscanf, std::, <cwchar>)
-SYMBOL(wsmatch, std::, <regex>)
-SYMBOL(wsregex_iterator, std::, <regex>)
-SYMBOL(wsregex_token_iterator, std::, <regex>)
-SYMBOL(wssub_match, std::, <regex>)
-SYMBOL(wstreambuf, std::, <streambuf>)
-SYMBOL(wstreampos, std::, <ios>)
-SYMBOL(wstring, std::, <string>)
-SYMBOL(wstring_convert, std::, <locale>)
-SYMBOL(wstring_view, std::, <string_view>)
-SYMBOL(wstringbuf, std::, <sstream>)
-SYMBOL(wstringstream, std::, <sstream>)
-SYMBOL(wsyncbuf, std::, <syncstream>)
-SYMBOL(yocto, std::, <ratio>)
-SYMBOL(yotta, std::, <ratio>)
-SYMBOL(zepto, std::, <ratio>)
-SYMBOL(zetta, std::, <ratio>)
-SYMBOL(April, std::chrono::, <chrono>)
-SYMBOL(August, std::chrono::, <chrono>)
-SYMBOL(December, std::chrono::, <chrono>)
-SYMBOL(February, std::chrono::, <chrono>)
-SYMBOL(Friday, std::chrono::, <chrono>)
-SYMBOL(January, std::chrono::, <chrono>)
-SYMBOL(July, std::chrono::, <chrono>)
-SYMBOL(June, std::chrono::, <chrono>)
-SYMBOL(March, std::chrono::, <chrono>)
-SYMBOL(May, std::chrono::, <chrono>)
-SYMBOL(Monday, std::chrono::, <chrono>)
-SYMBOL(November, std::chrono::, <chrono>)
-SYMBOL(October, std::chrono::, <chrono>)
-SYMBOL(Saturday, std::chrono::, <chrono>)
-SYMBOL(September, std::chrono::, <chrono>)
-SYMBOL(Sunday, std::chrono::, <chrono>)
-SYMBOL(Thursday, std::chrono::, <chrono>)
-SYMBOL(Tuesday, std::chrono::, <chrono>)
-SYMBOL(Wednesday, std::chrono::, <chrono>)
-SYMBOL(abs, std::chrono::, <chrono>)
-SYMBOL(ambiguous_local_time, std::chrono::, <chrono>)
-SYMBOL(ceil, std::chrono::, <chrono>)
-SYMBOL(choose, std::chrono::, <chrono>)
-SYMBOL(clock_cast, std::chrono::, <chrono>)
-SYMBOL(clock_time_conversion, std::chrono::, <chrono>)
-SYMBOL(current_zone, std::chrono::, <chrono>)
-SYMBOL(day, std::chrono::, <chrono>)
-SYMBOL(duration, std::chrono::, <chrono>)
-SYMBOL(duration_values, std::chrono::, <chrono>)
-SYMBOL(file_clock, std::chrono::, <chrono>)
-SYMBOL(file_seconds, std::chrono::, <chrono>)
-SYMBOL(file_time, std::chrono::, <chrono>)
-SYMBOL(floor, std::chrono::, <chrono>)
-SYMBOL(gps_clock, std::chrono::, <chrono>)
-SYMBOL(gps_seconds, std::chrono::, <chrono>)
-SYMBOL(gps_time, std::chrono::, <chrono>)
-SYMBOL(high_resolution_clock, std::chrono::, <chrono>)
-SYMBOL(hours, std::chrono::, <chrono>)
-SYMBOL(is_clock, std::chrono::, <chrono>)
-SYMBOL(is_clock_v, std::chrono::, <chrono>)
-SYMBOL(last, std::chrono::, <chrono>)
-SYMBOL(last_spec, std::chrono::, <chrono>)
-SYMBOL(leap, std::chrono::, <chrono>)
-SYMBOL(link, std::chrono::, <chrono>)
-SYMBOL(local_info, std::chrono::, <chrono>)
-SYMBOL(local_seconds, std::chrono::, <chrono>)
-SYMBOL(local_t, std::chrono::, <chrono>)
-SYMBOL(local_time, std::chrono::, <chrono>)
-SYMBOL(locate_zone, std::chrono::, <chrono>)
-SYMBOL(microseconds, std::chrono::, <chrono>)
-SYMBOL(milliseconds, std::chrono::, <chrono>)
-SYMBOL(minutes, std::chrono::, <chrono>)
-SYMBOL(month, std::chrono::, <chrono>)
-SYMBOL(month_day, std::chrono::, <chrono>)
-SYMBOL(month_day_last, std::chrono::, <chrono>)
-SYMBOL(month_weekday, std::chrono::, <chrono>)
-SYMBOL(month_weekday_last, std::chrono::, <chrono>)
-SYMBOL(nanoseconds, std::chrono::, <chrono>)
-SYMBOL(nonexistent_local_time, std::chrono::, <chrono>)
-SYMBOL(round, std::chrono::, <chrono>)
-SYMBOL(seconds, std::chrono::, <chrono>)
-SYMBOL(steady_clock, std::chrono::, <chrono>)
-SYMBOL(sys_days, std::chrono::, <chrono>)
-SYMBOL(sys_info, std::chrono::, <chrono>)
-SYMBOL(sys_seconds, std::chrono::, <chrono>)
-SYMBOL(sys_time, std::chrono::, <chrono>)
-SYMBOL(system_clock, std::chrono::, <chrono>)
-SYMBOL(tai_clock, std::chrono::, <chrono>)
-SYMBOL(tai_seconds, std::chrono::, <chrono>)
-SYMBOL(tai_time, std::chrono::, <chrono>)
-SYMBOL(time_of_day, std::chrono::, <chrono>)
-SYMBOL(time_point, std::chrono::, <chrono>)
-SYMBOL(time_zone, std::chrono::, <chrono>)
-SYMBOL(treat_as_floating_point, std::chrono::, <chrono>)
-SYMBOL(treat_as_floating_point_v, std::chrono::, <chrono>)
-SYMBOL(tzdb, std::chrono::, <chrono>)
-SYMBOL(tzdb_list, std::chrono::, <chrono>)
-SYMBOL(utc_clock, std::chrono::, <chrono>)
-SYMBOL(utc_seconds, std::chrono::, <chrono>)
-SYMBOL(utc_time, std::chrono::, <chrono>)
-SYMBOL(weekday, std::chrono::, <chrono>)
-SYMBOL(weekday_indexed, std::chrono::, <chrono>)
-SYMBOL(weekday_last, std::chrono::, <chrono>)
-SYMBOL(year, std::chrono::, <chrono>)
-SYMBOL(year_month, std::chrono::, <chrono>)
-SYMBOL(year_month_day, std::chrono::, <chrono>)
-SYMBOL(year_month_day_last, std::chrono::, <chrono>)
-SYMBOL(year_month_weekday, std::chrono::, <chrono>)
-SYMBOL(year_month_weekday_last, std::chrono::, <chrono>)
-SYMBOL(zoned_time, std::chrono::, <chrono>)
-SYMBOL(zoned_traits, std::chrono::, <chrono>)
-SYMBOL(absolute, std::filesystem::, <filesystem>)
-SYMBOL(canonical, std::filesystem::, <filesystem>)
-SYMBOL(copy, std::filesystem::, <filesystem>)
-SYMBOL(copy_file, std::filesystem::, <filesystem>)
-SYMBOL(copy_options, std::filesystem::, <filesystem>)
-SYMBOL(copy_symlink, std::filesystem::, <filesystem>)
-SYMBOL(create_directories, std::filesystem::, <filesystem>)
-SYMBOL(create_directory, std::filesystem::, <filesystem>)
-SYMBOL(create_directory_symlink, std::filesystem::, <filesystem>)
-SYMBOL(create_hard_link, std::filesystem::, <filesystem>)
-SYMBOL(create_symlink, std::filesystem::, <filesystem>)
-SYMBOL(current_path, std::filesystem::, <filesystem>)
-SYMBOL(directory_entry, std::filesystem::, <filesystem>)
-SYMBOL(directory_iterator, std::filesystem::, <filesystem>)
-SYMBOL(directory_options, std::filesystem::, <filesystem>)
-SYMBOL(equivalent, std::filesystem::, <filesystem>)
-SYMBOL(exists, std::filesystem::, <filesystem>)
-SYMBOL(file_size, std::filesystem::, <filesystem>)
-SYMBOL(file_status, std::filesystem::, <filesystem>)
-SYMBOL(file_time_type, std::filesystem::, <filesystem>)
-SYMBOL(file_type, std::filesystem::, <filesystem>)
-SYMBOL(filesystem_error, std::filesystem::, <filesystem>)
-SYMBOL(hard_link_count, std::filesystem::, <filesystem>)
-SYMBOL(is_block_file, std::filesystem::, <filesystem>)
-SYMBOL(is_character_file, std::filesystem::, <filesystem>)
-SYMBOL(is_directory, std::filesystem::, <filesystem>)
-SYMBOL(is_empty, std::filesystem::, <filesystem>)
-SYMBOL(is_fifo, std::filesystem::, <filesystem>)
-SYMBOL(is_other, std::filesystem::, <filesystem>)
-SYMBOL(is_regular_file, std::filesystem::, <filesystem>)
-SYMBOL(is_socket, std::filesystem::, <filesystem>)
-SYMBOL(is_symlink, std::filesystem::, <filesystem>)
-SYMBOL(last_write_time, std::filesystem::, <filesystem>)
-SYMBOL(path, std::filesystem::, <filesystem>)
-SYMBOL(perm_options, std::filesystem::, <filesystem>)
-SYMBOL(permissions, std::filesystem::, <filesystem>)
-SYMBOL(perms, std::filesystem::, <filesystem>)
-SYMBOL(proximate, std::filesystem::, <filesystem>)
-SYMBOL(read_symlink, std::filesystem::, <filesystem>)
-SYMBOL(recursive_directory_iterator, std::filesystem::, <filesystem>)
-SYMBOL(relative, std::filesystem::, <filesystem>)
-SYMBOL(remove, std::filesystem::, <filesystem>)
-SYMBOL(remove_all, std::filesystem::, <filesystem>)
-SYMBOL(rename, std::filesystem::, <filesystem>)
-SYMBOL(resize_file, std::filesystem::, <filesystem>)
-SYMBOL(space, std::filesystem::, <filesystem>)
-SYMBOL(space_info, std::filesystem::, <filesystem>)
-SYMBOL(status, std::filesystem::, <filesystem>)
-SYMBOL(status_known, std::filesystem::, <filesystem>)
-SYMBOL(symlink_status, std::filesystem::, <filesystem>)
-SYMBOL(temp_directory_path, std::filesystem::, <filesystem>)
-SYMBOL(u8path, std::filesystem::, <filesystem>)
-SYMBOL(weakly_canonical, std::filesystem::, <filesystem>)
-SYMBOL(basic_string, std::pmr::, <string>)
-SYMBOL(deque, std::pmr::, <deque>)
-SYMBOL(forward_list, std::pmr::, <forward_list>)
-SYMBOL(get_default_resource, std::pmr::, <memory_resource>)
-SYMBOL(list, std::pmr::, <list>)
-SYMBOL(map, std::pmr::, <map>)
-SYMBOL(memory_resource, std::pmr::, <memory_resource>)
-SYMBOL(monotonic_buffer_resource, std::pmr::, <memory_resource>)
-SYMBOL(multimap, std::pmr::, <map>)
-SYMBOL(multiset, std::pmr::, <set>)
-SYMBOL(new_delete_resource, std::pmr::, <memory_resource>)
-SYMBOL(null_memory_resource, std::pmr::, <memory_resource>)
-SYMBOL(polymorphic_allocator, std::pmr::, <memory_resource>)
-SYMBOL(pool_options, std::pmr::, <memory_resource>)
-SYMBOL(set, std::pmr::, <set>)
-SYMBOL(set_default_resource, std::pmr::, <memory_resource>)
-SYMBOL(string, std::pmr::, <string>)
-SYMBOL(synchronized_pool_resource, std::pmr::, <memory_resource>)
-SYMBOL(u16string, std::pmr::, <string>)
-SYMBOL(u32string, std::pmr::, <string>)
-SYMBOL(unordered_map, std::pmr::, <unordered_map>)
-SYMBOL(unordered_multimap, std::pmr::, <unordered_map>)
-SYMBOL(unordered_multiset, std::pmr::, <unordered_set>)
-SYMBOL(unordered_set, std::pmr::, <unordered_set>)
-SYMBOL(unsynchronized_pool_resource, std::pmr::, <memory_resource>)
-SYMBOL(vector, std::pmr::, <vector>)
-SYMBOL(wstring, std::pmr::, <string>)
-SYMBOL(ECMAScript, std::regex_constants::, <regex>)
-SYMBOL(awk, std::regex_constants::, <regex>)
-SYMBOL(basic, std::regex_constants::, <regex>)
-SYMBOL(collate, std::regex_constants::, <regex>)
-SYMBOL(egrep, std::regex_constants::, <regex>)
-SYMBOL(error_backref, std::regex_constants::, <regex>)
-SYMBOL(error_badbrace, std::regex_constants::, <regex>)
-SYMBOL(error_badrepeat, std::regex_constants::, <regex>)
-SYMBOL(error_brace, std::regex_constants::, <regex>)
-SYMBOL(error_brack, std::regex_constants::, <regex>)
-SYMBOL(error_collate, std::regex_constants::, <regex>)
-SYMBOL(error_complexity, std::regex_constants::, <regex>)
-SYMBOL(error_ctype, std::regex_constants::, <regex>)
-SYMBOL(error_escape, std::regex_constants::, <regex>)
-SYMBOL(error_paren, std::regex_constants::, <regex>)
-SYMBOL(error_range, std::regex_constants::, <regex>)
-SYMBOL(error_space, std::regex_constants::, <regex>)
-SYMBOL(error_stack, std::regex_constants::, <regex>)
-SYMBOL(error_type, std::regex_constants::, <regex>)
-SYMBOL(extended, std::regex_constants::, <regex>)
-SYMBOL(format_default, std::regex_constants::, <regex>)
-SYMBOL(format_first_only, std::regex_constants::, <regex>)
-SYMBOL(format_no_copy, std::regex_constants::, <regex>)
-SYMBOL(format_sed, std::regex_constants::, <regex>)
-SYMBOL(grep, std::regex_constants::, <regex>)
-SYMBOL(icase, std::regex_constants::, <regex>)
-SYMBOL(match_any, std::regex_constants::, <regex>)
-SYMBOL(match_continuous, std::regex_constants::, <regex>)
-SYMBOL(match_default, std::regex_constants::, <regex>)
-SYMBOL(match_flag_type, std::regex_constants::, <regex>)
-SYMBOL(match_not_bol, std::regex_constants::, <regex>)
-SYMBOL(match_not_bow, std::regex_constants::, <regex>)
-SYMBOL(match_not_eol, std::regex_constants::, <regex>)
-SYMBOL(match_not_eow, std::regex_constants::, <regex>)
-SYMBOL(match_not_null, std::regex_constants::, <regex>)
-SYMBOL(match_prev_avail, std::regex_constants::, <regex>)
-SYMBOL(multiline, std::regex_constants::, <regex>)
-SYMBOL(nosubs, std::regex_constants::, <regex>)
-SYMBOL(optimize, std::regex_constants::, <regex>)
-SYMBOL(syntax_option_type, std::regex_constants::, <regex>)
-SYMBOL(get_id, std::this_thread::, <thread>)
-SYMBOL(sleep_for, std::this_thread::, <thread>)
-SYMBOL(sleep_until, std::this_thread::, <thread>)
-SYMBOL(yield, std::this_thread::, <thread>)
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
index 6fb2decf8614..015dbba26f68 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
@@ -124,10 +124,11 @@ public:
bool VisitDesignatedInitExpr(const DesignatedInitExpr *E) {
for (const DesignatedInitExpr::Designator &D : E->designators()) {
- if (D.isFieldDesignator() && D.getField()) {
- const FieldDecl *Decl = D.getField();
- if (!visit(Decl, D.getFieldLoc(), D.getFieldLoc()))
- return false;
+ if (D.isFieldDesignator()) {
+ if (const FieldDecl *Decl = D.getFieldDecl()) {
+ if (!visit(Decl, D.getFieldLoc(), D.getFieldLoc()))
+ return false;
+ }
}
}
return true;
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h b/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h
index 52a81cd9e778..13c1b51bf85f 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h
@@ -361,11 +361,6 @@ public:
/// append them to ASTs.
int buildASTs(std::vector<std::unique_ptr<ASTUnit>> &ASTs);
- /// Sets whether working directory should be restored after calling run(). By
- /// default, working directory is restored. However, it could be useful to
- /// turn this off when running on multiple threads to avoid the raciness.
- void setRestoreWorkingDir(bool RestoreCWD);
-
/// Sets whether an error message should be printed out if an action fails. By
/// default, if an action fails, a message is printed out to stderr.
void setPrintErrorMessage(bool PrintErrorMessage);
@@ -395,7 +390,6 @@ private:
DiagnosticConsumer *DiagConsumer = nullptr;
- bool RestoreCWD = true;
bool PrintErrorMessage = true;
};
@@ -506,6 +500,12 @@ llvm::Expected<std::string> getAbsolutePath(llvm::vfs::FileSystem &FS,
void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
StringRef InvokedAs);
+/// Helper function that expands response files in command line.
+void addExpandedResponseFiles(std::vector<std::string> &CommandLine,
+ llvm::StringRef WorkingDir,
+ llvm::cl::TokenizerCallback Tokenizer,
+ llvm::vfs::FileSystem &FS);
+
/// Creates a \c CompilerInvocation.
CompilerInvocation *newInvocation(DiagnosticsEngine *Diagnostics,
ArrayRef<const char *> CC1Args,
diff --git a/contrib/llvm-project/clang/include/clang/module.modulemap b/contrib/llvm-project/clang/include/clang/module.modulemap
deleted file mode 100644
index 57a9b4803127..000000000000
--- a/contrib/llvm-project/clang/include/clang/module.modulemap
+++ /dev/null
@@ -1,199 +0,0 @@
-module Clang_Analysis {
- requires cplusplus
- umbrella "Analysis"
-
- textual header "Analysis/Analyses/ThreadSafetyOps.def"
-
- module * { export * }
-
- // FIXME: Exclude these headers to avoid pulling all of the AST matchers
- // library into clang. Due to inline key functions in the headers,
- // importing the AST matchers library gives a link dependency on the AST
- // matchers (and thus the AST), which clang-format should not have.
- exclude header "Analysis/Analyses/ExprMutationAnalyzer.h"
-}
-
-module Clang_AST {
- requires cplusplus
- umbrella "AST"
-
- textual header "AST/BuiltinTypes.def"
- textual header "AST/CXXRecordDeclDefinitionBits.def"
- textual header "AST/OperationKinds.def"
- textual header "AST/TypeLocNodes.def"
-
- module * { export * }
-}
-
-module Clang_ASTMatchers { requires cplusplus umbrella "ASTMatchers" module * { export * } }
-
-module Clang_Basic {
- requires cplusplus
- umbrella "Basic"
-
- textual header "Basic/AArch64SVEACLETypes.def"
- textual header "Basic/BuiltinsAArch64.def"
- textual header "Basic/BuiltinsAMDGPU.def"
- textual header "Basic/BuiltinsAArch64NeonSVEBridge.def"
- textual header "Basic/BuiltinsAArch64NeonSVEBridge_cg.def"
- textual header "Basic/BuiltinsARM.def"
- textual header "Basic/BuiltinsBPF.def"
- textual header "Basic/Builtins.def"
- textual header "Basic/BuiltinHeaders.def"
- textual header "Basic/BuiltinsHexagon.def"
- textual header "Basic/BuiltinsHexagonDep.def"
- textual header "Basic/BuiltinsHexagonMapCustomDep.def"
- textual header "Basic/BuiltinsLoongArch.def"
- textual header "Basic/BuiltinsMips.def"
- textual header "Basic/BuiltinsNEON.def"
- textual header "Basic/BuiltinsNVPTX.def"
- textual header "Basic/BuiltinsPPC.def"
- textual header "Basic/BuiltinsRISCV.def"
- textual header "Basic/BuiltinsRISCVVector.def"
- textual header "Basic/BuiltinsSVE.def"
- textual header "Basic/BuiltinsSystemZ.def"
- textual header "Basic/BuiltinsVE.def"
- textual header "Basic/BuiltinsVEVL.gen.def"
- textual header "Basic/BuiltinsWebAssembly.def"
- textual header "Basic/BuiltinsX86.def"
- textual header "Basic/BuiltinsX86_64.def"
- textual header "Basic/BuiltinsXCore.def"
- textual header "Basic/CodeGenOptions.def"
- textual header "Basic/DiagnosticOptions.def"
- textual header "Basic/Features.def"
- textual header "Basic/FPOptions.def"
- textual header "Basic/MSP430Target.def"
- textual header "Basic/LangOptions.def"
- textual header "Basic/OpenCLExtensions.def"
- textual header "Basic/OpenCLImageTypes.def"
- textual header "Basic/OpenCLExtensionTypes.def"
- textual header "Basic/OpenMPKinds.def"
- textual header "Basic/OperatorKinds.def"
- textual header "Basic/PPCTypes.def"
- textual header "Basic/RISCVVTypes.def"
- textual header "Basic/Sanitizers.def"
- textual header "Basic/TargetCXXABI.def"
- textual header "Basic/TransformTypeTraits.def"
-
- module * { export * }
-}
-module Clang_Basic_TokenKinds {
- requires cplusplus
-
- header "Basic/TokenKinds.h"
- textual header "Basic/TokenKinds.def"
-
- export *
-}
-
-module Clang_CodeGen { requires cplusplus umbrella "CodeGen" module * { export * } }
-module Clang_Config { requires cplusplus umbrella "Config" module * { export * } }
-
-// Files for diagnostic groups are spread all over the include/clang/ tree, but
-// logically form a single module.
-module Clang_Diagnostics {
- requires cplusplus
-
- module All { header "Basic/AllDiagnostics.h" export * }
- module Analysis { textual header "Analysis/Analyses/UnsafeBufferUsageGadgets.def" }
- module AST { header "AST/ASTDiagnostic.h" export * }
- module Comment { header "AST/CommentDiagnostic.h" export * }
- module Driver { header "Driver/DriverDiagnostic.h" export * }
- module Frontend { header "Frontend/FrontendDiagnostic.h" export * }
- module Lex { header "Lex/LexDiagnostic.h" export * }
- module Parse { header "Parse/ParseDiagnostic.h" export * }
- module Sema { header "Sema/SemaDiagnostic.h" export * }
- module Serialization { header "Serialization/SerializationDiagnostic.h" export * }
- module Refactoring { header "Tooling/Refactoring/RefactoringDiagnostic.h" export * }
-}
-
-module Clang_Driver {
- requires cplusplus
- umbrella "Driver"
-
- textual header "Driver/Types.def"
-
- module * { export * }
-}
-
-module Clang_Edit { requires cplusplus umbrella "Edit" module * { export * } }
-module Clang_Format { requires cplusplus umbrella "Format" module * { export * } }
-
-module Clang_Frontend {
- requires cplusplus
- umbrella "Frontend"
-
- textual header "Basic/LangStandards.def"
-
- module * { export * }
-}
-
-module Clang_FrontendTool { requires cplusplus umbrella "FrontendTool" module * { export * } }
-module Clang_Index { requires cplusplus umbrella "Index" module * { export * } }
-module Clang_Lex { requires cplusplus umbrella "Lex" module * { export * } }
-module Clang_Parse { requires cplusplus umbrella "Parse" module * { export * } }
-module Clang_Rewrite { requires cplusplus umbrella "Rewrite/Core" module * { export * } }
-module Clang_RewriteFrontend { requires cplusplus umbrella "Rewrite/Frontend" module * { export * } }
-module Clang_Sema { requires cplusplus umbrella "Sema" module * { export * } }
-
-module Clang_Serialization {
- requires cplusplus
- umbrella "Serialization"
-
- textual header "Serialization/TypeBitCodes.def"
-
- module * { export * }
-}
-
-module Clang_StaticAnalyzer_Core {
- requires cplusplus
- umbrella "StaticAnalyzer/Core"
-
- textual header "StaticAnalyzer/Core/Analyses.def"
- textual header "StaticAnalyzer/Core/AnalyzerOptions.def"
- textual header "StaticAnalyzer/Core/PathSensitive/SVals.def"
- textual header "StaticAnalyzer/Core/PathSensitive/Symbols.def"
- textual header "StaticAnalyzer/Core/PathSensitive/Regions.def"
-
- module * { export * }
-}
-
-module Clang_StaticAnalyzer_Checkers {
- requires cplusplus
- umbrella "StaticAnalyzer/Checkers"
- module * { export * }
-}
-
-module Clang_StaticAnalyzer_Frontend {
- requires cplusplus
- umbrella "StaticAnalyzer/Frontend"
- module * { export * }
-}
-
-module Clang_Testing {
- requires cplusplus
- umbrella "Testing"
- module * { export * }
-}
-
-module Clang_Tooling {
- requires cplusplus umbrella "Tooling" module * { export * }
- // FIXME: Exclude these headers to avoid pulling all of the AST matchers
- // library into clang-format. Due to inline key functions in the headers,
- // importing the AST matchers library gives a link dependency on the AST
- // matchers (and thus the AST), which clang-format should not have.
- exclude header "Tooling/RefactoringCallbacks.h"
-}
-
-module Clang_ToolingCore {
- requires cplusplus
- umbrella "Tooling/Core" module * { export * }
-}
-
-module Clang_ToolingInclusions {
- requires cplusplus
- umbrella "Tooling/Inclusions"
- textual header "Tooling/Inclusions/CSymbolMap.inc"
- textual header "Tooling/Inclusions/StdSymbolMap.inc"
- module * { export * }
-}
diff --git a/contrib/llvm-project/clang/include/module.modulemap b/contrib/llvm-project/clang/include/module.modulemap
new file mode 100644
index 000000000000..6ea613c70306
--- /dev/null
+++ b/contrib/llvm-project/clang/include/module.modulemap
@@ -0,0 +1,205 @@
+module Clang_C {
+ umbrella "clang-c"
+ module * { export * }
+}
+
+module Clang_Analysis {
+ requires cplusplus
+ umbrella "clang/Analysis"
+
+ textual header "clang/Analysis/Analyses/ThreadSafetyOps.def"
+
+ module * { export * }
+
+ // FIXME: Exclude these headers to avoid pulling all of the AST matchers
+ // library into clang. Due to inline key functions in the headers,
+ // importing the AST matchers library gives a link dependency on the AST
+ // matchers (and thus the AST), which clang-format should not have.
+ exclude header "clang/Analysis/Analyses/ExprMutationAnalyzer.h"
+}
+
+module Clang_AST {
+ requires cplusplus
+ umbrella "clang/AST"
+
+ textual header "clang/AST/BuiltinTypes.def"
+ textual header "clang/AST/CXXRecordDeclDefinitionBits.def"
+ textual header "clang/AST/OperationKinds.def"
+ textual header "clang/AST/TypeLocNodes.def"
+
+ module * { export * }
+}
+
+module Clang_ASTMatchers { requires cplusplus umbrella "clang/ASTMatchers" module * { export * } }
+
+module Clang_Basic {
+ requires cplusplus
+ umbrella "clang/Basic"
+
+ textual header "clang/Basic/AArch64SVEACLETypes.def"
+ textual header "clang/Basic/BuiltinsAArch64.def"
+ textual header "clang/Basic/BuiltinsAMDGPU.def"
+ textual header "clang/Basic/BuiltinsAArch64NeonSVEBridge.def"
+ textual header "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def"
+ textual header "clang/Basic/BuiltinsARM.def"
+ textual header "clang/Basic/BuiltinsBPF.def"
+ textual header "clang/Basic/Builtins.def"
+ textual header "clang/Basic/BuiltinHeaders.def"
+ textual header "clang/Basic/BuiltinsHexagon.def"
+ textual header "clang/Basic/BuiltinsHexagonDep.def"
+ textual header "clang/Basic/BuiltinsHexagonMapCustomDep.def"
+ textual header "clang/Basic/BuiltinsLoongArch.def"
+ textual header "clang/Basic/BuiltinsMips.def"
+ textual header "clang/Basic/BuiltinsNEON.def"
+ textual header "clang/Basic/BuiltinsNVPTX.def"
+ textual header "clang/Basic/BuiltinsPPC.def"
+ textual header "clang/Basic/BuiltinsRISCV.def"
+ textual header "clang/Basic/BuiltinsRISCVVector.def"
+ textual header "clang/Basic/BuiltinsSME.def"
+ textual header "clang/Basic/BuiltinsSVE.def"
+ textual header "clang/Basic/BuiltinsSystemZ.def"
+ textual header "clang/Basic/BuiltinsVE.def"
+ textual header "clang/Basic/BuiltinsVEVL.gen.def"
+ textual header "clang/Basic/BuiltinsWebAssembly.def"
+ textual header "clang/Basic/BuiltinsX86.def"
+ textual header "clang/Basic/BuiltinsX86_64.def"
+ textual header "clang/Basic/BuiltinsXCore.def"
+ textual header "clang/Basic/CodeGenOptions.def"
+ textual header "clang/Basic/DiagnosticOptions.def"
+ textual header "clang/Basic/Features.def"
+ textual header "clang/Basic/FPOptions.def"
+ textual header "clang/Basic/MSP430Target.def"
+ textual header "clang/Basic/LangOptions.def"
+ textual header "clang/Basic/OpenCLExtensions.def"
+ textual header "clang/Basic/OpenCLImageTypes.def"
+ textual header "clang/Basic/OpenCLExtensionTypes.def"
+ textual header "clang/Basic/OpenMPKinds.def"
+ textual header "clang/Basic/OperatorKinds.def"
+ textual header "clang/Basic/PPCTypes.def"
+ textual header "clang/Basic/RISCVVTypes.def"
+ textual header "clang/Basic/Sanitizers.def"
+ textual header "clang/Basic/TargetCXXABI.def"
+ textual header "clang/Basic/TransformTypeTraits.def"
+ textual header "clang/Basic/TokenKinds.def"
+ textual header "clang/Basic/WebAssemblyReferenceTypes.def"
+
+ module * { export * }
+}
+module Clang_Basic_TokenKinds {
+ requires cplusplus
+
+ header "clang/Basic/TokenKinds.h"
+ textual header "clang/Basic/TokenKinds.def"
+
+ export *
+}
+
+module Clang_CodeGen { requires cplusplus umbrella "clang/CodeGen" module * { export * } }
+module Clang_Config { requires cplusplus umbrella "clang/Config" module * { export * } }
+
+// Files for diagnostic groups are spread all over the include/clang/ tree, but
+// logically form a single module.
+module Clang_Diagnostics {
+ requires cplusplus
+
+ module All { header "clang/Basic/AllDiagnostics.h" export * }
+ module Analysis { textual header "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def" }
+ module AST { header "clang/AST/ASTDiagnostic.h" export * }
+ module Comment { header "clang/AST/CommentDiagnostic.h" export * }
+ module Driver { header "clang/Driver/DriverDiagnostic.h" export * }
+ module Frontend { header "clang/Frontend/FrontendDiagnostic.h" export * }
+ module Lex { header "clang/Lex/LexDiagnostic.h" export * }
+ module Parse { header "clang/Parse/ParseDiagnostic.h" export * }
+ module Sema { header "clang/Sema/SemaDiagnostic.h" export * }
+ module Serialization { header "clang/Serialization/SerializationDiagnostic.h" export * }
+ module Refactoring { header "clang/Tooling/Refactoring/RefactoringDiagnostic.h" export * }
+}
+
+module Clang_Driver {
+ requires cplusplus
+ umbrella "clang/Driver"
+
+ textual header "clang/Driver/Types.def"
+
+ module * { export * }
+}
+
+module Clang_Edit { requires cplusplus umbrella "clang/Edit" module * { export * } }
+module Clang_Format { requires cplusplus umbrella "clang/Format" module * { export * } }
+
+module Clang_Frontend {
+ requires cplusplus
+ umbrella "clang/Frontend"
+
+ textual header "clang/Basic/LangStandards.def"
+
+ module * { export * }
+}
+
+module Clang_FrontendTool { requires cplusplus umbrella "clang/FrontendTool" module * { export * } }
+module Clang_Index { requires cplusplus umbrella "clang/Index" module * { export * } }
+module Clang_Lex { requires cplusplus umbrella "clang/Lex" module * { export * } }
+module Clang_Parse { requires cplusplus umbrella "clang/Parse" module * { export * } }
+module Clang_Rewrite { requires cplusplus umbrella "clang/Rewrite/Core" module * { export * } }
+module Clang_RewriteFrontend { requires cplusplus umbrella "clang/Rewrite/Frontend" module * { export * } }
+module Clang_Sema { requires cplusplus umbrella "clang/Sema" module * { export * } }
+
+module Clang_Serialization {
+ requires cplusplus
+ umbrella "clang/Serialization"
+
+ textual header "clang/Serialization/TypeBitCodes.def"
+
+ module * { export * }
+}
+
+module Clang_StaticAnalyzer_Core {
+ requires cplusplus
+ umbrella "clang/StaticAnalyzer/Core"
+
+ textual header "clang/StaticAnalyzer/Core/Analyses.def"
+ textual header "clang/StaticAnalyzer/Core/AnalyzerOptions.def"
+ textual header "clang/StaticAnalyzer/Core/PathSensitive/SVals.def"
+ textual header "clang/StaticAnalyzer/Core/PathSensitive/Symbols.def"
+ textual header "clang/StaticAnalyzer/Core/PathSensitive/Regions.def"
+
+ module * { export * }
+}
+
+module Clang_StaticAnalyzer_Checkers {
+ requires cplusplus
+ umbrella "clang/StaticAnalyzer/Checkers"
+ module * { export * }
+}
+
+module Clang_StaticAnalyzer_Frontend {
+ requires cplusplus
+ umbrella "clang/StaticAnalyzer/Frontend"
+ module * { export * }
+}
+
+module Clang_Testing {
+ requires cplusplus
+ umbrella "clang/Testing"
+ module * { export * }
+}
+
+module Clang_Tooling {
+ requires cplusplus umbrella "clang/Tooling" module * { export * }
+ // FIXME: Exclude these headers to avoid pulling all of the AST matchers
+ // library into clang-format. Due to inline key functions in the headers,
+ // importing the AST matchers library gives a link dependency on the AST
+ // matchers (and thus the AST), which clang-format should not have.
+ exclude header "clang/Tooling/RefactoringCallbacks.h"
+}
+
+module Clang_ToolingCore {
+ requires cplusplus
+ umbrella "clang/Tooling/Core" module * { export * }
+}
+
+module Clang_ToolingInclusions {
+ requires cplusplus
+ umbrella "clang/Tooling/Inclusions"
+ module * { export * }
+}
diff --git a/contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h b/contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h
index 6b76ecfc2567..a0a5efe8f9be 100644
--- a/contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h
+++ b/contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h
@@ -220,7 +220,7 @@ using TagDataLayout =
// below)
llvm::BCBlob // map from name to tag information
>;
-}; // namespace tag_block
+} // namespace tag_block
namespace typedef_block {
enum { TYPEDEF_DATA = 1 };
@@ -231,7 +231,7 @@ using TypedefDataLayout =
// below)
llvm::BCBlob // map from name to typedef information
>;
-}; // namespace typedef_block
+} // namespace typedef_block
namespace enum_constant_block {
enum { ENUM_CONSTANT_DATA = 1 };
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
index 68ee7c59270e..ac79f3f03e6b 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#include "Internals.h"
#include "clang/ARCMigrate/ARCMT.h"
+#include "Internals.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/Basic/DiagnosticCategories.h"
#include "clang/Frontend/ASTUnit.h"
@@ -20,8 +20,8 @@
#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Serialization/ASTReader.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/TargetParser/Triple.h"
#include <utility>
using namespace clang;
using namespace arcmt;
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp
index 92027fe4f1f4..7e56e0683c12 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp
@@ -251,8 +251,7 @@ const FileEntry *FileRemapper::getOriginalFile(StringRef filePath) {
I = ToFromMappings.find(file);
if (I != ToFromMappings.end()) {
file = I->second;
- assert(FromToMappings.find(file) != FromToMappings.end() &&
- "Original file not in mappings!");
+ assert(FromToMappings.contains(file) && "Original file not in mappings!");
}
return file;
}
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp
index ce1decd3ba3e..c76efd78976a 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -153,8 +153,7 @@ protected:
bool canModifyFile(StringRef Path) {
if (AllowListFilenames.empty())
return true;
- return AllowListFilenames.find(llvm::sys::path::filename(Path)) !=
- AllowListFilenames.end();
+ return AllowListFilenames.contains(llvm::sys::path::filename(Path));
}
bool canModifyFile(OptionalFileEntryRef FE) {
if (!FE)
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp
index e5ccf1cf79b1..6d1d950821a0 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp
@@ -45,7 +45,7 @@ namespace {
class PropertiesRewriter {
MigrationContext &MigrateCtx;
MigrationPass &Pass;
- ObjCImplementationDecl *CurImplD;
+ ObjCImplementationDecl *CurImplD = nullptr;
enum PropActionKind {
PropAction_None,
diff --git a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
index 8054eb2e12d3..7acacd7bf4f5 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
@@ -77,7 +77,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
@@ -85,6 +85,7 @@
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -181,22 +182,96 @@ static SourceLocation getDeclLocForCommentSearch(const Decl *D,
const SourceLocation DeclLoc = D->getLocation();
if (DeclLoc.isMacroID()) {
- if (isa<TypedefDecl>(D)) {
- // If location of the typedef name is in a macro, it is because being
- // declared via a macro. Try using declaration's starting location as
- // the "declaration location".
- return D->getBeginLoc();
- }
-
- if (const auto *TD = dyn_cast<TagDecl>(D)) {
- // If location of the tag decl is inside a macro, but the spelling of
- // the tag name comes from a macro argument, it looks like a special
- // macro like NS_ENUM is being used to define the tag decl. In that
- // case, adjust the source location to the expansion loc so that we can
- // attach the comment to the tag decl.
- if (SourceMgr.isMacroArgExpansion(DeclLoc) && TD->isCompleteDefinition())
- return SourceMgr.getExpansionLoc(DeclLoc);
+ // There are (at least) three types of macros we care about here.
+ //
+ // 1. Macros that are used in the definition of a type outside the macro,
+ // with a comment attached at the macro call site.
+ // ```
+ // #define MAKE_NAME(Foo) Name##Foo
+ //
+ // /// Comment is here, where we use the macro.
+ // struct MAKE_NAME(Foo) {
+ // int a;
+ // int b;
+ // };
+ // ```
+ // 2. Macros that define whole things along with the comment.
+ // ```
+ // #define MAKE_METHOD(name) \
+ // /** Comment is here, inside the macro. */ \
+ // void name() {}
+ //
+ // struct S {
+ // MAKE_METHOD(f)
+ // }
+ // ```
+ // 3. Macros that both declare a type and name a decl outside the macro.
+ // ```
+ // /// Comment is here, where we use the macro.
+ // typedef NS_ENUM(NSInteger, Size) {
+ // SizeWidth,
+ // SizeHeight
+ // };
+ // ```
+ // In this case NS_ENUM declares am enum type, and uses the same name for
+ // the typedef declaration that appears outside the macro. The comment
+ // here should be applied to both declarations inside and outside the
+ // macro.
+ //
+ // We have found a Decl name that comes from inside a macro, but
+ // Decl::getLocation() returns the place where the macro is being called.
+ // If the declaration (and not just the name) resides inside the macro,
+ // then we want to map Decl::getLocation() into the macro to where the
+ // declaration and its attached comment (if any) were written.
+ //
+ // This mapping into the macro is done by mapping the location to its
+ // spelling location, however even if the declaration is inside a macro,
+ // the name's spelling can come from a macro argument (case 2 above). In
+ // this case mapping the location to the spelling location finds the
+ // argument's position (at `f` in MAKE_METHOD(`f`) above), which is not
+ // where the declaration and its comment are located.
+ //
+ // To avoid this issue, we make use of Decl::getBeginLocation() instead.
+ // While the declaration's position is where the name is written, the
+ // comment is always attached to the begining of the declaration, not to
+ // the name.
+ //
+ // In the first case, the begin location of the decl is outside the macro,
+ // at the location of `typedef`. This is where the comment is found as
+ // well. The begin location is not inside a macro, so it's spelling
+ // location is the same.
+ //
+ // In the second case, the begin location of the decl is the call to the
+ // macro, at `MAKE_METHOD`. However its spelling location is inside the
+ // the macro at the location of `void`. This is where the comment is found
+ // again.
+ //
+ // In the third case, there's no correct single behaviour. We want to use
+ // the comment outside the macro for the definition that's inside the macro.
+ // There is also a definition outside the macro, and we want the comment to
+ // apply to both. The cases we care about here is NS_ENUM() and
+ // NS_OPTIONS(). In general, if an enum is defined inside a macro, we should
+ // try to find the comment there.
+
+ // This is handling case 3 for NS_ENUM() and NS_OPTIONS(), which define
+ // enum types inside the macro.
+ if (isa<EnumDecl>(D)) {
+ SourceLocation MacroCallLoc = SourceMgr.getExpansionLoc(DeclLoc);
+ if (auto BufferRef =
+ SourceMgr.getBufferOrNone(SourceMgr.getFileID(MacroCallLoc));
+ BufferRef.has_value()) {
+ llvm::StringRef buffer = BufferRef->getBuffer().substr(
+ SourceMgr.getFileOffset(MacroCallLoc));
+ if (buffer.starts_with("NS_ENUM(") ||
+ buffer.starts_with("NS_OPTIONS(")) {
+ // We want to use the comment on the call to NS_ENUM and NS_OPTIONS
+ // macros for the types defined inside the macros, which is at the
+ // expansion location.
+ return MacroCallLoc;
+ }
+ }
}
+ return SourceMgr.getSpellingLoc(D->getBeginLoc());
}
return DeclLoc;
@@ -274,7 +349,7 @@ RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
// There should be no other declarations or preprocessor directives between
// comment and declaration.
- if (Text.find_first_of(";{}#@") != StringRef::npos)
+ if (Text.find_last_of(";{}#@") != StringRef::npos)
return nullptr;
return CommentBeforeDecl;
@@ -424,10 +499,7 @@ const RawComment *ASTContext::getRawCommentForAnyRedecl(
// Any redeclarations of D that we haven't checked for comments yet?
// We can't use DenseMap::iterator directly since it'd get invalid.
auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
- auto LookupRes = CommentlessRedeclChains.find(CanonicalD);
- if (LookupRes != CommentlessRedeclChains.end())
- return LookupRes->second;
- return nullptr;
+ return CommentlessRedeclChains.lookup(CanonicalD);
}();
for (const auto Redecl : D->redecls()) {
@@ -679,11 +751,6 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
ID.AddInteger(0);
ID.AddBoolean(TTP->isParameterPack());
- const TypeConstraint *TC = TTP->getTypeConstraint();
- ID.AddBoolean(TC != nullptr);
- if (TC)
- TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
- /*Canonical=*/true);
if (TTP->isExpandedParameterPack()) {
ID.AddBoolean(true);
ID.AddInteger(TTP->getNumExpansionParameters());
@@ -695,11 +762,8 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
ID.AddInteger(1);
ID.AddBoolean(NTTP->isParameterPack());
- const Expr *TC = NTTP->getPlaceholderTypeConstraint();
- ID.AddBoolean(TC != nullptr);
- ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
- if (TC)
- TC->Profile(ID, C, /*Canonical=*/true);
+ ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType()))
+ .getAsOpaquePtr());
if (NTTP->isExpandedParameterPack()) {
ID.AddBoolean(true);
ID.AddInteger(NTTP->getNumExpansionTypes());
@@ -716,65 +780,6 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
ID.AddInteger(2);
Profile(ID, C, TTP);
}
- Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause();
- ID.AddBoolean(RequiresClause != nullptr);
- if (RequiresClause)
- RequiresClause->Profile(ID, C, /*Canonical=*/true);
-}
-
-static Expr *
-canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
- QualType ConstrainedType) {
- // This is a bit ugly - we need to form a new immediately-declared
- // constraint that references the new parameter; this would ideally
- // require semantic analysis (e.g. template<C T> struct S {}; - the
- // converted arguments of C<T> could be an argument pack if C is
- // declared as template<typename... T> concept C = ...).
- // We don't have semantic analysis here so we dig deep into the
- // ready-made constraint expr and change the thing manually.
- ConceptSpecializationExpr *CSE;
- if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
- CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
- else
- CSE = cast<ConceptSpecializationExpr>(IDC);
- ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
- SmallVector<TemplateArgument, 3> NewConverted;
- NewConverted.reserve(OldConverted.size());
- if (OldConverted.front().getKind() == TemplateArgument::Pack) {
- // The case:
- // template<typename... T> concept C = true;
- // template<C<int> T> struct S; -> constraint is C<{T, int}>
- NewConverted.push_back(ConstrainedType);
- llvm::append_range(NewConverted,
- OldConverted.front().pack_elements().drop_front(1));
- TemplateArgument NewPack(NewConverted);
-
- NewConverted.clear();
- NewConverted.push_back(NewPack);
- assert(OldConverted.size() == 1 &&
- "Template parameter pack should be the last parameter");
- } else {
- assert(OldConverted.front().getKind() == TemplateArgument::Type &&
- "Unexpected first argument kind for immediately-declared "
- "constraint");
- NewConverted.push_back(ConstrainedType);
- llvm::append_range(NewConverted, OldConverted.drop_front(1));
- }
- auto *CSD = ImplicitConceptSpecializationDecl::Create(
- C, CSE->getNamedConcept()->getDeclContext(),
- CSE->getNamedConcept()->getLocation(), NewConverted);
-
- Expr *NewIDC = ConceptSpecializationExpr::Create(
- C, CSE->getNamedConcept(), CSE->getTemplateArgsAsWritten(), CSD,
- /*Satisfaction=*/nullptr, CSE->isInstantiationDependent(),
- CSE->containsUnexpandedParameterPack());
-
- if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
- NewIDC = new (C) CXXFoldExpr(
- OrigFold->getType(), /*Callee*/ nullptr, SourceLocation(), NewIDC,
- BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr,
- SourceLocation(), /*NumExpansions=*/std::nullopt);
- return NewIDC;
}
TemplateTemplateParmDecl *
@@ -796,30 +801,19 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
for (TemplateParameterList::const_iterator P = Params->begin(),
PEnd = Params->end();
P != PEnd; ++P) {
+ // Note that, per C++20 [temp.over.link]/6, when determining whether
+ // template-parameters are equivalent, constraints are ignored.
if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
*this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
TTP->getDepth(), TTP->getIndex(), nullptr, false,
- TTP->isParameterPack(), TTP->hasTypeConstraint(),
+ TTP->isParameterPack(), /*HasTypeConstraint=*/false,
TTP->isExpandedParameterPack()
? std::optional<unsigned>(TTP->getNumExpansionParameters())
: std::nullopt);
- if (const auto *TC = TTP->getTypeConstraint()) {
- QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0);
- Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint(
- *this, TC->getImmediatelyDeclaredConstraint(),
- ParamAsArgument);
- NewTTP->setTypeConstraint(
- NestedNameSpecifierLoc(),
- DeclarationNameInfo(TC->getNamedConcept()->getDeclName(),
- SourceLocation()), /*FoundDecl=*/nullptr,
- // Actually canonicalizing a TemplateArgumentLoc is difficult so we
- // simply omit the ArgsAsWritten
- TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
- }
CanonParams.push_back(NewTTP);
} else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
- QualType T = getCanonicalType(NTTP->getType());
+ QualType T = getUnconstrainedType(getCanonicalType(NTTP->getType()));
TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
NonTypeTemplateParmDecl *Param;
if (NTTP->isExpandedParameterPack()) {
@@ -850,35 +844,18 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
NTTP->isParameterPack(),
TInfo);
}
- if (AutoType *AT = T->getContainedAutoType()) {
- if (AT->isConstrained()) {
- Param->setPlaceholderTypeConstraint(
- canonicalizeImmediatelyDeclaredConstraint(
- *this, NTTP->getPlaceholderTypeConstraint(), T));
- }
- }
CanonParams.push_back(Param);
-
} else
CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
cast<TemplateTemplateParmDecl>(*P)));
}
- Expr *CanonRequiresClause = nullptr;
- if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause())
- CanonRequiresClause = RequiresClause;
-
- TemplateTemplateParmDecl *CanonTTP
- = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
- SourceLocation(), TTP->getDepth(),
- TTP->getPosition(),
- TTP->isParameterPack(),
- nullptr,
- TemplateParameterList::Create(*this, SourceLocation(),
- SourceLocation(),
- CanonParams,
- SourceLocation(),
- CanonRequiresClause));
+ TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
+ *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(),
+ TTP->getPosition(), TTP->isParameterPack(), nullptr,
+ TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(),
+ CanonParams, SourceLocation(),
+ /*RequiresClause=*/nullptr));
// Get the new insert position for the node we care about.
Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
@@ -1172,6 +1149,13 @@ ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
return Inits->Initializers;
}
+void ASTContext::setCurrentNamedModule(Module *M) {
+ assert(M->isModulePurview());
+ assert(!CurrentCXXNamedModule &&
+ "We should set named module for ASTContext for only once");
+ CurrentCXXNamedModule = M;
+}
+
ExternCContextDecl *ASTContext::getExternCContextDecl() const {
if (!ExternCContext)
ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
@@ -1433,6 +1417,12 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
#include "clang/Basic/RISCVVTypes.def"
}
+ if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) {
+#define WASM_TYPE(Name, Id, SingletonId) \
+ InitBuiltinType(SingletonId, BuiltinType::Id);
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+ }
+
// Builtin type for __objc_yes and __objc_no
ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
SignedCharTy : BoolTy);
@@ -1530,11 +1520,7 @@ ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
- auto Pos = InstantiatedFromUsingDecl.find(UUD);
- if (Pos == InstantiatedFromUsingDecl.end())
- return nullptr;
-
- return Pos->second;
+ return InstantiatedFromUsingDecl.lookup(UUD);
}
void
@@ -1553,11 +1539,7 @@ ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
- auto Pos = InstantiatedFromUsingEnumDecl.find(UUD);
- if (Pos == InstantiatedFromUsingEnumDecl.end())
- return nullptr;
-
- return Pos->second;
+ return InstantiatedFromUsingEnumDecl.lookup(UUD);
}
void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
@@ -1568,12 +1550,7 @@ void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
- llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
- = InstantiatedFromUsingShadowDecl.find(Inst);
- if (Pos == InstantiatedFromUsingShadowDecl.end())
- return nullptr;
-
- return Pos->second;
+ return InstantiatedFromUsingShadowDecl.lookup(Inst);
}
void
@@ -1584,12 +1561,7 @@ ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
}
FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
- llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
- = InstantiatedFromUnnamedFieldDecl.find(Field);
- if (Pos == InstantiatedFromUnnamedFieldDecl.end())
- return nullptr;
-
- return Pos->second;
+ return InstantiatedFromUnnamedFieldDecl.lookup(Field);
}
void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
@@ -1696,11 +1668,11 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
case BuiltinType::Ibm128:
return Target->getIbm128Format();
case BuiltinType::LongDouble:
- if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
return AuxTarget->getLongDoubleFormat();
return Target->getLongDoubleFormat();
case BuiltinType::Float128:
- if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
return AuxTarget->getFloat128Format();
return Target->getFloat128Format();
}
@@ -2003,13 +1975,14 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
TypeInfo EltInfo = getTypeInfo(VT->getElementType());
Width = VT->isExtVectorBoolType() ? VT->getNumElements()
: EltInfo.Width * VT->getNumElements();
- // Enforce at least byte alignment.
+ // Enforce at least byte size and alignment.
+ Width = std::max<unsigned>(8, Width);
Align = std::max<unsigned>(8, Width);
// If the alignment is not a power of 2, round up to the next power of 2.
// This happens for non-power-of-2 length vectors.
if (Align & (Align-1)) {
- Align = llvm::NextPowerOf2(Align);
+ Align = llvm::bit_ceil(Align);
Width = llvm::alignTo(Width, Align);
}
// Adjust the alignment based on the target max.
@@ -2023,6 +1996,9 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
// Adjust the alignment for fixed-length SVE predicates.
Align = 16;
+ else if (VT->getVectorKind() == VectorType::RVVFixedLengthDataVector)
+ // Adjust the alignment for fixed-length RVV vectors.
+ Align = std::min<unsigned>(64, Width);
break;
}
@@ -2141,16 +2117,22 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
if (Target->hasBFloat16Type()) {
Width = Target->getBFloat16Width();
Align = Target->getBFloat16Align();
+ } else if ((getLangOpts().SYCLIsDevice ||
+ (getLangOpts().OpenMP &&
+ getLangOpts().OpenMPIsTargetDevice)) &&
+ AuxTarget->hasBFloat16Type()) {
+ Width = AuxTarget->getBFloat16Width();
+ Align = AuxTarget->getBFloat16Align();
}
break;
case BuiltinType::Float16:
case BuiltinType::Half:
if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
- !getLangOpts().OpenMPIsDevice) {
+ !getLangOpts().OpenMPIsTargetDevice) {
Width = Target->getHalfWidth();
Align = Target->getHalfAlign();
} else {
- assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
"Expected OpenMP device compilation.");
Width = AuxTarget->getHalfWidth();
Align = AuxTarget->getHalfAlign();
@@ -2169,7 +2151,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Align = Target->getIbm128Align();
break;
case BuiltinType::LongDouble:
- if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
(Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
Width = AuxTarget->getLongDoubleWidth();
@@ -2181,11 +2163,11 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
break;
case BuiltinType::Float128:
if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
- !getLangOpts().OpenMPIsDevice) {
+ !getLangOpts().OpenMPIsTargetDevice) {
Width = Target->getFloat128Width();
Align = Target->getFloat128Align();
} else {
- assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
"Expected OpenMP device compilation.");
Width = AuxTarget->getFloat128Width();
Align = AuxTarget->getFloat128Align();
@@ -2236,6 +2218,11 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Width = 0; \
Align = 16; \
break;
+#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \
+ case BuiltinType::Id: \
+ Width = 0; \
+ Align = 16; \
+ break;
#include "clang/Basic/AArch64SVEACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id: \
@@ -2255,6 +2242,12 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Align = 8; \
break;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ Width = 0; \
+ Align = 8; \
+ break;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
}
break;
case Type::ObjCObjectPointer:
@@ -2313,10 +2306,8 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
}
case Type::BitInt: {
const auto *EIT = cast<BitIntType>(T);
- Align =
- std::min(static_cast<unsigned>(std::max(
- getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))),
- Target->getLongLongAlign());
+ Align = std::clamp<unsigned>(llvm::PowerOf2Ceil(EIT->getNumBits()),
+ getCharWidth(), Target->getLongLongAlign());
Width = llvm::alignTo(EIT->getNumBits(), Align);
break;
}
@@ -2422,8 +2413,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
// favorable to atomic operations:
// Round the size up to a power of 2.
- if (!llvm::isPowerOf2_64(Width))
- Width = llvm::NextPowerOf2(Width);
+ Width = llvm::bit_ceil(Width);
// Set the alignment equal to the size.
Align = static_cast<unsigned>(Width);
@@ -2463,7 +2453,8 @@ unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
}
unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
- unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
+ unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
+ getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap);
return SimdAlign;
}
@@ -2667,12 +2658,14 @@ void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
}
static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
- const RecordDecl *RD) {
+ const RecordDecl *RD,
+ bool CheckIfTriviallyCopyable) {
assert(RD->isUnion() && "Must be union type");
CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
for (const auto *Field : RD->fields()) {
- if (!Context.hasUniqueObjectRepresentations(Field->getType()))
+ if (!Context.hasUniqueObjectRepresentations(Field->getType(),
+ CheckIfTriviallyCopyable))
return false;
CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
if (FieldSize != UnionSize)
@@ -2695,26 +2688,35 @@ static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
- const RecordDecl *RD);
+ const RecordDecl *RD,
+ bool CheckIfTriviallyCopyable);
static std::optional<int64_t>
-getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) {
+getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
+ bool CheckIfTriviallyCopyable) {
if (Field->getType()->isRecordType()) {
const RecordDecl *RD = Field->getType()->getAsRecordDecl();
if (!RD->isUnion())
- return structHasUniqueObjectRepresentations(Context, RD);
+ return structHasUniqueObjectRepresentations(Context, RD,
+ CheckIfTriviallyCopyable);
}
// A _BitInt type may not be unique if it has padding bits
// but if it is a bitfield the padding bits are not used.
bool IsBitIntType = Field->getType()->isBitIntType();
if (!Field->getType()->isReferenceType() && !IsBitIntType &&
- !Context.hasUniqueObjectRepresentations(Field->getType()))
+ !Context.hasUniqueObjectRepresentations(Field->getType(),
+ CheckIfTriviallyCopyable))
return std::nullopt;
int64_t FieldSizeInBits =
Context.toBits(Context.getTypeSizeInChars(Field->getType()));
if (Field->isBitField()) {
+ // If we have explicit padding bits, they don't contribute bits
+ // to the actual object representation, so return 0.
+ if (Field->isUnnamedBitfield())
+ return 0;
+
int64_t BitfieldSize = Field->getBitWidthValue(Context);
if (IsBitIntType) {
if ((unsigned)BitfieldSize >
@@ -2724,25 +2726,28 @@ getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) {
return std::nullopt;
}
FieldSizeInBits = BitfieldSize;
- } else if (IsBitIntType &&
- !Context.hasUniqueObjectRepresentations(Field->getType())) {
+ } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
+ Field->getType(), CheckIfTriviallyCopyable)) {
return std::nullopt;
}
return FieldSizeInBits;
}
static std::optional<int64_t>
-getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context) {
- return structHasUniqueObjectRepresentations(Context, RD);
+getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context,
+ bool CheckIfTriviallyCopyable) {
+ return structHasUniqueObjectRepresentations(Context, RD,
+ CheckIfTriviallyCopyable);
}
template <typename RangeT>
static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
const RangeT &Subobjects, int64_t CurOffsetInBits,
- const ASTContext &Context, const clang::ASTRecordLayout &Layout) {
+ const ASTContext &Context, const clang::ASTRecordLayout &Layout,
+ bool CheckIfTriviallyCopyable) {
for (const auto *Subobject : Subobjects) {
std::optional<int64_t> SizeInBits =
- getSubobjectSizeInBits(Subobject, Context);
+ getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
if (!SizeInBits)
return std::nullopt;
if (*SizeInBits != 0) {
@@ -2757,7 +2762,8 @@ static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
- const RecordDecl *RD) {
+ const RecordDecl *RD,
+ bool CheckIfTriviallyCopyable) {
assert(!RD->isUnion() && "Must be struct/class type");
const auto &Layout = Context.getASTRecordLayout(RD);
@@ -2778,8 +2784,8 @@ structHasUniqueObjectRepresentations(const ASTContext &Context,
});
std::optional<int64_t> OffsetAfterBases =
- structSubobjectsHaveUniqueObjectRepresentations(Bases, CurOffsetInBits,
- Context, Layout);
+ structSubobjectsHaveUniqueObjectRepresentations(
+ Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
if (!OffsetAfterBases)
return std::nullopt;
CurOffsetInBits = *OffsetAfterBases;
@@ -2787,7 +2793,8 @@ structHasUniqueObjectRepresentations(const ASTContext &Context,
std::optional<int64_t> OffsetAfterFields =
structSubobjectsHaveUniqueObjectRepresentations(
- RD->fields(), CurOffsetInBits, Context, Layout);
+ RD->fields(), CurOffsetInBits, Context, Layout,
+ CheckIfTriviallyCopyable);
if (!OffsetAfterFields)
return std::nullopt;
CurOffsetInBits = *OffsetAfterFields;
@@ -2795,7 +2802,8 @@ structHasUniqueObjectRepresentations(const ASTContext &Context,
return CurOffsetInBits;
}
-bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
+bool ASTContext::hasUniqueObjectRepresentations(
+ QualType Ty, bool CheckIfTriviallyCopyable) const {
// C++17 [meta.unary.prop]:
// The predicate condition for a template specialization
// has_unique_object_representations<T> shall be
@@ -2817,16 +2825,17 @@ bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
// Arrays are unique only if their element type is unique.
if (Ty->isArrayType())
- return hasUniqueObjectRepresentations(getBaseElementType(Ty));
+ return hasUniqueObjectRepresentations(getBaseElementType(Ty),
+ CheckIfTriviallyCopyable);
// (9.1) - T is trivially copyable...
- if (!Ty.isTriviallyCopyableType(*this))
+ if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this))
return false;
// All integrals and enums are unique.
if (Ty->isIntegralOrEnumerationType()) {
// Except _BitInt types that have padding bits.
- if (const auto *BIT = dyn_cast<BitIntType>(Ty))
+ if (const auto *BIT = Ty->getAs<BitIntType>())
return getTypeSize(BIT) == BIT->getNumBits();
return true;
@@ -2836,10 +2845,8 @@ bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
if (Ty->isPointerType())
return true;
- if (Ty->isMemberPointerType()) {
- const auto *MPT = Ty->getAs<MemberPointerType>();
+ if (const auto *MPT = Ty->getAs<MemberPointerType>())
return !ABI->getMemberPointerInfo(MPT).HasPadding;
- }
if (Ty->isRecordType()) {
const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();
@@ -2848,10 +2855,11 @@ bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
return false;
if (Record->isUnion())
- return unionHasUniqueObjectRepresentations(*this, Record);
+ return unionHasUniqueObjectRepresentations(*this, Record,
+ CheckIfTriviallyCopyable);
- std::optional<int64_t> StructSize =
- structHasUniqueObjectRepresentations(*this, Record);
+ std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
+ *this, Record, CheckIfTriviallyCopyable);
return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty));
}
@@ -2990,7 +2998,7 @@ TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
auto *TInfo =
(TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
- new (TInfo) TypeSourceInfo(T);
+ new (TInfo) TypeSourceInfo(T, DataSize);
return TInfo;
}
@@ -3946,6 +3954,10 @@ ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
return SVE_INT_ELTTY(64, 2, false, 4);
case BuiltinType::SveBool:
return SVE_ELTTY(BoolTy, 16, 1);
+ case BuiltinType::SveBoolx2:
+ return SVE_ELTTY(BoolTy, 16, 2);
+ case BuiltinType::SveBoolx4:
+ return SVE_ELTTY(BoolTy, 16, 4);
case BuiltinType::SveFloat16:
return SVE_ELTTY(HalfTy, 8, 1);
case BuiltinType::SveFloat16x2:
@@ -3994,11 +4006,24 @@ ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
}
}
+/// getExternrefType - Return a WebAssembly externref type, which represents an
+/// opaque reference to a host value.
+QualType ASTContext::getWebAssemblyExternrefType() const {
+ if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) {
+#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
+ if (BuiltinType::Id == BuiltinType::WasmExternRef) \
+ return SingletonId;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+ }
+ llvm_unreachable(
+ "shouldn't try to generate type externref outside WebAssembly target");
+}
+
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
-QualType ASTContext::getScalableVectorType(QualType EltTy,
- unsigned NumElts) const {
+QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
+ unsigned NumFields) const {
if (Target->hasAArch64SVETypes()) {
uint64_t EltTySize = getTypeSize(EltTy);
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
@@ -4016,20 +4041,21 @@ QualType ASTContext::getScalableVectorType(QualType EltTy,
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
if (EltTy->isBooleanType() && NumElts == NumEls) \
return SingletonId;
+#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId)
#include "clang/Basic/AArch64SVEACLETypes.def"
} else if (Target->hasRISCVVTypes()) {
uint64_t EltTySize = getTypeSize(EltTy);
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
IsFP) \
- if (!EltTy->isBooleanType() && \
- ((EltTy->hasIntegerRepresentation() && \
- EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
- (EltTy->hasFloatingRepresentation() && IsFP)) && \
- EltTySize == ElBits && NumElts == NumEls) \
- return SingletonId;
+ if (!EltTy->isBooleanType() && \
+ ((EltTy->hasIntegerRepresentation() && \
+ EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
+ (EltTy->hasFloatingRepresentation() && IsFP)) && \
+ EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \
+ return SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
- if (EltTy->isBooleanType() && NumElts == NumEls) \
- return SingletonId;
+ if (EltTy->isBooleanType() && NumElts == NumEls) \
+ return SingletonId;
#include "clang/Basic/RISCVVTypes.def"
}
return QualType();
@@ -4116,8 +4142,8 @@ QualType ASTContext::getExtVectorType(QualType vecType,
assert(vecType->isBuiltinType() || vecType->isDependentType() ||
(vecType->isBitIntType() &&
// Only support _BitInt elements with byte-sized power of 2 NumBits.
- llvm::isPowerOf2_32(vecType->getAs<BitIntType>()->getNumBits()) &&
- vecType->getAs<BitIntType>()->getNumBits() >= 8));
+ llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) &&
+ vecType->castAs<BitIntType>()->getNumBits() >= 8));
// Check if we've already instantiated a vector of this type.
llvm::FoldingSetNodeID ID;
@@ -5775,12 +5801,19 @@ QualType ASTContext::getAutoTypeInternal(
if (!DeducedType.isNull()) {
Canon = DeducedType.getCanonicalType();
} else if (TypeConstraintConcept) {
- Canon = getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack,
- nullptr, {}, true);
- // Find the insert position again.
- [[maybe_unused]] auto *Nothing =
- AutoTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(!Nothing && "canonical type broken");
+ bool AnyNonCanonArgs = false;
+ ConceptDecl *CanonicalConcept = TypeConstraintConcept->getCanonicalDecl();
+ auto CanonicalConceptArgs = ::getCanonicalTemplateArguments(
+ *this, TypeConstraintArgs, AnyNonCanonArgs);
+ if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) {
+ Canon =
+ getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack,
+ CanonicalConcept, CanonicalConceptArgs, true);
+ // Find the insert position again.
+ [[maybe_unused]] auto *Nothing =
+ AutoTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!Nothing && "canonical type broken");
+ }
}
}
@@ -5813,6 +5846,26 @@ ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
TypeConstraintConcept, TypeConstraintArgs);
}
+QualType ASTContext::getUnconstrainedType(QualType T) const {
+ QualType CanonT = T.getCanonicalType();
+
+ // Remove a type-constraint from a top-level auto or decltype(auto).
+ if (auto *AT = CanonT->getAs<AutoType>()) {
+ if (!AT->isConstrained())
+ return T;
+ return getQualifiedType(getAutoType(QualType(), AT->getKeyword(), false,
+ AT->containsUnexpandedParameterPack()),
+ T.getQualifiers());
+ }
+
+ // FIXME: We only support constrained auto at the top level in the type of a
+ // non-type template parameter at the moment. Once we lift that restriction,
+ // we'll need to recursively build types containing auto here.
+ assert(!CanonT->getContainedAutoType() ||
+ !CanonT->getContainedAutoType()->isConstrained());
+ return T;
+}
+
/// Return the uniqued reference to the deduced template specialization type
/// which has been deduced to the given type, or to the canonical undeduced
/// such type, or the canonical deduced-but-dependent such type.
@@ -6275,8 +6328,8 @@ bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const {
return true;
llvm::FoldingSetNodeID XCEID, YCEID;
- XCE->Profile(XCEID, *this, /*Canonical=*/true);
- YCE->Profile(YCEID, *this, /*Canonical=*/true);
+ XCE->Profile(XCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
+ YCE->Profile(YCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
return XCEID == YCEID;
}
@@ -6479,32 +6532,11 @@ static bool hasSameOverloadableAttrs(const FunctionDecl *A,
return true;
}
-bool ASTContext::FriendsDifferByConstraints(const FunctionDecl *X,
- const FunctionDecl *Y) const {
- // If these aren't friends, then they aren't friends that differ by
- // constraints.
- if (!X->getFriendObjectKind() || !Y->getFriendObjectKind())
- return false;
-
- // If the two functions share lexical declaration context, they are not in
- // separate instantations, and thus in the same scope.
- if (X->getLexicalDeclContext() == Y->getLexicalDeclContext())
- return false;
-
- if (!X->getDescribedFunctionTemplate()) {
- assert(!Y->getDescribedFunctionTemplate() &&
- "How would these be the same if they aren't both templates?");
-
- // If these friends don't have constraints, they aren't constrained, and
- // thus don't fall under temp.friend p9. Else the simple presence of a
- // constraint makes them unique.
- return X->getTrailingRequiresClause();
- }
-
- return X->FriendConstraintRefersToEnclosingTemplate();
-}
-
bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
+ // Caution: this function is called by the AST reader during deserialization,
+ // so it cannot rely on AST invariants being met. Non-trivial accessors
+ // should be avoided, along with any traversal of redeclaration chains.
+
if (X == Y)
return true;
@@ -6580,12 +6612,17 @@ bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
return false;
}
- if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
- FuncY->getTrailingRequiresClause()))
+ // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
+ // not the same entity if they are constrained.
+ if ((FuncX->isMemberLikeConstrainedFriend() ||
+ FuncY->isMemberLikeConstrainedFriend()) &&
+ !FuncX->getLexicalDeclContext()->Equals(
+ FuncY->getLexicalDeclContext())) {
return false;
+ }
- // Constrained friends are different in certain cases, see: [temp.friend]p9.
- if (FriendsDifferByConstraints(FuncX, FuncY))
+ if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
+ FuncY->getTrailingRequiresClause()))
return false;
auto GetTypeAsWritten = [](const FunctionDecl *FD) {
@@ -6620,6 +6657,11 @@ bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
if (const auto *VarX = dyn_cast<VarDecl>(X)) {
const auto *VarY = cast<VarDecl>(Y);
if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
+ // During deserialization, we might compare variables before we load
+ // their types. Assume the types will end up being the same.
+ if (VarX->getType().isNull() || VarY->getType().isNull())
+ return true;
+
if (hasSameType(VarX->getType(), VarY->getType()))
return true;
@@ -6652,13 +6694,8 @@ bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
// ConceptDecl wouldn't be the same if their constraint expression differs.
if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) {
const auto *ConceptY = cast<ConceptDecl>(Y);
- const Expr *XCE = ConceptX->getConstraintExpr();
- const Expr *YCE = ConceptY->getConstraintExpr();
- assert(XCE && YCE && "ConceptDecl without constraint expression?");
- llvm::FoldingSetNodeID XID, YID;
- XCE->Profile(XID, *this, /*Canonical=*/true);
- YCE->Profile(YID, *this, /*Canonical=*/true);
- if (XID != YID)
+ if (!isSameConstraintExpr(ConceptX->getConstraintExpr(),
+ ConceptY->getConstraintExpr()))
return false;
}
@@ -6740,26 +6777,29 @@ ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
case TemplateArgument::Declaration: {
auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
- return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()));
+ return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()),
+ Arg.getIsDefaulted());
}
case TemplateArgument::NullPtr:
return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
- /*isNullPtr*/true);
+ /*isNullPtr*/ true, Arg.getIsDefaulted());
case TemplateArgument::Template:
- return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
+ return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()),
+ Arg.getIsDefaulted());
case TemplateArgument::TemplateExpansion:
- return TemplateArgument(getCanonicalTemplateName(
- Arg.getAsTemplateOrTemplatePattern()),
- Arg.getNumTemplateExpansions());
+ return TemplateArgument(
+ getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()),
+ Arg.getNumTemplateExpansions(), Arg.getIsDefaulted());
case TemplateArgument::Integral:
return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
case TemplateArgument::Type:
- return TemplateArgument(getCanonicalType(Arg.getAsType()));
+ return TemplateArgument(getCanonicalType(Arg.getAsType()),
+ /*isNullPtr*/ false, Arg.getIsDefaulted());
case TemplateArgument::Pack: {
bool AnyNonCanonArgs = false;
@@ -8030,6 +8070,8 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C,
#include "clang/Basic/AArch64SVEACLETypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
{
DiagnosticsEngine &Diags = C->getDiagnostics();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
@@ -8144,7 +8186,7 @@ static bool hasTemplateSpecializationInEncodedString(const Type *T,
if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
return false;
- for (auto B : CXXRD->bases())
+ for (const auto &B : CXXRD->bases())
if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(),
true))
return true;
@@ -9409,7 +9451,9 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
First->getVectorKind() != VectorType::SveFixedLengthDataVector &&
First->getVectorKind() != VectorType::SveFixedLengthPredicateVector &&
Second->getVectorKind() != VectorType::SveFixedLengthDataVector &&
- Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector)
+ Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector &&
+ First->getVectorKind() != VectorType::RVVFixedLengthDataVector &&
+ Second->getVectorKind() != VectorType::RVVFixedLengthDataVector)
return true;
return false;
@@ -9418,16 +9462,18 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
/// getSVETypeSize - Return SVE vector or predicate register size.
static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) {
assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type");
- return Ty->getKind() == BuiltinType::SveBool
- ? (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth()
- : Context.getLangOpts().VScaleMin * 128;
+ if (Ty->getKind() == BuiltinType::SveBool ||
+ Ty->getKind() == BuiltinType::SveCount)
+ return (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth();
+ return Context.getLangOpts().VScaleMin * 128;
}
bool ASTContext::areCompatibleSveTypes(QualType FirstType,
QualType SecondType) {
- assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||
- (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) &&
- "Expected SVE builtin type and vector type!");
+ assert(
+ ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) ||
+ (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) &&
+ "Expected SVE builtin type and vector type!");
auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
if (const auto *BT = FirstType->getAs<BuiltinType>()) {
@@ -9454,9 +9500,10 @@ bool ASTContext::areCompatibleSveTypes(QualType FirstType,
bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
QualType SecondType) {
- assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||
- (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) &&
- "Expected SVE builtin type and vector type!");
+ assert(
+ ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) ||
+ (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) &&
+ "Expected SVE builtin type and vector type!");
auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
const auto *BT = FirstType->getAs<BuiltinType>();
@@ -9504,6 +9551,91 @@ bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
IsLaxCompatible(SecondType, FirstType);
}
+/// getRVVTypeSize - Return RVV vector register size.
+static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) {
+ assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type");
+ auto VScale = Context.getTargetInfo().getVScaleRange(Context.getLangOpts());
+ if (!VScale)
+ return 0;
+
+ ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty);
+
+ uint64_t EltSize = Context.getTypeSize(Info.ElementType);
+ uint64_t MinElts = Info.EC.getKnownMinValue();
+ return VScale->first * MinElts * EltSize;
+}
+
+bool ASTContext::areCompatibleRVVTypes(QualType FirstType,
+ QualType SecondType) {
+ assert(
+ ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
+ (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
+ "Expected RVV builtin type and vector type!");
+
+ auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
+ if (const auto *BT = FirstType->getAs<BuiltinType>()) {
+ if (const auto *VT = SecondType->getAs<VectorType>()) {
+ if (VT->getVectorKind() == VectorType::RVVFixedLengthDataVector ||
+ VT->getVectorKind() == VectorType::GenericVector)
+ return FirstType->isRVVVLSBuiltinType() &&
+ getTypeSize(SecondType) == getRVVTypeSize(*this, BT) &&
+ hasSameType(VT->getElementType(),
+ getBuiltinVectorTypeInfo(BT).ElementType);
+ }
+ }
+ return false;
+ };
+
+ return IsValidCast(FirstType, SecondType) ||
+ IsValidCast(SecondType, FirstType);
+}
+
+bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType,
+ QualType SecondType) {
+ assert(
+ ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
+ (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
+ "Expected RVV builtin type and vector type!");
+
+ auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
+ const auto *BT = FirstType->getAs<BuiltinType>();
+ if (!BT)
+ return false;
+
+ if (!BT->isRVVVLSBuiltinType())
+ return false;
+
+ const auto *VecTy = SecondType->getAs<VectorType>();
+ if (VecTy &&
+ (VecTy->getVectorKind() == VectorType::RVVFixedLengthDataVector ||
+ VecTy->getVectorKind() == VectorType::GenericVector)) {
+ const LangOptions::LaxVectorConversionKind LVCKind =
+ getLangOpts().getLaxVectorConversions();
+
+ // If __riscv_v_fixed_vlen != N do not allow GNU vector lax conversion.
+ if (VecTy->getVectorKind() == VectorType::GenericVector &&
+ getTypeSize(SecondType) != getRVVTypeSize(*this, BT))
+ return false;
+
+ // If -flax-vector-conversions=all is specified, the types are
+ // certainly compatible.
+ if (LVCKind == LangOptions::LaxVectorConversionKind::All)
+ return true;
+
+ // If -flax-vector-conversions=integer is specified, the types are
+ // compatible if the elements are integer types.
+ if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
+ return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
+ FirstType->getRVVEltType(*this)->isIntegerType();
+ }
+
+ return false;
+ };
+
+ return IsLaxCompatible(FirstType, SecondType) ||
+ IsLaxCompatible(SecondType, FirstType);
+}
+
bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const {
while (true) {
// __strong id
@@ -9894,6 +10026,9 @@ static bool sameObjCTypeArgs(ASTContext &ctx,
return false;
ObjCTypeParamList *typeParams = iface->getTypeParamList();
+ if (!typeParams)
+ return false;
+
for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
if (ctx.hasSameType(lhsArgs[i], rhsArgs[i]))
continue;
@@ -11295,6 +11430,17 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
Type = Context.getScalableVectorType(ElementType, NumElements);
break;
}
+ case 'Q': {
+ switch (*Str++) {
+ case 'a': {
+ Type = Context.SveCountTy;
+ break;
+ }
+ default:
+ llvm_unreachable("Unexpected target builtin type");
+ }
+ break;
+ }
case 'V': {
char *End;
unsigned NumElements = strtoul(Str, &End, 10);
@@ -11492,9 +11638,8 @@ static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
// Non-user-provided functions get emitted as weak definitions with every
// use, no matter whether they've been explicitly instantiated etc.
- if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
- if (!MD->isUserProvided())
- return GVA_DiscardableODR;
+ if (!FD->isUserProvided())
+ return GVA_DiscardableODR;
GVALinkage External;
switch (FD->getTemplateSpecializationKind()) {
@@ -11683,7 +11828,7 @@ static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
llvm_unreachable("Invalid Linkage!");
}
-GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
+GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const {
return adjustGVALinkageForExternalDefinitionKind(*this, VD,
adjustGVALinkageForAttributes(*this, VD,
basicGVALinkageForVariable(*this, VD)));
@@ -11775,6 +11920,10 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
!isMSStaticDataMemberInlineDefinition(VD))
return false;
+ // Variables in other module units shouldn't be forced to be emitted.
+ if (VD->isInAnotherModuleUnit())
+ return false;
+
// Variables that can be needed in other TUs are required.
auto Linkage = GetGVALinkageForVariable(VD);
if (!isDiscardableGVALinkage(Linkage))
@@ -11818,7 +11967,7 @@ void ASTContext::forEachMultiversionedFunctionVersion(
FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
- !llvm::is_contained(SeenDecls, CurFD)) {
+ !SeenDecls.contains(CurFD)) {
SeenDecls.insert(CurFD);
Pred(CurFD);
}
@@ -12921,8 +13070,10 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
SmallVector<TemplateArgument, 8> As;
if (CD &&
getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(),
- AY->getTypeConstraintArguments()))
+ AY->getTypeConstraintArguments())) {
CD = nullptr; // The arguments differ, so make it unconstrained.
+ As.clear();
+ }
// Both auto types can't be dependent, otherwise they wouldn't have been
// sugar. This implies they can't contain unexpanded packs either.
@@ -13035,7 +13186,7 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
SmallVector<SplitQualType, 8> R;
while (true) {
- QTotal += T.Quals;
+ QTotal.addConsistentQualifiers(T.Quals);
QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
if (NT == QualType(T.Ty, 0))
break;
@@ -13317,6 +13468,7 @@ std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs(
TV->getFeatures(Feats);
for (auto &Feature : Feats)
if (Target->validateCpuSupports(Feature.str()))
+ // Use '?' to mark features that came from TargetVersion.
ResFeats.push_back("?" + Feature.str());
return ResFeats;
}
@@ -13386,6 +13538,7 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
VersionStr.split(VersionFeatures, "+");
for (auto &VFeature : VersionFeatures) {
VFeature = VFeature.trim();
+ // Use '?' to mark features that came from AArch64 TargetClones.
Features.push_back((StringRef{"?"} + VFeature).str());
}
}
@@ -13424,16 +13577,17 @@ operator<<(const StreamingDiagnostic &DB,
}
bool ASTContext::mayExternalize(const Decl *D) const {
- bool IsStaticVar =
- isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static;
+ bool IsInternalVar =
+ isa<VarDecl>(D) &&
+ basicGVALinkageForVariable(*this, cast<VarDecl>(D)) == GVA_Internal;
bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
!D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
(D->hasAttr<CUDAConstantAttr>() &&
!D->getAttr<CUDAConstantAttr>()->isImplicit());
- // CUDA/HIP: static managed variables need to be externalized since it is
+ // CUDA/HIP: managed variables need to be externalized since it is
// a declaration in IR, therefore cannot have internal linkage. Kernels in
// anonymous name space needs to be externalized to avoid duplicate symbols.
- return (IsStaticVar &&
+ return (IsInternalVar &&
(D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) ||
(D->hasAttr<CUDAGlobalAttr>() &&
basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) ==
diff --git a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
index 08877aa12c02..f96a4fa3c35b 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
@@ -25,7 +25,8 @@
using namespace clang;
// Returns a desugared version of the QualType, and marks ShouldAKA as true
-// whenever we remove significant sugar from the type.
+// whenever we remove significant sugar from the type. Make sure ShouldAKA
+// is initialized before passing it in.
QualType clang::desugarForDiagnostic(ASTContext &Context, QualType QT,
bool &ShouldAKA) {
QualifierCollector QC;
diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
index 6f367ef053d2..39c7a8fa3970 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
@@ -433,6 +433,7 @@ namespace clang {
Decl *From, DeclContext *&ToDC, DeclContext *&ToLexicalDC);
Error ImportImplicitMethods(const CXXRecordDecl *From, CXXRecordDecl *To);
+ Error ImportFieldDeclDefinition(const FieldDecl *From, const FieldDecl *To);
Expected<CXXCastPath> ImportCastPath(CastExpr *E);
Expected<APValue> ImportAPValue(const APValue &FromValue);
@@ -640,6 +641,7 @@ namespace clang {
ExpectedStmt VisitBinaryOperator(BinaryOperator *E);
ExpectedStmt VisitConditionalOperator(ConditionalOperator *E);
ExpectedStmt VisitBinaryConditionalOperator(BinaryConditionalOperator *E);
+ ExpectedStmt VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E);
ExpectedStmt VisitOpaqueValueExpr(OpaqueValueExpr *E);
ExpectedStmt VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E);
ExpectedStmt VisitExpressionTraitExpr(ExpressionTraitExpr *E);
@@ -836,7 +838,8 @@ ASTNodeImporter::import(const TemplateArgument &From) {
ExpectedType ToTypeOrErr = import(From.getAsType());
if (!ToTypeOrErr)
return ToTypeOrErr.takeError();
- return TemplateArgument(*ToTypeOrErr);
+ return TemplateArgument(*ToTypeOrErr, /*isNullPtr*/ false,
+ From.getIsDefaulted());
}
case TemplateArgument::Integral: {
@@ -853,14 +856,15 @@ ASTNodeImporter::import(const TemplateArgument &From) {
ExpectedType ToTypeOrErr = import(From.getParamTypeForDecl());
if (!ToTypeOrErr)
return ToTypeOrErr.takeError();
- return TemplateArgument(*ToOrErr, *ToTypeOrErr);
+ return TemplateArgument(*ToOrErr, *ToTypeOrErr, From.getIsDefaulted());
}
case TemplateArgument::NullPtr: {
ExpectedType ToTypeOrErr = import(From.getNullPtrType());
if (!ToTypeOrErr)
return ToTypeOrErr.takeError();
- return TemplateArgument(*ToTypeOrErr, /*isNullPtr*/true);
+ return TemplateArgument(*ToTypeOrErr, /*isNullPtr*/ true,
+ From.getIsDefaulted());
}
case TemplateArgument::Template: {
@@ -868,7 +872,7 @@ ASTNodeImporter::import(const TemplateArgument &From) {
if (!ToTemplateOrErr)
return ToTemplateOrErr.takeError();
- return TemplateArgument(*ToTemplateOrErr);
+ return TemplateArgument(*ToTemplateOrErr, From.getIsDefaulted());
}
case TemplateArgument::TemplateExpansion: {
@@ -877,13 +881,13 @@ ASTNodeImporter::import(const TemplateArgument &From) {
if (!ToTemplateOrErr)
return ToTemplateOrErr.takeError();
- return TemplateArgument(
- *ToTemplateOrErr, From.getNumTemplateExpansions());
+ return TemplateArgument(*ToTemplateOrErr, From.getNumTemplateExpansions(),
+ From.getIsDefaulted());
}
case TemplateArgument::Expression:
if (ExpectedExpr ToExpr = import(From.getAsExpr()))
- return TemplateArgument(*ToExpr);
+ return TemplateArgument(*ToExpr, From.getIsDefaulted());
else
return ToExpr.takeError();
@@ -974,7 +978,8 @@ ASTNodeImporter::import(const Designator &D) {
if (!ToFieldLocOrErr)
return ToFieldLocOrErr.takeError();
- return Designator(ToFieldName, *ToDotLocOrErr, *ToFieldLocOrErr);
+ return DesignatedInitExpr::Designator::CreateFieldDesignator(
+ ToFieldName, *ToDotLocOrErr, *ToFieldLocOrErr);
}
ExpectedSLoc ToLBracketLocOrErr = import(D.getLBracketLoc());
@@ -986,16 +991,17 @@ ASTNodeImporter::import(const Designator &D) {
return ToRBracketLocOrErr.takeError();
if (D.isArrayDesignator())
- return Designator(D.getFirstExprIndex(),
- *ToLBracketLocOrErr, *ToRBracketLocOrErr);
+ return Designator::CreateArrayDesignator(D.getArrayIndex(),
+ *ToLBracketLocOrErr,
+ *ToRBracketLocOrErr);
ExpectedSLoc ToEllipsisLocOrErr = import(D.getEllipsisLoc());
if (!ToEllipsisLocOrErr)
return ToEllipsisLocOrErr.takeError();
assert(D.isArrayRangeDesignator());
- return Designator(
- D.getFirstExprIndex(), *ToLBracketLocOrErr, *ToEllipsisLocOrErr,
+ return Designator::CreateArrayRangeDesignator(
+ D.getArrayIndex(), *ToLBracketLocOrErr, *ToEllipsisLocOrErr,
*ToRBracketLocOrErr);
}
@@ -1094,6 +1100,10 @@ ExpectedType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
case BuiltinType::Id: \
return Importer.getToContext().SingletonId;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ return Importer.getToContext().SingletonId;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define SHARED_SINGLETON_TYPE(Expansion)
#define BUILTIN_TYPE(Id, SingletonId) \
case BuiltinType::Id: return Importer.getToContext().SingletonId;
@@ -1357,12 +1367,16 @@ ExpectedType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
Expected<TypedefNameDecl *> ToDeclOrErr = import(T->getDecl());
if (!ToDeclOrErr)
return ToDeclOrErr.takeError();
+
+ TypedefNameDecl *ToDecl = *ToDeclOrErr;
+ if (ToDecl->getTypeForDecl())
+ return QualType(ToDecl->getTypeForDecl(), 0);
+
ExpectedType ToUnderlyingTypeOrErr = import(T->desugar());
if (!ToUnderlyingTypeOrErr)
return ToUnderlyingTypeOrErr.takeError();
- return Importer.getToContext().getTypedefType(*ToDeclOrErr,
- *ToUnderlyingTypeOrErr);
+ return Importer.getToContext().getTypedefType(ToDecl, *ToUnderlyingTypeOrErr);
}
ExpectedType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
@@ -1460,20 +1474,11 @@ ExpectedType ASTNodeImporter::VisitInjectedClassNameType(
if (!ToDeclOrErr)
return ToDeclOrErr.takeError();
- ExpectedType ToInjTypeOrErr = import(T->getInjectedSpecializationType());
- if (!ToInjTypeOrErr)
- return ToInjTypeOrErr.takeError();
-
- // FIXME: ASTContext::getInjectedClassNameType is not suitable for AST reading
- // See comments in InjectedClassNameType definition for details
- // return Importer.getToContext().getInjectedClassNameType(D, InjType);
- enum {
- TypeAlignmentInBits = 4,
- TypeAlignment = 1 << TypeAlignmentInBits
- };
-
- return QualType(new (Importer.getToContext(), TypeAlignment)
- InjectedClassNameType(*ToDeclOrErr, *ToInjTypeOrErr), 0);
+ // The InjectedClassNameType is created in VisitRecordDecl when the
+ // T->getDecl() is imported. Here we can return the existing type.
+ const Type *Ty = (*ToDeclOrErr)->getTypeForDecl();
+ assert(Ty && isa<InjectedClassNameType>(Ty));
+ return QualType(Ty, 0);
}
ExpectedType ASTNodeImporter::VisitRecordType(const RecordType *T) {
@@ -1846,52 +1851,33 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
// different values in two distinct translation units.
ChildErrorHandlingStrategy HandleChildErrors(FromDC);
+ auto MightNeedReordering = [](const Decl *D) {
+ return isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<FriendDecl>(D);
+ };
+
+ // Import everything that might need reordering first.
Error ChildErrors = Error::success();
for (auto *From : FromDC->decls()) {
+ if (!MightNeedReordering(From))
+ continue;
+
ExpectedDecl ImportedOrErr = import(From);
// If we are in the process of ImportDefinition(...) for a RecordDecl we
// want to make sure that we are also completing each FieldDecl. There
// are currently cases where this does not happen and this is correctness
// fix since operations such as code generation will expect this to be so.
- if (ImportedOrErr) {
- FieldDecl *FieldFrom = dyn_cast_or_null<FieldDecl>(From);
- Decl *ImportedDecl = *ImportedOrErr;
- FieldDecl *FieldTo = dyn_cast_or_null<FieldDecl>(ImportedDecl);
- if (FieldFrom && FieldTo) {
- RecordDecl *FromRecordDecl = nullptr;
- RecordDecl *ToRecordDecl = nullptr;
- // If we have a field that is an ArrayType we need to check if the array
- // element is a RecordDecl and if so we need to import the definition.
- if (FieldFrom->getType()->isArrayType()) {
- // getBaseElementTypeUnsafe(...) handles multi-dimensonal arrays for us.
- FromRecordDecl = FieldFrom->getType()->getBaseElementTypeUnsafe()->getAsRecordDecl();
- ToRecordDecl = FieldTo->getType()->getBaseElementTypeUnsafe()->getAsRecordDecl();
- }
-
- if (!FromRecordDecl || !ToRecordDecl) {
- const RecordType *RecordFrom =
- FieldFrom->getType()->getAs<RecordType>();
- const RecordType *RecordTo = FieldTo->getType()->getAs<RecordType>();
-
- if (RecordFrom && RecordTo) {
- FromRecordDecl = RecordFrom->getDecl();
- ToRecordDecl = RecordTo->getDecl();
- }
- }
-
- if (FromRecordDecl && ToRecordDecl) {
- if (FromRecordDecl->isCompleteDefinition() &&
- !ToRecordDecl->isCompleteDefinition()) {
- Error Err = ImportDefinition(FromRecordDecl, ToRecordDecl);
- HandleChildErrors.handleChildImportResult(ChildErrors,
- std::move(Err));
- }
- }
- }
- } else {
+ if (!ImportedOrErr) {
HandleChildErrors.handleChildImportResult(ChildErrors,
ImportedOrErr.takeError());
+ continue;
+ }
+ FieldDecl *FieldFrom = dyn_cast_or_null<FieldDecl>(From);
+ Decl *ImportedDecl = *ImportedOrErr;
+ FieldDecl *FieldTo = dyn_cast_or_null<FieldDecl>(ImportedDecl);
+ if (FieldFrom && FieldTo) {
+ Error Err = ImportFieldDeclDefinition(FieldFrom, FieldTo);
+ HandleChildErrors.handleChildImportResult(ChildErrors, std::move(Err));
}
}
@@ -1906,7 +1892,7 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
// During the import of `a` we import first the dependencies in sequence,
// thus the order would be `c`, `b`, `a`. We will get the normal order by
// first removing the already imported members and then adding them in the
- // order as they apper in the "from" context.
+ // order as they appear in the "from" context.
//
// Keeping field order is vital because it determines structure layout.
//
@@ -1918,9 +1904,6 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
// interface in LLDB is implemented by the means of the ASTImporter. However,
// calling an import at this point would result in an uncontrolled import, we
// must avoid that.
- const auto *FromRD = dyn_cast<RecordDecl>(FromDC);
- if (!FromRD)
- return ChildErrors;
auto ToDCOrErr = Importer.ImportContext(FromDC);
if (!ToDCOrErr) {
@@ -1931,26 +1914,70 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
DeclContext *ToDC = *ToDCOrErr;
// Remove all declarations, which may be in wrong order in the
// lexical DeclContext and then add them in the proper order.
- for (auto *D : FromRD->decls()) {
- if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<FriendDecl>(D)) {
- assert(D && "DC contains a null decl");
- Decl *ToD = Importer.GetAlreadyImportedOrNull(D);
+ for (auto *D : FromDC->decls()) {
+ if (!MightNeedReordering(D))
+ continue;
+
+ assert(D && "DC contains a null decl");
+ if (Decl *ToD = Importer.GetAlreadyImportedOrNull(D)) {
// Remove only the decls which we successfully imported.
- if (ToD) {
- assert(ToDC == ToD->getLexicalDeclContext() && ToDC->containsDecl(ToD));
- // Remove the decl from its wrong place in the linked list.
- ToDC->removeDecl(ToD);
- // Add the decl to the end of the linked list.
- // This time it will be at the proper place because the enclosing for
- // loop iterates in the original (good) order of the decls.
- ToDC->addDeclInternal(ToD);
- }
+ assert(ToDC == ToD->getLexicalDeclContext() && ToDC->containsDecl(ToD));
+ // Remove the decl from its wrong place in the linked list.
+ ToDC->removeDecl(ToD);
+ // Add the decl to the end of the linked list.
+ // This time it will be at the proper place because the enclosing for
+ // loop iterates in the original (good) order of the decls.
+ ToDC->addDeclInternal(ToD);
}
}
+ // Import everything else.
+ for (auto *From : FromDC->decls()) {
+ if (MightNeedReordering(From))
+ continue;
+
+ ExpectedDecl ImportedOrErr = import(From);
+ if (!ImportedOrErr)
+ HandleChildErrors.handleChildImportResult(ChildErrors,
+ ImportedOrErr.takeError());
+ }
+
return ChildErrors;
}
+Error ASTNodeImporter::ImportFieldDeclDefinition(const FieldDecl *From,
+ const FieldDecl *To) {
+ RecordDecl *FromRecordDecl = nullptr;
+ RecordDecl *ToRecordDecl = nullptr;
+ // If we have a field that is an ArrayType we need to check if the array
+ // element is a RecordDecl and if so we need to import the definition.
+ QualType FromType = From->getType();
+ QualType ToType = To->getType();
+ if (FromType->isArrayType()) {
+ // getBaseElementTypeUnsafe(...) handles multi-dimensonal arrays for us.
+ FromRecordDecl = FromType->getBaseElementTypeUnsafe()->getAsRecordDecl();
+ ToRecordDecl = ToType->getBaseElementTypeUnsafe()->getAsRecordDecl();
+ }
+
+ if (!FromRecordDecl || !ToRecordDecl) {
+ const RecordType *RecordFrom = FromType->getAs<RecordType>();
+ const RecordType *RecordTo = ToType->getAs<RecordType>();
+
+ if (RecordFrom && RecordTo) {
+ FromRecordDecl = RecordFrom->getDecl();
+ ToRecordDecl = RecordTo->getDecl();
+ }
+ }
+
+ if (FromRecordDecl && ToRecordDecl) {
+ if (FromRecordDecl->isCompleteDefinition() &&
+ !ToRecordDecl->isCompleteDefinition())
+ return ImportDefinition(FromRecordDecl, ToRecordDecl);
+ }
+
+ return Error::success();
+}
+
Error ASTNodeImporter::ImportDeclContext(
Decl *FromD, DeclContext *&ToDC, DeclContext *&ToLexicalDC) {
auto ToDCOrErr = Importer.ImportContext(FromD->getDeclContext());
@@ -2512,6 +2539,22 @@ ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
QualType FromUT = D->getUnderlyingType();
QualType FoundUT = FoundTypedef->getUnderlyingType();
if (Importer.IsStructurallyEquivalent(FromUT, FoundUT)) {
+ // If the underlying declarations are unnamed records these can be
+ // imported as different types. We should create a distinct typedef
+ // node in this case.
+ // If we found an existing underlying type with a record in a
+ // different context (than the imported), this is already reason for
+ // having distinct typedef nodes for these.
+ // Again this can create situation like
+ // 'typedef int T; typedef int T;' but this is hard to avoid without
+ // a rename strategy at import.
+ if (!FromUT.isNull() && !FoundUT.isNull()) {
+ RecordDecl *FromR = FromUT->getAsRecordDecl();
+ RecordDecl *FoundR = FoundUT->getAsRecordDecl();
+ if (FromR && FoundR &&
+ !hasSameVisibilityContextAndLinkage(FoundR, FromR))
+ continue;
+ }
// If the "From" context has a complete underlying type but we
// already have a complete underlying type then return with that.
if (!FromUT->isIncompleteType() && !FoundUT->isIncompleteType())
@@ -2906,13 +2949,12 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
DC, *TInfoOrErr, Loc, DCXX->getLambdaDependencyKind(),
DCXX->isGenericLambda(), DCXX->getLambdaCaptureDefault()))
return D2CXX;
- ExpectedDecl CDeclOrErr = import(DCXX->getLambdaContextDecl());
+ CXXRecordDecl::LambdaNumbering Numbering = DCXX->getLambdaNumbering();
+ ExpectedDecl CDeclOrErr = import(Numbering.ContextDecl);
if (!CDeclOrErr)
return CDeclOrErr.takeError();
- D2CXX->setLambdaMangling(DCXX->getLambdaManglingNumber(), *CDeclOrErr,
- DCXX->hasKnownLambdaInternalLinkage());
- D2CXX->setDeviceLambdaManglingNumber(
- DCXX->getDeviceLambdaManglingNumber());
+ Numbering.ContextDecl = *CDeclOrErr;
+ D2CXX->setLambdaNumbering(Numbering);
} else if (DCXX->isInjectedClassName()) {
// We have to be careful to do a similar dance to the one in
// Sema::ActOnStartCXXMemberDeclarations
@@ -2948,8 +2990,6 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
// InjectedClassNameType (see Sema::CheckClassTemplate). Update the
// previously set type to the correct value here (ToDescribed is not
// available at record create).
- // FIXME: The previous type is cleared but not removed from
- // ASTContext's internal storage.
CXXRecordDecl *Injected = nullptr;
for (NamedDecl *Found : D2CXX->noload_lookup(Name)) {
auto *Record = dyn_cast<CXXRecordDecl>(Found);
@@ -2959,20 +2999,34 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
}
}
// Create an injected type for the whole redecl chain.
+ // The chain may contain an already existing injected type at the start,
+ // if yes this should be reused. We must ensure that only one type
+ // object exists for the injected type (including the injected record
+ // declaration), ASTContext does not check it.
SmallVector<Decl *, 2> Redecls =
getCanonicalForwardRedeclChain(D2CXX);
+ const Type *FrontTy =
+ cast<CXXRecordDecl>(Redecls.front())->getTypeForDecl();
+ QualType InjSpec;
+ if (auto *InjTy = FrontTy->getAs<InjectedClassNameType>())
+ InjSpec = InjTy->getInjectedSpecializationType();
+ else
+ InjSpec = ToDescribed->getInjectedClassNameSpecialization();
for (auto *R : Redecls) {
auto *RI = cast<CXXRecordDecl>(R);
- RI->setTypeForDecl(nullptr);
- // Below we create a new injected type and assign that to the
- // canonical decl, subsequent declarations in the chain will reuse
- // that type.
- Importer.getToContext().getInjectedClassNameType(
- RI, ToDescribed->getInjectedClassNameSpecialization());
+ if (R != Redecls.front() ||
+ !isa<InjectedClassNameType>(RI->getTypeForDecl()))
+ RI->setTypeForDecl(nullptr);
+ // This function tries to get the injected type from getTypeForDecl,
+ // then from the previous declaration if possible. If not, it creates
+ // a new type.
+ Importer.getToContext().getInjectedClassNameType(RI, InjSpec);
}
- // Set the new type for the previous injected decl too.
+ // Set the new type for the injected decl too.
if (Injected) {
Injected->setTypeForDecl(nullptr);
+ // This function will copy the injected type from D2CXX into Injected.
+ // The injected decl does not have a previous decl to copy from.
Importer.getToContext().getTypeDeclType(Injected, D2CXX);
}
}
@@ -3668,7 +3722,7 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
NameInfo, T, TInfo, ToEndLoc, Ctor))
return ToFunction;
cast<CXXDeductionGuideDecl>(ToFunction)
- ->setIsCopyDeductionCandidate(Guide->isCopyDeductionCandidate());
+ ->setDeductionCandidateKind(Guide->getDeductionCandidateKind());
} else {
if (GetImportedOrCreateDecl(
ToFunction, D, Importer.getToContext(), DC, ToInnerLocStart,
@@ -3751,6 +3805,11 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
if (Error Err = ImportTemplateInformation(D, ToFunction))
return std::move(Err);
+ if (auto *FromCXXMethod = dyn_cast<CXXMethodDecl>(D))
+ if (Error Err = ImportOverriddenMethods(cast<CXXMethodDecl>(ToFunction),
+ FromCXXMethod))
+ return std::move(Err);
+
if (D->doesThisDeclarationHaveABody()) {
Error Err = ImportFunctionDeclBody(D, ToFunction);
@@ -3774,11 +3833,6 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
addDeclToContexts(D, ToFunction);
- if (auto *FromCXXMethod = dyn_cast<CXXMethodDecl>(D))
- if (Error Err = ImportOverriddenMethods(cast<CXXMethodDecl>(ToFunction),
- FromCXXMethod))
- return std::move(Err);
-
// Import the rest of the chain. I.e. import all subsequent declarations.
for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt) {
ExpectedDecl ToRedeclOrErr = import(*RedeclIt);
@@ -3886,6 +3940,12 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
D->getInClassInitStyle()))
return ToField;
+ // We need [[no_unqiue_address]] attributes to be added to FieldDecl, before
+ // we add fields in CXXRecordDecl::addedMember, otherwise record will be
+ // marked as having non-zero size.
+ Err = Importer.ImportAttrs(ToField, D);
+ if (Err)
+ return std::move(Err);
ToField->setAccess(D->getAccess());
ToField->setLexicalDeclContext(LexicalDC);
if (ToInitializer)
@@ -6762,8 +6822,8 @@ ExpectedStmt ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) {
return ToHandlerOrErr.takeError();
}
- return CXXTryStmt::Create(
- Importer.getToContext(), *ToTryLocOrErr,*ToTryBlockOrErr, ToHandlers);
+ return CXXTryStmt::Create(Importer.getToContext(), *ToTryLocOrErr,
+ cast<CompoundStmt>(*ToTryBlockOrErr), ToHandlers);
}
ExpectedStmt ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
@@ -6992,7 +7052,14 @@ ExpectedStmt
ASTNodeImporter::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
Error Err = Error::success();
auto ToGenericLoc = importChecked(Err, E->getGenericLoc());
- auto *ToControllingExpr = importChecked(Err, E->getControllingExpr());
+ Expr *ToControllingExpr = nullptr;
+ TypeSourceInfo *ToControllingType = nullptr;
+ if (E->isExprPredicate())
+ ToControllingExpr = importChecked(Err, E->getControllingExpr());
+ else
+ ToControllingType = importChecked(Err, E->getControllingType());
+ assert((ToControllingExpr || ToControllingType) &&
+ "Either the controlling expr or type must be nonnull");
auto ToDefaultLoc = importChecked(Err, E->getDefaultLoc());
auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
if (Err)
@@ -7010,14 +7077,26 @@ ASTNodeImporter::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
const ASTContext &ToCtx = Importer.getToContext();
if (E->isResultDependent()) {
+ if (ToControllingExpr) {
+ return GenericSelectionExpr::Create(
+ ToCtx, ToGenericLoc, ToControllingExpr, llvm::ArrayRef(ToAssocTypes),
+ llvm::ArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc,
+ E->containsUnexpandedParameterPack());
+ }
return GenericSelectionExpr::Create(
- ToCtx, ToGenericLoc, ToControllingExpr, llvm::ArrayRef(ToAssocTypes),
+ ToCtx, ToGenericLoc, ToControllingType, llvm::ArrayRef(ToAssocTypes),
llvm::ArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc,
E->containsUnexpandedParameterPack());
}
+ if (ToControllingExpr) {
+ return GenericSelectionExpr::Create(
+ ToCtx, ToGenericLoc, ToControllingExpr, llvm::ArrayRef(ToAssocTypes),
+ llvm::ArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc,
+ E->containsUnexpandedParameterPack(), E->getResultIndex());
+ }
return GenericSelectionExpr::Create(
- ToCtx, ToGenericLoc, ToControllingExpr, llvm::ArrayRef(ToAssocTypes),
+ ToCtx, ToGenericLoc, ToControllingType, llvm::ArrayRef(ToAssocTypes),
llvm::ArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc,
E->containsUnexpandedParameterPack(), E->getResultIndex());
}
@@ -7032,7 +7111,8 @@ ExpectedStmt ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) {
return std::move(Err);
return PredefinedExpr::Create(Importer.getToContext(), ToBeginLoc, ToType,
- E->getIdentKind(), ToFunctionName);
+ E->getIdentKind(), E->isTransparent(),
+ ToFunctionName);
}
ExpectedStmt ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
@@ -7070,6 +7150,7 @@ ExpectedStmt ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
E->getValueKind(), ToFoundD, ToResInfo, E->isNonOdrUse());
if (E->hadMultipleCandidates())
ToE->setHadMultipleCandidates(true);
+ ToE->setIsImmediateEscalating(E->isImmediateEscalating());
return ToE;
}
@@ -7399,6 +7480,17 @@ ASTNodeImporter::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
E->getObjectKind());
}
+ExpectedStmt ASTNodeImporter::VisitCXXRewrittenBinaryOperator(
+ CXXRewrittenBinaryOperator *E) {
+ Error Err = Error::success();
+ auto ToSemanticForm = importChecked(Err, E->getSemanticForm());
+ if (Err)
+ return std::move(Err);
+
+ return new (Importer.getToContext())
+ CXXRewrittenBinaryOperator(ToSemanticForm, E->isReversed());
+}
+
ExpectedStmt ASTNodeImporter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
Error Err = Error::success();
auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
@@ -7887,12 +7979,14 @@ ExpectedStmt ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
return std::move(Err);
- return CXXConstructExpr::Create(
+ CXXConstructExpr *ToE = CXXConstructExpr::Create(
Importer.getToContext(), ToType, ToLocation, ToConstructor,
E->isElidable(), ToArgs, E->hadMultipleCandidates(),
E->isListInitialization(), E->isStdInitListInitialization(),
E->requiresZeroInitialization(), E->getConstructionKind(),
ToParenOrBraceRange);
+ ToE->setIsImmediateEscalating(E->isImmediateEscalating());
+ return ToE;
}
ExpectedStmt ASTNodeImporter::VisitExprWithCleanups(ExprWithCleanups *E) {
@@ -8111,7 +8205,7 @@ ExpectedStmt ASTNodeImporter::VisitCXXUnresolvedConstructExpr(
return CXXUnresolvedConstructExpr::Create(
Importer.getToContext(), ToType, ToTypeSourceInfo, ToLParenLoc,
- llvm::ArrayRef(ToArgs), ToRParenLoc);
+ llvm::ArrayRef(ToArgs), ToRParenLoc, E->isListInitialization());
}
ExpectedStmt
@@ -8779,8 +8873,7 @@ public:
return;
AttributeCommonInfo ToI(ToAttrName, ToScopeName, ToAttrRange, ToScopeLoc,
- FromAttr->getParsedKind(), FromAttr->getSyntax(),
- FromAttr->getAttributeSpellingListIndex());
+ FromAttr->getParsedKind(), FromAttr->getForm());
// The "SemanticSpelling" is not needed to be passed to the constructor.
// That value is recalculated from the SpellingListIndex if needed.
ToAttr = T::Create(Importer.getToContext(),
@@ -8958,11 +9051,7 @@ Expected<Attr *> ASTImporter::Import(const Attr *FromAttr) {
}
Decl *ASTImporter::GetAlreadyImportedOrNull(const Decl *FromD) const {
- auto Pos = ImportedDecls.find(FromD);
- if (Pos != ImportedDecls.end())
- return Pos->second;
- else
- return nullptr;
+ return ImportedDecls.lookup(FromD);
}
TranslationUnitDecl *ASTImporter::GetFromTU(Decl *ToD) {
@@ -8972,6 +9061,19 @@ TranslationUnitDecl *ASTImporter::GetFromTU(Decl *ToD) {
return FromDPos->second->getTranslationUnitDecl();
}
+Error ASTImporter::ImportAttrs(Decl *ToD, Decl *FromD) {
+ if (!FromD->hasAttrs() || ToD->hasAttrs())
+ return Error::success();
+ for (const Attr *FromAttr : FromD->getAttrs()) {
+ auto ToAttrOrErr = Import(FromAttr);
+ if (ToAttrOrErr)
+ ToD->addAttr(*ToAttrOrErr);
+ else
+ return ToAttrOrErr.takeError();
+ }
+ return Error::success();
+}
+
Expected<Decl *> ASTImporter::Import(Decl *FromD) {
if (!FromD)
return nullptr;
@@ -9106,15 +9208,8 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// Make sure that ImportImpl registered the imported decl.
assert(ImportedDecls.count(FromD) != 0 && "Missing call to MapImported?");
-
- if (FromD->hasAttrs())
- for (const Attr *FromAttr : FromD->getAttrs()) {
- auto ToAttrOrErr = Import(FromAttr);
- if (ToAttrOrErr)
- ToD->addAttr(*ToAttrOrErr);
- else
- return ToAttrOrErr.takeError();
- }
+ if (auto Error = ImportAttrs(ToD, FromD))
+ return std::move(Error);
// Notify subclasses.
Imported(FromD, ToD);
@@ -9376,7 +9471,7 @@ Expected<TemplateName> ASTImporter::Import(TemplateName From) {
switch (From.getKind()) {
case TemplateName::Template:
if (ExpectedDecl ToTemplateOrErr = Import(From.getAsTemplateDecl()))
- return TemplateName(cast<TemplateDecl>(*ToTemplateOrErr));
+ return TemplateName(cast<TemplateDecl>((*ToTemplateOrErr)->getCanonicalDecl()));
else
return ToTemplateOrErr.takeError();
diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp
index b7d17a5e92d0..07d39dcee258 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp
@@ -67,6 +67,8 @@ struct Builder : RecursiveASTVisitor<Builder> {
} else if (isa<TypedefType>(Ty)) {
// We do not put friend typedefs to the lookup table because
// ASTImporter does not organize typedefs into redecl chains.
+ } else if (isa<UsingType>(Ty)) {
+ // Similar to TypedefType, not putting into lookup table.
} else {
llvm_unreachable("Unhandled type of friend class");
}
@@ -85,6 +87,18 @@ struct Builder : RecursiveASTVisitor<Builder> {
ASTImporterLookupTable::ASTImporterLookupTable(TranslationUnitDecl &TU) {
Builder B(*this);
B.TraverseDecl(&TU);
+ // The VaList declaration may be created on demand only or not traversed.
+ // To ensure it is present and found during import, add it to the table now.
+ if (auto *D =
+ dyn_cast_or_null<NamedDecl>(TU.getASTContext().getVaListTagDecl())) {
+ // On some platforms (AArch64) the VaList declaration can be inside a 'std'
+ // namespace. This is handled specially and not visible by AST traversal.
+ // ASTImporter must be able to find this namespace to import the VaList
+ // declaration (and the namespace) correctly.
+ if (auto *Ns = dyn_cast<NamespaceDecl>(D->getDeclContext()))
+ add(&TU, Ns);
+ add(D->getDeclContext(), D);
+ }
}
void ASTImporterLookupTable::add(DeclContext *DC, NamedDecl *ND) {
diff --git a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
index ba7dfc35edf2..f867b6bf84be 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -1453,19 +1453,23 @@ static bool IsRecordContextStructurallyEquivalent(RecordDecl *D1,
return true;
}
+static bool NameIsStructurallyEquivalent(const TagDecl &D1, const TagDecl &D2) {
+ auto GetName = [](const TagDecl &D) -> const IdentifierInfo * {
+ if (const IdentifierInfo *Name = D.getIdentifier())
+ return Name;
+ if (const TypedefNameDecl *TypedefName = D.getTypedefNameForAnonDecl())
+ return TypedefName->getIdentifier();
+ return nullptr;
+ };
+ return IsStructurallyEquivalent(GetName(D1), GetName(D2));
+}
+
/// Determine structural equivalence of two records.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
RecordDecl *D1, RecordDecl *D2) {
-
- // Check for equivalent structure names.
- IdentifierInfo *Name1 = D1->getIdentifier();
- if (!Name1 && D1->getTypedefNameForAnonDecl())
- Name1 = D1->getTypedefNameForAnonDecl()->getIdentifier();
- IdentifierInfo *Name2 = D2->getIdentifier();
- if (!Name2 && D2->getTypedefNameForAnonDecl())
- Name2 = D2->getTypedefNameForAnonDecl()->getIdentifier();
- if (!IsStructurallyEquivalent(Name1, Name2))
+ if (!NameIsStructurallyEquivalent(*D1, *D2)) {
return false;
+ }
if (D1->isUnion() != D2->isUnion()) {
if (Context.Complain) {
@@ -1727,16 +1731,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
/// Determine structural equivalence of two enums.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
EnumDecl *D1, EnumDecl *D2) {
-
- // Check for equivalent enum names.
- IdentifierInfo *Name1 = D1->getIdentifier();
- if (!Name1 && D1->getTypedefNameForAnonDecl())
- Name1 = D1->getTypedefNameForAnonDecl()->getIdentifier();
- IdentifierInfo *Name2 = D2->getIdentifier();
- if (!Name2 && D2->getTypedefNameForAnonDecl())
- Name2 = D2->getTypedefNameForAnonDecl()->getIdentifier();
- if (!IsStructurallyEquivalent(Name1, Name2))
+ if (!NameIsStructurallyEquivalent(*D1, *D2)) {
return false;
+ }
// Compare the definitions of these two enums. If either or both are
// incomplete (i.e. forward declared), we assume that they are equivalent.
@@ -2060,8 +2057,13 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!IsStructurallyEquivalent(D1->getIdentifier(), D2->getIdentifier()))
return false;
- if (!IsStructurallyEquivalent(D1->getClassInterface()->getIdentifier(),
- D2->getClassInterface()->getIdentifier()))
+ const ObjCInterfaceDecl *Intf1 = D1->getClassInterface(),
+ *Intf2 = D2->getClassInterface();
+ if ((!Intf1 || !Intf2) && (Intf1 != Intf2))
+ return false;
+
+ if (Intf1 &&
+ !IsStructurallyEquivalent(Intf1->getIdentifier(), Intf2->getIdentifier()))
return false;
// Compare protocols.
@@ -2080,7 +2082,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
// Compare ivars.
- QualType D2Type = Context.ToCtx.getObjCInterfaceType(D2->getClassInterface());
+ QualType D2Type =
+ Intf2 ? Context.ToCtx.getObjCInterfaceType(Intf2) : QualType();
ObjCCategoryDecl::ivar_iterator Ivar2 = D2->ivar_begin(),
Ivar2End = D2->ivar_end();
for (ObjCCategoryDecl::ivar_iterator Ivar1 = D1->ivar_begin(),
diff --git a/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp b/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
index 64823f77e58a..fb9fe39e7778 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
@@ -56,10 +56,23 @@ const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
{NKI_None, "ObjCProtocolLoc"},
};
+bool ASTNodeKind::isBaseOf(ASTNodeKind Other) const {
+ return isBaseOf(KindId, Other.KindId);
+}
+
bool ASTNodeKind::isBaseOf(ASTNodeKind Other, unsigned *Distance) const {
return isBaseOf(KindId, Other.KindId, Distance);
}
+bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived) {
+ if (Base == NKI_None || Derived == NKI_None)
+ return false;
+ while (Derived != Base && Derived != NKI_None) {
+ Derived = AllKindInfo[Derived].ParentId;
+ }
+ return Derived == Base;
+}
+
bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived,
unsigned *Distance) {
if (Base == NKI_None || Derived == NKI_None) return false;
@@ -96,7 +109,7 @@ ASTNodeKind ASTNodeKind::getMostDerivedType(ASTNodeKind Kind1,
ASTNodeKind ASTNodeKind::getMostDerivedCommonAncestor(ASTNodeKind Kind1,
ASTNodeKind Kind2) {
NodeKindId Parent = Kind1.KindId;
- while (!isBaseOf(Parent, Kind2.KindId, nullptr) && Parent != NKI_None) {
+ while (!isBaseOf(Parent, Kind2.KindId) && Parent != NKI_None) {
Parent = AllKindInfo[Parent].ParentId;
}
return ASTNodeKind(Parent);
diff --git a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
index 0adcca7731d9..f198a9acf848 100644
--- a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
@@ -151,14 +151,16 @@ void OMPDeclareTargetDeclAttr::printPrettyPragma(
std::optional<OMPDeclareTargetDeclAttr *>
OMPDeclareTargetDeclAttr::getActiveAttr(const ValueDecl *VD) {
- if (!VD->hasAttrs())
+ if (llvm::all_of(VD->redecls(), [](const Decl *D) { return !D->hasAttrs(); }))
return std::nullopt;
unsigned Level = 0;
OMPDeclareTargetDeclAttr *FoundAttr = nullptr;
- for (auto *Attr : VD->specific_attrs<OMPDeclareTargetDeclAttr>()) {
- if (Level <= Attr->getLevel()) {
- Level = Attr->getLevel();
- FoundAttr = Attr;
+ for (const Decl *D : VD->redecls()) {
+ for (auto *Attr : D->specific_attrs<OMPDeclareTargetDeclAttr>()) {
+ if (Level <= Attr->getLevel()) {
+ Level = Attr->getLevel();
+ FoundAttr = Attr;
+ }
}
}
if (FoundAttr)
@@ -239,4 +241,33 @@ void OMPDeclareVariantAttr::printPrettyPragma(
}
}
+unsigned AlignedAttr::getAlignment(ASTContext &Ctx) const {
+ assert(!isAlignmentDependent());
+ if (getCachedAlignmentValue())
+ return *getCachedAlignmentValue();
+
+ // Handle alignmentType case.
+ if (!isAlignmentExpr()) {
+ QualType T = getAlignmentType()->getType();
+
+ // C++ [expr.alignof]p3:
+ // When alignof is applied to a reference type, the result is the
+ // alignment of the referenced type.
+ T = T.getNonReferenceType();
+
+ if (T.getQualifiers().hasUnaligned())
+ return Ctx.getCharWidth();
+
+ return Ctx.getTypeAlignInChars(T.getTypePtr()).getQuantity() *
+ Ctx.getCharWidth();
+ }
+
+ // Handle alignmentExpr case.
+ if (alignmentExpr)
+ return alignmentExpr->EvaluateKnownConstInt(Ctx).getZExtValue() *
+ Ctx.getCharWidth();
+
+ return Ctx.getTargetDefaultAlignForAttributeAligned();
+}
+
#include "clang/AST/AttrImpl.inc"
diff --git a/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp
index 1abbe8139ae9..25de2a20a7f3 100644
--- a/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp
@@ -80,7 +80,8 @@ bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base,
const CXXRecordDecl *BaseDecl = Base->getCanonicalDecl();
return lookupInBases(
[BaseDecl](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
- return FindBaseClass(Specifier, Path, BaseDecl);
+ return Specifier->getType()->getAsRecordDecl() &&
+ FindBaseClass(Specifier, Path, BaseDecl);
},
Paths);
}
diff --git a/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp b/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
index b5c783b07d92..09df5401d669 100644
--- a/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
@@ -227,7 +227,7 @@ ExprDependence clang::computeDependence(VAArgExpr *E) {
auto D = toExprDependenceAsWritten(
E->getWrittenTypeInfo()->getType()->getDependence()) |
(E->getSubExpr()->getDependence() & ~ExprDependence::Type);
- return D & ~ExprDependence::Value;
+ return D;
}
ExprDependence clang::computeDependence(NoInitExpr *E) {
@@ -611,9 +611,24 @@ ExprDependence clang::computeDependence(OffsetOfExpr *E) {
return D;
}
+static inline ExprDependence getDependenceInExpr(DeclarationNameInfo Name) {
+ auto D = ExprDependence::None;
+ if (Name.isInstantiationDependent())
+ D |= ExprDependence::Instantiation;
+ if (Name.containsUnexpandedParameterPack())
+ D |= ExprDependence::UnexpandedPack;
+ return D;
+}
+
ExprDependence clang::computeDependence(MemberExpr *E) {
- auto *MemberDecl = E->getMemberDecl();
auto D = E->getBase()->getDependence();
+ D |= getDependenceInExpr(E->getMemberNameInfo());
+
+ if (auto *NNS = E->getQualifier())
+ D |= toExprDependence(NNS->getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
+
+ auto *MemberDecl = E->getMemberDecl();
if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl)) {
DeclContext *DC = MemberDecl->getDeclContext();
// dyn_cast_or_null is used to handle objC variables which do not
@@ -653,7 +668,12 @@ ExprDependence clang::computeDependence(GenericSelectionExpr *E,
: ExprDependence::None;
for (auto *AE : E->getAssocExprs())
D |= AE->getDependence() & ExprDependence::Error;
- D |= E->getControllingExpr()->getDependence() & ExprDependence::Error;
+
+ if (E->isExprPredicate())
+ D |= E->getControllingExpr()->getDependence() & ExprDependence::Error;
+ else
+ D |= toExprDependenceAsWritten(
+ E->getControllingType()->getType()->getDependence());
if (E->isResultDependent())
return D | ExprDependence::TypeValueInstantiation;
@@ -663,7 +683,7 @@ ExprDependence clang::computeDependence(GenericSelectionExpr *E,
ExprDependence clang::computeDependence(DesignatedInitExpr *E) {
auto Deps = E->getInit()->getDependence();
- for (auto D : E->designators()) {
+ for (const auto &D : E->designators()) {
auto DesignatorDeps = ExprDependence::None;
if (D.isArrayDesignator())
DesignatorDeps |= E->getArrayIndex(D)->getDependence();
@@ -718,15 +738,6 @@ ExprDependence clang::computeDependence(CXXPseudoDestructorExpr *E) {
return D;
}
-static inline ExprDependence getDependenceInExpr(DeclarationNameInfo Name) {
- auto D = ExprDependence::None;
- if (Name.isInstantiationDependent())
- D |= ExprDependence::Instantiation;
- if (Name.containsUnexpandedParameterPack())
- D |= ExprDependence::UnexpandedPack;
- return D;
-}
-
ExprDependence
clang::computeDependence(OverloadExpr *E, bool KnownDependent,
bool KnownInstantiationDependent,
@@ -750,7 +761,7 @@ clang::computeDependence(OverloadExpr *E, bool KnownDependent,
// If we have explicit template arguments, check for dependent
// template arguments and whether they contain any unexpanded pack
// expansions.
- for (auto A : E->template_arguments())
+ for (const auto &A : E->template_arguments())
Deps |= toExprDependence(A.getArgument().getDependence());
return Deps;
}
@@ -760,7 +771,7 @@ ExprDependence clang::computeDependence(DependentScopeDeclRefExpr *E) {
D |= getDependenceInExpr(E->getNameInfo());
if (auto *Q = E->getQualifier())
D |= toExprDependence(Q->getDependence());
- for (auto A : E->template_arguments())
+ for (const auto &A : E->template_arguments())
D |= toExprDependence(A.getArgument().getDependence());
return D;
}
@@ -813,7 +824,7 @@ ExprDependence clang::computeDependence(CXXDependentScopeMemberExpr *E) {
if (auto *Q = E->getQualifier())
D |= toExprDependence(Q->getDependence());
D |= getDependenceInExpr(E->getMemberNameInfo());
- for (auto A : E->template_arguments())
+ for (const auto &A : E->template_arguments())
D |= toExprDependence(A.getArgument().getDependence());
return D;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Decl.cpp b/contrib/llvm-project/clang/lib/AST/Decl.cpp
index 6f4c5192b7be..fbc45fb6397f 100644
--- a/contrib/llvm-project/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Decl.cpp
@@ -58,10 +58,10 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -600,33 +600,13 @@ static bool isExportedFromModuleInterfaceUnit(const NamedDecl *D) {
llvm_unreachable("unexpected module ownership kind");
}
-static LinkageInfo getInternalLinkageFor(const NamedDecl *D) {
- // (for the modules ts) Internal linkage declarations within a module
- // interface unit are modeled as "module-internal linkage", which means that
- // they have internal linkage formally but can be indirectly accessed from
- // outside the module via inline functions and templates defined within the
- // module.
- if (isInModulePurview(D) && D->getASTContext().getLangOpts().ModulesTS)
- return LinkageInfo(ModuleInternalLinkage, DefaultVisibility, false);
-
- return LinkageInfo::internal();
+static bool isDeclaredInModuleInterfaceOrPartition(const NamedDecl *D) {
+ if (auto *M = D->getOwningModule())
+ return M->isInterfaceOrPartition();
+ return false;
}
static LinkageInfo getExternalLinkageFor(const NamedDecl *D) {
- // C++ Modules TS [basic.link]/6.8:
- // - A name declared at namespace scope that does not have internal linkage
- // by the previous rules and that is introduced by a non-exported
- // declaration has module linkage.
- //
- // [basic.namespace.general]/p2
- // A namespace is never attached to a named module and never has a name with
- // module linkage.
- if (isInModulePurview(D) &&
- !isExportedFromModuleInterfaceUnit(
- cast<NamedDecl>(D->getCanonicalDecl())) &&
- !isa<NamespaceDecl>(D))
- return LinkageInfo(ModuleLinkage, DefaultVisibility, false);
-
return LinkageInfo::external();
}
@@ -658,21 +638,21 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
// - a variable, variable template, function, or function template
// that is explicitly declared static; or
// (This bullet corresponds to C99 6.2.2p3.)
- return getInternalLinkageFor(D);
+ return LinkageInfo::internal();
}
if (const auto *Var = dyn_cast<VarDecl>(D)) {
// - a non-template variable of non-volatile const-qualified type, unless
// - it is explicitly declared extern, or
- // - it is inline or exported, or
+ // - it is declared in the purview of a module interface unit
+ // (outside the private-module-fragment, if any) or module partition, or
+ // - it is inline, or
// - it was previously declared and the prior declaration did not have
// internal linkage
// (There is no equivalent in C99.)
- if (Context.getLangOpts().CPlusPlus &&
- Var->getType().isConstQualified() &&
- !Var->getType().isVolatileQualified() &&
- !Var->isInline() &&
- !isExportedFromModuleInterfaceUnit(Var) &&
+ if (Context.getLangOpts().CPlusPlus && Var->getType().isConstQualified() &&
+ !Var->getType().isVolatileQualified() && !Var->isInline() &&
+ !isDeclaredInModuleInterfaceOrPartition(Var) &&
!isa<VarTemplateSpecializationDecl>(Var) &&
!Var->getDescribedVarTemplate()) {
const VarDecl *PrevVar = Var->getPreviousDecl();
@@ -682,7 +662,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
if (Var->getStorageClass() != SC_Extern &&
Var->getStorageClass() != SC_PrivateExtern &&
!isSingleLineLanguageLinkage(*Var))
- return getInternalLinkageFor(Var);
+ return LinkageInfo::internal();
}
for (const VarDecl *PrevVar = Var->getPreviousDecl(); PrevVar;
@@ -692,7 +672,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
return getDeclLinkageAndVisibility(PrevVar);
// Explicitly declared static.
if (PrevVar->getStorageClass() == SC_Static)
- return getInternalLinkageFor(Var);
+ return LinkageInfo::internal();
}
} else if (const auto *IFD = dyn_cast<IndirectFieldDecl>(D)) {
// - a data member of an anonymous union.
@@ -716,7 +696,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
// within an unnamed namespace has internal linkage.
if ((!Var || !isFirstInExternCContext(Var)) &&
(!Func || !isFirstInExternCContext(Func)))
- return getInternalLinkageFor(D);
+ return LinkageInfo::internal();
}
// Set up the defaults.
@@ -837,7 +817,8 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
// OpenMP target declare device functions are not callable from the host so
// they should not be exported from the device image. This applies to all
// functions as the host-callable kernel functions are emitted at codegen.
- if (Context.getLangOpts().OpenMP && Context.getLangOpts().OpenMPIsDevice &&
+ if (Context.getLangOpts().OpenMP &&
+ Context.getLangOpts().OpenMPIsTargetDevice &&
((Context.getTargetInfo().getTriple().isAMDGPU() ||
Context.getTargetInfo().getTriple().isNVPTX()) ||
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Function)))
@@ -1025,7 +1006,8 @@ LinkageComputer::getLVForClassMember(const NamedDecl *D,
// they should not be exported from the device image. This applies to all
// functions as the host-callable kernel functions are emitted at codegen.
ASTContext &Context = D->getASTContext();
- if (Context.getLangOpts().OpenMP && Context.getLangOpts().OpenMPIsDevice &&
+ if (Context.getLangOpts().OpenMP &&
+ Context.getLangOpts().OpenMPIsTargetDevice &&
((Context.getTargetInfo().getTriple().isAMDGPU() ||
Context.getTargetInfo().getTriple().isNVPTX()) ||
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(MD)))
@@ -1173,6 +1155,29 @@ Linkage NamedDecl::getLinkageInternal() const {
.getLinkage();
}
+/// Get the linkage from a semantic point of view. Entities in
+/// anonymous namespaces are external (in c++98).
+Linkage NamedDecl::getFormalLinkage() const {
+ Linkage InternalLinkage = getLinkageInternal();
+
+ // C++ [basic.link]p4.8:
+ // - if the declaration of the name is attached to a named module and is not
+ // exported
+ // the name has module linkage;
+ //
+ // [basic.namespace.general]/p2
+ // A namespace is never attached to a named module and never has a name with
+ // module linkage.
+ if (isInModulePurview(this) &&
+ InternalLinkage == ExternalLinkage &&
+ !isExportedFromModuleInterfaceUnit(
+ cast<NamedDecl>(this->getCanonicalDecl())) &&
+ !isa<NamespaceDecl>(this))
+ InternalLinkage = ModuleLinkage;
+
+ return clang::getFormalLinkage(InternalLinkage);
+}
+
LinkageInfo NamedDecl::getLinkageAndVisibility() const {
return LinkageComputer{}.getDeclLinkageAndVisibility(this);
}
@@ -1305,11 +1310,11 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D,
if (const auto *Function = dyn_cast<FunctionDecl>(D)) {
if (Function->isInAnonymousNamespace() &&
!isFirstInExternCContext(Function))
- return getInternalLinkageFor(Function);
+ return LinkageInfo::internal();
// This is a "void f();" which got merged with a file static.
if (Function->getCanonicalDecl()->getStorageClass() == SC_Static)
- return getInternalLinkageFor(Function);
+ return LinkageInfo::internal();
LinkageInfo LV;
if (!hasExplicitVisibilityAlready(computation)) {
@@ -1328,7 +1333,7 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D,
if (const auto *Var = dyn_cast<VarDecl>(D)) {
if (Var->hasExternalStorage()) {
if (Var->isInAnonymousNamespace() && !isFirstInExternCContext(Var))
- return getInternalLinkageFor(Var);
+ return LinkageInfo::internal();
LinkageInfo LV;
if (Var->getStorageClass() == SC_PrivateExtern)
@@ -1408,7 +1413,7 @@ LinkageInfo LinkageComputer::computeLVForDecl(const NamedDecl *D,
bool IgnoreVarTypeLinkage) {
// Internal_linkage attribute overrides other considerations.
if (D->hasAttr<InternalLinkageAttr>())
- return getInternalLinkageFor(D);
+ return LinkageInfo::internal();
// Objective-C: treat all Objective-C declarations as having external
// linkage.
@@ -1466,7 +1471,7 @@ LinkageInfo LinkageComputer::computeLVForDecl(const NamedDecl *D,
if (Record->hasKnownLambdaInternalLinkage() ||
!Record->getLambdaManglingNumber()) {
// This lambda has no mangling number, so it's internal.
- return getInternalLinkageFor(D);
+ return LinkageInfo::internal();
}
return getLVForClosure(
@@ -1525,7 +1530,7 @@ LinkageInfo LinkageComputer::getLVForDecl(const NamedDecl *D,
LVComputationKind computation) {
// Internal_linkage attribute overrides other considerations.
if (D->hasAttr<InternalLinkageAttr>())
- return getInternalLinkageFor(D);
+ return LinkageInfo::internal();
if (computation.IgnoreAllVisibility && D->hasCachedLinkage())
return LinkageInfo(D->getCachedLinkage(), DefaultVisibility, false);
@@ -1593,12 +1598,14 @@ Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const {
return nullptr;
case Module::ModuleInterfaceUnit:
+ case Module::ModuleImplementationUnit:
case Module::ModulePartitionInterface:
case Module::ModulePartitionImplementation:
return M;
case Module::ModuleHeaderUnit:
- case Module::GlobalModuleFragment: {
+ case Module::ExplicitGlobalModuleFragment:
+ case Module::ImplicitGlobalModuleFragment: {
// External linkage declarations in the global module have no owning module
// for linkage purposes. But internal linkage declarations in the global
// module fragment of a particular module are owned by that module for
@@ -1626,8 +1633,8 @@ Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const {
llvm_unreachable("unknown module kind");
}
-void NamedDecl::printName(raw_ostream &OS, const PrintingPolicy&) const {
- OS << Name;
+void NamedDecl::printName(raw_ostream &OS, const PrintingPolicy &Policy) const {
+ Name.print(OS, Policy);
}
void NamedDecl::printName(raw_ostream &OS) const {
@@ -2347,12 +2354,15 @@ Expr *VarDecl::getInit() {
if (auto *S = Init.dyn_cast<Stmt *>())
return cast<Expr>(S);
- return cast_or_null<Expr>(Init.get<EvaluatedStmt *>()->Value);
+ auto *Eval = getEvaluatedStmt();
+ return cast<Expr>(Eval->Value.isOffset()
+ ? Eval->Value.get(getASTContext().getExternalSource())
+ : Eval->Value.get(nullptr));
}
Stmt **VarDecl::getInitAddress() {
if (auto *ES = Init.dyn_cast<EvaluatedStmt *>())
- return &ES->Value;
+ return ES->Value.getAddressOfPointer(getASTContext().getExternalSource());
return Init.getAddrOfPtr1();
}
@@ -2482,14 +2492,14 @@ EvaluatedStmt *VarDecl::getEvaluatedStmt() const {
APValue *VarDecl::evaluateValue() const {
SmallVector<PartialDiagnosticAt, 8> Notes;
- return evaluateValue(Notes);
+ return evaluateValueImpl(Notes, hasConstantInitialization());
}
-APValue *VarDecl::evaluateValue(
- SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+APValue *VarDecl::evaluateValueImpl(SmallVectorImpl<PartialDiagnosticAt> &Notes,
+ bool IsConstantInitialization) const {
EvaluatedStmt *Eval = ensureEvaluatedStmt();
- const auto *Init = cast<Expr>(Eval->Value);
+ const auto *Init = getInit();
assert(!Init->isValueDependent());
// We only produce notes indicating why an initializer is non-constant the
@@ -2505,8 +2515,16 @@ APValue *VarDecl::evaluateValue(
Eval->IsEvaluating = true;
- bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, getASTContext(),
- this, Notes);
+ ASTContext &Ctx = getASTContext();
+ bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, Ctx, this, Notes,
+ IsConstantInitialization);
+
+ // In C++11, this isn't a constant initializer if we produced notes. In that
+ // case, we can't keep the result, because it may only be correct under the
+ // assumption that the initializer is a constant context.
+ if (IsConstantInitialization && Ctx.getLangOpts().CPlusPlus11 &&
+ !Notes.empty())
+ Result = false;
// Ensure the computed APValue is cleaned up later if evaluation succeeded,
// or that it's empty (so that there's nothing to clean up) if evaluation
@@ -2514,7 +2532,7 @@ APValue *VarDecl::evaluateValue(
if (!Result)
Eval->Evaluated = APValue();
else if (Eval->Evaluated.needsCleanup())
- getASTContext().addDestruction(&Eval->Evaluated);
+ Ctx.addDestruction(&Eval->Evaluated);
Eval->IsEvaluating = false;
Eval->WasEvaluated = true;
@@ -2565,10 +2583,17 @@ bool VarDecl::checkForConstantInitialization(
"already evaluated var value before checking for constant init");
assert(getASTContext().getLangOpts().CPlusPlus && "only meaningful in C++");
- assert(!cast<Expr>(Eval->Value)->isValueDependent());
+ assert(!getInit()->isValueDependent());
// Evaluate the initializer to check whether it's a constant expression.
- Eval->HasConstantInitialization = evaluateValue(Notes) && Notes.empty();
+ Eval->HasConstantInitialization =
+ evaluateValueImpl(Notes, true) && Notes.empty();
+
+ // If evaluation as a constant initializer failed, allow re-evaluation as a
+ // non-constant initializer if we later find we want the value.
+ if (!Eval->HasConstantInitialization)
+ Eval->WasEvaluated = false;
+
return Eval->HasConstantInitialization;
}
@@ -2975,13 +3000,15 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC,
FunctionDeclBits.HasImplicitReturnZero = false;
FunctionDeclBits.IsLateTemplateParsed = false;
FunctionDeclBits.ConstexprKind = static_cast<uint64_t>(ConstexprKind);
+ FunctionDeclBits.BodyContainsImmediateEscalatingExpression = false;
FunctionDeclBits.InstantiationIsPending = false;
FunctionDeclBits.UsesSEHTry = false;
FunctionDeclBits.UsesFPIntrin = UsesFPIntrin;
FunctionDeclBits.HasSkippedBody = false;
FunctionDeclBits.WillHaveBody = false;
FunctionDeclBits.IsMultiVersion = false;
- FunctionDeclBits.IsCopyDeductionCandidate = false;
+ FunctionDeclBits.DeductionCandidateKind =
+ static_cast<unsigned char>(DeductionCandidate::Normal);
FunctionDeclBits.HasODRHash = false;
FunctionDeclBits.FriendConstraintRefersToEnclosingTemplate = false;
if (TrailingRequiresClause)
@@ -3140,6 +3167,44 @@ static bool isNamed(const NamedDecl *ND, const char (&Str)[Len]) {
return II && II->isStr(Str);
}
+bool FunctionDecl::isImmediateEscalating() const {
+ // C++23 [expr.const]/p17
+ // An immediate-escalating function is
+ // - the call operator of a lambda that is not declared with the consteval
+ // specifier,
+ if (isLambdaCallOperator(this) && !isConsteval())
+ return true;
+ // - a defaulted special member function that is not declared with the
+ // consteval specifier,
+ if (isDefaulted() && !isConsteval())
+ return true;
+ // - a function that results from the instantiation of a templated entity
+ // defined with the constexpr specifier.
+ TemplatedKind TK = getTemplatedKind();
+ if (TK != TK_NonTemplate && TK != TK_DependentNonTemplate &&
+ isConstexprSpecified())
+ return true;
+ return false;
+}
+
+bool FunctionDecl::isImmediateFunction() const {
+ // C++23 [expr.const]/p18
+ // An immediate function is a function or constructor that is
+ // - declared with the consteval specifier
+ if (isConsteval())
+ return true;
+ // - an immediate-escalating function F whose function body contains an
+ // immediate-escalating expression
+ if (isImmediateEscalating() && BodyContainsImmediateEscalatingExpressions())
+ return true;
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(this);
+ MD && MD->isLambdaStaticInvoker())
+ return MD->getParent()->getLambdaCallOperator()->isImmediateFunction();
+
+ return false;
+}
+
bool FunctionDecl::isMain() const {
const TranslationUnitDecl *tunit =
dyn_cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext());
@@ -3218,7 +3283,7 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction(
return false;
const auto *FPT = getType()->castAs<FunctionProtoType>();
- if (FPT->getNumParams() == 0 || FPT->getNumParams() > 3 || FPT->isVariadic())
+ if (FPT->getNumParams() == 0 || FPT->getNumParams() > 4 || FPT->isVariadic())
return false;
// If this is a single-parameter function, it must be a replaceable global
@@ -3253,8 +3318,8 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction(
*AlignmentParam = Params;
}
- // Finally, if this is not a sized delete, the final parameter can
- // be a 'const std::nothrow_t&'.
+ // If this is not a sized delete, the next parameter can be a
+ // 'const std::nothrow_t&'.
if (!IsSizedDelete && !Ty.isNull() && Ty->isReferenceType()) {
Ty = Ty->getPointeeType();
if (Ty.getCVRQualifiers() != Qualifiers::Const)
@@ -3266,6 +3331,19 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction(
}
}
+ // Finally, recognize the not yet standard versions of new that take a
+ // hot/cold allocation hint (__hot_cold_t). These are currently supported by
+ // tcmalloc (see
+ // https://github.com/google/tcmalloc/blob/220043886d4e2efff7a5702d5172cb8065253664/tcmalloc/malloc_extension.h#L53).
+ if (!IsSizedDelete && !Ty.isNull() && Ty->isEnumeralType()) {
+ QualType T = Ty;
+ while (const auto *TD = T->getAs<TypedefType>())
+ T = TD->getDecl()->getUnderlyingType();
+ IdentifierInfo *II = T->castAs<EnumType>()->getDecl()->getIdentifier();
+ if (II && II->isStr("__hot_cold_t"))
+ Consume();
+ }
+
return Params == FPT->getNumParams();
}
@@ -3274,9 +3352,24 @@ bool FunctionDecl::isInlineBuiltinDeclaration() const {
return false;
const FunctionDecl *Definition;
- return hasBody(Definition) && Definition->isInlineSpecified() &&
- Definition->hasAttr<AlwaysInlineAttr>() &&
- Definition->hasAttr<GNUInlineAttr>();
+ if (!hasBody(Definition))
+ return false;
+
+ if (!Definition->isInlineSpecified() ||
+ !Definition->hasAttr<AlwaysInlineAttr>())
+ return false;
+
+ ASTContext &Context = getASTContext();
+ switch (Context.GetGVALinkageForFunction(Definition)) {
+ case GVA_Internal:
+ case GVA_DiscardableODR:
+ case GVA_StrongODR:
+ return false;
+ case GVA_AvailableExternally:
+ case GVA_StrongExternal:
+ return true;
+ }
+ llvm_unreachable("Unknown GVALinkage");
}
bool FunctionDecl::isDestroyingOperatorDelete() const {
@@ -3341,6 +3434,27 @@ bool FunctionDecl::isNoReturn() const {
return false;
}
+bool FunctionDecl::isMemberLikeConstrainedFriend() const {
+ // C++20 [temp.friend]p9:
+ // A non-template friend declaration with a requires-clause [or]
+ // a friend function template with a constraint that depends on a template
+ // parameter from an enclosing template [...] does not declare the same
+ // function or function template as a declaration in any other scope.
+
+ // If this isn't a friend then it's not a member-like constrained friend.
+ if (!getFriendObjectKind()) {
+ return false;
+ }
+
+ if (!getDescribedFunctionTemplate()) {
+ // If these friends don't have constraints, they aren't constrained, and
+ // thus don't fall under temp.friend p9. Else the simple presence of a
+ // constraint makes them unique.
+ return getTrailingRequiresClause();
+ }
+
+ return FriendConstraintRefersToEnclosingTemplate();
+}
MultiVersionKind FunctionDecl::getMultiVersionKind() const {
if (hasAttr<TargetAttr>())
@@ -3452,7 +3566,7 @@ unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const {
// library, none of the predefined library functions except printf and malloc
// should be treated as a builtin i.e. 0 should be returned for them.
if (Context.getTargetInfo().getTriple().isAMDGCN() &&
- Context.getLangOpts().OpenMPIsDevice &&
+ Context.getLangOpts().OpenMPIsTargetDevice &&
Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID) &&
!(BuiltinID == Builtin::BIprintf || BuiltinID == Builtin::BImalloc))
return 0;
@@ -4297,6 +4411,28 @@ bool FieldDecl::isAnonymousStructOrUnion() const {
return false;
}
+Expr *FieldDecl::getInClassInitializer() const {
+ if (!hasInClassInitializer())
+ return nullptr;
+
+ LazyDeclStmtPtr InitPtr = BitField ? InitAndBitWidth->Init : Init;
+ return cast_or_null<Expr>(
+ InitPtr.isOffset() ? InitPtr.get(getASTContext().getExternalSource())
+ : InitPtr.get(nullptr));
+}
+
+void FieldDecl::setInClassInitializer(Expr *NewInit) {
+ setLazyInClassInitializer(LazyDeclStmtPtr(NewInit));
+}
+
+void FieldDecl::setLazyInClassInitializer(LazyDeclStmtPtr NewInit) {
+ assert(hasInClassInitializer() && !getInClassInitializer());
+ if (BitField)
+ InitAndBitWidth->Init = NewInit;
+ else
+ Init = NewInit;
+}
+
unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const {
assert(isBitField() && "not a bitfield");
return getBitWidth()->EvaluateKnownConstInt(Ctx).getZExtValue();
@@ -4340,6 +4476,10 @@ bool FieldDecl::isZeroSize(const ASTContext &Ctx) const {
return true;
}
+bool FieldDecl::isPotentiallyOverlapping() const {
+ return hasAttr<NoUniqueAddressAttr>() && getType()->getAsCXXRecordDecl();
+}
+
unsigned FieldDecl::getFieldIndex() const {
const FieldDecl *Canonical = getCanonicalDecl();
if (Canonical != this)
@@ -4353,6 +4493,8 @@ unsigned FieldDecl::getFieldIndex() const {
for (auto *Field : RD->fields()) {
Field->getCanonicalDecl()->CachedFieldIndex = Index + 1;
+ assert(Field->getCanonicalDecl()->CachedFieldIndex == Index + 1 &&
+ "overflow in field numbering");
++Index;
}
@@ -4372,11 +4514,11 @@ SourceRange FieldDecl::getSourceRange() const {
void FieldDecl::setCapturedVLAType(const VariableArrayType *VLAType) {
assert((getParent()->isLambda() || getParent()->isCapturedRecord()) &&
"capturing type in non-lambda or captured record.");
- assert(InitStorage.getInt() == ISK_NoInit &&
- InitStorage.getPointer() == nullptr &&
- "bit width, initializer or captured type already set");
- InitStorage.setPointerAndInt(const_cast<VariableArrayType *>(VLAType),
- ISK_CapturedVLAType);
+ assert(StorageKind == ISK_NoInit && !BitField &&
+ "bit-field or field with default member initializer cannot capture "
+ "VLA type");
+ StorageKind = ISK_CapturedVLAType;
+ CapturedVLAType = VLAType;
}
//===----------------------------------------------------------------------===//
@@ -4755,7 +4897,10 @@ bool RecordDecl::isOrContainsUnion() const {
RecordDecl::field_iterator RecordDecl::field_begin() const {
if (hasExternalLexicalStorage() && !hasLoadedFieldsFromExternalStorage())
LoadFieldsFromExternalStorage();
-
+ // This is necessary for correctness for C++ with modules.
+ // FIXME: Come up with a test case that breaks without definition.
+ if (RecordDecl *D = getDefinition(); D && D != this)
+ return D->field_begin();
return field_iterator(decl_iterator(FirstDecl));
}
@@ -4808,8 +4953,13 @@ void RecordDecl::LoadFieldsFromExternalStorage() const {
if (Decls.empty())
return;
- std::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls,
- /*FieldsAlreadyLoaded=*/false);
+ auto [ExternalFirst, ExternalLast] =
+ BuildDeclChain(Decls,
+ /*FieldsAlreadyLoaded=*/false);
+ ExternalLast->NextInContextAndBits.setPointer(FirstDecl);
+ FirstDecl = ExternalFirst;
+ if (!LastDecl)
+ LastDecl = ExternalLast;
}
bool RecordDecl::mayInsertExtraPadding(bool EmitRemark) const {
diff --git a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
index c94fc602155b..834beef49a44 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
@@ -30,6 +30,7 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/Module.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
@@ -1022,6 +1023,28 @@ bool Decl::isInExportDeclContext() const {
return DC && isa<ExportDecl>(DC);
}
+bool Decl::isInAnotherModuleUnit() const {
+ auto *M = getOwningModule();
+
+ if (!M)
+ return false;
+
+ M = M->getTopLevelModule();
+ // FIXME: It is problematic if the header module lives in another module
+ // unit. Consider to fix this by techniques like
+ // ExternalASTSource::hasExternalDefinitions.
+ if (M->isHeaderLikeModule())
+ return false;
+
+ // A global module without parent implies that we're parsing the global
+ // module. So it can't be in another module unit.
+ if (M->isGlobalModule())
+ return false;
+
+ assert(M->isModulePurview() && "New module kind?");
+ return M != getASTContext().getCurrentNamedModule();
+}
+
static Decl::Kind getKind(const Decl *D) { return D->getKind(); }
static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); }
@@ -1048,6 +1071,18 @@ const FunctionType *Decl::getFunctionType(bool BlocksToo) const {
return Ty->getAs<FunctionType>();
}
+bool Decl::isFunctionPointerType() const {
+ QualType Ty;
+ if (const auto *D = dyn_cast<ValueDecl>(this))
+ Ty = D->getType();
+ else if (const auto *D = dyn_cast<TypedefNameDecl>(this))
+ Ty = D->getUnderlyingType();
+ else
+ return false;
+
+ return Ty.getCanonicalType()->isFunctionPointerType();
+}
+
DeclContext *Decl::getNonTransparentDeclContext() {
assert(getDeclContext());
return getDeclContext()->getNonTransparentContext();
diff --git a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
index 3cf355714107..e4572aab5b09 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
@@ -1646,18 +1646,20 @@ Decl *CXXRecordDecl::getLambdaContextDecl() const {
return getLambdaData().ContextDecl.get(Source);
}
-void CXXRecordDecl::setDeviceLambdaManglingNumber(unsigned Num) const {
+void CXXRecordDecl::setLambdaNumbering(LambdaNumbering Numbering) {
assert(isLambda() && "Not a lambda closure type!");
- if (Num)
- getASTContext().DeviceLambdaManglingNumbers[this] = Num;
+ getLambdaData().ManglingNumber = Numbering.ManglingNumber;
+ if (Numbering.DeviceManglingNumber)
+ getASTContext().DeviceLambdaManglingNumbers[this] =
+ Numbering.DeviceManglingNumber;
+ getLambdaData().IndexInContext = Numbering.IndexInContext;
+ getLambdaData().ContextDecl = Numbering.ContextDecl;
+ getLambdaData().HasKnownInternalLinkage = Numbering.HasKnownInternalLinkage;
}
unsigned CXXRecordDecl::getDeviceLambdaManglingNumber() const {
assert(isLambda() && "Not a lambda closure type!");
- auto I = getASTContext().DeviceLambdaManglingNumbers.find(this);
- if (I != getASTContext().DeviceLambdaManglingNumbers.end())
- return I->second;
- return 0;
+ return getASTContext().DeviceLambdaManglingNumbers.lookup(this);
}
static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
@@ -2110,21 +2112,21 @@ ExplicitSpecifier ExplicitSpecifier::getFromDecl(FunctionDecl *Function) {
}
}
-CXXDeductionGuideDecl *
-CXXDeductionGuideDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation StartLoc, ExplicitSpecifier ES,
- const DeclarationNameInfo &NameInfo, QualType T,
- TypeSourceInfo *TInfo, SourceLocation EndLocation,
- CXXConstructorDecl *Ctor) {
+CXXDeductionGuideDecl *CXXDeductionGuideDecl::Create(
+ ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
+ ExplicitSpecifier ES, const DeclarationNameInfo &NameInfo, QualType T,
+ TypeSourceInfo *TInfo, SourceLocation EndLocation, CXXConstructorDecl *Ctor,
+ DeductionCandidate Kind) {
return new (C, DC) CXXDeductionGuideDecl(C, DC, StartLoc, ES, NameInfo, T,
- TInfo, EndLocation, Ctor);
+ TInfo, EndLocation, Ctor, Kind);
}
CXXDeductionGuideDecl *CXXDeductionGuideDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) CXXDeductionGuideDecl(
C, nullptr, SourceLocation(), ExplicitSpecifier(), DeclarationNameInfo(),
- QualType(), nullptr, SourceLocation(), nullptr);
+ QualType(), nullptr, SourceLocation(), nullptr,
+ DeductionCandidate::Normal);
}
RequiresExprBodyDecl *RequiresExprBodyDecl::Create(
@@ -3235,8 +3237,7 @@ void StaticAssertDecl::anchor() {}
StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StaticAssertLoc,
- Expr *AssertExpr,
- StringLiteral *Message,
+ Expr *AssertExpr, Expr *Message,
SourceLocation RParenLoc,
bool Failed) {
return new (C, DC) StaticAssertDecl(DC, StaticAssertLoc, AssertExpr, Message,
diff --git a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
index f3bad9f45b74..a6a9911c8992 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
@@ -622,6 +622,8 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (D->isConstexprSpecified() && !D->isExplicitlyDefaulted())
Out << "constexpr ";
if (D->isConsteval()) Out << "consteval ";
+ else if (D->isImmediateFunction())
+ Out << "immediate ";
ExplicitSpecifier ExplicitSpec = ExplicitSpecifier::getFromDecl(D);
if (ExplicitSpec.isSpecified())
printExplicitSpecifier(ExplicitSpec, Out, Policy, Indentation, Context);
@@ -947,9 +949,9 @@ void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) {
Out << "static_assert(";
D->getAssertExpr()->printPretty(Out, nullptr, Policy, Indentation, "\n",
&Context);
- if (StringLiteral *SL = D->getMessage()) {
+ if (Expr *E = D->getMessage()) {
Out << ", ";
- SL->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context);
+ E->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context);
}
Out << ")";
}
@@ -997,7 +999,10 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
prettyPrintAttributes(D);
if (D->getIdentifier()) {
- Out << ' ' << *D;
+ Out << ' ';
+ if (auto *NNS = D->getQualifier())
+ NNS->print(Out, Policy);
+ Out << *D;
if (auto S = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
ArrayRef<TemplateArgument> Args = S->getTemplateArgs().asArray();
diff --git a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
index 531be708b6fd..fd4c8eafc1c3 100755
--- a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
@@ -127,6 +127,39 @@ TemplateParameterList::Create(const ASTContext &C, SourceLocation TemplateLoc,
RAngleLoc, RequiresClause);
}
+void TemplateParameterList::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &C) const {
+ const Expr *RC = getRequiresClause();
+ ID.AddBoolean(RC != nullptr);
+ if (RC)
+ RC->Profile(ID, C, /*Canonical=*/true);
+ ID.AddInteger(size());
+ for (NamedDecl *D : *this) {
+ if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) {
+ ID.AddInteger(0);
+ ID.AddBoolean(NTTP->isParameterPack());
+ NTTP->getType().getCanonicalType().Profile(ID);
+ ID.AddBoolean(NTTP->hasPlaceholderTypeConstraint());
+ if (const Expr *E = NTTP->getPlaceholderTypeConstraint())
+ E->Profile(ID, C, /*Canonical=*/true);
+ continue;
+ }
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(D)) {
+ ID.AddInteger(1);
+ ID.AddBoolean(TTP->isParameterPack());
+ ID.AddBoolean(TTP->hasTypeConstraint());
+ if (const TypeConstraint *TC = TTP->getTypeConstraint())
+ TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
+ /*Canonical=*/true);
+ continue;
+ }
+ const auto *TTP = cast<TemplateTemplateParmDecl>(D);
+ ID.AddInteger(2);
+ ID.AddBoolean(TTP->isParameterPack());
+ TTP->getTemplateParameters()->Profile(ID, C);
+ }
+}
+
unsigned TemplateParameterList::getMinRequiredArguments() const {
unsigned NumRequiredArgs = 0;
for (const NamedDecl *P : asArray()) {
@@ -517,47 +550,13 @@ ClassTemplateDecl::findPartialSpecialization(
TPL);
}
-static void ProfileTemplateParameterList(ASTContext &C,
- llvm::FoldingSetNodeID &ID, const TemplateParameterList *TPL) {
- const Expr *RC = TPL->getRequiresClause();
- ID.AddBoolean(RC != nullptr);
- if (RC)
- RC->Profile(ID, C, /*Canonical=*/true);
- ID.AddInteger(TPL->size());
- for (NamedDecl *D : *TPL) {
- if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) {
- ID.AddInteger(0);
- ID.AddBoolean(NTTP->isParameterPack());
- NTTP->getType().getCanonicalType().Profile(ID);
- ID.AddBoolean(NTTP->hasPlaceholderTypeConstraint());
- if (const Expr *E = NTTP->getPlaceholderTypeConstraint())
- E->Profile(ID, C, /*Canonical=*/true);
- continue;
- }
- if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(D)) {
- ID.AddInteger(1);
- ID.AddBoolean(TTP->isParameterPack());
- ID.AddBoolean(TTP->hasTypeConstraint());
- if (const TypeConstraint *TC = TTP->getTypeConstraint())
- TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
- /*Canonical=*/true);
- continue;
- }
- const auto *TTP = cast<TemplateTemplateParmDecl>(D);
- ID.AddInteger(2);
- ID.AddBoolean(TTP->isParameterPack());
- ProfileTemplateParameterList(C, ID, TTP->getTemplateParameters());
- }
-}
-
-void
-ClassTemplatePartialSpecializationDecl::Profile(llvm::FoldingSetNodeID &ID,
- ArrayRef<TemplateArgument> TemplateArgs, TemplateParameterList *TPL,
- ASTContext &Context) {
+void ClassTemplatePartialSpecializationDecl::Profile(
+ llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs,
+ TemplateParameterList *TPL, const ASTContext &Context) {
ID.AddInteger(TemplateArgs.size());
for (const TemplateArgument &TemplateArg : TemplateArgs)
TemplateArg.Profile(ID, Context);
- ProfileTemplateParameterList(Context, ID, TPL);
+ TPL->Profile(ID, Context);
}
void ClassTemplateDecl::AddPartialSpecialization(
@@ -1283,14 +1282,13 @@ VarTemplateDecl::findPartialSpecialization(ArrayRef<TemplateArgument> Args,
TPL);
}
-void
-VarTemplatePartialSpecializationDecl::Profile(llvm::FoldingSetNodeID &ID,
- ArrayRef<TemplateArgument> TemplateArgs, TemplateParameterList *TPL,
- ASTContext &Context) {
+void VarTemplatePartialSpecializationDecl::Profile(
+ llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs,
+ TemplateParameterList *TPL, const ASTContext &Context) {
ID.AddInteger(TemplateArgs.size());
for (const TemplateArgument &TemplateArg : TemplateArgs)
TemplateArg.Profile(ID, Context);
- ProfileTemplateParameterList(Context, ID, TPL);
+ TPL->Profile(ID, Context);
}
void VarTemplateDecl::AddPartialSpecialization(
@@ -1402,6 +1400,15 @@ void VarTemplateSpecializationDecl::setTemplateArgsInfo(
ASTTemplateArgumentListInfo::Create(getASTContext(), ArgsInfo);
}
+SourceRange VarTemplateSpecializationDecl::getSourceRange() const {
+ if (isExplicitSpecialization() && !hasInit()) {
+ if (const ASTTemplateArgumentListInfo *Info = getTemplateArgsInfo())
+ return SourceRange(getOuterLocStart(), Info->getRAngleLoc());
+ }
+ return VarDecl::getSourceRange();
+}
+
+
//===----------------------------------------------------------------------===//
// VarTemplatePartialSpecializationDecl Implementation
//===----------------------------------------------------------------------===//
@@ -1447,6 +1454,14 @@ VarTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
return new (C, ID) VarTemplatePartialSpecializationDecl(C);
}
+SourceRange VarTemplatePartialSpecializationDecl::getSourceRange() const {
+ if (isExplicitSpecialization() && !hasInit()) {
+ if (const ASTTemplateArgumentListInfo *Info = getTemplateArgsAsWritten())
+ return SourceRange(getOuterLocStart(), Info->getRAngleLoc());
+ }
+ return VarDecl::getSourceRange();
+}
+
static TemplateParameterList *
createMakeIntegerSeqParameterList(const ASTContext &C, DeclContext *DC) {
// typename T
diff --git a/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp b/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp
index c1219041a466..a3ac5551e0cc 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp
@@ -117,12 +117,12 @@ static void printCXXConstructorDestructorName(QualType ClassType,
Policy.adjustForCPlusPlus();
if (const RecordType *ClassRec = ClassType->getAs<RecordType>()) {
- OS << *ClassRec->getDecl();
+ ClassRec->getDecl()->printName(OS, Policy);
return;
}
if (Policy.SuppressTemplateArgsInCXXConstructors) {
if (auto *InjTy = ClassType->getAs<InjectedClassNameType>()) {
- OS << *InjTy->getDecl();
+ InjTy->getDecl()->printName(OS, Policy);
return;
}
}
@@ -365,7 +365,7 @@ DeclarationNameTable::getCXXSpecialName(DeclarationName::NameKind Kind,
}
DeclarationName
-DeclarationNameTable::getCXXLiteralOperatorName(IdentifierInfo *II) {
+DeclarationNameTable::getCXXLiteralOperatorName(const IdentifierInfo *II) {
llvm::FoldingSetNodeID ID;
ID.AddPointer(II);
diff --git a/contrib/llvm-project/clang/lib/AST/Expr.cpp b/contrib/llvm-project/clang/lib/AST/Expr.cpp
index e45ae68cd5fe..6164a419d213 100644
--- a/contrib/llvm-project/clang/lib/AST/Expr.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Expr.cpp
@@ -492,6 +492,7 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx, ValueDecl *D,
DeclRefExprBits.RefersToEnclosingVariableOrCapture =
RefersToEnclosingVariableOrCapture;
DeclRefExprBits.NonOdrUseReason = NOUR;
+ DeclRefExprBits.IsImmediateEscalating = false;
DeclRefExprBits.Loc = L;
setDependence(computeDependence(this, Ctx));
}
@@ -529,6 +530,7 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc);
}
+ DeclRefExprBits.IsImmediateEscalating = false;
DeclRefExprBits.HadMultipleCandidates = 0;
setDependence(computeDependence(this, Ctx));
}
@@ -663,13 +665,14 @@ std::string SYCLUniqueStableNameExpr::ComputeName(ASTContext &Context,
}
PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
- StringLiteral *SL)
+ bool IsTransparent, StringLiteral *SL)
: Expr(PredefinedExprClass, FNTy, VK_LValue, OK_Ordinary) {
PredefinedExprBits.Kind = IK;
assert((getIdentKind() == IK) &&
"IdentKind do not fit in PredefinedExprBitfields!");
bool HasFunctionName = SL != nullptr;
PredefinedExprBits.HasFunctionName = HasFunctionName;
+ PredefinedExprBits.IsTransparent = IsTransparent;
PredefinedExprBits.Loc = L;
if (HasFunctionName)
setFunctionName(SL);
@@ -683,11 +686,11 @@ PredefinedExpr::PredefinedExpr(EmptyShell Empty, bool HasFunctionName)
PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L,
QualType FNTy, IdentKind IK,
- StringLiteral *SL) {
+ bool IsTransparent, StringLiteral *SL) {
bool HasFunctionName = SL != nullptr;
void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(HasFunctionName),
alignof(PredefinedExpr));
- return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
+ return new (Mem) PredefinedExpr(L, FNTy, IK, IsTransparent, SL);
}
PredefinedExpr *PredefinedExpr::CreateEmpty(const ASTContext &Ctx,
@@ -783,7 +786,21 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
Out << "static ";
}
+ class PrettyCallbacks final : public PrintingCallbacks {
+ public:
+ PrettyCallbacks(const LangOptions &LO) : LO(LO) {}
+ std::string remapPath(StringRef Path) const override {
+ SmallString<128> p(Path);
+ LO.remapPathPrefix(p);
+ return std::string(p);
+ }
+
+ private:
+ const LangOptions &LO;
+ };
PrintingPolicy Policy(Context.getLangOpts());
+ PrettyCallbacks PrettyCB(Context.getLangOpts());
+ Policy.Callbacks = &PrettyCB;
std::string Proto;
llvm::raw_string_ostream POut(Proto);
@@ -1119,6 +1136,8 @@ unsigned StringLiteral::mapCharByteWidth(TargetInfo const &Target,
case UTF32:
CharByteWidth = Target.getChar32Width();
break;
+ case Unevaluated:
+ return sizeof(char); // Host;
}
assert((CharByteWidth & 7) == 0 && "Assumes character size is byte multiple");
CharByteWidth /= 8;
@@ -1132,35 +1151,45 @@ StringLiteral::StringLiteral(const ASTContext &Ctx, StringRef Str,
const SourceLocation *Loc,
unsigned NumConcatenated)
: Expr(StringLiteralClass, Ty, VK_LValue, OK_Ordinary) {
- assert(Ctx.getAsConstantArrayType(Ty) &&
- "StringLiteral must be of constant array type!");
- unsigned CharByteWidth = mapCharByteWidth(Ctx.getTargetInfo(), Kind);
- unsigned ByteLength = Str.size();
- assert((ByteLength % CharByteWidth == 0) &&
- "The size of the data must be a multiple of CharByteWidth!");
-
- // Avoid the expensive division. The compiler should be able to figure it
- // out by itself. However as of clang 7, even with the appropriate
- // llvm_unreachable added just here, it is not able to do so.
- unsigned Length;
- switch (CharByteWidth) {
- case 1:
- Length = ByteLength;
- break;
- case 2:
- Length = ByteLength / 2;
- break;
- case 4:
- Length = ByteLength / 4;
- break;
- default:
- llvm_unreachable("Unsupported character width!");
- }
+
+ unsigned Length = Str.size();
StringLiteralBits.Kind = Kind;
- StringLiteralBits.CharByteWidth = CharByteWidth;
- StringLiteralBits.IsPascal = Pascal;
StringLiteralBits.NumConcatenated = NumConcatenated;
+
+ if (Kind != StringKind::Unevaluated) {
+ assert(Ctx.getAsConstantArrayType(Ty) &&
+ "StringLiteral must be of constant array type!");
+ unsigned CharByteWidth = mapCharByteWidth(Ctx.getTargetInfo(), Kind);
+ unsigned ByteLength = Str.size();
+ assert((ByteLength % CharByteWidth == 0) &&
+ "The size of the data must be a multiple of CharByteWidth!");
+
+ // Avoid the expensive division. The compiler should be able to figure it
+ // out by itself. However as of clang 7, even with the appropriate
+ // llvm_unreachable added just here, it is not able to do so.
+ switch (CharByteWidth) {
+ case 1:
+ Length = ByteLength;
+ break;
+ case 2:
+ Length = ByteLength / 2;
+ break;
+ case 4:
+ Length = ByteLength / 4;
+ break;
+ default:
+ llvm_unreachable("Unsupported character width!");
+ }
+
+ StringLiteralBits.CharByteWidth = CharByteWidth;
+ StringLiteralBits.IsPascal = Pascal;
+ } else {
+ assert(!Pascal && "Can't make an unevaluated Pascal string");
+ StringLiteralBits.CharByteWidth = 1;
+ StringLiteralBits.IsPascal = false;
+ }
+
*getTrailingObjects<unsigned>() = Length;
// Initialize the trailing array of SourceLocation.
@@ -1169,7 +1198,7 @@ StringLiteral::StringLiteral(const ASTContext &Ctx, StringRef Str,
NumConcatenated * sizeof(SourceLocation));
// Initialize the trailing array of char holding the string data.
- std::memcpy(getTrailingObjects<char>(), Str.data(), ByteLength);
+ std::memcpy(getTrailingObjects<char>(), Str.data(), Str.size());
setDependence(ExprDependence::None);
}
@@ -1206,6 +1235,7 @@ StringLiteral *StringLiteral::CreateEmpty(const ASTContext &Ctx,
void StringLiteral::outputString(raw_ostream &OS) const {
switch (getKind()) {
+ case Unevaluated:
case Ordinary:
break; // no prefix.
case Wide: OS << 'L'; break;
@@ -1316,7 +1346,8 @@ StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
const TargetInfo &Target, unsigned *StartToken,
unsigned *StartTokenByteOffset) const {
assert((getKind() == StringLiteral::Ordinary ||
- getKind() == StringLiteral::UTF8) &&
+ getKind() == StringLiteral::UTF8 ||
+ getKind() == StringLiteral::Unevaluated) &&
"Only narrow string literals are currently supported");
// Loop over all of the tokens in this string until we find the one that
@@ -1529,19 +1560,17 @@ unsigned CallExpr::offsetToTrailingObjects(StmtClass SC) {
Decl *Expr::getReferencedDeclOfCallee() {
Expr *CEE = IgnoreParenImpCasts();
- while (SubstNonTypeTemplateParmExpr *NTTP =
- dyn_cast<SubstNonTypeTemplateParmExpr>(CEE)) {
+ while (auto *NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(CEE))
CEE = NTTP->getReplacement()->IgnoreParenImpCasts();
- }
// If we're calling a dereference, look at the pointer instead.
while (true) {
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CEE)) {
+ if (auto *BO = dyn_cast<BinaryOperator>(CEE)) {
if (BO->isPtrMemOp()) {
CEE = BO->getRHS()->IgnoreParenImpCasts();
continue;
}
- } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(CEE)) {
+ } else if (auto *UO = dyn_cast<UnaryOperator>(CEE)) {
if (UO->getOpcode() == UO_Deref || UO->getOpcode() == UO_AddrOf ||
UO->getOpcode() == UO_Plus) {
CEE = UO->getSubExpr()->IgnoreParenImpCasts();
@@ -1551,9 +1580,9 @@ Decl *Expr::getReferencedDeclOfCallee() {
break;
}
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE))
+ if (auto *DRE = dyn_cast<DeclRefExpr>(CEE))
return DRE->getDecl();
- if (MemberExpr *ME = dyn_cast<MemberExpr>(CEE))
+ if (auto *ME = dyn_cast<MemberExpr>(CEE))
return ME->getMemberDecl();
if (auto *BE = dyn_cast<BlockExpr>(CEE))
return BE->getBlockDecl();
@@ -1563,7 +1592,7 @@ Decl *Expr::getReferencedDeclOfCallee() {
/// If this is a call to a builtin, return the builtin ID. If not, return 0.
unsigned CallExpr::getBuiltinCallee() const {
- auto *FDecl = getDirectCallee();
+ const auto *FDecl = getDirectCallee();
return FDecl ? FDecl->getBuiltinID() : 0;
}
@@ -1618,8 +1647,8 @@ const Attr *CallExpr::getUnusedResultAttr(const ASTContext &Ctx) const {
}
SourceLocation CallExpr::getBeginLoc() const {
- if (isa<CXXOperatorCallExpr>(this))
- return cast<CXXOperatorCallExpr>(this)->getBeginLoc();
+ if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(this))
+ return OCE->getBeginLoc();
SourceLocation begin = getCallee()->getBeginLoc();
if (begin.isInvalid() && getNumArgs() > 0 && getArg(0))
@@ -1627,8 +1656,8 @@ SourceLocation CallExpr::getBeginLoc() const {
return begin;
}
SourceLocation CallExpr::getEndLoc() const {
- if (isa<CXXOperatorCallExpr>(this))
- return cast<CXXOperatorCallExpr>(this)->getEndLoc();
+ if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(this))
+ return OCE->getEndLoc();
SourceLocation end = getRParenLoc();
if (end.isInvalid() && getNumArgs() > 0 && getArg(getNumArgs() - 1))
@@ -1730,16 +1759,7 @@ MemberExpr *MemberExpr::Create(
MemberExpr *E = new (Mem) MemberExpr(Base, IsArrow, OperatorLoc, MemberDecl,
NameInfo, T, VK, OK, NOUR);
- // FIXME: remove remaining dependence computation to computeDependence().
- auto Deps = E->getDependence();
if (HasQualOrFound) {
- // FIXME: Wrong. We should be looking at the member declaration we found.
- if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent())
- Deps |= ExprDependence::TypeValueInstantiation;
- else if (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())
- Deps |= ExprDependence::Instantiation;
-
E->MemberExprBits.HasQualifierOrFoundDecl = true;
MemberExprNameQualifier *NQ =
@@ -1751,13 +1771,16 @@ MemberExpr *MemberExpr::Create(
E->MemberExprBits.HasTemplateKWAndArgsInfo =
TemplateArgs || TemplateKWLoc.isValid();
+ // FIXME: remove remaining dependence computation to computeDependence().
+ auto Deps = E->getDependence();
if (TemplateArgs) {
auto TemplateArgDeps = TemplateArgumentDependence::None;
E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc, *TemplateArgs,
E->getTrailingObjects<TemplateArgumentLoc>(), TemplateArgDeps);
- if (TemplateArgDeps & TemplateArgumentDependence::Instantiation)
- Deps |= ExprDependence::Instantiation;
+ for (const TemplateArgumentLoc &ArgLoc : TemplateArgs->arguments()) {
+ Deps |= toExprDependence(ArgLoc.getArgument().getDependence());
+ }
} else if (TemplateKWLoc.isValid()) {
E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc);
@@ -1953,7 +1976,7 @@ const char *CastExpr::getCastKindName(CastKind CK) {
namespace {
// Skip over implicit nodes produced as part of semantic analysis.
// Designed for use with IgnoreExprNodes.
-Expr *ignoreImplicitSemaNodes(Expr *E) {
+static Expr *ignoreImplicitSemaNodes(Expr *E) {
if (auto *Materialize = dyn_cast<MaterializeTemporaryExpr>(E))
return Materialize->getSubExpr();
@@ -2198,12 +2221,13 @@ OverloadedOperatorKind BinaryOperator::getOverloadedOperator(Opcode Opc) {
bool BinaryOperator::isNullPointerArithmeticExtension(ASTContext &Ctx,
Opcode Opc,
- Expr *LHS, Expr *RHS) {
+ const Expr *LHS,
+ const Expr *RHS) {
if (Opc != BO_Add)
return false;
// Check that we have one pointer and one integer operand.
- Expr *PExp;
+ const Expr *PExp;
if (LHS->getType()->isPointerType()) {
if (!RHS->getType()->isIntegerType())
return false;
@@ -2243,8 +2267,12 @@ StringRef SourceLocExpr::getBuiltinStr() const {
switch (getIdentKind()) {
case File:
return "__builtin_FILE";
+ case FileName:
+ return "__builtin_FILE_NAME";
case Function:
return "__builtin_FUNCTION";
+ case FuncSig:
+ return "__builtin_FUNCSIG";
case Line:
return "__builtin_LINE";
case Column:
@@ -2260,14 +2288,17 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx,
SourceLocation Loc;
const DeclContext *Context;
- std::tie(Loc,
- Context) = [&]() -> std::pair<SourceLocation, const DeclContext *> {
- if (auto *DIE = dyn_cast_or_null<CXXDefaultInitExpr>(DefaultExpr))
- return {DIE->getUsedLocation(), DIE->getUsedContext()};
- if (auto *DAE = dyn_cast_or_null<CXXDefaultArgExpr>(DefaultExpr))
- return {DAE->getUsedLocation(), DAE->getUsedContext()};
- return {this->getLocation(), this->getParentContext()};
- }();
+ if (const auto *DIE = dyn_cast_if_present<CXXDefaultInitExpr>(DefaultExpr)) {
+ Loc = DIE->getUsedLocation();
+ Context = DIE->getUsedContext();
+ } else if (const auto *DAE =
+ dyn_cast_if_present<CXXDefaultArgExpr>(DefaultExpr)) {
+ Loc = DAE->getUsedLocation();
+ Context = DAE->getUsedContext();
+ } else {
+ Loc = getLocation();
+ Context = getParentContext();
+ }
PresumedLoc PLoc = Ctx.getSourceManager().getPresumedLoc(
Ctx.getSourceManager().getExpansionRange(Loc).getEnd());
@@ -2281,26 +2312,33 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx,
};
switch (getIdentKind()) {
+ case SourceLocExpr::FileName: {
+ // __builtin_FILE_NAME() is a Clang-specific extension that expands to the
+ // the last part of __builtin_FILE().
+ SmallString<256> FileName;
+ clang::Preprocessor::processPathToFileName(
+ FileName, PLoc, Ctx.getLangOpts(), Ctx.getTargetInfo());
+ return MakeStringLiteral(FileName);
+ }
case SourceLocExpr::File: {
SmallString<256> Path(PLoc.getFilename());
clang::Preprocessor::processPathForFileMacro(Path, Ctx.getLangOpts(),
Ctx.getTargetInfo());
return MakeStringLiteral(Path);
}
- case SourceLocExpr::Function: {
+ case SourceLocExpr::Function:
+ case SourceLocExpr::FuncSig: {
const auto *CurDecl = dyn_cast<Decl>(Context);
+ const auto Kind = getIdentKind() == SourceLocExpr::Function
+ ? PredefinedExpr::Function
+ : PredefinedExpr::FuncSig;
return MakeStringLiteral(
- CurDecl ? PredefinedExpr::ComputeName(PredefinedExpr::Function, CurDecl)
- : std::string(""));
+ CurDecl ? PredefinedExpr::ComputeName(Kind, CurDecl) : std::string(""));
}
case SourceLocExpr::Line:
- case SourceLocExpr::Column: {
- llvm::APSInt IntVal(Ctx.getIntWidth(Ctx.UnsignedIntTy),
- /*isUnsigned=*/true);
- IntVal = getIdentKind() == SourceLocExpr::Line ? PLoc.getLine()
- : PLoc.getColumn();
- return APValue(IntVal);
- }
+ return APValue(Ctx.MakeIntValue(PLoc.getLine(), Ctx.UnsignedIntTy));
+ case SourceLocExpr::Column:
+ return APValue(Ctx.MakeIntValue(PLoc.getColumn(), Ctx.UnsignedIntTy));
case SourceLocExpr::SourceLocStruct: {
// Fill in a std::source_location::__impl structure, by creating an
// artificial file-scoped CompoundLiteralExpr, and returning a pointer to
@@ -2313,7 +2351,7 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx,
// the ImplDecl type is as expected.
APValue Value(APValue::UninitStruct(), 0, 4);
- for (FieldDecl *F : ImplDecl->fields()) {
+ for (const FieldDecl *F : ImplDecl->fields()) {
StringRef Name = F->getName();
if (Name == "_M_file_name") {
SmallString<256> Path(PLoc.getFilename());
@@ -2330,16 +2368,10 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx,
PredefinedExpr::PrettyFunction, CurDecl))
: "");
} else if (Name == "_M_line") {
- QualType Ty = F->getType();
- llvm::APSInt IntVal(Ctx.getIntWidth(Ty),
- Ty->hasUnsignedIntegerRepresentation());
- IntVal = PLoc.getLine();
+ llvm::APSInt IntVal = Ctx.MakeIntValue(PLoc.getLine(), F->getType());
Value.getStructField(F->getFieldIndex()) = APValue(IntVal);
} else if (Name == "_M_column") {
- QualType Ty = F->getType();
- llvm::APSInt IntVal(Ctx.getIntWidth(Ty),
- Ty->hasUnsignedIntegerRepresentation());
- IntVal = PLoc.getColumn();
+ llvm::APSInt IntVal = Ctx.MakeIntValue(PLoc.getColumn(), F->getType());
Value.getStructField(F->getFieldIndex()) = APValue(IntVal);
}
}
@@ -3280,6 +3312,10 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
// kill the second parameter.
if (IsForRef) {
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(this))
+ return EWC->getSubExpr()->isConstantInitializer(Ctx, true, Culprit);
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(this))
+ return MTE->getSubExpr()->isConstantInitializer(Ctx, false, Culprit);
EvalResult Result;
if (EvaluateAsLValue(Result, Ctx) && !Result.HasSideEffects)
return true;
@@ -3417,6 +3453,7 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
CE->getCastKind() == CK_ConstructorConversion ||
CE->getCastKind() == CK_NonAtomicToAtomic ||
CE->getCastKind() == CK_AtomicToNonAtomic ||
+ CE->getCastKind() == CK_NullToPointer ||
CE->getCastKind() == CK_IntToOCLSampler)
return CE->getSubExpr()->isConstantInitializer(Ctx, false, Culprit);
@@ -4310,18 +4347,48 @@ GenericSelectionExpr::GenericSelectionExpr(
AssocExprs[ResultIndex]->getValueKind(),
AssocExprs[ResultIndex]->getObjectKind()),
NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex),
- DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
+ IsExprPredicate(true), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
+ assert(AssocTypes.size() == AssocExprs.size() &&
+ "Must have the same number of association expressions"
+ " and TypeSourceInfo!");
+ assert(ResultIndex < NumAssocs && "ResultIndex is out-of-bounds!");
+
+ GenericSelectionExprBits.GenericLoc = GenericLoc;
+ getTrailingObjects<Stmt *>()[getIndexOfControllingExpression()] =
+ ControllingExpr;
+ std::copy(AssocExprs.begin(), AssocExprs.end(),
+ getTrailingObjects<Stmt *>() + getIndexOfStartOfAssociatedExprs());
+ std::copy(AssocTypes.begin(), AssocTypes.end(),
+ getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes());
+
+ setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
+}
+
+GenericSelectionExpr::GenericSelectionExpr(
+ const ASTContext &, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack,
+ unsigned ResultIndex)
+ : Expr(GenericSelectionExprClass, AssocExprs[ResultIndex]->getType(),
+ AssocExprs[ResultIndex]->getValueKind(),
+ AssocExprs[ResultIndex]->getObjectKind()),
+ NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex),
+ IsExprPredicate(false), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
assert(AssocTypes.size() == AssocExprs.size() &&
"Must have the same number of association expressions"
" and TypeSourceInfo!");
assert(ResultIndex < NumAssocs && "ResultIndex is out-of-bounds!");
GenericSelectionExprBits.GenericLoc = GenericLoc;
- getTrailingObjects<Stmt *>()[ControllingIndex] = ControllingExpr;
+ getTrailingObjects<TypeSourceInfo *>()[getIndexOfControllingType()] =
+ ControllingType;
std::copy(AssocExprs.begin(), AssocExprs.end(),
- getTrailingObjects<Stmt *>() + AssocExprStartIndex);
+ getTrailingObjects<Stmt *>() + getIndexOfStartOfAssociatedExprs());
std::copy(AssocTypes.begin(), AssocTypes.end(),
- getTrailingObjects<TypeSourceInfo *>());
+ getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes());
setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
}
@@ -4334,17 +4401,44 @@ GenericSelectionExpr::GenericSelectionExpr(
: Expr(GenericSelectionExprClass, Context.DependentTy, VK_PRValue,
OK_Ordinary),
NumAssocs(AssocExprs.size()), ResultIndex(ResultDependentIndex),
- DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
+ IsExprPredicate(true), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
assert(AssocTypes.size() == AssocExprs.size() &&
"Must have the same number of association expressions"
" and TypeSourceInfo!");
GenericSelectionExprBits.GenericLoc = GenericLoc;
- getTrailingObjects<Stmt *>()[ControllingIndex] = ControllingExpr;
+ getTrailingObjects<Stmt *>()[getIndexOfControllingExpression()] =
+ ControllingExpr;
std::copy(AssocExprs.begin(), AssocExprs.end(),
- getTrailingObjects<Stmt *>() + AssocExprStartIndex);
+ getTrailingObjects<Stmt *>() + getIndexOfStartOfAssociatedExprs());
std::copy(AssocTypes.begin(), AssocTypes.end(),
- getTrailingObjects<TypeSourceInfo *>());
+ getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes());
+
+ setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
+}
+
+GenericSelectionExpr::GenericSelectionExpr(
+ const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack)
+ : Expr(GenericSelectionExprClass, Context.DependentTy, VK_PRValue,
+ OK_Ordinary),
+ NumAssocs(AssocExprs.size()), ResultIndex(ResultDependentIndex),
+ IsExprPredicate(false), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
+ assert(AssocTypes.size() == AssocExprs.size() &&
+ "Must have the same number of association expressions"
+ " and TypeSourceInfo!");
+
+ GenericSelectionExprBits.GenericLoc = GenericLoc;
+ getTrailingObjects<TypeSourceInfo *>()[getIndexOfControllingType()] =
+ ControllingType;
+ std::copy(AssocExprs.begin(), AssocExprs.end(),
+ getTrailingObjects<Stmt *>() + getIndexOfStartOfAssociatedExprs());
+ std::copy(AssocTypes.begin(), AssocTypes.end(),
+ getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes());
setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
}
@@ -4380,6 +4474,35 @@ GenericSelectionExpr *GenericSelectionExpr::Create(
RParenLoc, ContainsUnexpandedParameterPack);
}
+GenericSelectionExpr *GenericSelectionExpr::Create(
+ const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack,
+ unsigned ResultIndex) {
+ unsigned NumAssocs = AssocExprs.size();
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Stmt *, TypeSourceInfo *>(1 + NumAssocs, NumAssocs),
+ alignof(GenericSelectionExpr));
+ return new (Mem) GenericSelectionExpr(
+ Context, GenericLoc, ControllingType, AssocTypes, AssocExprs, DefaultLoc,
+ RParenLoc, ContainsUnexpandedParameterPack, ResultIndex);
+}
+
+GenericSelectionExpr *GenericSelectionExpr::Create(
+ const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack) {
+ unsigned NumAssocs = AssocExprs.size();
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Stmt *, TypeSourceInfo *>(1 + NumAssocs, NumAssocs),
+ alignof(GenericSelectionExpr));
+ return new (Mem) GenericSelectionExpr(
+ Context, GenericLoc, ControllingType, AssocTypes, AssocExprs, DefaultLoc,
+ RParenLoc, ContainsUnexpandedParameterPack);
+}
+
GenericSelectionExpr *
GenericSelectionExpr::CreateEmpty(const ASTContext &Context,
unsigned NumAssocs) {
@@ -4393,11 +4516,11 @@ GenericSelectionExpr::CreateEmpty(const ASTContext &Context,
// DesignatedInitExpr
//===----------------------------------------------------------------------===//
-IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() const {
- assert(Kind == FieldDesignator && "Only valid on a field designator");
- if (Field.NameOrField & 0x01)
- return reinterpret_cast<IdentifierInfo *>(Field.NameOrField & ~0x01);
- return getField()->getIdentifier();
+const IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() const {
+ assert(isFieldDesignator() && "Only valid on a field designator");
+ if (FieldInfo.NameOrField & 0x01)
+ return reinterpret_cast<IdentifierInfo *>(FieldInfo.NameOrField & ~0x01);
+ return getFieldDecl()->getIdentifier();
}
DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
@@ -4472,14 +4595,11 @@ SourceRange DesignatedInitExpr::getDesignatorsSourceRange() const {
}
SourceLocation DesignatedInitExpr::getBeginLoc() const {
- SourceLocation StartLoc;
auto *DIE = const_cast<DesignatedInitExpr *>(this);
Designator &First = *DIE->getDesignator(0);
if (First.isFieldDesignator())
- StartLoc = GNUSyntax ? First.Field.FieldLoc : First.Field.DotLoc;
- else
- StartLoc = First.ArrayOrRange.LBracketLoc;
- return StartLoc;
+ return GNUSyntax ? First.getFieldLoc() : First.getDotLoc();
+ return First.getLBracketLoc();
}
SourceLocation DesignatedInitExpr::getEndLoc() const {
@@ -4487,20 +4607,18 @@ SourceLocation DesignatedInitExpr::getEndLoc() const {
}
Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) const {
- assert(D.Kind == Designator::ArrayDesignator && "Requires array designator");
- return getSubExpr(D.ArrayOrRange.Index + 1);
+ assert(D.isArrayDesignator() && "Requires array designator");
+ return getSubExpr(D.getArrayIndex() + 1);
}
Expr *DesignatedInitExpr::getArrayRangeStart(const Designator &D) const {
- assert(D.Kind == Designator::ArrayRangeDesignator &&
- "Requires array range designator");
- return getSubExpr(D.ArrayOrRange.Index + 1);
+ assert(D.isArrayRangeDesignator() && "Requires array range designator");
+ return getSubExpr(D.getArrayIndex() + 1);
}
Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator &D) const {
- assert(D.Kind == Designator::ArrayRangeDesignator &&
- "Requires array range designator");
- return getSubExpr(D.ArrayOrRange.Index + 2);
+ assert(D.isArrayRangeDesignator() && "Requires array range designator");
+ return getSubExpr(D.getArrayIndex() + 2);
}
/// Replaces the designator at index @p Idx with the series
@@ -4846,6 +4964,7 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__hip_atomic_exchange:
case AO__hip_atomic_fetch_add:
+ case AO__hip_atomic_fetch_sub:
case AO__hip_atomic_fetch_and:
case AO__hip_atomic_fetch_or:
case AO__hip_atomic_fetch_xor:
diff --git a/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp b/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
index 2a9e33595013..72feb206cc3e 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
@@ -276,6 +276,8 @@ CXXNewExpr *CXXNewExpr::CreateEmpty(const ASTContext &Ctx, bool IsArray,
}
bool CXXNewExpr::shouldNullCheckAllocation() const {
+ if (getOperatorNew()->getLangOpts().CheckNew)
+ return true;
return !getOperatorNew()->hasAttr<ReturnsNonNullAttr>() &&
getOperatorNew()
->getType()
@@ -765,29 +767,35 @@ CXXDynamicCastExpr *CXXDynamicCastExpr::CreateEmpty(const ASTContext &C,
/// struct C { };
///
/// C *f(B* b) { return dynamic_cast<C*>(b); }
-bool CXXDynamicCastExpr::isAlwaysNull() const
-{
+bool CXXDynamicCastExpr::isAlwaysNull() const {
+ if (isValueDependent() || getCastKind() != CK_Dynamic)
+ return false;
+
QualType SrcType = getSubExpr()->getType();
QualType DestType = getType();
- if (const auto *SrcPTy = SrcType->getAs<PointerType>()) {
- SrcType = SrcPTy->getPointeeType();
- DestType = DestType->castAs<PointerType>()->getPointeeType();
- }
-
- if (DestType->isVoidType())
+ if (DestType->isVoidPointerType())
return false;
- const auto *SrcRD =
- cast<CXXRecordDecl>(SrcType->castAs<RecordType>()->getDecl());
+ if (DestType->isPointerType()) {
+ SrcType = SrcType->getPointeeType();
+ DestType = DestType->getPointeeType();
+ }
- if (!SrcRD->hasAttr<FinalAttr>())
- return false;
+ const auto *SrcRD = SrcType->getAsCXXRecordDecl();
+ const auto *DestRD = DestType->getAsCXXRecordDecl();
+ assert(SrcRD && DestRD);
- const auto *DestRD =
- cast<CXXRecordDecl>(DestType->castAs<RecordType>()->getDecl());
+ if (SrcRD->isEffectivelyFinal()) {
+ assert(!SrcRD->isDerivedFrom(DestRD) &&
+ "upcasts should not use CK_Dynamic");
+ return true;
+ }
+
+ if (DestRD->isEffectivelyFinal() && !DestRD->isDerivedFrom(SrcRD))
+ return true;
- return !DestRD->isDerivedFrom(SrcRD);
+ return false;
}
CXXReinterpretCastExpr *
@@ -1136,6 +1144,7 @@ CXXConstructExpr::CXXConstructExpr(
CXXConstructExprBits.StdInitListInitialization = StdInitListInitialization;
CXXConstructExprBits.ZeroInitialization = ZeroInitialization;
CXXConstructExprBits.ConstructionKind = ConstructKind;
+ CXXConstructExprBits.IsImmediateEscalating = false;
CXXConstructExprBits.Loc = Loc;
Stmt **TrailingArgs = getTrailingArgs();
@@ -1392,17 +1401,16 @@ ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C,
return new (buffer) ExprWithCleanups(empty, numObjects);
}
-CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(QualType T,
- TypeSourceInfo *TSI,
- SourceLocation LParenLoc,
- ArrayRef<Expr *> Args,
- SourceLocation RParenLoc)
+CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(
+ QualType T, TypeSourceInfo *TSI, SourceLocation LParenLoc,
+ ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool IsListInit)
: Expr(CXXUnresolvedConstructExprClass, T,
(TSI->getType()->isLValueReferenceType() ? VK_LValue
: TSI->getType()->isRValueReferenceType() ? VK_XValue
: VK_PRValue),
OK_Ordinary),
- TSI(TSI), LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
+ TypeAndInitForm(TSI, IsListInit), LParenLoc(LParenLoc),
+ RParenLoc(RParenLoc) {
CXXUnresolvedConstructExprBits.NumArgs = Args.size();
auto **StoredArgs = getTrailingObjects<Expr *>();
for (unsigned I = 0; I != Args.size(); ++I)
@@ -1411,11 +1419,12 @@ CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(QualType T,
}
CXXUnresolvedConstructExpr *CXXUnresolvedConstructExpr::Create(
- const ASTContext &Context, QualType T, TypeSourceInfo *TSI, SourceLocation LParenLoc,
- ArrayRef<Expr *> Args, SourceLocation RParenLoc) {
+ const ASTContext &Context, QualType T, TypeSourceInfo *TSI,
+ SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc,
+ bool IsListInit) {
void *Mem = Context.Allocate(totalSizeToAlloc<Expr *>(Args.size()));
- return new (Mem)
- CXXUnresolvedConstructExpr(T, TSI, LParenLoc, Args, RParenLoc);
+ return new (Mem) CXXUnresolvedConstructExpr(T, TSI, LParenLoc, Args,
+ RParenLoc, IsListInit);
}
CXXUnresolvedConstructExpr *
@@ -1426,7 +1435,7 @@ CXXUnresolvedConstructExpr::CreateEmpty(const ASTContext &Context,
}
SourceLocation CXXUnresolvedConstructExpr::getBeginLoc() const {
- return TSI->getTypeLoc().getBeginLoc();
+ return TypeAndInitForm.getPointer()->getTypeLoc().getBeginLoc();
}
CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp b/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp
index cdc13c2d3969..5698b78d193e 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp
@@ -59,15 +59,6 @@ ConceptSpecializationExpr::ConceptSpecializationExpr(EmptyShell Empty)
: Expr(ConceptSpecializationExprClass, Empty) {}
ConceptSpecializationExpr *ConceptSpecializationExpr::Create(
- const ASTContext &C, ConceptDecl *NamedConcept,
- ImplicitConceptSpecializationDecl *SpecDecl,
- const ConstraintSatisfaction *Satisfaction, bool Dependent,
- bool ContainsUnexpandedParameterPack) {
- return Create(C, NamedConcept, /*ArgsAsWritten*/ nullptr, SpecDecl, Satisfaction,
- Dependent, ContainsUnexpandedParameterPack);
-}
-
-ConceptSpecializationExpr *ConceptSpecializationExpr::Create(
const ASTContext &C, NestedNameSpecifierLoc NNS,
SourceLocation TemplateKWLoc, DeclarationNameInfo ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
@@ -123,6 +114,19 @@ concepts::ExprRequirement::ReturnTypeRequirement::getTypeConstraint() const {
->getTypeConstraint();
}
+// Search through the requirements, and see if any have a RecoveryExpr in it,
+// which means this RequiresExpr ALSO needs to be invalid.
+static bool RequirementContainsError(concepts::Requirement *R) {
+ if (auto *ExprReq = dyn_cast<concepts::ExprRequirement>(R))
+ return ExprReq->getExpr() && ExprReq->getExpr()->containsErrors();
+
+ if (auto *NestedReq = dyn_cast<concepts::NestedRequirement>(R))
+ return !NestedReq->hasInvalidConstraint() &&
+ NestedReq->getConstraintExpr() &&
+ NestedReq->getConstraintExpr()->containsErrors();
+ return false;
+}
+
RequiresExpr::RequiresExpr(ASTContext &C, SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
@@ -149,6 +153,9 @@ RequiresExpr::RequiresExpr(ASTContext &C, SourceLocation RequiresKWLoc,
if (!RequiresExprBits.IsSatisfied)
break;
}
+
+ if (RequirementContainsError(R))
+ setDependence(getDependence() | ExprDependence::Error);
}
std::copy(LocalParameters.begin(), LocalParameters.end(),
getTrailingObjects<ParmVarDecl *>());
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
index db6c07d4ab7f..f1c842e26199 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
@@ -50,9 +50,11 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/TimeProfiler.h"
@@ -530,6 +532,9 @@ namespace {
/// This - The binding for the this pointer in this call, if any.
const LValue *This;
+ /// CallExpr - The syntactical structure of member function calls
+ const Expr *CallExpr;
+
/// Information on how to find the arguments to this call. Our arguments
/// are stored in our parent's CallStackFrame, using the ParmVarDecl* as a
/// key and this value as the version.
@@ -579,11 +584,11 @@ namespace {
/// LambdaCaptureFields - Mapping from captured variables/this to
/// corresponding data members in the closure class.
llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
- FieldDecl *LambdaThisCaptureField;
+ FieldDecl *LambdaThisCaptureField = nullptr;
CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
- CallRef Arguments);
+ const Expr *CallExpr, CallRef Arguments);
~CallStackFrame();
// Return the temporary for Key whose version number is Version.
@@ -622,7 +627,7 @@ namespace {
/// Allocate storage for a parameter of a function call made in this frame.
APValue &createParam(CallRef Args, const ParmVarDecl *PVD, LValue &LV);
- void describe(llvm::raw_ostream &OS) override;
+ void describe(llvm::raw_ostream &OS) const override;
Frame *getCaller() const override { return Caller; }
SourceLocation getCallLocation() const override { return CallLoc; }
@@ -977,7 +982,9 @@ namespace {
CallStackDepth(0), NextCallIndex(1),
StepsLeft(C.getLangOpts().ConstexprStepLimit),
EnableNewConstInterp(C.getLangOpts().EnableNewConstInterp),
- BottomFrame(*this, SourceLocation(), nullptr, nullptr, CallRef()),
+ BottomFrame(*this, SourceLocation(), /*Callee=*/nullptr,
+ /*This=*/nullptr,
+ /*CallExpr=*/nullptr, CallRef()),
EvaluatingDecl((const ValueDecl *)nullptr),
EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
HasFoldFailureDiagnostic(false), EvalMode(Mode) {}
@@ -1294,7 +1301,7 @@ namespace {
class SpeculativeEvaluationRAII {
EvalInfo *Info = nullptr;
Expr::EvalStatus OldStatus;
- unsigned OldSpeculativeEvaluationDepth;
+ unsigned OldSpeculativeEvaluationDepth = 0;
void moveFromAndCancel(SpeculativeEvaluationRAII &&Other) {
Info = Other.Info;
@@ -1435,9 +1442,10 @@ void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
- CallRef Call)
+ const Expr *CallExpr, CallRef Call)
: Info(Info), Caller(Info.CurrentCall), Callee(Callee), This(This),
- Arguments(Call), CallLoc(CallLoc), Index(Info.NextCallIndex++) {
+ CallExpr(CallExpr), Arguments(Call), CallLoc(CallLoc),
+ Index(Info.NextCallIndex++) {
Info.CurrentCall = this;
++Info.CallStackDepth;
}
@@ -1907,7 +1915,7 @@ APValue *EvalInfo::createHeapAlloc(const Expr *E, QualType T, LValue &LV) {
}
/// Produce a string describing the given constexpr call.
-void CallStackFrame::describe(raw_ostream &Out) {
+void CallStackFrame::describe(raw_ostream &Out) const {
unsigned ArgIndex = 0;
bool IsMemberCall = isa<CXXMethodDecl>(Callee) &&
!isa<CXXConstructorDecl>(Callee) &&
@@ -1917,12 +1925,29 @@ void CallStackFrame::describe(raw_ostream &Out) {
Out << *Callee << '(';
if (This && IsMemberCall) {
- APValue Val;
- This->moveInto(Val);
- Val.printPretty(Out, Info.Ctx,
- This->Designator.MostDerivedType);
- // FIXME: Add parens around Val if needed.
- Out << "->" << *Callee << '(';
+ if (const auto *MCE = dyn_cast_if_present<CXXMemberCallExpr>(CallExpr)) {
+ const Expr *Object = MCE->getImplicitObjectArgument();
+ Object->printPretty(Out, /*Helper=*/nullptr, Info.Ctx.getPrintingPolicy(),
+ /*Indentation=*/0);
+ if (Object->getType()->isPointerType())
+ Out << "->";
+ else
+ Out << ".";
+ } else if (const auto *OCE =
+ dyn_cast_if_present<CXXOperatorCallExpr>(CallExpr)) {
+ OCE->getArg(0)->printPretty(Out, /*Helper=*/nullptr,
+ Info.Ctx.getPrintingPolicy(),
+ /*Indentation=*/0);
+ Out << ".";
+ } else {
+ APValue Val;
+ This->moveInto(Val);
+ Val.printPretty(
+ Out, Info.Ctx,
+ Info.Ctx.getLValueReferenceType(This->Designator.MostDerivedType));
+ Out << ".";
+ }
+ Out << *Callee << '(';
IsMemberCall = false;
}
@@ -1971,7 +1996,8 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
// ... a null pointer value, or a prvalue core constant expression of type
// std::nullptr_t.
- if (!B) return true;
+ if (!B)
+ return true;
if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
// ... the address of an object with static storage duration,
@@ -2102,6 +2128,7 @@ static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
Info.Note((*Alloc)->AllocExpr->getExprLoc(),
diag::note_constexpr_dynamic_alloc_here);
}
+
// We have no information to show for a typeid(T) object.
}
@@ -2119,7 +2146,7 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
EvalInfo &Info, SourceLocation DiagLoc,
QualType Type, const APValue &Value,
ConstantExprKind Kind,
- SourceLocation SubobjectLoc,
+ const FieldDecl *SubobjectDecl,
CheckedTemporaries &CheckedTemps);
/// Check that this reference or pointer core constant expression is a valid
@@ -2163,13 +2190,12 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
}
}
- if (auto *FD = dyn_cast_or_null<FunctionDecl>(BaseVD)) {
- if (FD->isConsteval()) {
- Info.FFDiag(Loc, diag::note_consteval_address_accessible)
- << !Type->isAnyPointerType();
- Info.Note(FD->getLocation(), diag::note_declared_at);
- return false;
- }
+ if (auto *FD = dyn_cast_or_null<FunctionDecl>(BaseVD);
+ FD && FD->isImmediateFunction()) {
+ Info.FFDiag(Loc, diag::note_consteval_address_accessible)
+ << !Type->isAnyPointerType();
+ Info.Note(FD->getLocation(), diag::note_declared_at);
+ return false;
}
// Check that the object is a global. Note that the fake 'this' object we
@@ -2266,8 +2292,8 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
APValue *V = MTE->getOrCreateValue(false);
assert(V && "evasluation result refers to uninitialised temporary");
if (!CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression,
- Info, MTE->getExprLoc(), TempType, *V,
- Kind, SourceLocation(), CheckedTemps))
+ Info, MTE->getExprLoc(), TempType, *V, Kind,
+ /*SubobjectDecl=*/nullptr, CheckedTemps))
return false;
}
}
@@ -2305,7 +2331,7 @@ static bool CheckMemberPointerConstantExpression(EvalInfo &Info,
const auto *FD = dyn_cast_or_null<CXXMethodDecl>(Member);
if (!FD)
return true;
- if (FD->isConsteval()) {
+ if (FD->isImmediateFunction()) {
Info.FFDiag(Loc, diag::note_consteval_address_accessible) << /*pointer*/ 0;
Info.Note(FD->getLocation(), diag::note_declared_at);
return false;
@@ -2350,13 +2376,13 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
EvalInfo &Info, SourceLocation DiagLoc,
QualType Type, const APValue &Value,
ConstantExprKind Kind,
- SourceLocation SubobjectLoc,
+ const FieldDecl *SubobjectDecl,
CheckedTemporaries &CheckedTemps) {
if (!Value.hasValue()) {
- Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized)
- << true << Type;
- if (SubobjectLoc.isValid())
- Info.Note(SubobjectLoc, diag::note_constexpr_subobject_declared_here);
+ assert(SubobjectDecl && "SubobjectDecl shall be non-null");
+ Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized) << SubobjectDecl;
+ Info.Note(SubobjectDecl->getLocation(),
+ diag::note_constexpr_subobject_declared_here);
return false;
}
@@ -2373,20 +2399,19 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
if (!CheckEvaluationResult(CERK, Info, DiagLoc, EltTy,
Value.getArrayInitializedElt(I), Kind,
- SubobjectLoc, CheckedTemps))
+ SubobjectDecl, CheckedTemps))
return false;
}
if (!Value.hasArrayFiller())
return true;
return CheckEvaluationResult(CERK, Info, DiagLoc, EltTy,
- Value.getArrayFiller(), Kind, SubobjectLoc,
+ Value.getArrayFiller(), Kind, SubobjectDecl,
CheckedTemps);
}
if (Value.isUnion() && Value.getUnionField()) {
return CheckEvaluationResult(
CERK, Info, DiagLoc, Value.getUnionField()->getType(),
- Value.getUnionValue(), Kind, Value.getUnionField()->getLocation(),
- CheckedTemps);
+ Value.getUnionValue(), Kind, Value.getUnionField(), CheckedTemps);
}
if (Value.isStruct()) {
RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
@@ -2395,7 +2420,7 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
for (const CXXBaseSpecifier &BS : CD->bases()) {
if (!CheckEvaluationResult(CERK, Info, DiagLoc, BS.getType(),
Value.getStructBase(BaseIndex), Kind,
- BS.getBeginLoc(), CheckedTemps))
+ /*SubobjectDecl=*/nullptr, CheckedTemps))
return false;
++BaseIndex;
}
@@ -2405,8 +2430,8 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
continue;
if (!CheckEvaluationResult(CERK, Info, DiagLoc, I->getType(),
- Value.getStructField(I->getFieldIndex()),
- Kind, I->getLocation(), CheckedTemps))
+ Value.getStructField(I->getFieldIndex()), Kind,
+ I, CheckedTemps))
return false;
}
}
@@ -2440,7 +2465,7 @@ static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
CheckedTemporaries CheckedTemps;
return CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression,
Info, DiagLoc, Type, Value, Kind,
- SourceLocation(), CheckedTemps);
+ /*SubobjectDecl=*/nullptr, CheckedTemps);
}
/// Check that this evaluated value is fully-initialized and can be loaded by
@@ -2450,7 +2475,7 @@ static bool CheckFullyInitialized(EvalInfo &Info, SourceLocation DiagLoc,
CheckedTemporaries CheckedTemps;
return CheckEvaluationResult(
CheckEvaluationResultKind::FullyInitialized, Info, DiagLoc, Type, Value,
- ConstantExprKind::Normal, SourceLocation(), CheckedTemps);
+ ConstantExprKind::Normal, /*SubobjectDecl=*/nullptr, CheckedTemps);
}
/// Enforce C++2a [expr.const]/4.17, which disallows new-expressions unless
@@ -2802,7 +2827,7 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
// E1 x 2^E2 module 2^N.
if (LHS.isNegative())
Info.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS;
- else if (LHS.countLeadingZeros() < SA)
+ else if (LHS.countl_zero() < SA)
Info.CCEDiag(E, diag::note_constexpr_lshift_discards);
}
Result = LHS << SA;
@@ -3326,12 +3351,9 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
// Check that we can fold the initializer. In C++, we will have already done
// this in the cases where it matters for conformance.
- SmallVector<PartialDiagnosticAt, 8> Notes;
- if (!VD->evaluateValue(Notes)) {
- Info.FFDiag(E, diag::note_constexpr_var_init_non_constant,
- Notes.size() + 1) << VD;
+ if (!VD->evaluateValue()) {
+ Info.FFDiag(E, diag::note_constexpr_var_init_non_constant, 1) << VD;
NoteLValueLocation(Info, Base);
- Info.addNotes(Notes);
return false;
}
@@ -4988,12 +5010,13 @@ static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
!EvaluateDecl(Info, SS->getConditionVariable()))
return ESR_Failed;
if (SS->getCond()->isValueDependent()) {
- if (!EvaluateDependentExpr(SS->getCond(), Info))
- return ESR_Failed;
- } else {
- if (!EvaluateInteger(SS->getCond(), Value, Info))
- return ESR_Failed;
+ // We don't know what the value is, and which branch should jump to.
+ EvaluateDependentExpr(SS->getCond(), Info);
+ return ESR_Failed;
}
+ if (!EvaluateInteger(SS->getCond(), Value, Info))
+ return ESR_Failed;
+
if (!CondScope.destroy())
return ESR_Failed;
}
@@ -5046,7 +5069,7 @@ static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
static bool CheckLocalVariableDeclaration(EvalInfo &Info, const VarDecl *VD) {
// An expression E is a core constant expression unless the evaluation of E
- // would evaluate one of the following: [C++2b] - a control flow that passes
+ // would evaluate one of the following: [C++23] - a control flow that passes
// through a declaration of a variable with static or thread storage duration
// unless that variable is usable in constant expressions.
if (VD->isLocalVarDecl() && VD->isStaticLocal() &&
@@ -6165,13 +6188,13 @@ static bool handleTrivialCopy(EvalInfo &Info, const ParmVarDecl *Param,
/// Evaluate a function call.
static bool HandleFunctionCall(SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
- ArrayRef<const Expr *> Args, CallRef Call,
- const Stmt *Body, EvalInfo &Info,
+ const Expr *E, ArrayRef<const Expr *> Args,
+ CallRef Call, const Stmt *Body, EvalInfo &Info,
APValue &Result, const LValue *ResultSlot) {
if (!Info.CheckCallLimit(CallLoc))
return false;
- CallStackFrame Frame(Info, CallLoc, Callee, This, Call);
+ CallStackFrame Frame(Info, CallLoc, Callee, This, E, Call);
// For a trivial copy or move assignment, perform an APValue copy. This is
// essential for unions, where the operations performed by the assignment
@@ -6236,7 +6259,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
Info,
ObjectUnderConstruction{This.getLValueBase(), This.Designator.Entries},
RD->getNumBases());
- CallStackFrame Frame(Info, CallLoc, Definition, &This, Call);
+ CallStackFrame Frame(Info, CallLoc, Definition, &This, E, Call);
// FIXME: Creating an APValue just to hold a nonexistent return value is
// wasteful.
@@ -6537,7 +6560,8 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
if (!CheckConstexprFunction(Info, CallLoc, DD, Definition, Body))
return false;
- CallStackFrame Frame(Info, CallLoc, Definition, &This, CallRef());
+ CallStackFrame Frame(Info, CallLoc, Definition, &This, /*CallExpr=*/nullptr,
+ CallRef());
// We're now in the period of destruction of this object.
unsigned BasesLeft = RD->getNumBases();
@@ -7671,6 +7695,11 @@ public:
if (!CalleeLV.getLValueOffset().isZero())
return Error(Callee);
+ if (CalleeLV.isNullPointer()) {
+ Info.FFDiag(Callee, diag::note_constexpr_null_callee)
+ << const_cast<Expr *>(Callee);
+ return false;
+ }
FD = dyn_cast_or_null<FunctionDecl>(
CalleeLV.getLValueBase().dyn_cast<const ValueDecl *>());
if (!FD)
@@ -7801,7 +7830,7 @@ public:
Stmt *Body = FD->getBody(Definition);
if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition, Body) ||
- !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Call,
+ !HandleFunctionCall(E->getExprLoc(), Definition, This, E, Args, Call,
Body, Info, Result, ResultSlot))
return false;
@@ -8342,6 +8371,7 @@ bool LValueExprEvaluator::VisitCallExpr(const CallExpr *E) {
return false;
case Builtin::BIas_const:
case Builtin::BIforward:
+ case Builtin::BIforward_like:
case Builtin::BImove:
case Builtin::BImove_if_noexcept:
if (cast<FunctionDecl>(E->getCalleeDecl())->isConstexpr())
@@ -8361,8 +8391,8 @@ bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
E->getSubExpr()->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
// If we passed any comma operators, evaluate their LHSs.
- for (unsigned I = 0, N = CommaLHSs.size(); I != N; ++I)
- if (!EvaluateIgnoredValue(Info, CommaLHSs[I]))
+ for (const Expr *E : CommaLHSs)
+ if (!EvaluateIgnoredValue(Info, E))
return false;
// A materialized temporary with static storage duration can appear within the
@@ -8370,6 +8400,8 @@ bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
// value for use outside this evaluation.
APValue *Value;
if (E->getStorageDuration() == SD_Static) {
+ if (Info.EvalMode == EvalInfo::EM_ConstantFold)
+ return false;
// FIXME: What about SD_Thread?
Value = E->getOrCreateValue(true);
*Value = APValue();
@@ -8760,16 +8792,19 @@ public:
return false;
}
Result = *Info.CurrentCall->This;
- // If we are inside a lambda's call operator, the 'this' expression refers
- // to the enclosing '*this' object (either by value or reference) which is
- // either copied into the closure object's field that represents the '*this'
- // or refers to '*this'.
+
if (isLambdaCallOperator(Info.CurrentCall->Callee)) {
- // Ensure we actually have captured 'this'. (an error will have
- // been previously reported if not).
+ // Ensure we actually have captured 'this'. If something was wrong with
+ // 'this' capture, the error would have been previously reported.
+ // Otherwise we can be inside of a default initialization of an object
+ // declared by lambda's body, so no need to return false.
if (!Info.CurrentCall->LambdaThisCaptureField)
- return false;
+ return true;
+ // If we have captured 'this', the 'this' expression refers
+ // to the enclosing '*this' object (either by value or reference) which is
+ // either copied into the closure object's field that represents the
+ // '*this' or refers to '*this'.
// Update 'Result' to refer to the data member/field of the closure object
// that represents the '*this' capture.
if (!HandleLValueMember(Info, E, Result,
@@ -8892,9 +8927,10 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
if (!E->getType()->isVoidPointerType()) {
// In some circumstances, we permit casting from void* to cv1 T*, when the
// actual pointee object is actually a cv2 T.
+ bool HasValidResult = !Result.InvalidBase && !Result.Designator.Invalid &&
+ !Result.IsNullPtr;
bool VoidPtrCastMaybeOK =
- !Result.InvalidBase && !Result.Designator.Invalid &&
- !Result.IsNullPtr &&
+ HasValidResult &&
Info.Ctx.hasSameUnqualifiedType(Result.Designator.getType(Info.Ctx),
E->getType()->getPointeeType());
// 1. We'll allow it in std::allocator::allocate, and anything which that
@@ -8906,16 +8942,23 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
// that back to `const __impl*` in its body.
if (VoidPtrCastMaybeOK &&
(Info.getStdAllocatorCaller("allocate") ||
- IsDeclSourceLocationCurrent(Info.CurrentCall->Callee))) {
+ IsDeclSourceLocationCurrent(Info.CurrentCall->Callee) ||
+ Info.getLangOpts().CPlusPlus26)) {
// Permitted.
} else {
- Result.Designator.setInvalid();
- if (SubExpr->getType()->isVoidPointerType())
- CCEDiag(E, diag::note_constexpr_invalid_cast)
- << 3 << SubExpr->getType();
- else
+ if (SubExpr->getType()->isVoidPointerType()) {
+ if (HasValidResult)
+ CCEDiag(E, diag::note_constexpr_invalid_void_star_cast)
+ << SubExpr->getType() << Info.getLangOpts().CPlusPlus26
+ << Result.Designator.getType(Info.Ctx).getCanonicalType()
+ << E->getType()->getPointeeType();
+ else
+ CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 3 << SubExpr->getType();
+ } else
CCEDiag(E, diag::note_constexpr_invalid_cast)
<< 2 << Info.Ctx.getLangOpts().CPlusPlus;
+ Result.Designator.setInvalid();
}
}
if (E->getCastKind() == CK_AddressSpaceConversion && Result.IsNullPtr)
@@ -9020,8 +9063,7 @@ static CharUnits GetAlignOfType(EvalInfo &Info, QualType T,
// C++ [expr.alignof]p3:
// When alignof is applied to a reference type, the result is the
// alignment of the referenced type.
- if (const ReferenceType *Ref = T->getAs<ReferenceType>())
- T = Ref->getPointeeType();
+ T = T.getNonReferenceType();
if (T.getQualifiers().hasUnaligned())
return CharUnits::One();
@@ -10168,6 +10210,8 @@ bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
if (!EvaluateLValue(E->getSubExpr(), Array, Info))
return false;
+ assert(ArrayType && "unexpected type for array initializer");
+
// Get a pointer to the first element of the array.
Array.addArray(Info, E, ArrayType);
@@ -11284,7 +11328,6 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
assert(!T->isDependentType() && "unexpected dependent type");
QualType CanTy = T.getCanonicalType();
- const BuiltinType *BT = dyn_cast<BuiltinType>(CanTy);
switch (CanTy->getTypeClass()) {
#define TYPE(ID, BASE)
@@ -11297,7 +11340,7 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
llvm_unreachable("unexpected non-canonical or dependent type");
case Type::Builtin:
- switch (BT->getKind()) {
+ switch (cast<BuiltinType>(CanTy)->getKind()) {
#define BUILTIN_TYPE(ID, SINGLETON_ID)
#define SIGNED_TYPE(ID, SINGLETON_ID) \
case BuiltinType::ID: return GCCTypeClass::Integer;
@@ -11363,6 +11406,8 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
return GCCTypeClass::None;
case BuiltinType::Dependent:
@@ -11711,6 +11756,18 @@ static bool convertUnsignedAPIntToCharUnits(const llvm::APInt &Int,
return true;
}
+/// If we're evaluating the object size of an instance of a struct that
+/// contains a flexible array member, add the size of the initializer.
+static void addFlexibleArrayMemberInitSize(EvalInfo &Info, const QualType &T,
+ const LValue &LV, CharUnits &Size) {
+ if (!T.isNull() && T->isStructureType() &&
+ T->getAsStructureType()->getDecl()->hasFlexibleArrayMember())
+ if (const auto *V = LV.getLValueBase().dyn_cast<const ValueDecl *>())
+ if (const auto *VD = dyn_cast<VarDecl>(V))
+ if (VD->hasInit())
+ Size += VD->getFlexibleArrayInitChars(Info.Ctx);
+}
+
/// Helper for tryEvaluateBuiltinObjectSize -- Given an LValue, this will
/// determine how many bytes exist from the beginning of the object to either
/// the end of the current subobject, or the end of the object itself, depending
@@ -11745,7 +11802,9 @@ static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
return false;
QualType BaseTy = getObjectType(LVal.getLValueBase());
- return CheckedHandleSizeof(BaseTy, EndOffset);
+ const bool Ret = CheckedHandleSizeof(BaseTy, EndOffset);
+ addFlexibleArrayMemberInitSize(Info, BaseTy, LVal, EndOffset);
+ return Ret;
}
// We want to evaluate the size of a subobject.
@@ -12008,7 +12067,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
- return Success(Val.getBitWidth() - Val.getMinSignedBits(), E);
+ return Success(Val.getBitWidth() - Val.getSignificantBits(), E);
}
case Builtin::BI__builtin_clz:
@@ -12021,7 +12080,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!Val)
return Error(E);
- return Success(Val.countLeadingZeros(), E);
+ return Success(Val.countl_zero(), E);
}
case Builtin::BI__builtin_constant_p: {
@@ -12067,7 +12126,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!Val)
return Error(E);
- return Success(Val.countTrailingZeros(), E);
+ return Success(Val.countr_zero(), E);
}
case Builtin::BI__builtin_eh_return_data_regno: {
@@ -12087,7 +12146,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
- unsigned N = Val.countTrailingZeros();
+ unsigned N = Val.countr_zero();
return Success(N == Val.getBitWidth() ? 0 : N + 1, E);
}
@@ -12135,6 +12194,16 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
Success(Val.isNormal() ? 1 : 0, E);
}
+ case Builtin::BI__builtin_isfpclass: {
+ APSInt MaskVal;
+ if (!EvaluateInteger(E->getArg(1), MaskVal, Info))
+ return false;
+ unsigned Test = static_cast<llvm::FPClassTest>(MaskVal.getZExtValue());
+ APFloat Val(0.0);
+ return EvaluateFloat(E->getArg(0), Val, Info) &&
+ Success((Val.classify() & Test) ? 1 : 0, E);
+ }
+
case Builtin::BI__builtin_parity:
case Builtin::BI__builtin_parityl:
case Builtin::BI__builtin_parityll: {
@@ -12142,7 +12211,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
- return Success(Val.countPopulation() % 2, E);
+ return Success(Val.popcount() % 2, E);
}
case Builtin::BI__builtin_popcount:
@@ -12152,7 +12221,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
- return Success(Val.countPopulation(), E);
+ return Success(Val.popcount(), E);
}
case Builtin::BI__builtin_rotateleft8:
@@ -13134,12 +13203,12 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
if (LHSValue.getDecl() && LHSValue.getDecl()->isWeak()) {
Info.FFDiag(E, diag::note_constexpr_mem_pointer_weak_comparison)
<< LHSValue.getDecl();
- return true;
+ return false;
}
if (RHSValue.getDecl() && RHSValue.getDecl()->isWeak()) {
Info.FFDiag(E, diag::note_constexpr_mem_pointer_weak_comparison)
<< RHSValue.getDecl();
- return true;
+ return false;
}
// C++11 [expr.eq]p2:
@@ -13502,10 +13571,16 @@ bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
return false;
if (!Result.isInt()) return Error(E);
const APSInt &Value = Result.getInt();
- if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow() &&
- !HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
- E->getType()))
- return false;
+ if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow()) {
+ if (Info.checkingForUndefinedBehavior())
+ Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
+ diag::warn_integer_constant_overflow)
+ << toString(Value, 10) << E->getType();
+
+ if (!HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
+ E->getType()))
+ return false;
+ }
return Success(-Value, E);
}
case UO_Not: {
@@ -14826,6 +14901,9 @@ public:
switch (E->getCastKind()) {
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
+ case CK_NullToPointer:
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
case CK_NonAtomicToAtomic:
return This ? EvaluateInPlace(Result, Info, *This, E->getSubExpr())
: Evaluate(Result, Info, E->getSubExpr());
@@ -15038,6 +15116,7 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
E, Unqual, ScopeKind::FullExpression, LV);
if (!EvaluateAtomic(E, &LV, Value, Info))
return false;
+ Result = Value;
} else {
if (!EvaluateAtomic(E, nullptr, Result, Info))
return false;
@@ -15340,8 +15419,16 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx,
LValue LVal;
LVal.set(Base);
- if (!::EvaluateInPlace(Result.Val, Info, LVal, this) || Result.HasSideEffects)
- return false;
+ {
+ // C++23 [intro.execution]/p5
+ // A full-expression is [...] a constant-expression
+ // So we need to make sure temporary objects are destroyed after having
+ // evaluating the expression (per C++23 [class.temporary]/p4).
+ FullExpressionRAII Scope(Info);
+ if (!::EvaluateInPlace(Result.Val, Info, LVal, this) ||
+ Result.HasSideEffects || !Scope.destroy())
+ return false;
+ }
if (!Info.discardCleanups())
llvm_unreachable("Unhandled cleanup; missing full expression marker?");
@@ -15368,7 +15455,8 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx,
bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
const VarDecl *VD,
- SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+ SmallVectorImpl<PartialDiagnosticAt> &Notes,
+ bool IsConstantInitialization) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
@@ -15388,14 +15476,12 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
Expr::EvalStatus EStatus;
EStatus.Diag = &Notes;
- EvalInfo Info(Ctx, EStatus, VD->isConstexpr()
- ? EvalInfo::EM_ConstantExpression
- : EvalInfo::EM_ConstantFold);
+ EvalInfo Info(Ctx, EStatus,
+ (IsConstantInitialization && Ctx.getLangOpts().CPlusPlus)
+ ? EvalInfo::EM_ConstantExpression
+ : EvalInfo::EM_ConstantFold);
Info.setEvaluatingDecl(VD, Value);
- Info.InConstantContext = true;
-
- SourceLocation DeclLoc = VD->getLocation();
- QualType DeclTy = VD->getType();
+ Info.InConstantContext = IsConstantInitialization;
if (Info.EnableNewConstInterp) {
auto &InterpCtx = const_cast<ASTContext &>(Ctx).getInterpContext();
@@ -15417,6 +15503,9 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
if (!Info.discardCleanups())
llvm_unreachable("Unhandled cleanup; missing full expression marker?");
}
+
+ SourceLocation DeclLoc = VD->getLocation();
+ QualType DeclTy = VD->getType();
return CheckConstantExpression(Info, DeclLoc, DeclTy, Value,
ConstantExprKind::Normal) &&
CheckMemoryLeaks(Info);
@@ -16147,7 +16236,8 @@ bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
Info.EvalStatus.HasSideEffects = false;
// Build fake call to Callee.
- CallStackFrame Frame(Info, Callee->getLocation(), Callee, ThisPtr, Call);
+ CallStackFrame Frame(Info, Callee->getLocation(), Callee, ThisPtr, This,
+ Call);
// FIXME: Missing ExprWithCleanups in enable_if conditions?
FullExpressionRAII Scope(Info);
return Evaluate(Value, Info, this) && Scope.destroy() &&
@@ -16204,7 +16294,8 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
} else {
SourceLocation Loc = FD->getLocation();
HandleFunctionCall(Loc, FD, (MD && MD->isInstance()) ? &This : nullptr,
- Args, CallRef(), FD->getBody(), Info, Scratch, nullptr);
+ &VIE, Args, CallRef(), FD->getBody(), Info, Scratch,
+ /*ResultSlot=*/nullptr);
}
return Diags.empty();
@@ -16226,7 +16317,8 @@ bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
Info.CheckingPotentialConstantExpression = true;
// Fabricate a call stack frame to give the arguments a plausible cover story.
- CallStackFrame Frame(Info, SourceLocation(), FD, /*This*/ nullptr, CallRef());
+ CallStackFrame Frame(Info, SourceLocation(), FD, /*This=*/nullptr,
+ /*CallExpr=*/nullptr, CallRef());
APValue ResultScratch;
Evaluate(ResultScratch, Info, E);
@@ -16292,6 +16384,45 @@ static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result,
}
}
+bool Expr::EvaluateCharRangeAsString(std::string &Result,
+ const Expr *SizeExpression,
+ const Expr *PtrExpression, ASTContext &Ctx,
+ EvalResult &Status) const {
+ LValue String;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpression);
+ Info.InConstantContext = true;
+
+ FullExpressionRAII Scope(Info);
+ APSInt SizeValue;
+ if (!::EvaluateInteger(SizeExpression, SizeValue, Info))
+ return false;
+
+ int64_t Size = SizeValue.getExtValue();
+
+ if (!::EvaluatePointer(PtrExpression, String, Info))
+ return false;
+
+ QualType CharTy = PtrExpression->getType()->getPointeeType();
+ for (int64_t I = 0; I < Size; ++I) {
+ APValue Char;
+ if (!handleLValueToRValueConversion(Info, PtrExpression, CharTy, String,
+ Char))
+ return false;
+
+ APSInt C = Char.getInt();
+ Result.push_back(static_cast<char>(C.getExtValue()));
+ if (!HandleLValueArrayAdjustment(Info, PtrExpression, String, CharTy, 1))
+ return false;
+ }
+ if (!Scope.destroy())
+ return false;
+
+ if (!CheckMemoryLeaks(Info))
+ return false;
+
+ return true;
+}
+
bool Expr::tryEvaluateStrLen(uint64_t &Result, ASTContext &Ctx) const {
Expr::EvalStatus Status;
EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
diff --git a/contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp b/contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp
index a2ef270d7a9c..8bad3b36244e 100644
--- a/contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp
@@ -187,10 +187,7 @@ public:
/// Implements the ASTImporter interface for tracking back a declaration
/// to its original declaration it came from.
Decl *GetOriginalDecl(Decl *To) override {
- auto It = ToOrigin.find(To);
- if (It != ToOrigin.end())
- return It->second;
- return nullptr;
+ return ToOrigin.lookup(To);
}
/// Whenever a DeclContext is imported, ensure that ExternalASTSource's origin
@@ -541,4 +538,3 @@ void ExternalASTMerger::FindExternalLexicalDecls(
return false;
});
}
-
diff --git a/contrib/llvm-project/clang/lib/AST/FormatString.cpp b/contrib/llvm-project/clang/lib/AST/FormatString.cpp
index c7dee2d421bb..ad5af9508983 100644
--- a/contrib/llvm-project/clang/lib/AST/FormatString.cpp
+++ b/contrib/llvm-project/clang/lib/AST/FormatString.cpp
@@ -351,10 +351,12 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
case AnyCharTy: {
if (const auto *ETy = argTy->getAs<EnumType>()) {
// If the enum is incomplete we know nothing about the underlying type.
- // Assume that it's 'int'.
+ // Assume that it's 'int'. Do not use the underlying type for a scoped
+ // enumeration.
if (!ETy->getDecl()->isComplete())
return NoMatch;
- argTy = ETy->getDecl()->getIntegerType();
+ if (ETy->isUnscopedEnumerationType())
+ argTy = ETy->getDecl()->getIntegerType();
}
if (const auto *BT = argTy->getAs<BuiltinType>()) {
@@ -391,10 +393,11 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
case SpecificTy: {
if (const EnumType *ETy = argTy->getAs<EnumType>()) {
// If the enum is incomplete we know nothing about the underlying type.
- // Assume that it's 'int'.
+ // Assume that it's 'int'. Do not use the underlying type for a scoped
+ // enumeration as that needs an exact match.
if (!ETy->getDecl()->isComplete())
argTy = C.IntTy;
- else
+ else if (ETy->isUnscopedEnumerationType())
argTy = ETy->getDecl()->getIntegerType();
}
argTy = C.getCanonicalType(argTy).getUnqualifiedType();
@@ -848,6 +851,8 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target,
}
switch (CS.getKind()) {
+ case ConversionSpecifier::bArg:
+ case ConversionSpecifier::BArg:
case ConversionSpecifier::dArg:
case ConversionSpecifier::DArg:
case ConversionSpecifier::iArg:
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h b/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h
index 3122388a49a5..579842ce46aa 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h
@@ -27,12 +27,10 @@ class Boolean final {
/// Underlying boolean.
bool V;
- /// Construct a wrapper from a boolean.
- explicit Boolean(bool V) : V(V) {}
-
public:
/// Zero-initializes a boolean.
Boolean() : V(false) {}
+ explicit Boolean(bool V) : V(V) {}
bool operator<(Boolean RHS) const { return V < RHS.V; }
bool operator>(Boolean RHS) const { return V > RHS.V; }
@@ -66,7 +64,7 @@ class Boolean final {
Boolean toUnsigned() const { return *this; }
- constexpr static unsigned bitWidth() { return true; }
+ constexpr static unsigned bitWidth() { return 1; }
bool isZero() const { return !V; }
bool isMin() const { return isZero(); }
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp
index 4633d1e0823b..f2072f974c40 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp
@@ -8,8 +8,10 @@
#include "ByteCodeEmitter.h"
#include "Context.h"
+#include "Floating.h"
#include "Opcode.h"
#include "Program.h"
+#include "clang/AST/ASTLambda.h"
#include "clang/AST/DeclCXX.h"
#include <type_traits>
@@ -21,59 +23,77 @@ using Error = llvm::Error;
Expected<Function *>
ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) {
- // Function is not defined at all or not yet. We will
- // create a Function instance but not compile the body. That
- // will (maybe) happen later.
- bool HasBody = FuncDecl->hasBody(FuncDecl);
-
- // Create a handle over the emitted code.
- Function *Func = P.getFunction(FuncDecl);
- if (!Func) {
- // Set up argument indices.
- unsigned ParamOffset = 0;
- SmallVector<PrimType, 8> ParamTypes;
- llvm::DenseMap<unsigned, Function::ParamDescriptor> ParamDescriptors;
-
- // If the return is not a primitive, a pointer to the storage where the
- // value is initialized in is passed as the first argument. See 'RVO'
- // elsewhere in the code.
- QualType Ty = FuncDecl->getReturnType();
- bool HasRVO = false;
- if (!Ty->isVoidType() && !Ctx.classify(Ty)) {
- HasRVO = true;
- ParamTypes.push_back(PT_Ptr);
- ParamOffset += align(primSize(PT_Ptr));
- }
+ // Set up argument indices.
+ unsigned ParamOffset = 0;
+ SmallVector<PrimType, 8> ParamTypes;
+ SmallVector<unsigned, 8> ParamOffsets;
+ llvm::DenseMap<unsigned, Function::ParamDescriptor> ParamDescriptors;
+
+ // If the return is not a primitive, a pointer to the storage where the
+ // value is initialized in is passed as the first argument. See 'RVO'
+ // elsewhere in the code.
+ QualType Ty = FuncDecl->getReturnType();
+ bool HasRVO = false;
+ if (!Ty->isVoidType() && !Ctx.classify(Ty)) {
+ HasRVO = true;
+ ParamTypes.push_back(PT_Ptr);
+ ParamOffsets.push_back(ParamOffset);
+ ParamOffset += align(primSize(PT_Ptr));
+ }
- // If the function decl is a member decl, the next parameter is
- // the 'this' pointer. This parameter is pop()ed from the
- // InterpStack when calling the function.
- bool HasThisPointer = false;
- if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl);
- MD && MD->isInstance()) {
+ // If the function decl is a member decl, the next parameter is
+ // the 'this' pointer. This parameter is pop()ed from the
+ // InterpStack when calling the function.
+ bool HasThisPointer = false;
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl)) {
+ if (MD->isInstance()) {
HasThisPointer = true;
ParamTypes.push_back(PT_Ptr);
+ ParamOffsets.push_back(ParamOffset);
ParamOffset += align(primSize(PT_Ptr));
}
- // Assign descriptors to all parameters.
- // Composite objects are lowered to pointers.
- for (const ParmVarDecl *PD : FuncDecl->parameters()) {
- PrimType Ty = Ctx.classify(PD->getType()).value_or(PT_Ptr);
- Descriptor *Desc = P.createDescriptor(PD, Ty);
- ParamDescriptors.insert({ParamOffset, {Ty, Desc}});
- Params.insert({PD, ParamOffset});
- ParamOffset += align(primSize(Ty));
- ParamTypes.push_back(Ty);
+ // Set up lambda capture to closure record field mapping.
+ if (isLambdaCallOperator(MD)) {
+ const Record *R = P.getOrCreateRecord(MD->getParent());
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> LC;
+ FieldDecl *LTC;
+
+ MD->getParent()->getCaptureFields(LC, LTC);
+
+ for (auto Cap : LC) {
+ unsigned Offset = R->getField(Cap.second)->Offset;
+ this->LambdaCaptures[Cap.first] = {
+ Offset, Cap.second->getType()->isReferenceType()};
+ }
+ // FIXME: LambdaThisCapture
+ (void)LTC;
}
+ }
- Func =
- P.createFunction(FuncDecl, ParamOffset, std::move(ParamTypes),
- std::move(ParamDescriptors), HasThisPointer, HasRVO);
+ // Assign descriptors to all parameters.
+ // Composite objects are lowered to pointers.
+ for (const ParmVarDecl *PD : FuncDecl->parameters()) {
+ PrimType Ty = Ctx.classify(PD->getType()).value_or(PT_Ptr);
+ Descriptor *Desc = P.createDescriptor(PD, Ty);
+ ParamDescriptors.insert({ParamOffset, {Ty, Desc}});
+ Params.insert({PD, ParamOffset});
+ ParamOffsets.push_back(ParamOffset);
+ ParamOffset += align(primSize(Ty));
+ ParamTypes.push_back(Ty);
}
+ // Create a handle over the emitted code.
+ Function *Func = P.getFunction(FuncDecl);
+ if (!Func)
+ Func = P.createFunction(FuncDecl, ParamOffset, std::move(ParamTypes),
+ std::move(ParamDescriptors),
+ std::move(ParamOffsets), HasThisPointer, HasRVO);
+
assert(Func);
- if (!HasBody)
+ // For not-yet-defined functions, we only create a Function instance and
+ // compile their body later.
+ if (!FuncDecl->isDefined())
return Func;
// Compile the function body.
@@ -94,7 +114,7 @@ ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) {
// Set the function's code.
Func->setCode(NextLocalOffset, std::move(Code), std::move(SrcMap),
- std::move(Scopes));
+ std::move(Scopes), FuncDecl->hasBody());
Func->setIsFullyCompiled(true);
return Func;
}
@@ -110,12 +130,13 @@ Scope::Local ByteCodeEmitter::createLocal(Descriptor *D) {
void ByteCodeEmitter::emitLabel(LabelTy Label) {
const size_t Target = Code.size();
LabelOffsets.insert({Label, Target});
- auto It = LabelRelocs.find(Label);
- if (It != LabelRelocs.end()) {
+
+ if (auto It = LabelRelocs.find(Label);
+ It != LabelRelocs.end()) {
for (unsigned Reloc : It->second) {
using namespace llvm::support;
- /// Rewrite the operand of all jumps to this label.
+ // Rewrite the operand of all jumps to this label.
void *Location = Code.data() + Reloc - align(sizeof(int32_t));
assert(aligned(Location));
const int32_t Offset = Target - static_cast<int64_t>(Reloc);
@@ -132,10 +153,9 @@ int32_t ByteCodeEmitter::getOffset(LabelTy Label) {
assert(aligned(Position));
// If target is known, compute jump offset.
- auto It = LabelOffsets.find(Label);
- if (It != LabelOffsets.end()) {
+ if (auto It = LabelOffsets.find(Label);
+ It != LabelOffsets.end())
return It->second - Position;
- }
// Otherwise, record relocation and return dummy offset.
LabelRelocs[Label].push_back(Position);
@@ -151,7 +171,7 @@ bool ByteCodeEmitter::bail(const SourceLocation &Loc) {
/// Helper to write bytecode and bail out if 32-bit offsets become invalid.
/// Pointers will be automatically marshalled as 32-bit IDs.
template <typename T>
-static void emit(Program &P, std::vector<char> &Code, const T &Val,
+static void emit(Program &P, std::vector<std::byte> &Code, const T &Val,
bool &Success) {
size_t Size;
@@ -183,14 +203,14 @@ template <typename... Tys>
bool ByteCodeEmitter::emitOp(Opcode Op, const Tys &... Args, const SourceInfo &SI) {
bool Success = true;
- /// The opcode is followed by arguments. The source info is
- /// attached to the address after the opcode.
+ // The opcode is followed by arguments. The source info is
+ // attached to the address after the opcode.
emit(P, Code, Op, Success);
if (SI)
SrcMap.emplace_back(Code.size(), SI);
- /// The initializer list forces the expression to be evaluated
- /// for each argument in the variadic template, in order.
+ // The initializer list forces the expression to be evaluated
+ // for each argument in the variadic template, in order.
(void)std::initializer_list<int>{(emit(P, Code, Args, Success), 0)...};
return Success;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h
index 30da06b20250..795534696d92 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h
@@ -70,6 +70,10 @@ protected:
/// Parameter indices.
llvm::DenseMap<const ParmVarDecl *, unsigned> Params;
+ /// Lambda captures.
+ /// Map from Decl* to [Offset, IsReference] pair.
+ llvm::DenseMap<const ValueDecl *, std::pair<unsigned, bool>> LambdaCaptures;
+ unsigned LambdaThisCapture;
/// Local descriptors.
llvm::SmallVector<SmallVector<Local, 8>, 2> Descriptors;
@@ -89,7 +93,7 @@ private:
/// Location of label relocations.
llvm::DenseMap<LabelTy, llvm::SmallVector<unsigned, 5>> LabelRelocs;
/// Program code.
- std::vector<char> Code;
+ std::vector<std::byte> Code;
/// Opcode to expression mapping.
SourceMap SrcMap;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp
index 615dbdefefbe..9f3eb158576f 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp
@@ -11,6 +11,7 @@
#include "ByteCodeGenError.h"
#include "ByteCodeStmtGen.h"
#include "Context.h"
+#include "Floating.h"
#include "Function.h"
#include "PrimType.h"
#include "Program.h"
@@ -25,10 +26,10 @@ namespace clang {
namespace interp {
/// Scope used to handle temporaries in toplevel variable declarations.
-template <class Emitter> class DeclScope final : public LocalScope<Emitter> {
+template <class Emitter> class DeclScope final : public VariableScope<Emitter> {
public:
- DeclScope(ByteCodeExprGen<Emitter> *Ctx, const VarDecl *VD)
- : LocalScope<Emitter>(Ctx), Scope(Ctx->P, VD) {}
+ DeclScope(ByteCodeExprGen<Emitter> *Ctx, const ValueDecl *VD)
+ : VariableScope<Emitter>(Ctx), Scope(Ctx->P, VD) {}
void addExtended(const Scope::Local &Local) override {
return this->addLocal(Local);
@@ -39,7 +40,7 @@ private:
};
/// Scope used to handle initialization methods.
-template <class Emitter> class OptionScope {
+template <class Emitter> class OptionScope final {
public:
/// Root constructor, compiling or discarding primitives.
OptionScope(ByteCodeExprGen<Emitter> *Ctx, bool NewDiscardResult)
@@ -66,7 +67,7 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
case CK_LValueToRValue: {
return dereference(
- CE->getSubExpr(), DerefKind::Read,
+ SubExpr, DerefKind::Read,
[](PrimType) {
// Value loaded - nothing to do here.
return true;
@@ -84,17 +85,48 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
if (!this->visit(SubExpr))
return false;
- const CXXRecordDecl *FromDecl = getRecordDecl(SubExpr);
- assert(FromDecl);
- const CXXRecordDecl *ToDecl = getRecordDecl(CE);
- assert(ToDecl);
- const Record *R = getRecord(FromDecl);
- const Record::Base *ToBase = R->getBase(ToDecl);
- assert(ToBase);
+ return this->emitDerivedToBaseCasts(getRecordTy(SubExpr->getType()),
+ getRecordTy(CE->getType()), CE);
+ }
+
+ case CK_FloatingCast: {
+ if (!this->visit(SubExpr))
+ return false;
+ const auto *TargetSemantics = &Ctx.getFloatSemantics(CE->getType());
+ return this->emitCastFP(TargetSemantics, getRoundingMode(CE), CE);
+ }
+
+ case CK_IntegralToFloating: {
+ std::optional<PrimType> FromT = classify(SubExpr->getType());
+ if (!FromT)
+ return false;
+
+ if (!this->visit(SubExpr))
+ return false;
- return this->emitGetPtrBase(ToBase->Offset, CE);
+ const auto *TargetSemantics = &Ctx.getFloatSemantics(CE->getType());
+ llvm::RoundingMode RM = getRoundingMode(CE);
+ return this->emitCastIntegralFloating(*FromT, TargetSemantics, RM, CE);
}
+ case CK_FloatingToBoolean:
+ case CK_FloatingToIntegral: {
+ std::optional<PrimType> ToT = classify(CE->getType());
+
+ if (!ToT)
+ return false;
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ return this->emitCastFloatingIntegral(*ToT, CE);
+ }
+
+ case CK_NullToPointer:
+ if (DiscardResult)
+ return true;
+ return this->emitNull(classifyPrim(CE->getType()), CE);
+
case CK_ArrayToPointerDecay:
case CK_AtomicToNonAtomic:
case CK_ConstructorConversion:
@@ -102,7 +134,6 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
case CK_NonAtomicToAtomic:
case CK_NoOp:
case CK_UserDefinedConversion:
- case CK_NullToPointer:
return this->visit(SubExpr);
case CK_IntegralToBoolean:
@@ -115,10 +146,23 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
if (!this->visit(SubExpr))
return false;
- // TODO: Emit this only if FromT != ToT.
+ if (FromT == ToT)
+ return true;
+
return this->emitCast(*FromT, *ToT, CE);
}
+ case CK_PointerToBoolean: {
+ // Just emit p != nullptr for this.
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (!this->emitNullPtr(CE))
+ return false;
+
+ return this->emitNEPtr(CE);
+ }
+
case CK_ToVoid:
return discard(SubExpr);
@@ -137,27 +181,32 @@ bool ByteCodeExprGen<Emitter>::VisitIntegerLiteral(const IntegerLiteral *LE) {
}
template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitFloatingLiteral(const FloatingLiteral *E) {
+ if (DiscardResult)
+ return true;
+
+ return this->emitConstFloat(E->getValue(), E);
+}
+
+template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitParenExpr(const ParenExpr *PE) {
- return this->visit(PE->getSubExpr());
+ const Expr *SubExpr = PE->getSubExpr();
+
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ return this->visit(SubExpr);
}
template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) {
+ // Need short-circuiting for these.
+ if (BO->isLogicalOp())
+ return this->VisitLogicalBinOp(BO);
+
const Expr *LHS = BO->getLHS();
const Expr *RHS = BO->getRHS();
- // Deal with operations which have composite or void types.
- switch (BO->getOpcode()) {
- case BO_Comma:
- if (!discard(LHS))
- return false;
- if (!this->visit(RHS))
- return false;
- return true;
- default:
- break;
- }
-
// Typecheck the args.
std::optional<PrimType> LT = classify(LHS->getType());
std::optional<PrimType> RT = classify(RHS->getType());
@@ -172,37 +221,64 @@ bool ByteCodeExprGen<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) {
return DiscardResult ? this->emitPop(*T, BO) : true;
};
+ // Deal with operations which have composite or void types.
+ if (BO->isCommaOp()) {
+ if (!discard(LHS))
+ return false;
+ return Discard(this->visit(RHS));
+ }
+
// Pointer arithmetic special case.
if (BO->getOpcode() == BO_Add || BO->getOpcode() == BO_Sub) {
- if (*T == PT_Ptr || (*LT == PT_Ptr && *RT == PT_Ptr))
+ if (T == PT_Ptr || (LT == PT_Ptr && RT == PT_Ptr))
return this->VisitPointerArithBinOp(BO);
}
if (!visit(LHS) || !visit(RHS))
return false;
+ // For languages such as C, cast the result of one
+ // of our comparision opcodes to T (which is usually int).
+ auto MaybeCastToBool = [this, T, BO](bool Result) {
+ if (!Result)
+ return false;
+ if (DiscardResult)
+ return this->emitPop(*T, BO);
+ if (T != PT_Bool)
+ return this->emitCast(PT_Bool, *T, BO);
+ return true;
+ };
+
switch (BO->getOpcode()) {
case BO_EQ:
- return Discard(this->emitEQ(*LT, BO));
+ return MaybeCastToBool(this->emitEQ(*LT, BO));
case BO_NE:
- return Discard(this->emitNE(*LT, BO));
+ return MaybeCastToBool(this->emitNE(*LT, BO));
case BO_LT:
- return Discard(this->emitLT(*LT, BO));
+ return MaybeCastToBool(this->emitLT(*LT, BO));
case BO_LE:
- return Discard(this->emitLE(*LT, BO));
+ return MaybeCastToBool(this->emitLE(*LT, BO));
case BO_GT:
- return Discard(this->emitGT(*LT, BO));
+ return MaybeCastToBool(this->emitGT(*LT, BO));
case BO_GE:
- return Discard(this->emitGE(*LT, BO));
+ return MaybeCastToBool(this->emitGE(*LT, BO));
case BO_Sub:
+ if (BO->getType()->isFloatingType())
+ return Discard(this->emitSubf(getRoundingMode(BO), BO));
return Discard(this->emitSub(*T, BO));
case BO_Add:
+ if (BO->getType()->isFloatingType())
+ return Discard(this->emitAddf(getRoundingMode(BO), BO));
return Discard(this->emitAdd(*T, BO));
case BO_Mul:
+ if (BO->getType()->isFloatingType())
+ return Discard(this->emitMulf(getRoundingMode(BO), BO));
return Discard(this->emitMul(*T, BO));
case BO_Rem:
return Discard(this->emitRem(*T, BO));
case BO_Div:
+ if (BO->getType()->isFloatingType())
+ return Discard(this->emitDivf(getRoundingMode(BO), BO));
return Discard(this->emitDiv(*T, BO));
case BO_Assign:
if (DiscardResult)
@@ -218,8 +294,9 @@ bool ByteCodeExprGen<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) {
return Discard(this->emitShr(*LT, *RT, BO));
case BO_Xor:
return Discard(this->emitBitXor(*T, BO));
- case BO_LAnd:
case BO_LOr:
+ case BO_LAnd:
+ llvm_unreachable("Already handled earlier");
default:
return this->bail(BO);
}
@@ -278,11 +355,72 @@ bool ByteCodeExprGen<Emitter>::VisitPointerArithBinOp(const BinaryOperator *E) {
}
template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitLogicalBinOp(const BinaryOperator *E) {
+ assert(E->isLogicalOp());
+ BinaryOperatorKind Op = E->getOpcode();
+ const Expr *LHS = E->getLHS();
+ const Expr *RHS = E->getRHS();
+
+ if (Op == BO_LOr) {
+ // Logical OR. Visit LHS and only evaluate RHS if LHS was FALSE.
+ LabelTy LabelTrue = this->getLabel();
+ LabelTy LabelEnd = this->getLabel();
+
+ if (!this->visit(LHS))
+ return false;
+ if (!this->jumpTrue(LabelTrue))
+ return false;
+
+ if (!this->visit(RHS))
+ return false;
+ if (!this->jump(LabelEnd))
+ return false;
+
+ this->emitLabel(LabelTrue);
+ this->emitConstBool(true, E);
+ this->fallthrough(LabelEnd);
+ this->emitLabel(LabelEnd);
+
+ if (DiscardResult)
+ return this->emitPopBool(E);
+
+ return true;
+ }
+
+ // Logical AND.
+ // Visit LHS. Only visit RHS if LHS was TRUE.
+ LabelTy LabelFalse = this->getLabel();
+ LabelTy LabelEnd = this->getLabel();
+
+ if (!this->visit(LHS))
+ return false;
+ if (!this->jumpFalse(LabelFalse))
+ return false;
+
+ if (!this->visit(RHS))
+ return false;
+ if (!this->jump(LabelEnd))
+ return false;
+
+ this->emitLabel(LabelFalse);
+ this->emitConstBool(false, E);
+ this->fallthrough(LabelEnd);
+ this->emitLabel(LabelEnd);
+
+ if (DiscardResult)
+ return this->emitPopBool(E);
+
+ return true;
+}
+
+template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
- if (std::optional<PrimType> T = classify(E))
- return this->emitZero(*T, E);
+ std::optional<PrimType> T = classify(E);
- return false;
+ if (!T)
+ return false;
+
+ return this->visitZeroInitializer(E->getType(), E);
}
template <class Emitter>
@@ -292,7 +430,7 @@ bool ByteCodeExprGen<Emitter>::VisitArraySubscriptExpr(
const Expr *Index = E->getIdx();
PrimType IndexT = classifyPrim(Index->getType());
- // Take pointer of LHS, add offset from RHS, narrow result.
+ // Take pointer of LHS, add offset from RHS.
// What's left on the stack after this is a pointer.
if (!this->visit(Base))
return false;
@@ -300,10 +438,7 @@ bool ByteCodeExprGen<Emitter>::VisitArraySubscriptExpr(
if (!this->visit(Index))
return false;
- if (!this->emitAddOffset(IndexT, E))
- return false;
-
- if (!this->emitNarrowPtr(E))
+ if (!this->emitArrayElemPtrPop(IndexT, E))
return false;
if (DiscardResult)
@@ -453,110 +588,394 @@ bool ByteCodeExprGen<Emitter>::VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitAbstractConditionalOperator(
const AbstractConditionalOperator *E) {
- const Expr *Condition = E->getCond();
- const Expr *TrueExpr = E->getTrueExpr();
- const Expr *FalseExpr = E->getFalseExpr();
+ return this->visitConditional(
+ E, [this](const Expr *E) { return this->visit(E); });
+}
- LabelTy LabelEnd = this->getLabel(); // Label after the operator.
- LabelTy LabelFalse = this->getLabel(); // Label for the false expr.
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitStringLiteral(const StringLiteral *E) {
+ unsigned StringIndex = P.createGlobalString(E);
+ return this->emitGetPtrGlobal(StringIndex, E);
+}
- if (!this->visit(Condition))
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCharacterLiteral(
+ const CharacterLiteral *E) {
+ return this->emitConst(E->getValue(), E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitFloatCompoundAssignOperator(
+ const CompoundAssignOperator *E) {
+ assert(E->getType()->isFloatingType());
+
+ const Expr *LHS = E->getLHS();
+ const Expr *RHS = E->getRHS();
+ llvm::RoundingMode RM = getRoundingMode(E);
+ QualType LHSComputationType = E->getComputationLHSType();
+ QualType ResultType = E->getComputationResultType();
+ std::optional<PrimType> LT = classify(LHSComputationType);
+ std::optional<PrimType> RT = classify(ResultType);
+
+ if (!LT || !RT)
return false;
- if (!this->jumpFalse(LabelFalse))
+
+ // C++17 onwards require that we evaluate the RHS first.
+ // Compute RHS and save it in a temporary variable so we can
+ // load it again later.
+ if (!visit(RHS))
return false;
- if (!this->visit(TrueExpr))
+ unsigned TempOffset = this->allocateLocalPrimitive(E, *RT, /*IsConst=*/true);
+ if (!this->emitSetLocal(*RT, TempOffset, E))
return false;
- if (!this->jump(LabelEnd))
+
+ // First, visit LHS.
+ if (!visit(LHS))
+ return false;
+ if (!this->emitLoad(*LT, E))
return false;
- this->emitLabel(LabelFalse);
+ // If necessary, convert LHS to its computation type.
+ if (LHS->getType() != LHSComputationType) {
+ const auto *TargetSemantics = &Ctx.getFloatSemantics(LHSComputationType);
- if (!this->visit(FalseExpr))
+ if (!this->emitCastFP(TargetSemantics, RM, E))
+ return false;
+ }
+
+ // Now load RHS.
+ if (!this->emitGetLocal(*RT, TempOffset, E))
return false;
- this->fallthrough(LabelEnd);
- this->emitLabel(LabelEnd);
+ switch (E->getOpcode()) {
+ case BO_AddAssign:
+ if (!this->emitAddf(RM, E))
+ return false;
+ break;
+ case BO_SubAssign:
+ if (!this->emitSubf(RM, E))
+ return false;
+ break;
+ case BO_MulAssign:
+ if (!this->emitMulf(RM, E))
+ return false;
+ break;
+ case BO_DivAssign:
+ if (!this->emitDivf(RM, E))
+ return false;
+ break;
+ default:
+ return false;
+ }
- return true;
-}
+ // If necessary, convert result to LHS's type.
+ if (LHS->getType() != ResultType) {
+ const auto *TargetSemantics = &Ctx.getFloatSemantics(LHS->getType());
-template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitStringLiteral(const StringLiteral *E) {
- unsigned StringIndex = P.createGlobalString(E);
- return this->emitGetPtrGlobal(StringIndex, E);
+ if (!this->emitCastFP(TargetSemantics, RM, E))
+ return false;
+ }
+
+ if (DiscardResult)
+ return this->emitStorePop(*LT, E);
+ return this->emitStore(*LT, E);
}
template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitCharacterLiteral(
- const CharacterLiteral *E) {
- return this->emitConst(E->getValue(), E);
+bool ByteCodeExprGen<Emitter>::VisitPointerCompoundAssignOperator(
+ const CompoundAssignOperator *E) {
+ BinaryOperatorKind Op = E->getOpcode();
+ const Expr *LHS = E->getLHS();
+ const Expr *RHS = E->getRHS();
+ std::optional<PrimType> LT = classify(LHS->getType());
+ std::optional<PrimType> RT = classify(RHS->getType());
+
+ if (Op != BO_AddAssign && Op != BO_SubAssign)
+ return false;
+
+ if (!LT || !RT)
+ return false;
+ assert(*LT == PT_Ptr);
+
+ if (!visit(LHS))
+ return false;
+
+ if (!this->emitLoadPtr(LHS))
+ return false;
+
+ if (!visit(RHS))
+ return false;
+
+ if (Op == BO_AddAssign)
+ this->emitAddOffset(*RT, E);
+ else
+ this->emitSubOffset(*RT, E);
+
+ if (DiscardResult)
+ return this->emitStorePopPtr(E);
+ return this->emitStorePtr(E);
}
template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitCompoundAssignOperator(
const CompoundAssignOperator *E) {
+
+ // Handle floating point operations separately here, since they
+ // require special care.
+ if (E->getType()->isFloatingType())
+ return VisitFloatCompoundAssignOperator(E);
+
+ if (E->getType()->isPointerType())
+ return VisitPointerCompoundAssignOperator(E);
+
const Expr *LHS = E->getLHS();
const Expr *RHS = E->getRHS();
- std::optional<PrimType> LT = classify(E->getLHS()->getType());
- std::optional<PrimType> RT = classify(E->getRHS()->getType());
+ std::optional<PrimType> LHSComputationT =
+ classify(E->getComputationLHSType());
+ std::optional<PrimType> LT = classify(LHS->getType());
+ std::optional<PrimType> RT = classify(E->getComputationResultType());
+ std::optional<PrimType> ResultT = classify(E->getType());
- if (!LT || !RT)
+ if (!LT || !RT || !ResultT || !LHSComputationT)
return false;
- assert(!E->getType()->isPointerType() &&
- "Support pointer arithmethic in compound assignment operators");
+ assert(!E->getType()->isPointerType() && "Handled above");
+ assert(!E->getType()->isFloatingType() && "Handled above");
- // Get LHS pointer, load its value and get RHS value.
+ // C++17 onwards require that we evaluate the RHS first.
+ // Compute RHS and save it in a temporary variable so we can
+ // load it again later.
+ // FIXME: Compound assignments are unsequenced in C, so we might
+ // have to figure out how to reject them.
+ if (!visit(RHS))
+ return false;
+
+ unsigned TempOffset = this->allocateLocalPrimitive(E, *RT, /*IsConst=*/true);
+
+ if (!this->emitSetLocal(*RT, TempOffset, E))
+ return false;
+
+ // Get LHS pointer, load its value and cast it to the
+ // computation type if necessary.
if (!visit(LHS))
return false;
if (!this->emitLoad(*LT, E))
return false;
- if (!visit(RHS))
+ if (*LT != *LHSComputationT) {
+ if (!this->emitCast(*LT, *LHSComputationT, E))
+ return false;
+ }
+
+ // Get the RHS value on the stack.
+ if (!this->emitGetLocal(*RT, TempOffset, E))
return false;
// Perform operation.
switch (E->getOpcode()) {
case BO_AddAssign:
- if (!this->emitAdd(*LT, E))
+ if (!this->emitAdd(*LHSComputationT, E))
return false;
break;
case BO_SubAssign:
- if (!this->emitSub(*LT, E))
+ if (!this->emitSub(*LHSComputationT, E))
return false;
break;
-
case BO_MulAssign:
+ if (!this->emitMul(*LHSComputationT, E))
+ return false;
+ break;
case BO_DivAssign:
+ if (!this->emitDiv(*LHSComputationT, E))
+ return false;
+ break;
case BO_RemAssign:
+ if (!this->emitRem(*LHSComputationT, E))
+ return false;
+ break;
case BO_ShlAssign:
- if (!this->emitShl(*LT, *RT, E))
+ if (!this->emitShl(*LHSComputationT, *RT, E))
return false;
break;
case BO_ShrAssign:
- if (!this->emitShr(*LT, *RT, E))
+ if (!this->emitShr(*LHSComputationT, *RT, E))
return false;
break;
case BO_AndAssign:
+ if (!this->emitBitAnd(*LHSComputationT, E))
+ return false;
+ break;
case BO_XorAssign:
+ if (!this->emitBitXor(*LHSComputationT, E))
+ return false;
+ break;
case BO_OrAssign:
+ if (!this->emitBitOr(*LHSComputationT, E))
+ return false;
+ break;
default:
llvm_unreachable("Unimplemented compound assign operator");
}
+ // And now cast from LHSComputationT to ResultT.
+ if (*ResultT != *LHSComputationT) {
+ if (!this->emitCast(*LHSComputationT, *ResultT, E))
+ return false;
+ }
+
// And store the result in LHS.
if (DiscardResult)
- return this->emitStorePop(*LT, E);
- return this->emitStore(*LT, E);
+ return this->emitStorePop(*ResultT, E);
+ return this->emitStore(*ResultT, E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitExprWithCleanups(
+ const ExprWithCleanups *E) {
+ const Expr *SubExpr = E->getSubExpr();
+
+ assert(E->getNumObjects() == 0 && "TODO: Implement cleanups");
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ return this->visit(SubExpr);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *E) {
+ const Expr *SubExpr = E->getSubExpr();
+ std::optional<PrimType> SubExprT = classify(SubExpr);
+
+ if (E->getStorageDuration() == SD_Static) {
+ if (std::optional<unsigned> GlobalIndex = P.createGlobal(E)) {
+ const LifetimeExtendedTemporaryDecl *TempDecl =
+ E->getLifetimeExtendedTemporaryDecl();
+
+ if (!this->visitInitializer(SubExpr))
+ return false;
+
+ if (!this->emitInitGlobalTemp(*SubExprT, *GlobalIndex, TempDecl, E))
+ return false;
+ return this->emitGetPtrGlobal(*GlobalIndex, E);
+ }
+
+ return false;
+ }
+
+ // For everyhing else, use local variables.
+ if (SubExprT) {
+ if (std::optional<unsigned> LocalIndex = allocateLocalPrimitive(
+ SubExpr, *SubExprT, /*IsConst=*/true, /*IsExtended=*/true)) {
+ if (!this->visitInitializer(SubExpr))
+ return false;
+ this->emitSetLocal(*SubExprT, *LocalIndex, E);
+ return this->emitGetPtrLocal(*LocalIndex, E);
+ }
+ } else {
+ if (std::optional<unsigned> LocalIndex =
+ allocateLocal(SubExpr, /*IsExtended=*/true)) {
+ if (!this->emitGetPtrLocal(*LocalIndex, E))
+ return false;
+ return this->visitInitializer(SubExpr);
+ }
+ }
+ return false;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCompoundLiteralExpr(
+ const CompoundLiteralExpr *E) {
+ std::optional<PrimType> T = classify(E->getType());
+ const Expr *Init = E->getInitializer();
+ if (E->isFileScope()) {
+ if (std::optional<unsigned> GlobalIndex = P.createGlobal(E)) {
+ if (classify(E->getType()))
+ return this->visit(Init);
+ if (!this->emitGetPtrGlobal(*GlobalIndex, E))
+ return false;
+ return this->visitInitializer(Init);
+ }
+ }
+
+ // Otherwise, use a local variable.
+ if (T) {
+ // For primitive types, we just visit the initializer.
+ return this->visit(Init);
+ } else {
+ if (std::optional<unsigned> LocalIndex = allocateLocal(Init)) {
+ if (!this->emitGetPtrLocal(*LocalIndex, E))
+ return false;
+ return this->visitInitializer(Init);
+ }
+ }
+
+ return false;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitTypeTraitExpr(const TypeTraitExpr *E) {
+ return this->emitConstBool(E->getValue(), E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitLambdaExpr(const LambdaExpr *E) {
+ // XXX We assume here that a pointer-to-initialize is on the stack.
+
+ const Record *R = P.getOrCreateRecord(E->getLambdaClass());
+
+ auto *CaptureInitIt = E->capture_init_begin();
+ // Initialize all fields (which represent lambda captures) of the
+ // record with their initializers.
+ for (const Record::Field &F : R->fields()) {
+ const Expr *Init = *CaptureInitIt;
+ ++CaptureInitIt;
+
+ if (std::optional<PrimType> T = classify(Init)) {
+ if (!this->visit(Init))
+ return false;
+
+ if (!this->emitSetField(*T, F.Offset, E))
+ return false;
+ } else {
+ if (!this->emitDupPtr(E))
+ return false;
+
+ if (!this->emitGetPtrField(F.Offset, E))
+ return false;
+
+ if (!this->visitInitializer(Init))
+ return false;
+
+ if (!this->emitPopPtr(E))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitPredefinedExpr(const PredefinedExpr *E) {
+ if (DiscardResult)
+ return true;
+
+ return this->visit(E->getFunctionName());
}
template <class Emitter> bool ByteCodeExprGen<Emitter>::discard(const Expr *E) {
+ if (E->containsErrors())
+ return false;
+
OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/true);
return this->Visit(E);
}
template <class Emitter>
bool ByteCodeExprGen<Emitter>::visit(const Expr *E) {
+ if (E->containsErrors())
+ return false;
+
OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false);
return this->Visit(E);
}
@@ -570,8 +989,57 @@ bool ByteCodeExprGen<Emitter>::visitBool(const Expr *E) {
}
}
+/// Visit a conditional operator, i.e. `A ? B : C`.
+/// \V determines what function to call for the B and C expressions.
template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitZeroInitializer(PrimType T, const Expr *E) {
+bool ByteCodeExprGen<Emitter>::visitConditional(
+ const AbstractConditionalOperator *E,
+ llvm::function_ref<bool(const Expr *)> V) {
+
+ const Expr *Condition = E->getCond();
+ const Expr *TrueExpr = E->getTrueExpr();
+ const Expr *FalseExpr = E->getFalseExpr();
+
+ LabelTy LabelEnd = this->getLabel(); // Label after the operator.
+ LabelTy LabelFalse = this->getLabel(); // Label for the false expr.
+
+ if (!this->visit(Condition))
+ return false;
+
+ // C special case: Convert to bool because our jump ops need that.
+ // TODO: We probably want this to be done in visitBool().
+ if (std::optional<PrimType> CondT = classify(Condition->getType());
+ CondT && CondT != PT_Bool) {
+ if (!this->emitCast(*CondT, PT_Bool, E))
+ return false;
+ }
+
+ if (!this->jumpFalse(LabelFalse))
+ return false;
+
+ if (!V(TrueExpr))
+ return false;
+ if (!this->jump(LabelEnd))
+ return false;
+
+ this->emitLabel(LabelFalse);
+
+ if (!V(FalseExpr))
+ return false;
+
+ this->fallthrough(LabelEnd);
+ this->emitLabel(LabelEnd);
+
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::visitZeroInitializer(QualType QT,
+ const Expr *E) {
+ // FIXME: We need the QualType to get the float semantics, but that means we
+ // classify it over and over again in array situations.
+ PrimType T = classifyPrim(QT);
+
switch (T) {
case PT_Bool:
return this->emitZeroBool(E);
@@ -593,6 +1061,11 @@ bool ByteCodeExprGen<Emitter>::visitZeroInitializer(PrimType T, const Expr *E) {
return this->emitZeroUint64(E);
case PT_Ptr:
return this->emitNullPtr(E);
+ case PT_FnPtr:
+ return this->emitNullFnPtr(E);
+ case PT_Float: {
+ return this->emitConstFloat(APFloat::getZero(Ctx.getFloatSemantics(QT)), E);
+ }
}
llvm_unreachable("unknown primitive type");
}
@@ -604,11 +1077,11 @@ bool ByteCodeExprGen<Emitter>::dereference(
if (std::optional<PrimType> T = classify(LV->getType())) {
if (!LV->refersToBitField()) {
// Only primitive, non bit-field types can be dereferenced directly.
- if (auto *DE = dyn_cast<DeclRefExpr>(LV)) {
+ if (const auto *DE = dyn_cast<DeclRefExpr>(LV)) {
if (!DE->getDecl()->getType()->isReferenceType()) {
- if (auto *PD = dyn_cast<ParmVarDecl>(DE->getDecl()))
+ if (const auto *PD = dyn_cast<ParmVarDecl>(DE->getDecl()))
return dereferenceParam(LV, *T, PD, AK, Direct, Indirect);
- if (auto *VD = dyn_cast<VarDecl>(DE->getDecl()))
+ if (const auto *VD = dyn_cast<VarDecl>(DE->getDecl()))
return dereferenceVar(LV, *T, VD, AK, Direct, Indirect);
}
}
@@ -757,6 +1230,8 @@ bool ByteCodeExprGen<Emitter>::emitConst(T Value, const Expr *E) {
case PT_Bool:
return this->emitConstBool(Value, E);
case PT_Ptr:
+ case PT_FnPtr:
+ case PT_Float:
llvm_unreachable("Invalid integral type");
break;
}
@@ -779,7 +1254,7 @@ unsigned ByteCodeExprGen<Emitter>::allocateLocalPrimitive(DeclTy &&Src,
if (const auto *VD =
dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
assert(!P.getGlobal(VD));
- assert(Locals.find(VD) == Locals.end());
+ assert(!Locals.contains(VD));
}
// FIXME: There are cases where Src.is<Expr*>() is wrong, e.g.
@@ -788,7 +1263,7 @@ unsigned ByteCodeExprGen<Emitter>::allocateLocalPrimitive(DeclTy &&Src,
Descriptor *D = P.createDescriptor(Src, Ty, Descriptor::InlineDescMD, IsConst,
Src.is<const Expr *>());
Scope::Local Local = this->createLocal(D);
- if (auto *VD = dyn_cast_or_null<ValueDecl>(Src.dyn_cast<const Decl *>()))
+ if (auto *VD = dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>()))
Locals.insert({VD, Local});
VarScope->add(Local, IsExtended);
return Local.Offset;
@@ -801,7 +1276,7 @@ ByteCodeExprGen<Emitter>::allocateLocal(DeclTy &&Src, bool IsExtended) {
if (const auto *VD =
dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
assert(!P.getGlobal(VD));
- assert(Locals.find(VD) == Locals.end());
+ assert(!Locals.contains(VD));
}
QualType Ty;
@@ -845,29 +1320,22 @@ bool ByteCodeExprGen<Emitter>::visitArrayInitializer(const Expr *Initializer) {
for (const Expr *Init : InitList->inits()) {
if (std::optional<PrimType> T = classify(Init->getType())) {
// Visit the primitive element like normal.
- if (!this->emitDupPtr(Init))
- return false;
if (!this->visit(Init))
return false;
if (!this->emitInitElem(*T, ElementIndex, Init))
return false;
} else {
// Advance the pointer currently on the stack to the given
- // dimension and narrow().
- if (!this->emitDupPtr(Init))
- return false;
+ // dimension.
if (!this->emitConstUint32(ElementIndex, Init))
return false;
- if (!this->emitAddOffsetUint32(Init))
+ if (!this->emitArrayElemPtrUint32(Init))
return false;
- if (!this->emitNarrowPtr(Init))
- return false;
-
if (!visitInitializer(Init))
return false;
- }
if (!this->emitPopPtr(Init))
return false;
+ }
++ElementIndex;
}
@@ -888,31 +1356,22 @@ bool ByteCodeExprGen<Emitter>::visitArrayInitializer(const Expr *Initializer) {
for (size_t I = 0; I != Size; ++I) {
ArrayIndexScope<Emitter> IndexScope(this, I);
- if (!this->emitDupPtr(SubExpr)) // LHS
- return false;
-
if (ElemT) {
if (!this->visit(SubExpr))
return false;
if (!this->emitInitElem(*ElemT, I, Initializer))
return false;
} else {
- // Narrow to our array element and recurse into visitInitializer()
+ // Get to our array element and recurse into visitInitializer()
if (!this->emitConstUint64(I, SubExpr))
return false;
-
- if (!this->emitAddOffsetUint64(SubExpr))
- return false;
-
- if (!this->emitNarrowPtr(SubExpr))
+ if (!this->emitArrayElemPtrUint64(SubExpr))
return false;
-
if (!visitInitializer(SubExpr))
return false;
+ if (!this->emitPopPtr(Initializer))
+ return false;
}
-
- if (!this->emitPopPtr(Initializer))
- return false;
}
return true;
} else if (const auto *IVIE = dyn_cast<ImplicitValueInitExpr>(Initializer)) {
@@ -926,7 +1385,7 @@ bool ByteCodeExprGen<Emitter>::visitArrayInitializer(const Expr *Initializer) {
// since we memset our Block*s to 0 and so we have the desired value
// without this.
for (size_t I = 0; I != NumElems; ++I) {
- if (!this->emitZero(*ElemT, Initializer))
+ if (!this->visitZeroInitializer(CAT->getElementType(), Initializer))
return false;
if (!this->emitInitElem(*ElemT, I, Initializer))
return false;
@@ -948,13 +1407,9 @@ bool ByteCodeExprGen<Emitter>::visitArrayInitializer(const Expr *Initializer) {
// FIXME(perf): We're calling the constructor once per array element here,
// in the old intepreter we had a special-case for trivial constructors.
for (size_t I = 0; I != NumElems; ++I) {
- if (!this->emitDupPtr(Initializer))
- return false;
if (!this->emitConstUint64(I, Initializer))
return false;
- if (!this->emitAddOffsetUint64(Initializer))
- return false;
- if (!this->emitNarrowPtr(Initializer))
+ if (!this->emitArrayElemPtrUint64(Initializer))
return false;
// Constructor arguments.
@@ -967,6 +1422,38 @@ bool ByteCodeExprGen<Emitter>::visitArrayInitializer(const Expr *Initializer) {
return false;
}
return true;
+ } else if (const auto *SL = dyn_cast<StringLiteral>(Initializer)) {
+ const ConstantArrayType *CAT =
+ Ctx.getASTContext().getAsConstantArrayType(SL->getType());
+ assert(CAT && "a string literal that's not a constant array?");
+
+ // If the initializer string is too long, a diagnostic has already been
+ // emitted. Read only the array length from the string literal.
+ unsigned N =
+ std::min(unsigned(CAT->getSize().getZExtValue()), SL->getLength());
+ size_t CharWidth = SL->getCharByteWidth();
+
+ for (unsigned I = 0; I != N; ++I) {
+ uint32_t CodeUnit = SL->getCodeUnit(I);
+
+ if (CharWidth == 1) {
+ this->emitConstSint8(CodeUnit, SL);
+ this->emitInitElemSint8(I, SL);
+ } else if (CharWidth == 2) {
+ this->emitConstUint16(CodeUnit, SL);
+ this->emitInitElemUint16(I, SL);
+ } else if (CharWidth == 4) {
+ this->emitConstUint32(CodeUnit, SL);
+ this->emitInitElemUint32(I, SL);
+ } else {
+ llvm_unreachable("unsupported character width");
+ }
+ }
+ return true;
+ } else if (const auto *CLE = dyn_cast<CompoundLiteralExpr>(Initializer)) {
+ return visitInitializer(CLE->getInitializer());
+ } else if (const auto *EWC = dyn_cast<ExprWithCleanups>(Initializer)) {
+ return visitInitializer(EWC->getSubExpr());
}
assert(false && "Unknown expression for array initialization");
@@ -981,7 +1468,7 @@ bool ByteCodeExprGen<Emitter>::visitRecordInitializer(const Expr *Initializer) {
if (const auto CtorExpr = dyn_cast<CXXConstructExpr>(Initializer)) {
const Function *Func = getFunction(CtorExpr->getConstructor());
- if (!Func || !Func->isConstexpr())
+ if (!Func)
return false;
// The This pointer is already on the stack because this is an initializer,
@@ -1001,12 +1488,12 @@ bool ByteCodeExprGen<Emitter>::visitRecordInitializer(const Expr *Initializer) {
unsigned InitIndex = 0;
for (const Expr *Init : InitList->inits()) {
- const Record::Field *FieldToInit = R->getField(InitIndex);
if (!this->emitDupPtr(Initializer))
return false;
if (std::optional<PrimType> T = classify(Init)) {
+ const Record::Field *FieldToInit = R->getField(InitIndex);
if (!this->visit(Init))
return false;
@@ -1015,19 +1502,35 @@ bool ByteCodeExprGen<Emitter>::visitRecordInitializer(const Expr *Initializer) {
if (!this->emitPopPtr(Initializer))
return false;
+ ++InitIndex;
} else {
- // Non-primitive case. Get a pointer to the field-to-initialize
- // on the stack and recurse into visitInitializer().
- if (!this->emitGetPtrField(FieldToInit->Offset, Init))
- return false;
+ // Initializer for a direct base class.
+ if (const Record::Base *B = R->getBase(Init->getType())) {
+ if (!this->emitGetPtrBasePop(B->Offset, Init))
+ return false;
- if (!this->visitInitializer(Init))
- return false;
+ if (!this->visitInitializer(Init))
+ return false;
- if (!this->emitPopPtr(Initializer))
- return false;
+ if (!this->emitPopPtr(Initializer))
+ return false;
+ // Base initializers don't increase InitIndex, since they don't count
+ // into the Record's fields.
+ } else {
+ const Record::Field *FieldToInit = R->getField(InitIndex);
+ // Non-primitive case. Get a pointer to the field-to-initialize
+ // on the stack and recurse into visitInitializer().
+ if (!this->emitGetPtrField(FieldToInit->Offset, Init))
+ return false;
+
+ if (!this->visitInitializer(Init))
+ return false;
+
+ if (!this->emitPopPtr(Initializer))
+ return false;
+ ++InitIndex;
+ }
}
- ++InitIndex;
}
return true;
@@ -1037,9 +1540,19 @@ bool ByteCodeExprGen<Emitter>::visitRecordInitializer(const Expr *Initializer) {
if (!this->emitDupPtr(Initializer))
return false;
- return this->VisitCallExpr(CE);
+ return this->visit(CE);
} else if (const auto *DIE = dyn_cast<CXXDefaultInitExpr>(Initializer)) {
return this->visitInitializer(DIE->getExpr());
+ } else if (const auto *CE = dyn_cast<CastExpr>(Initializer)) {
+ return this->visitInitializer(CE->getSubExpr());
+ } else if (const auto *CE = dyn_cast<CXXBindTemporaryExpr>(Initializer)) {
+ return this->visitInitializer(CE->getSubExpr());
+ } else if (const auto *ACO =
+ dyn_cast<AbstractConditionalOperator>(Initializer)) {
+ return this->visitConditional(
+ ACO, [this](const Expr *E) { return this->visitRecordInitializer(E); });
+ } else if (const auto *LE = dyn_cast<LambdaExpr>(Initializer)) {
+ return this->VisitLambdaExpr(LE);
}
return false;
@@ -1085,7 +1598,7 @@ const Function *ByteCodeExprGen<Emitter>::getFunction(const FunctionDecl *FD) {
assert(FD);
const Function *Func = P.getFunction(FD);
bool IsBeingCompiled = Func && !Func->isFullyCompiled();
- bool WasNotDefined = Func && !Func->hasBody();
+ bool WasNotDefined = Func && !Func->isConstexpr() && !Func->hasBody();
if (IsBeingCompiled)
return Func;
@@ -1119,14 +1632,14 @@ bool ByteCodeExprGen<Emitter>::visitExpr(const Expr *Exp) {
/// We need to evaluate the initializer and return its value.
template <class Emitter>
bool ByteCodeExprGen<Emitter>::visitDecl(const VarDecl *VD) {
- std::optional<PrimType> VarT = classify(VD->getType());
+ assert(!VD->isInvalidDecl() && "Trying to constant evaluate an invalid decl");
// Create and initialize the variable.
if (!this->visitVarDecl(VD))
return false;
// Get a pointer to the variable
- if (shouldBeGloballyIndexed(VD)) {
+ if (Context::shouldBeGloballyIndexed(VD)) {
auto GlobalIndex = P.getGlobal(VD);
assert(GlobalIndex); // visitVarDecl() didn't return false.
if (!this->emitGetPtrGlobal(*GlobalIndex, VD))
@@ -1139,7 +1652,7 @@ bool ByteCodeExprGen<Emitter>::visitDecl(const VarDecl *VD) {
}
// Return the value
- if (VarT) {
+ if (std::optional<PrimType> VarT = classify(VD->getType())) {
if (!this->emitLoadPop(*VarT, VD))
return false;
@@ -1151,11 +1664,19 @@ bool ByteCodeExprGen<Emitter>::visitDecl(const VarDecl *VD) {
template <class Emitter>
bool ByteCodeExprGen<Emitter>::visitVarDecl(const VarDecl *VD) {
+ // We don't know what to do with these, so just return false.
+ if (VD->getType().isNull())
+ return false;
+
const Expr *Init = VD->getInit();
std::optional<PrimType> VarT = classify(VD->getType());
- if (shouldBeGloballyIndexed(VD)) {
- std::optional<unsigned> GlobalIndex = P.getOrCreateGlobal(VD, Init);
+ if (Context::shouldBeGloballyIndexed(VD)) {
+ // We've already seen and initialized this global.
+ if (P.getGlobal(VD))
+ return true;
+
+ std::optional<unsigned> GlobalIndex = P.createGlobal(VD, Init);
if (!GlobalIndex)
return this->bail(VD);
@@ -1197,11 +1718,55 @@ bool ByteCodeExprGen<Emitter>::visitVarDecl(const VarDecl *VD) {
}
template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitBuiltinCallExpr(const CallExpr *E) {
+ const Function *Func = getFunction(E->getDirectCallee());
+ if (!Func)
+ return false;
+
+ // Put arguments on the stack.
+ for (const auto *Arg : E->arguments()) {
+ if (!this->visit(Arg))
+ return false;
+ }
+
+ if (!this->emitCallBI(Func, E))
+ return false;
+
+ QualType ReturnType = E->getCallReturnType(Ctx.getASTContext());
+ if (DiscardResult && !ReturnType->isVoidType()) {
+ PrimType T = classifyPrim(ReturnType);
+ return this->emitPop(T, E);
+ }
+
+ return true;
+}
+
+template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitCallExpr(const CallExpr *E) {
- assert(!E->getBuiltinCallee() && "Builtin functions aren't supported yet");
+ if (E->getBuiltinCallee())
+ return VisitBuiltinCallExpr(E);
+
+ QualType ReturnType = E->getCallReturnType(Ctx.getASTContext());
+ std::optional<PrimType> T = classify(ReturnType);
+ bool HasRVO = !ReturnType->isVoidType() && !T;
+
+ if (HasRVO && DiscardResult) {
+ // If we need to discard the return value but the function returns its
+ // value via an RVO pointer, we need to create one such pointer just
+ // for this call.
+ if (std::optional<unsigned> LocalIndex = allocateLocal(E)) {
+ if (!this->emitGetPtrLocal(*LocalIndex, E))
+ return false;
+ }
+ }
- const Decl *Callee = E->getCalleeDecl();
- if (const auto *FuncDecl = dyn_cast_or_null<FunctionDecl>(Callee)) {
+ // Put arguments on the stack.
+ for (const auto *Arg : E->arguments()) {
+ if (!this->visit(Arg))
+ return false;
+ }
+
+ if (const FunctionDecl *FuncDecl = E->getDirectCallee()) {
const Function *Func = getFunction(FuncDecl);
if (!Func)
return false;
@@ -1213,40 +1778,42 @@ bool ByteCodeExprGen<Emitter>::VisitCallExpr(const CallExpr *E) {
if (Func->isFullyCompiled() && !Func->isConstexpr())
return false;
- QualType ReturnType = E->getCallReturnType(Ctx.getASTContext());
- std::optional<PrimType> T = classify(ReturnType);
+ assert(HasRVO == Func->hasRVO());
- if (Func->hasRVO() && DiscardResult) {
- // If we need to discard the return value but the function returns its
- // value via an RVO pointer, we need to create one such pointer just
- // for this call.
- if (std::optional<unsigned> LocalIndex = allocateLocal(E)) {
- if (!this->emitGetPtrLocal(*LocalIndex, E))
- return false;
- }
- }
+ bool HasQualifier = false;
+ if (const auto *ME = dyn_cast<MemberExpr>(E->getCallee()))
+ HasQualifier = ME->hasQualifier();
- // Put arguments on the stack.
- for (const auto *Arg : E->arguments()) {
- if (!this->visit(Arg))
+ bool IsVirtual = false;
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl))
+ IsVirtual = MD->isVirtual();
+
+ // In any case call the function. The return value will end up on the stack
+ // and if the function has RVO, we already have the pointer on the stack to
+ // write the result into.
+ if (IsVirtual && !HasQualifier) {
+ if (!this->emitCallVirt(Func, E))
+ return false;
+ } else {
+ if (!this->emitCall(Func, E))
return false;
}
-
- // In any case call the function. The return value will end up on the stack and
- // if the function has RVO, we already have the pointer on the stack to write
- // the result into.
- if (!this->emitCall(Func, E))
+ } else {
+ // Indirect call. Visit the callee, which will leave a FunctionPointer on
+ // the stack. Cleanup of the returned value if necessary will be done after
+ // the function call completed.
+ if (!this->visit(E->getCallee()))
return false;
- if (DiscardResult && !ReturnType->isVoidType() && T)
- return this->emitPop(*T, E);
-
- return true;
- } else {
- assert(false && "We don't support non-FunctionDecl callees right now.");
+ if (!this->emitCallPtr(E))
+ return false;
}
- return false;
+ // Cleanup for discarded return values.
+ if (DiscardResult && !ReturnType->isVoidType() && T)
+ return this->emitPop(*T, E);
+
+ return true;
}
template <class Emitter>
@@ -1262,6 +1829,7 @@ bool ByteCodeExprGen<Emitter>::VisitCXXMemberCallExpr(
template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitCXXDefaultInitExpr(
const CXXDefaultInitExpr *E) {
+ assert(classify(E->getType()));
return this->visit(E->getExpr());
}
@@ -1301,28 +1869,68 @@ bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
const Expr *SubExpr = E->getSubExpr();
std::optional<PrimType> T = classify(SubExpr->getType());
- // TODO: Support pointers for inc/dec operators.
switch (E->getOpcode()) {
case UO_PostInc: { // x++
if (!this->visit(SubExpr))
return false;
+ if (T == PT_Ptr) {
+ if (!this->emitIncPtr(E))
+ return false;
+
+ return DiscardResult ? this->emitPopPtr(E) : true;
+ }
+
+ if (T == PT_Float) {
+ return DiscardResult ? this->emitIncfPop(getRoundingMode(E), E)
+ : this->emitIncf(getRoundingMode(E), E);
+ }
+
return DiscardResult ? this->emitIncPop(*T, E) : this->emitInc(*T, E);
}
case UO_PostDec: { // x--
if (!this->visit(SubExpr))
return false;
+ if (T == PT_Ptr) {
+ if (!this->emitDecPtr(E))
+ return false;
+
+ return DiscardResult ? this->emitPopPtr(E) : true;
+ }
+
+ if (T == PT_Float) {
+ return DiscardResult ? this->emitDecfPop(getRoundingMode(E), E)
+ : this->emitDecf(getRoundingMode(E), E);
+ }
+
return DiscardResult ? this->emitDecPop(*T, E) : this->emitDec(*T, E);
}
case UO_PreInc: { // ++x
if (!this->visit(SubExpr))
return false;
+ if (T == PT_Ptr) {
+ this->emitLoadPtr(E);
+ this->emitConstUint8(1, E);
+ this->emitAddOffsetUint8(E);
+ return DiscardResult ? this->emitStorePopPtr(E) : this->emitStorePtr(E);
+ }
+
// Post-inc and pre-inc are the same if the value is to be discarded.
- if (DiscardResult)
+ if (DiscardResult) {
+ if (T == PT_Float)
+ return this->emitIncfPop(getRoundingMode(E), E);
return this->emitIncPop(*T, E);
+ }
+ if (T == PT_Float) {
+ const auto &TargetSemantics = Ctx.getFloatSemantics(E->getType());
+ this->emitLoadFloat(E);
+ this->emitConstFloat(llvm::APFloat(TargetSemantics, 1), E);
+ this->emitAddf(getRoundingMode(E), E);
+ return this->emitStoreFloat(E);
+ }
this->emitLoad(*T, E);
this->emitConst(1, E);
this->emitAdd(*T, E);
@@ -1332,10 +1940,27 @@ bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
if (!this->visit(SubExpr))
return false;
+ if (T == PT_Ptr) {
+ this->emitLoadPtr(E);
+ this->emitConstUint8(1, E);
+ this->emitSubOffsetUint8(E);
+ return DiscardResult ? this->emitStorePopPtr(E) : this->emitStorePtr(E);
+ }
+
// Post-dec and pre-dec are the same if the value is to be discarded.
- if (DiscardResult)
+ if (DiscardResult) {
+ if (T == PT_Float)
+ return this->emitDecfPop(getRoundingMode(E), E);
return this->emitDecPop(*T, E);
+ }
+ if (T == PT_Float) {
+ const auto &TargetSemantics = Ctx.getFloatSemantics(E->getType());
+ this->emitLoadFloat(E);
+ this->emitConstFloat(llvm::APFloat(TargetSemantics, 1), E);
+ this->emitSubf(getRoundingMode(E), E);
+ return this->emitStoreFloat(E);
+ }
this->emitLoad(*T, E);
this->emitConst(1, E);
this->emitSub(*T, E);
@@ -1385,32 +2010,54 @@ bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
- const auto *Decl = E->getDecl();
+ if (DiscardResult)
+ return true;
+
+ const auto *D = E->getDecl();
+
+ if (const auto *ECD = dyn_cast<EnumConstantDecl>(D)) {
+ return this->emitConst(ECD->getInitVal(), E);
+ } else if (const auto *BD = dyn_cast<BindingDecl>(D)) {
+ return this->visit(BD->getBinding());
+ } else if (const auto *FuncDecl = dyn_cast<FunctionDecl>(D)) {
+ const Function *F = getFunction(FuncDecl);
+ return F && this->emitGetFnPtr(F, E);
+ }
+
// References are implemented via pointers, so when we see a DeclRefExpr
// pointing to a reference, we need to get its value directly (i.e. the
// pointer to the actual value) instead of a pointer to the pointer to the
// value.
- bool IsReference = Decl->getType()->isReferenceType();
+ bool IsReference = D->getType()->isReferenceType();
- if (auto It = Locals.find(Decl); It != Locals.end()) {
+ // Check for local/global variables and parameters.
+ if (auto It = Locals.find(D); It != Locals.end()) {
const unsigned Offset = It->second.Offset;
if (IsReference)
return this->emitGetLocal(PT_Ptr, Offset, E);
return this->emitGetPtrLocal(Offset, E);
- } else if (auto GlobalIndex = P.getGlobal(Decl)) {
+ } else if (auto GlobalIndex = P.getGlobal(D)) {
if (IsReference)
- return this->emitGetGlobal(PT_Ptr, *GlobalIndex, E);
+ return this->emitGetGlobalPtr(*GlobalIndex, E);
return this->emitGetPtrGlobal(*GlobalIndex, E);
- } else if (const auto *PVD = dyn_cast<ParmVarDecl>(Decl)) {
+ } else if (const auto *PVD = dyn_cast<ParmVarDecl>(D)) {
if (auto It = this->Params.find(PVD); It != this->Params.end()) {
if (IsReference)
- return this->emitGetParam(PT_Ptr, It->second, E);
+ return this->emitGetParamPtr(It->second, E);
return this->emitGetPtrParam(It->second, E);
}
- } else if (const auto *ECD = dyn_cast<EnumConstantDecl>(Decl)) {
- return this->emitConst(ECD->getInitVal(), E);
+ }
+
+ // Handle lambda captures.
+ if (auto It = this->LambdaCaptures.find(D);
+ It != this->LambdaCaptures.end()) {
+ auto [Offset, IsReference] = It->second;
+
+ if (IsReference)
+ return this->emitGetThisFieldPtr(Offset, E);
+ return this->emitGetPtrThisField(Offset, E);
}
return false;
@@ -1422,6 +2069,112 @@ void ByteCodeExprGen<Emitter>::emitCleanup() {
C->emitDestruction();
}
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::emitDerivedToBaseCasts(
+ const RecordType *DerivedType, const RecordType *BaseType, const Expr *E) {
+ // Pointer of derived type is already on the stack.
+ const auto *FinalDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ const RecordDecl *CurDecl = DerivedType->getDecl();
+ const Record *CurRecord = getRecord(CurDecl);
+ assert(CurDecl && FinalDecl);
+ for (;;) {
+ assert(CurRecord->getNumBases() > 0);
+ // One level up
+ for (const Record::Base &B : CurRecord->bases()) {
+ const auto *BaseDecl = cast<CXXRecordDecl>(B.Decl);
+
+ if (BaseDecl == FinalDecl || BaseDecl->isDerivedFrom(FinalDecl)) {
+ // This decl will lead us to the final decl, so emit a base cast.
+ if (!this->emitGetPtrBasePop(B.Offset, E))
+ return false;
+
+ CurRecord = B.R;
+ CurDecl = BaseDecl;
+ break;
+ }
+ }
+ if (CurDecl == FinalDecl)
+ return true;
+ }
+
+ llvm_unreachable("Couldn't find the base class?");
+ return false;
+}
+
+/// When calling this, we have a pointer of the local-to-destroy
+/// on the stack.
+/// Emit destruction of record types (or arrays of record types).
+/// FIXME: Handle virtual destructors.
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::emitRecordDestruction(const Descriptor *Desc) {
+ assert(Desc);
+ assert(!Desc->isPrimitive());
+ assert(!Desc->isPrimitiveArray());
+
+ // Arrays.
+ if (Desc->isArray()) {
+ const Descriptor *ElemDesc = Desc->ElemDesc;
+ const Record *ElemRecord = ElemDesc->ElemRecord;
+ assert(ElemRecord); // This is not a primitive array.
+
+ if (const CXXDestructorDecl *Dtor = ElemRecord->getDestructor();
+ Dtor && !Dtor->isTrivial()) {
+ for (ssize_t I = Desc->getNumElems() - 1; I >= 0; --I) {
+ if (!this->emitConstUint64(I, SourceInfo{}))
+ return false;
+ if (!this->emitArrayElemPtrUint64(SourceInfo{}))
+ return false;
+ if (!this->emitRecordDestruction(Desc->ElemDesc))
+ return false;
+ }
+ }
+ return this->emitPopPtr(SourceInfo{});
+ }
+
+ const Record *R = Desc->ElemRecord;
+ assert(R);
+ // First, destroy all fields.
+ for (const Record::Field &Field : llvm::reverse(R->fields())) {
+ const Descriptor *D = Field.Desc;
+ if (!D->isPrimitive() && !D->isPrimitiveArray()) {
+ if (!this->emitDupPtr(SourceInfo{}))
+ return false;
+ if (!this->emitGetPtrField(Field.Offset, SourceInfo{}))
+ return false;
+ if (!this->emitRecordDestruction(D))
+ return false;
+ }
+ }
+
+ // FIXME: Unions need to be handled differently here. We don't want to
+ // call the destructor of its members.
+
+ // Now emit the destructor and recurse into base classes.
+ if (const CXXDestructorDecl *Dtor = R->getDestructor();
+ Dtor && !Dtor->isTrivial()) {
+ const Function *DtorFunc = getFunction(Dtor);
+ if (DtorFunc && DtorFunc->isConstexpr()) {
+ assert(DtorFunc->hasThisPointer());
+ assert(DtorFunc->getNumParams() == 1);
+ if (!this->emitDupPtr(SourceInfo{}))
+ return false;
+ if (!this->emitCall(DtorFunc, SourceInfo{}))
+ return false;
+ }
+ }
+
+ for (const Record::Base &Base : llvm::reverse(R->bases())) {
+ if (!this->emitGetPtrBase(Base.Offset, SourceInfo{}))
+ return false;
+ if (!this->emitRecordDestruction(Base.Desc))
+ return false;
+ }
+ // FIXME: Virtual bases.
+
+ // Remove the instance pointer.
+ return this->emitPopPtr(SourceInfo{});
+}
+
namespace clang {
namespace interp {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
index c7fcc59e5a60..57b0af9459e3 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
@@ -29,6 +29,7 @@ class QualType;
namespace interp {
template <class Emitter> class LocalScope;
+template <class Emitter> class DestructorScope;
template <class Emitter> class RecordScope;
template <class Emitter> class VariableScope;
template <class Emitter> class DeclScope;
@@ -58,11 +59,14 @@ public:
// Expression visitors - result returned on interp stack.
bool VisitCastExpr(const CastExpr *E);
bool VisitIntegerLiteral(const IntegerLiteral *E);
+ bool VisitFloatingLiteral(const FloatingLiteral *E);
bool VisitParenExpr(const ParenExpr *E);
bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitLogicalBinOp(const BinaryOperator *E);
bool VisitPointerArithBinOp(const BinaryOperator *E);
bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E);
bool VisitCallExpr(const CallExpr *E);
+ bool VisitBuiltinCallExpr(const CallExpr *E);
bool VisitCXXMemberCallExpr(const CXXMemberCallExpr *E);
bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E);
bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E);
@@ -83,6 +87,14 @@ public:
bool VisitStringLiteral(const StringLiteral *E);
bool VisitCharacterLiteral(const CharacterLiteral *E);
bool VisitCompoundAssignOperator(const CompoundAssignOperator *E);
+ bool VisitFloatCompoundAssignOperator(const CompoundAssignOperator *E);
+ bool VisitPointerCompoundAssignOperator(const CompoundAssignOperator *E);
+ bool VisitExprWithCleanups(const ExprWithCleanups *E);
+ bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
+ bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
+ bool VisitTypeTraitExpr(const TypeTraitExpr *E);
+ bool VisitLambdaExpr(const LambdaExpr *E);
+ bool VisitPredefinedExpr(const PredefinedExpr *E);
protected:
bool visitExpr(const Expr *E) override;
@@ -154,6 +166,10 @@ protected:
if (!visitInitializer(Init))
return false;
+ if ((Init->getType()->isArrayType() || Init->getType()->isRecordType()) &&
+ !this->emitCheckGlobalCtor(Init))
+ return false;
+
return this->emitPopPtr(Init);
}
@@ -168,8 +184,11 @@ protected:
return this->emitPopPtr(I);
}
+ bool visitConditional(const AbstractConditionalOperator *E,
+ llvm::function_ref<bool(const Expr *)> V);
+
/// Creates a local primitive value.
- unsigned allocateLocalPrimitive(DeclTy &&Decl, PrimType Ty, bool IsMutable,
+ unsigned allocateLocalPrimitive(DeclTy &&Decl, PrimType Ty, bool IsConst,
bool IsExtended = false);
/// Allocates a space storing a local given its type.
@@ -178,13 +197,14 @@ protected:
private:
friend class VariableScope<Emitter>;
friend class LocalScope<Emitter>;
+ friend class DestructorScope<Emitter>;
friend class RecordScope<Emitter>;
friend class DeclScope<Emitter>;
friend class OptionScope<Emitter>;
friend class ArrayIndexScope<Emitter>;
/// Emits a zero initializer.
- bool visitZeroInitializer(PrimType T, const Expr *E);
+ bool visitZeroInitializer(QualType QT, const Expr *E);
enum class DerefKind {
/// Value is read and pushed to stack.
@@ -210,9 +230,9 @@ private:
llvm::function_ref<bool(PrimType)> Indirect);
/// Emits an APSInt constant.
- bool emitConst(const APSInt &Value, const Expr *E);
- bool emitConst(const APInt &Value, const Expr *E) {
- return emitConst(static_cast<APSInt>(Value), E);
+ bool emitConst(const llvm::APSInt &Value, const Expr *E);
+ bool emitConst(const llvm::APInt &Value, const Expr *E) {
+ return emitConst(static_cast<llvm::APSInt>(Value), E);
}
/// Emits an integer constant.
@@ -227,12 +247,19 @@ private:
return T->getAsCXXRecordDecl();
}
- /// Returns whether we should create a global variable for the
- /// given VarDecl.
- bool shouldBeGloballyIndexed(const VarDecl *VD) const {
- return VD->hasGlobalStorage() || VD->isConstexpr();
+ llvm::RoundingMode getRoundingMode(const Expr *E) const {
+ FPOptions FPO = E->getFPFeaturesInEffect(Ctx.getLangOpts());
+
+ if (FPO.getRoundingMode() == llvm::RoundingMode::Dynamic)
+ return llvm::RoundingMode::NearestTiesToEven;
+
+ return FPO.getRoundingMode();
}
+ bool emitRecordDestruction(const Descriptor *Desc);
+ bool emitDerivedToBaseCasts(const RecordType *DerivedType,
+ const RecordType *BaseType, const Expr *E);
+
protected:
/// Variable to storage mapping.
llvm::DenseMap<const ValueDecl *, Scope::Local> Locals;
@@ -281,8 +308,8 @@ public:
}
virtual void emitDestruction() {}
-
- VariableScope *getParent() { return Parent; }
+ virtual void emitDestructors() {}
+ VariableScope *getParent() const { return Parent; }
protected:
/// ByteCodeExprGen instance.
@@ -291,15 +318,26 @@ protected:
VariableScope *Parent;
};
-/// Scope for local variables.
-///
-/// When the scope is destroyed, instructions are emitted to tear down
-/// all variables declared in this scope.
+/// Generic scope for local variables.
template <class Emitter> class LocalScope : public VariableScope<Emitter> {
public:
LocalScope(ByteCodeExprGen<Emitter> *Ctx) : VariableScope<Emitter>(Ctx) {}
- ~LocalScope() override { this->emitDestruction(); }
+ /// Emit a Destroy op for this scope.
+ ~LocalScope() override {
+ if (!Idx)
+ return;
+ this->Ctx->emitDestroy(*Idx, SourceInfo{});
+ }
+
+  /// Overridden to support explicit destruction.
+ void emitDestruction() override {
+ if (!Idx)
+ return;
+ this->emitDestructors();
+ this->Ctx->emitDestroy(*Idx, SourceInfo{});
+ this->Idx = std::nullopt;
+ }
void addLocal(const Scope::Local &Local) override {
if (!Idx) {
@@ -310,36 +348,69 @@ public:
this->Ctx->Descriptors[*Idx].emplace_back(Local);
}
- void emitDestruction() override {
+ void emitDestructors() override {
if (!Idx)
return;
- this->Ctx->emitDestroy(*Idx, SourceInfo{});
+ // Emit destructor calls for local variables of record
+ // type with a destructor.
+ for (Scope::Local &Local : this->Ctx->Descriptors[*Idx]) {
+ if (!Local.Desc->isPrimitive() && !Local.Desc->isPrimitiveArray()) {
+ this->Ctx->emitGetPtrLocal(Local.Offset, SourceInfo{});
+ this->Ctx->emitRecordDestruction(Local.Desc);
+ }
+ }
}
-protected:
/// Index of the scope in the chain.
std::optional<unsigned> Idx;
};
+/// Emits the destructors of the variables of \p OtherScope
+/// when this scope is destroyed. Does not create a Scope in the bytecode at
+/// all, this is just a RAII object to emit destructors.
+template <class Emitter> class DestructorScope final {
+public:
+ DestructorScope(LocalScope<Emitter> &OtherScope) : OtherScope(OtherScope) {}
+
+ ~DestructorScope() { OtherScope.emitDestructors(); }
+
+private:
+ LocalScope<Emitter> &OtherScope;
+};
+
+/// Like a regular LocalScope, except that the destructors of all local
+/// variables are automatically emitted when the AutoScope is destroyed.
+template <class Emitter> class AutoScope : public LocalScope<Emitter> {
+public:
+ AutoScope(ByteCodeExprGen<Emitter> *Ctx)
+ : LocalScope<Emitter>(Ctx), DS(*this) {}
+
+private:
+ DestructorScope<Emitter> DS;
+};
+
/// Scope for storage declared in a compound statement.
-template <class Emitter> class BlockScope final : public LocalScope<Emitter> {
+template <class Emitter> class BlockScope final : public AutoScope<Emitter> {
public:
- BlockScope(ByteCodeExprGen<Emitter> *Ctx) : LocalScope<Emitter>(Ctx) {}
+ BlockScope(ByteCodeExprGen<Emitter> *Ctx) : AutoScope<Emitter>(Ctx) {}
void addExtended(const Scope::Local &Local) override {
- llvm_unreachable("Cannot create temporaries in full scopes");
+    // If we get to this point, just add the variable as a normal local
+ // variable. It will be destroyed at the end of the block just
+ // like all others.
+ this->addLocal(Local);
}
};
/// Expression scope which tracks potentially lifetime extended
/// temporaries which are hoisted to the parent scope on exit.
-template <class Emitter> class ExprScope final : public LocalScope<Emitter> {
+template <class Emitter> class ExprScope final : public AutoScope<Emitter> {
public:
- ExprScope(ByteCodeExprGen<Emitter> *Ctx) : LocalScope<Emitter>(Ctx) {}
+ ExprScope(ByteCodeExprGen<Emitter> *Ctx) : AutoScope<Emitter>(Ctx) {}
void addExtended(const Scope::Local &Local) override {
- assert(this->Parent);
- this->Parent->addLocal(Local);
+ if (this->Parent)
+ this->Parent->addLocal(Local);
}
};
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp
index af97c57c98b7..0c512950c292 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp
@@ -95,28 +95,25 @@ bool ByteCodeStmtGen<Emitter>::visitFunc(const FunctionDecl *F) {
ReturnType = this->classify(F->getReturnType());
// Constructor. Set up field initializers.
- if (const auto Ctor = dyn_cast<CXXConstructorDecl>(F)) {
+ if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(F)) {
const RecordDecl *RD = Ctor->getParent();
const Record *R = this->getRecord(RD);
if (!R)
return false;
for (const auto *Init : Ctor->inits()) {
+ // Scope needed for the initializers.
+ BlockScope<Emitter> Scope(this);
+
const Expr *InitExpr = Init->getInit();
if (const FieldDecl *Member = Init->getMember()) {
const Record::Field *F = R->getField(Member);
if (std::optional<PrimType> T = this->classify(InitExpr)) {
- if (!this->emitThis(InitExpr))
- return false;
-
if (!this->visit(InitExpr))
return false;
- if (!this->emitInitField(*T, F->Offset, InitExpr))
- return false;
-
- if (!this->emitPopPtr(InitExpr))
+ if (!this->emitInitThisField(*T, F->Offset, InitExpr))
return false;
} else {
// Non-primitive case. Get a pointer to the field-to-initialize
@@ -136,7 +133,7 @@ bool ByteCodeStmtGen<Emitter>::visitFunc(const FunctionDecl *F) {
} else if (const Type *Base = Init->getBaseClass()) {
// Base class initializer.
// Get This Base and call initializer on it.
- auto *BaseDecl = Base->getAsCXXRecordDecl();
+ const auto *BaseDecl = Base->getAsCXXRecordDecl();
assert(BaseDecl);
const Record::Base *B = R->getBase(BaseDecl);
assert(B);
@@ -178,10 +175,18 @@ bool ByteCodeStmtGen<Emitter>::visitStmt(const Stmt *S) {
return visitDoStmt(cast<DoStmt>(S));
case Stmt::ForStmtClass:
return visitForStmt(cast<ForStmt>(S));
+ case Stmt::CXXForRangeStmtClass:
+ return visitCXXForRangeStmt(cast<CXXForRangeStmt>(S));
case Stmt::BreakStmtClass:
return visitBreakStmt(cast<BreakStmt>(S));
case Stmt::ContinueStmtClass:
return visitContinueStmt(cast<ContinueStmt>(S));
+ case Stmt::SwitchStmtClass:
+ return visitSwitchStmt(cast<SwitchStmt>(S));
+ case Stmt::CaseStmtClass:
+ return visitCaseStmt(cast<CaseStmt>(S));
+ case Stmt::DefaultStmtClass:
+ return visitDefaultStmt(cast<DefaultStmt>(S));
case Stmt::NullStmtClass:
return true;
default: {
@@ -192,6 +197,23 @@ bool ByteCodeStmtGen<Emitter>::visitStmt(const Stmt *S) {
}
}
+/// Visits the given statement without creating a variable
+/// scope for it in case it is a compound statement.
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitLoopBody(const Stmt *S) {
+ if (isa<NullStmt>(S))
+ return true;
+
+ if (const auto *CS = dyn_cast<CompoundStmt>(S)) {
+ for (auto *InnerStmt : CS->body())
+ if (!visitStmt(InnerStmt))
+ return false;
+ return true;
+ }
+
+ return this->visitStmt(S);
+}
+
template <class Emitter>
bool ByteCodeStmtGen<Emitter>::visitCompoundStmt(
const CompoundStmt *CompoundStmt) {
@@ -205,17 +227,14 @@ bool ByteCodeStmtGen<Emitter>::visitCompoundStmt(
template <class Emitter>
bool ByteCodeStmtGen<Emitter>::visitDeclStmt(const DeclStmt *DS) {
for (auto *D : DS->decls()) {
- // Variable declarator.
- if (auto *VD = dyn_cast<VarDecl>(D)) {
- if (!this->visitVarDecl(VD))
- return false;
+ if (isa<StaticAssertDecl, TagDecl, TypedefNameDecl>(D))
continue;
- }
- // Decomposition declarator.
- if (auto *DD = dyn_cast<DecompositionDecl>(D)) {
- return this->bail(DD);
- }
+ const auto *VD = dyn_cast<VarDecl>(D);
+ if (!VD)
+ return false;
+ if (!this->visitVarDecl(VD))
+ return false;
}
return true;
@@ -310,11 +329,15 @@ bool ByteCodeStmtGen<Emitter>::visitWhileStmt(const WhileStmt *S) {
if (!this->jumpFalse(EndLabel))
return false;
- if (!this->visitStmt(Body))
- return false;
+ LocalScope<Emitter> Scope(this);
+ {
+ DestructorScope<Emitter> DS(Scope);
+ if (!this->visitLoopBody(Body))
+ return false;
+ }
+
if (!this->jump(CondLabel))
return false;
-
this->emitLabel(EndLabel);
return true;
@@ -329,15 +352,21 @@ bool ByteCodeStmtGen<Emitter>::visitDoStmt(const DoStmt *S) {
LabelTy EndLabel = this->getLabel();
LabelTy CondLabel = this->getLabel();
LoopScope<Emitter> LS(this, EndLabel, CondLabel);
+ LocalScope<Emitter> Scope(this);
this->emitLabel(StartLabel);
- if (!this->visitStmt(Body))
- return false;
- this->emitLabel(CondLabel);
- if (!this->visitBool(Cond))
- return false;
+ {
+ DestructorScope<Emitter> DS(Scope);
+
+ if (!this->visitLoopBody(Body))
+ return false;
+ this->emitLabel(CondLabel);
+ if (!this->visitBool(Cond))
+ return false;
+ }
if (!this->jumpTrue(StartLabel))
return false;
+
this->emitLabel(EndLabel);
return true;
}
@@ -354,6 +383,7 @@ bool ByteCodeStmtGen<Emitter>::visitForStmt(const ForStmt *S) {
LabelTy CondLabel = this->getLabel();
LabelTy IncLabel = this->getLabel();
LoopScope<Emitter> LS(this, EndLabel, IncLabel);
+ LocalScope<Emitter> Scope(this);
if (Init && !this->visitStmt(Init))
return false;
@@ -364,13 +394,73 @@ bool ByteCodeStmtGen<Emitter>::visitForStmt(const ForStmt *S) {
if (!this->jumpFalse(EndLabel))
return false;
}
- if (Body && !this->visitStmt(Body))
+
+ {
+ DestructorScope<Emitter> DS(Scope);
+
+ if (Body && !this->visitLoopBody(Body))
+ return false;
+ this->emitLabel(IncLabel);
+ if (Inc && !this->discard(Inc))
+ return false;
+ }
+
+ if (!this->jump(CondLabel))
+ return false;
+ this->emitLabel(EndLabel);
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitCXXForRangeStmt(const CXXForRangeStmt *S) {
+ const Stmt *Init = S->getInit();
+ const Expr *Cond = S->getCond();
+ const Expr *Inc = S->getInc();
+ const Stmt *Body = S->getBody();
+ const Stmt *BeginStmt = S->getBeginStmt();
+ const Stmt *RangeStmt = S->getRangeStmt();
+ const Stmt *EndStmt = S->getEndStmt();
+ const VarDecl *LoopVar = S->getLoopVariable();
+
+ LabelTy EndLabel = this->getLabel();
+ LabelTy CondLabel = this->getLabel();
+ LabelTy IncLabel = this->getLabel();
+ LoopScope<Emitter> LS(this, EndLabel, IncLabel);
+
+ // Emit declarations needed in the loop.
+ if (Init && !this->visitStmt(Init))
+ return false;
+ if (!this->visitStmt(RangeStmt))
return false;
- this->emitLabel(IncLabel);
- if (Inc && !this->discard(Inc))
+ if (!this->visitStmt(BeginStmt))
return false;
+ if (!this->visitStmt(EndStmt))
+ return false;
+
+ // Now the condition as well as the loop variable assignment.
+ this->emitLabel(CondLabel);
+ if (!this->visitBool(Cond))
+ return false;
+ if (!this->jumpFalse(EndLabel))
+ return false;
+
+ if (!this->visitVarDecl(LoopVar))
+ return false;
+
+ // Body.
+ LocalScope<Emitter> Scope(this);
+ {
+ DestructorScope<Emitter> DS(Scope);
+
+ if (!this->visitLoopBody(Body))
+ return false;
+ this->emitLabel(IncLabel);
+ if (!this->discard(Inc))
+ return false;
+ }
if (!this->jump(CondLabel))
return false;
+
this->emitLabel(EndLabel);
return true;
}
@@ -380,6 +470,7 @@ bool ByteCodeStmtGen<Emitter>::visitBreakStmt(const BreakStmt *S) {
if (!BreakLabel)
return false;
+ this->VarScope->emitDestructors();
return this->jump(*BreakLabel);
}
@@ -388,9 +479,88 @@ bool ByteCodeStmtGen<Emitter>::visitContinueStmt(const ContinueStmt *S) {
if (!ContinueLabel)
return false;
+ this->VarScope->emitDestructors();
return this->jump(*ContinueLabel);
}
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitSwitchStmt(const SwitchStmt *S) {
+ const Expr *Cond = S->getCond();
+ PrimType CondT = this->classifyPrim(Cond->getType());
+
+ LabelTy EndLabel = this->getLabel();
+ OptLabelTy DefaultLabel = std::nullopt;
+ unsigned CondVar = this->allocateLocalPrimitive(Cond, CondT, true, false);
+
+ if (const auto *CondInit = S->getInit())
+ if (!visitStmt(CondInit))
+ return false;
+
+ // Initialize condition variable.
+ if (!this->visit(Cond))
+ return false;
+ if (!this->emitSetLocal(CondT, CondVar, S))
+ return false;
+
+ CaseMap CaseLabels;
+ // Create labels and comparison ops for all case statements.
+ for (const SwitchCase *SC = S->getSwitchCaseList(); SC;
+ SC = SC->getNextSwitchCase()) {
+ if (const auto *CS = dyn_cast<CaseStmt>(SC)) {
+ // FIXME: Implement ranges.
+ if (CS->caseStmtIsGNURange())
+ return false;
+ CaseLabels[SC] = this->getLabel();
+
+ const Expr *Value = CS->getLHS();
+ PrimType ValueT = this->classifyPrim(Value->getType());
+
+ // Compare the case statement's value to the switch condition.
+ if (!this->emitGetLocal(CondT, CondVar, CS))
+ return false;
+ if (!this->visit(Value))
+ return false;
+
+ // Compare and jump to the case label.
+ if (!this->emitEQ(ValueT, S))
+ return false;
+ if (!this->jumpTrue(CaseLabels[CS]))
+ return false;
+ } else {
+ assert(!DefaultLabel);
+ DefaultLabel = this->getLabel();
+ }
+ }
+
+ // If none of the conditions above were true, fall through to the default
+ // statement or jump after the switch statement.
+ if (DefaultLabel) {
+ if (!this->jump(*DefaultLabel))
+ return false;
+ } else {
+ if (!this->jump(EndLabel))
+ return false;
+ }
+
+ SwitchScope<Emitter> SS(this, std::move(CaseLabels), EndLabel, DefaultLabel);
+ if (!this->visitStmt(S->getBody()))
+ return false;
+ this->emitLabel(EndLabel);
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitCaseStmt(const CaseStmt *S) {
+ this->emitLabel(CaseLabels[S]);
+ return this->visitStmt(S->getSubStmt());
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitDefaultStmt(const DefaultStmt *S) {
+ this->emitLabel(*DefaultLabel);
+ return this->visitStmt(S->getSubStmt());
+}
+
namespace clang {
namespace interp {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h
index 829e199f827c..8d9277a11dd7 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h
@@ -54,14 +54,19 @@ private:
// Statement visitors.
bool visitStmt(const Stmt *S);
bool visitCompoundStmt(const CompoundStmt *S);
+ bool visitLoopBody(const Stmt *S);
bool visitDeclStmt(const DeclStmt *DS);
bool visitReturnStmt(const ReturnStmt *RS);
bool visitIfStmt(const IfStmt *IS);
bool visitWhileStmt(const WhileStmt *S);
bool visitDoStmt(const DoStmt *S);
bool visitForStmt(const ForStmt *S);
+ bool visitCXXForRangeStmt(const CXXForRangeStmt *S);
bool visitBreakStmt(const BreakStmt *S);
bool visitContinueStmt(const ContinueStmt *S);
+ bool visitSwitchStmt(const SwitchStmt *S);
+ bool visitCaseStmt(const CaseStmt *S);
+ bool visitDefaultStmt(const DefaultStmt *S);
/// Type of the expression returned by the function.
std::optional<PrimType> ReturnType;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp
index 16471242f328..eeb7fa9379f5 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp
@@ -42,6 +42,11 @@ bool Context::isPotentialConstantExpr(State &Parent, const FunctionDecl *FD) {
}
}
+ APValue DummyResult;
+ if (!Run(Parent, Func, DummyResult)) {
+ return false;
+ }
+
return Func->isConstexpr();
}
@@ -50,6 +55,11 @@ bool Context::evaluateAsRValue(State &Parent, const Expr *E, APValue &Result) {
ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk, Result);
if (Check(Parent, C.interpretExpr(E))) {
assert(Stk.empty());
+#ifndef NDEBUG
+ // Make sure we don't rely on some value being still alive in
+ // InterpStack memory.
+ Stk.clear();
+#endif
return true;
}
@@ -63,6 +73,11 @@ bool Context::evaluateAsInitializer(State &Parent, const VarDecl *VD,
ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk, Result);
if (Check(Parent, C.interpretDecl(VD))) {
assert(Stk.empty());
+#ifndef NDEBUG
+ // Make sure we don't rely on some value being still alive in
+ // InterpStack memory.
+ Stk.clear();
+#endif
return true;
}
@@ -73,9 +88,11 @@ bool Context::evaluateAsInitializer(State &Parent, const VarDecl *VD,
const LangOptions &Context::getLangOpts() const { return Ctx.getLangOpts(); }
std::optional<PrimType> Context::classify(QualType T) const {
- if (T->isReferenceType() || T->isPointerType()) {
+ if (T->isFunctionPointerType() || T->isFunctionReferenceType())
+ return PT_FnPtr;
+
+ if (T->isReferenceType() || T->isPointerType())
return PT_Ptr;
- }
if (T->isBooleanType())
return PT_Bool;
@@ -113,6 +130,9 @@ std::optional<PrimType> Context::classify(QualType T) const {
if (T->isNullPtrType())
return PT_Ptr;
+ if (T->isFloatingType())
+ return PT_Float;
+
if (auto *AT = dyn_cast<AtomicType>(T))
return classify(AT->getValueType());
@@ -123,7 +143,13 @@ unsigned Context::getCharBit() const {
return Ctx.getTargetInfo().getCharWidth();
}
-bool Context::Run(State &Parent, Function *Func, APValue &Result) {
+/// Simple wrapper around getFloatTypeSemantics() to make code a
+/// little shorter.
+const llvm::fltSemantics &Context::getFloatSemantics(QualType T) const {
+ return Ctx.getFloatTypeSemantics(T);
+}
+
+bool Context::Run(State &Parent, const Function *Func, APValue &Result) {
InterpState State(Parent, *P, Stk, *this);
State.Current = new InterpFrame(State, Func, /*Caller=*/nullptr, {});
if (Interpret(State, Result))
@@ -142,3 +168,38 @@ bool Context::Check(State &Parent, llvm::Expected<bool> &&Flag) {
});
return false;
}
+
+// TODO: Virtual bases?
+const CXXMethodDecl *
+Context::getOverridingFunction(const CXXRecordDecl *DynamicDecl,
+ const CXXRecordDecl *StaticDecl,
+ const CXXMethodDecl *InitialFunction) const {
+
+ const CXXRecordDecl *CurRecord = DynamicDecl;
+ const CXXMethodDecl *FoundFunction = InitialFunction;
+ for (;;) {
+ const CXXMethodDecl *Overrider =
+ FoundFunction->getCorrespondingMethodDeclaredInClass(CurRecord, false);
+ if (Overrider)
+ return Overrider;
+
+ // Common case of only one base class.
+ if (CurRecord->getNumBases() == 1) {
+ CurRecord = CurRecord->bases_begin()->getType()->getAsCXXRecordDecl();
+ continue;
+ }
+
+ // Otherwise, go to the base class that will lead to the StaticDecl.
+ for (const CXXBaseSpecifier &Spec : CurRecord->bases()) {
+ const CXXRecordDecl *Base = Spec.getType()->getAsCXXRecordDecl();
+ if (Base == StaticDecl || Base->isDerivedFrom(StaticDecl)) {
+ CurRecord = Base;
+ break;
+ }
+ }
+ }
+
+ llvm_unreachable(
+ "Couldn't find an overriding function in the class hierarchy?");
+ return nullptr;
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Context.h b/contrib/llvm-project/clang/lib/AST/Interp/Context.h
index e49422e64b87..19d480d91211 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Context.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Context.h
@@ -57,13 +57,28 @@ public:
InterpStack &getStack() { return Stk; }
/// Returns CHAR_BIT.
unsigned getCharBit() const;
+ /// Return the floating-point semantics for T.
+ const llvm::fltSemantics &getFloatSemantics(QualType T) const;
/// Classifies an expression.
std::optional<PrimType> classify(QualType T) const;
+ const CXXMethodDecl *
+ getOverridingFunction(const CXXRecordDecl *DynamicDecl,
+ const CXXRecordDecl *StaticDecl,
+ const CXXMethodDecl *InitialFunction) const;
+ /// Returns whether we should create a global variable for the
+ /// given ValueDecl.
+ static bool shouldBeGloballyIndexed(const ValueDecl *VD) {
+ if (const auto *V = dyn_cast<VarDecl>(VD))
+ return V->hasGlobalStorage() || V->isConstexpr();
+
+ return false;
+ }
+
private:
/// Runs a function.
- bool Run(State &Parent, Function *Func, APValue &Result);
+ bool Run(State &Parent, const Function *Func, APValue &Result);
/// Checks a result from the interpreter.
bool Check(State &Parent, llvm::Expected<bool> &&R);
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp
index 04bc8681dd6e..ccd2a993e9f7 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp
@@ -8,6 +8,8 @@
#include "Descriptor.h"
#include "Boolean.h"
+#include "Floating.h"
+#include "FunctionPointer.h"
#include "Pointer.h"
#include "PrimType.h"
#include "Record.h"
@@ -16,30 +18,32 @@ using namespace clang;
using namespace clang::interp;
template <typename T>
-static void ctorTy(Block *, char *Ptr, bool, bool, bool, Descriptor *) {
+static void ctorTy(Block *, char *Ptr, bool, bool, bool, const Descriptor *) {
new (Ptr) T();
}
-template <typename T> static void dtorTy(Block *, char *Ptr, Descriptor *) {
+template <typename T>
+static void dtorTy(Block *, char *Ptr, const Descriptor *) {
reinterpret_cast<T *>(Ptr)->~T();
}
template <typename T>
-static void moveTy(Block *, char *Src, char *Dst, Descriptor *) {
- auto *SrcPtr = reinterpret_cast<T *>(Src);
+static void moveTy(Block *, const char *Src, char *Dst, const Descriptor *) {
+ const auto *SrcPtr = reinterpret_cast<const T *>(Src);
auto *DstPtr = reinterpret_cast<T *>(Dst);
new (DstPtr) T(std::move(*SrcPtr));
}
template <typename T>
-static void ctorArrayTy(Block *, char *Ptr, bool, bool, bool, Descriptor *D) {
+static void ctorArrayTy(Block *, char *Ptr, bool, bool, bool,
+ const Descriptor *D) {
for (unsigned I = 0, NE = D->getNumElems(); I < NE; ++I) {
new (&reinterpret_cast<T *>(Ptr)[I]) T();
}
}
template <typename T>
-static void dtorArrayTy(Block *, char *Ptr, Descriptor *D) {
+static void dtorArrayTy(Block *, char *Ptr, const Descriptor *D) {
InitMap *IM = *reinterpret_cast<InitMap **>(Ptr);
if (IM != (InitMap *)-1)
free(IM);
@@ -51,16 +55,17 @@ static void dtorArrayTy(Block *, char *Ptr, Descriptor *D) {
}
template <typename T>
-static void moveArrayTy(Block *, char *Src, char *Dst, Descriptor *D) {
+static void moveArrayTy(Block *, const char *Src, char *Dst,
+ const Descriptor *D) {
for (unsigned I = 0, NE = D->getNumElems(); I < NE; ++I) {
- auto *SrcPtr = &reinterpret_cast<T *>(Src)[I];
+ const auto *SrcPtr = &reinterpret_cast<const T *>(Src)[I];
auto *DstPtr = &reinterpret_cast<T *>(Dst)[I];
new (DstPtr) T(std::move(*SrcPtr));
}
}
static void ctorArrayDesc(Block *B, char *Ptr, bool IsConst, bool IsMutable,
- bool IsActive, Descriptor *D) {
+ bool IsActive, const Descriptor *D) {
const unsigned NumElems = D->getNumElems();
const unsigned ElemSize =
D->ElemDesc->getAllocSize() + sizeof(InlineDescriptor);
@@ -85,7 +90,7 @@ static void ctorArrayDesc(Block *B, char *Ptr, bool IsConst, bool IsMutable,
}
}
-static void dtorArrayDesc(Block *B, char *Ptr, Descriptor *D) {
+static void dtorArrayDesc(Block *B, char *Ptr, const Descriptor *D) {
const unsigned NumElems = D->getNumElems();
const unsigned ElemSize =
D->ElemDesc->getAllocSize() + sizeof(InlineDescriptor);
@@ -100,18 +105,19 @@ static void dtorArrayDesc(Block *B, char *Ptr, Descriptor *D) {
}
}
-static void moveArrayDesc(Block *B, char *Src, char *Dst, Descriptor *D) {
+static void moveArrayDesc(Block *B, const char *Src, char *Dst,
+ const Descriptor *D) {
const unsigned NumElems = D->getNumElems();
const unsigned ElemSize =
D->ElemDesc->getAllocSize() + sizeof(InlineDescriptor);
unsigned ElemOffset = 0;
for (unsigned I = 0; I < NumElems; ++I, ElemOffset += ElemSize) {
- auto *SrcPtr = Src + ElemOffset;
+ const auto *SrcPtr = Src + ElemOffset;
auto *DstPtr = Dst + ElemOffset;
- auto *SrcDesc = reinterpret_cast<InlineDescriptor *>(SrcPtr);
- auto *SrcElemLoc = reinterpret_cast<char *>(SrcDesc + 1);
+ const auto *SrcDesc = reinterpret_cast<const InlineDescriptor *>(SrcPtr);
+ const auto *SrcElemLoc = reinterpret_cast<const char *>(SrcDesc + 1);
auto *DstDesc = reinterpret_cast<InlineDescriptor *>(DstPtr);
auto *DstElemLoc = reinterpret_cast<char *>(DstDesc + 1);
@@ -122,7 +128,7 @@ static void moveArrayDesc(Block *B, char *Src, char *Dst, Descriptor *D) {
}
static void ctorRecord(Block *B, char *Ptr, bool IsConst, bool IsMutable,
- bool IsActive, Descriptor *D) {
+ bool IsActive, const Descriptor *D) {
const bool IsUnion = D->ElemRecord->isUnion();
auto CtorSub = [=](unsigned SubOff, Descriptor *F, bool IsBase) {
auto *Desc = reinterpret_cast<InlineDescriptor *>(Ptr + SubOff) - 1;
@@ -145,7 +151,7 @@ static void ctorRecord(Block *B, char *Ptr, bool IsConst, bool IsMutable,
CtorSub(V.Offset, V.Desc, /*isBase=*/true);
}
-static void dtorRecord(Block *B, char *Ptr, Descriptor *D) {
+static void dtorRecord(Block *B, char *Ptr, const Descriptor *D) {
auto DtorSub = [=](unsigned SubOff, Descriptor *F) {
if (auto Fn = F->DtorFn)
Fn(B, Ptr + SubOff, F);
@@ -158,7 +164,8 @@ static void dtorRecord(Block *B, char *Ptr, Descriptor *D) {
DtorSub(F.Offset, F.Desc);
}
-static void moveRecord(Block *B, char *Src, char *Dst, Descriptor *D) {
+static void moveRecord(Block *B, const char *Src, char *Dst,
+ const Descriptor *D) {
for (const auto &F : D->ElemRecord->fields()) {
auto FieldOff = F.Offset;
auto FieldDesc = F.Desc;
@@ -170,10 +177,20 @@ static void moveRecord(Block *B, char *Src, char *Dst, Descriptor *D) {
}
static BlockCtorFn getCtorPrim(PrimType Type) {
+ // Floating types are special. They are primitives, but need their
+ // constructor called.
+ if (Type == PT_Float)
+ return ctorTy<PrimConv<PT_Float>::T>;
+
COMPOSITE_TYPE_SWITCH(Type, return ctorTy<T>, return nullptr);
}
static BlockDtorFn getDtorPrim(PrimType Type) {
+ // Floating types are special. They are primitives, but need their
+ // destructor called, since they might allocate memory.
+ if (Type == PT_Float)
+ return dtorTy<PrimConv<PT_Float>::T>;
+
COMPOSITE_TYPE_SWITCH(Type, return dtorTy<T>, return nullptr);
}
@@ -262,6 +279,8 @@ QualType Descriptor::getType() const {
return E->getType();
if (auto *D = asValueDecl())
return D->getType();
+ if (auto *T = dyn_cast<TypeDecl>(asDecl()))
+ return QualType(T->getTypeForDecl(), 0);
llvm_unreachable("Invalid descriptor type");
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h
index 6ef4fc2f4c9b..b2dbd892b55b 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h
@@ -30,22 +30,19 @@ using DeclTy = llvm::PointerUnion<const Decl *, const Expr *>;
/// all the fields which contain non-trivial types.
using BlockCtorFn = void (*)(Block *Storage, char *FieldPtr, bool IsConst,
bool IsMutable, bool IsActive,
- Descriptor *FieldDesc);
+ const Descriptor *FieldDesc);
/// Invoked when a block is destroyed. Invokes the destructors of all
/// non-trivial nested fields of arrays and records.
using BlockDtorFn = void (*)(Block *Storage, char *FieldPtr,
- Descriptor *FieldDesc);
+ const Descriptor *FieldDesc);
/// Invoked when a block with pointers referencing it goes out of scope. Such
/// blocks are persisted: the move function copies all inline descriptors and
/// non-trivial fields, as existing pointers might need to reference those
/// descriptors. Data is not copied since it cannot be legally read.
-using BlockMoveFn = void (*)(Block *Storage, char *SrcFieldPtr,
- char *DstFieldPtr, Descriptor *FieldDesc);
-
-/// Object size as used by the interpreter.
-using InterpSize = unsigned;
+using BlockMoveFn = void (*)(Block *Storage, const char *SrcFieldPtr,
+ char *DstFieldPtr, const Descriptor *FieldDesc);
/// Inline descriptor embedded in structures and arrays.
///
@@ -81,13 +78,13 @@ private:
/// Original declaration, used to emit the error message.
const DeclTy Source;
/// Size of an element, in host bytes.
- const InterpSize ElemSize;
+ const unsigned ElemSize;
/// Size of the storage, in host bytes.
- const InterpSize Size;
+ const unsigned Size;
// Size of the metadata.
- const InterpSize MDSize;
+ const unsigned MDSize;
/// Size of the allocation (storage + metadata), in host bytes.
- const InterpSize AllocSize;
+ const unsigned AllocSize;
/// Value to denote arrays of unknown size.
static constexpr unsigned UnknownSizeMark = (unsigned)-1;
@@ -96,7 +93,7 @@ public:
/// Token to denote structures of unknown size.
struct UnknownSize {};
- using MetadataSize = std::optional<InterpSize>;
+ using MetadataSize = std::optional<unsigned>;
static constexpr MetadataSize InlineDescMD = sizeof(InlineDescriptor);
/// Pointer to the record, if block contains records.
@@ -177,6 +174,8 @@ public:
/// Checks if the descriptor is of an array of primitives.
bool isPrimitiveArray() const { return IsArray && !ElemDesc; }
+ /// Checks if the descriptor is of an array of composites.
+ bool isCompositeArray() const { return IsArray && ElemDesc; }
/// Checks if the descriptor is of an array of zero size.
bool isZeroSizeArray() const { return Size == 0; }
/// Checks if the descriptor is of an array of unknown size.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp
index d31e879d516f..35ed5d128697 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "Floating.h"
#include "Function.h"
#include "Opcode.h"
#include "PrimType.h"
@@ -33,18 +34,7 @@ template <typename T> inline T ReadArg(Program &P, CodePtr &OpPC) {
LLVM_DUMP_METHOD void Function::dump() const { dump(llvm::errs()); }
LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
- if (F) {
- if (auto *Cons = dyn_cast<CXXConstructorDecl>(F)) {
- DeclarationName Name = Cons->getParent()->getDeclName();
- OS << Name << "::" << Name;
- } else {
- OS << F->getDeclName();
- }
- OS << " " << (const void*)this << ":\n";
- } else {
- OS << "<<expr>>\n";
- }
-
+ OS << getName() << " " << (const void *)this << "\n";
OS << "frame size: " << getFrameSize() << "\n";
OS << "arg size: " << getArgSize() << "\n";
OS << "rvo: " << hasRVO() << "\n";
@@ -52,9 +42,9 @@ LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
auto PrintName = [&OS](const char *Name) {
OS << Name;
- for (long I = 0, N = strlen(Name); I < 30 - N; ++I) {
- OS << ' ';
- }
+ long N = 30 - strlen(Name);
+ if (N > 0)
+ OS.indent(N);
};
for (CodePtr Start = getCodeBegin(), PC = Start; PC != getCodeEnd();) {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp
index 72fd3b45254b..f22cca90d4f4 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp
@@ -123,7 +123,7 @@ bool EvalEmitter::emitRetValue(const SourceInfo &Info) {
Ty = AT->getValueType();
if (auto *RT = Ty->getAs<RecordType>()) {
- auto *Record = Ptr.getRecord();
+ const auto *Record = Ptr.getRecord();
assert(Record && "Missing record descriptor");
bool Ok = true;
@@ -208,9 +208,7 @@ bool EvalEmitter::emitGetPtrLocal(uint32_t I, const SourceInfo &Info) {
if (!isActive())
return true;
- auto It = Locals.find(I);
- assert(It != Locals.end() && "Missing local variable");
- Block *B = reinterpret_cast<Block *>(It->second.get());
+ Block *B = getLocal(I);
S.Stk.push<Pointer>(B, sizeof(InlineDescriptor));
return true;
}
@@ -222,9 +220,7 @@ bool EvalEmitter::emitGetLocal(uint32_t I, const SourceInfo &Info) {
using T = typename PrimConv<OpType>::T;
- auto It = Locals.find(I);
- assert(It != Locals.end() && "Missing local variable");
- auto *B = reinterpret_cast<Block *>(It->second.get());
+ Block *B = getLocal(I);
S.Stk.push<T>(*reinterpret_cast<T *>(B->data()));
return true;
}
@@ -236,9 +232,7 @@ bool EvalEmitter::emitSetLocal(uint32_t I, const SourceInfo &Info) {
using T = typename PrimConv<OpType>::T;
- auto It = Locals.find(I);
- assert(It != Locals.end() && "Missing local variable");
- auto *B = reinterpret_cast<Block *>(It->second.get());
+ Block *B = getLocal(I);
*reinterpret_cast<T *>(B->data()) = S.Stk.pop<T>();
InlineDescriptor &Desc = *reinterpret_cast<InlineDescriptor *>(B->rawData());
Desc.IsInitialized = true;
@@ -251,9 +245,8 @@ bool EvalEmitter::emitDestroy(uint32_t I, const SourceInfo &Info) {
return true;
for (auto &Local : Descriptors[I]) {
- auto It = Locals.find(Local.Offset);
- assert(It != Locals.end() && "Missing local variable");
- S.deallocate(reinterpret_cast<Block *>(It->second.get()));
+ Block *B = getLocal(Local.Offset);
+ S.deallocate(B);
}
return true;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h
index 6b6d0d621901..d1901359f2c2 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h
@@ -71,11 +71,15 @@ protected:
/// Returns the source location of the current opcode.
SourceInfo getSource(const Function *F, CodePtr PC) const override {
- return F ? F->getSource(PC) : CurrentSource;
+ return (F && F->hasBody()) ? F->getSource(PC) : CurrentSource;
}
/// Parameter indices.
llvm::DenseMap<const ParmVarDecl *, unsigned> Params;
+ /// Lambda captures.
+ /// Map from Decl* to [Offset, IsReference] pair.
+ llvm::DenseMap<const ValueDecl *, std::pair<unsigned, bool>> LambdaCaptures;
+ unsigned LambdaThisCapture;
/// Local descriptors.
llvm::SmallVector<SmallVector<Local, 8>, 2> Descriptors;
@@ -92,6 +96,12 @@ private:
/// Temporaries which require storage.
llvm::DenseMap<unsigned, std::unique_ptr<char[]>> Locals;
+ Block *getLocal(unsigned Index) const {
+ auto It = Locals.find(Index);
+ assert(It != Locals.end() && "Missing local variable");
+ return reinterpret_cast<Block *>(It->second.get());
+ }
+
// The emitter always tracks the current instruction and sets OpPC to a token
// value which is mapped to the location of the opcode being evaluated.
CodePtr OpPC;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Floating.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Floating.cpp
new file mode 100644
index 000000000000..922e17ad1450
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Floating.cpp
@@ -0,0 +1,22 @@
+//===---- Floating.cpp - Support for floating point values ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Floating.h"
+
+namespace clang {
+namespace interp {
+
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, Floating F) {
+ F.print(OS);
+ return OS;
+}
+
+Floating getSwappedBytes(Floating F) { return F; }
+
+} // namespace interp
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Floating.h b/contrib/llvm-project/clang/lib/AST/Interp/Floating.h
new file mode 100644
index 000000000000..85876236a999
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Floating.h
@@ -0,0 +1,158 @@
+//===--- Floating.h - Types for the constexpr VM ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the VM types and helpers operating on types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_FLOATING_H
+#define LLVM_CLANG_AST_INTERP_FLOATING_H
+
+#include "Primitives.h"
+#include "clang/AST/APValue.h"
+#include "llvm/ADT/APFloat.h"
+
+namespace clang {
+namespace interp {
+
+using APFloat = llvm::APFloat;
+using APSInt = llvm::APSInt;
+
+class Floating final {
+private:
+ // The underlying value storage.
+ APFloat F;
+
+public:
+ /// Zero-initializes a Floating.
+ Floating() : F(0.0f) {}
+ Floating(const APFloat &F) : F(F) {}
+
+ // Static constructors for special floating point values.
+ static Floating getInf(const llvm::fltSemantics &Sem) {
+ return Floating(APFloat::getInf(Sem));
+ }
+ const APFloat &getAPFloat() const { return F; }
+
+ bool operator<(Floating RHS) const { return F < RHS.F; }
+ bool operator>(Floating RHS) const { return F > RHS.F; }
+ bool operator<=(Floating RHS) const { return F <= RHS.F; }
+ bool operator>=(Floating RHS) const { return F >= RHS.F; }
+ bool operator==(Floating RHS) const { return F == RHS.F; }
+ bool operator!=(Floating RHS) const { return F != RHS.F; }
+ Floating operator-() const { return Floating(-F); }
+
+ APFloat::opStatus convertToInteger(APSInt &Result) const {
+ bool IsExact;
+ return F.convertToInteger(Result, llvm::APFloat::rmTowardZero, &IsExact);
+ }
+
+ Floating toSemantics(const llvm::fltSemantics *Sem,
+ llvm::RoundingMode RM) const {
+ APFloat Copy = F;
+ bool LosesInfo;
+ Copy.convert(*Sem, RM, &LosesInfo);
+ (void)LosesInfo;
+ return Floating(Copy);
+ }
+
+ /// Convert this Floating to one with the same semantics as \Other.
+ Floating toSemantics(const Floating &Other, llvm::RoundingMode RM) const {
+ return toSemantics(&Other.F.getSemantics(), RM);
+ }
+
+ APSInt toAPSInt(unsigned NumBits = 0) const {
+ return APSInt(F.bitcastToAPInt());
+ }
+ APValue toAPValue() const { return APValue(F); }
+ void print(llvm::raw_ostream &OS) const {
+ // Can't use APFloat::print() since it appends a newline.
+ SmallVector<char, 16> Buffer;
+ F.toString(Buffer);
+ OS << Buffer;
+ }
+
+ unsigned bitWidth() const { return F.semanticsSizeInBits(F.getSemantics()); }
+
+ bool isSigned() const { return true; }
+ bool isNegative() const { return F.isNegative(); }
+ bool isPositive() const { return !F.isNegative(); }
+ bool isZero() const { return F.isZero(); }
+ bool isNonZero() const { return F.isNonZero(); }
+ bool isMin() const { return F.isSmallest(); }
+ bool isMinusOne() const { return F.isExactlyValue(-1.0); }
+ bool isNan() const { return F.isNaN(); }
+ bool isFinite() const { return F.isFinite(); }
+
+ ComparisonCategoryResult compare(const Floating &RHS) const {
+ return Compare(F, RHS.F);
+ }
+
+ static APFloat::opStatus fromIntegral(APSInt Val,
+ const llvm::fltSemantics &Sem,
+ llvm::RoundingMode RM,
+ Floating &Result) {
+ APFloat F = APFloat(Sem);
+ APFloat::opStatus Status = F.convertFromAPInt(Val, Val.isSigned(), RM);
+ Result = Floating(F);
+ return Status;
+ }
+
+ // -------
+
+ static APFloat::opStatus add(const Floating &A, const Floating &B,
+ llvm::RoundingMode RM, Floating *R) {
+ *R = Floating(A.F);
+ return R->F.add(B.F, RM);
+ }
+
+ static APFloat::opStatus increment(const Floating &A, llvm::RoundingMode RM,
+ Floating *R) {
+ APFloat One(A.F.getSemantics(), 1);
+ *R = Floating(A.F);
+ return R->F.add(One, RM);
+ }
+
+ static APFloat::opStatus sub(const Floating &A, const Floating &B,
+ llvm::RoundingMode RM, Floating *R) {
+ *R = Floating(A.F);
+ return R->F.subtract(B.F, RM);
+ }
+
+ static APFloat::opStatus decrement(const Floating &A, llvm::RoundingMode RM,
+ Floating *R) {
+ APFloat One(A.F.getSemantics(), 1);
+ *R = Floating(A.F);
+ return R->F.subtract(One, RM);
+ }
+
+ static APFloat::opStatus mul(const Floating &A, const Floating &B,
+ llvm::RoundingMode RM, Floating *R) {
+ *R = Floating(A.F);
+ return R->F.multiply(B.F, RM);
+ }
+
+ static APFloat::opStatus div(const Floating &A, const Floating &B,
+ llvm::RoundingMode RM, Floating *R) {
+ *R = Floating(A.F);
+ return R->F.divide(B.F, RM);
+ }
+
+ static bool neg(const Floating &A, Floating *R) {
+ *R = -A;
+ return false;
+ }
+};
+
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, Floating F);
+Floating getSwappedBytes(Floating F);
+
+} // namespace interp
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Frame.h b/contrib/llvm-project/clang/lib/AST/Interp/Frame.h
index b9a0ea9412f8..304f0d108cab 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Frame.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Frame.h
@@ -27,7 +27,7 @@ public:
virtual ~Frame();
/// Generates a human-readable description of the call site.
- virtual void describe(llvm::raw_ostream &OS) = 0;
+ virtual void describe(llvm::raw_ostream &OS) const = 0;
/// Returns a pointer to the caller frame.
virtual Frame *getCaller() const = 0;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp
index 40001faad411..75312999d23d 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp
@@ -16,12 +16,14 @@ using namespace clang;
using namespace clang::interp;
Function::Function(Program &P, const FunctionDecl *F, unsigned ArgSize,
- llvm::SmallVector<PrimType, 8> &&ParamTypes,
+ llvm::SmallVectorImpl<PrimType> &&ParamTypes,
llvm::DenseMap<unsigned, ParamDescriptor> &&Params,
+ llvm::SmallVectorImpl<unsigned> &&ParamOffsets,
bool HasThisPointer, bool HasRVO)
: P(P), Loc(F->getBeginLoc()), F(F), ArgSize(ArgSize),
ParamTypes(std::move(ParamTypes)), Params(std::move(Params)),
- HasThisPointer(HasThisPointer), HasRVO(HasRVO) {}
+ ParamOffsets(std::move(ParamOffsets)), HasThisPointer(HasThisPointer),
+ HasRVO(HasRVO) {}
Function::ParamDescriptor Function::getParamDescriptor(unsigned Offset) const {
auto It = Params.find(Offset);
@@ -32,6 +34,7 @@ Function::ParamDescriptor Function::getParamDescriptor(unsigned Offset) const {
SourceInfo Function::getSource(CodePtr PC) const {
assert(PC >= getCodeBegin() && "PC does not belong to this function");
assert(PC <= getCodeEnd() && "PC Does not belong to this function");
+ assert(hasBody() && "Function has no body");
unsigned Offset = PC - getCodeBegin();
using Elem = std::pair<unsigned, SourceInfo>;
auto It = llvm::lower_bound(SrcMap, Elem{Offset, {}}, llvm::less_first());
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Function.h b/contrib/llvm-project/clang/lib/AST/Interp/Function.h
index 5b2a77f1a12d..55a23ff288e8 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Function.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Function.h
@@ -90,7 +90,12 @@ public:
/// Returns the name of the function decl this code
/// was generated for.
- const std::string getName() const { return F->getNameInfo().getAsString(); }
+ const std::string getName() const {
+ if (!F)
+ return "<<expr>>";
+
+ return F->getQualifiedNameAsString();
+ }
/// Returns the location.
SourceLocation getLoc() const { return Loc; }
@@ -129,33 +134,50 @@ public:
/// Checks if the function is a constructor.
bool isConstructor() const { return isa<CXXConstructorDecl>(F); }
+ /// Checks if the function is a destructor.
+ bool isDestructor() const { return isa<CXXDestructorDecl>(F); }
+
+ /// Returns the parent record decl, if any.
+ const CXXRecordDecl *getParentDecl() const {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(F))
+ return MD->getParent();
+ return nullptr;
+ }
/// Checks if the function is fully done compiling.
bool isFullyCompiled() const { return IsFullyCompiled; }
bool hasThisPointer() const { return HasThisPointer; }
- // Checks if the funtion already has a body attached.
+ /// Checks if the function already has a body attached.
bool hasBody() const { return HasBody; }
+ unsigned getBuiltinID() const { return F->getBuiltinID(); }
+
unsigned getNumParams() const { return ParamTypes.size(); }
+ unsigned getParamOffset(unsigned ParamIndex) const {
+ return ParamOffsets[ParamIndex];
+ }
+
private:
/// Construct a function representing an actual function.
Function(Program &P, const FunctionDecl *F, unsigned ArgSize,
- llvm::SmallVector<PrimType, 8> &&ParamTypes,
+ llvm::SmallVectorImpl<PrimType> &&ParamTypes,
llvm::DenseMap<unsigned, ParamDescriptor> &&Params,
- bool HasThisPointer, bool HasRVO);
+ llvm::SmallVectorImpl<unsigned> &&ParamOffsets, bool HasThisPointer,
+ bool HasRVO);
/// Sets the code of a function.
- void setCode(unsigned NewFrameSize, std::vector<char> &&NewCode, SourceMap &&NewSrcMap,
- llvm::SmallVector<Scope, 2> &&NewScopes) {
+ void setCode(unsigned NewFrameSize, std::vector<std::byte> &&NewCode,
+ SourceMap &&NewSrcMap, llvm::SmallVector<Scope, 2> &&NewScopes,
+ bool NewHasBody) {
FrameSize = NewFrameSize;
Code = std::move(NewCode);
SrcMap = std::move(NewSrcMap);
Scopes = std::move(NewScopes);
IsValid = true;
- HasBody = true;
+ HasBody = NewHasBody;
}
void setIsFullyCompiled(bool FC) { IsFullyCompiled = FC; }
@@ -175,7 +197,7 @@ private:
/// Size of the argument stack.
unsigned ArgSize;
/// Program code.
- std::vector<char> Code;
+ std::vector<std::byte> Code;
/// Opcode-to-expression mapping.
SourceMap SrcMap;
/// List of block descriptors.
@@ -184,6 +206,8 @@ private:
llvm::SmallVector<PrimType, 8> ParamTypes;
/// Map from byte offset to parameter descriptor.
llvm::DenseMap<unsigned, ParamDescriptor> Params;
+ /// List of parameter offsets.
+ llvm::SmallVector<unsigned, 8> ParamOffsets;
/// Flag to indicate if the function is valid.
bool IsValid = false;
/// Flag to indicate if the function is done being
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h b/contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h
new file mode 100644
index 000000000000..4a3f993d4882
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h
@@ -0,0 +1,71 @@
+//===--- FunctionPointer.h - Types for the constexpr VM ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_FUNCTION_POINTER_H
+#define LLVM_CLANG_AST_INTERP_FUNCTION_POINTER_H
+
+#include "Function.h"
+#include "Primitives.h"
+#include "clang/AST/APValue.h"
+
+namespace clang {
+class ASTContext;
+namespace interp {
+
+class FunctionPointer final {
+private:
+ const Function *Func;
+
+public:
+ FunctionPointer() : Func(nullptr) {}
+ FunctionPointer(const Function *Func) : Func(Func) { assert(Func); }
+
+ const Function *getFunction() const { return Func; }
+
+ APValue toAPValue() const {
+ if (!Func)
+ return APValue(static_cast<Expr *>(nullptr), CharUnits::Zero(), {},
+ /*OnePastTheEnd=*/false, /*IsNull=*/true);
+
+ return APValue(Func->getDecl(), CharUnits::Zero(), {},
+ /*OnePastTheEnd=*/false, /*IsNull=*/false);
+ }
+
+ void print(llvm::raw_ostream &OS) const {
+ OS << "FnPtr(";
+ if (Func)
+ OS << Func->getName();
+ else
+ OS << "nullptr";
+ OS << ")";
+ }
+
+ std::string toDiagnosticString(const ASTContext &Ctx) const {
+ if (!Func)
+ return "nullptr";
+
+ return toAPValue().getAsString(Ctx, Func->getDecl()->getType());
+ }
+
+ ComparisonCategoryResult compare(const FunctionPointer &RHS) const {
+ if (Func == RHS.Func)
+ return ComparisonCategoryResult::Equal;
+ return ComparisonCategoryResult::Unordered;
+ }
+};
+
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ FunctionPointer FP) {
+ FP.print(OS);
+ return OS;
+}
+
+} // namespace interp
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Integral.h b/contrib/llvm-project/clang/lib/AST/Interp/Integral.h
index 8a742333ae57..de588ab8c9f1 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Integral.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Integral.h
@@ -21,22 +21,14 @@
#include <cstddef>
#include <cstdint>
+#include "Primitives.h"
+
namespace clang {
namespace interp {
using APInt = llvm::APInt;
using APSInt = llvm::APSInt;
-/// Helper to compare two comparable types.
-template <typename T>
-ComparisonCategoryResult Compare(const T &X, const T &Y) {
- if (X < Y)
- return ComparisonCategoryResult::Less;
- if (X > Y)
- return ComparisonCategoryResult::Greater;
- return ComparisonCategoryResult::Equal;
-}
-
// Helper structure to select the representation.
template <unsigned Bits, bool Signed> struct Repr;
template <> struct Repr<8, false> { using Type = uint8_t; };
@@ -136,7 +128,9 @@ public:
}
unsigned countLeadingZeros() const {
- return llvm::countLeadingZeros<ReprT>(V);
+ if constexpr (!Signed)
+ return llvm::countl_zero<ReprT>(V);
+ llvm_unreachable("Don't call countLeadingZeros() on signed types.");
}
Integral truncate(unsigned TruncBits) const {
@@ -233,6 +227,9 @@ public:
}
static bool neg(Integral A, Integral *R) {
+ if (Signed && A.isMin())
+ return true;
+
*R = -A;
return false;
}
@@ -242,6 +239,18 @@ public:
return false;
}
+ template <unsigned RHSBits, bool RHSSign>
+ static void shiftLeft(const Integral A, const Integral<RHSBits, RHSSign> B,
+ unsigned OpBits, Integral *R) {
+ *R = Integral::from(A.V << B.V, OpBits);
+ }
+
+ template <unsigned RHSBits, bool RHSSign>
+ static void shiftRight(const Integral A, const Integral<RHSBits, RHSSign> B,
+ unsigned OpBits, Integral *R) {
+ *R = Integral::from(A.V >> B.V, OpBits);
+ }
+
private:
template <typename T> static bool CheckAddUB(T A, T B, T &R) {
if constexpr (std::is_signed_v<T>) {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp
index 6a600b306bad..4917f43f9512 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp
@@ -26,51 +26,6 @@
using namespace clang;
using namespace clang::interp;
-//===----------------------------------------------------------------------===//
-// Ret
-//===----------------------------------------------------------------------===//
-
-template <PrimType Name, class T = typename PrimConv<Name>::T>
-static bool Ret(InterpState &S, CodePtr &PC, APValue &Result) {
- S.CallStackDepth--;
- const T &Ret = S.Stk.pop<T>();
-
- assert(S.Current->getFrameOffset() == S.Stk.size() && "Invalid frame");
- if (!S.checkingPotentialConstantExpression())
- S.Current->popArgs();
-
- if (InterpFrame *Caller = S.Current->Caller) {
- PC = S.Current->getRetPC();
- delete S.Current;
- S.Current = Caller;
- S.Stk.push<T>(Ret);
- } else {
- delete S.Current;
- S.Current = nullptr;
- if (!ReturnValue<T>(Ret, Result))
- return false;
- }
- return true;
-}
-
-static bool RetVoid(InterpState &S, CodePtr &PC, APValue &Result) {
- S.CallStackDepth--;
-
- assert(S.Current->getFrameOffset() == S.Stk.size() && "Invalid frame");
- if (!S.checkingPotentialConstantExpression())
- S.Current->popArgs();
-
- if (InterpFrame *Caller = S.Current->Caller) {
- PC = S.Current->getRetPC();
- delete S.Current;
- S.Current = Caller;
- } else {
- delete S.Current;
- S.Current = nullptr;
- }
- return true;
-}
-
static bool RetValue(InterpState &S, CodePtr &Pt, APValue &Result) {
llvm::report_fatal_error("Interpreter cannot return values");
}
@@ -98,17 +53,6 @@ static bool Jf(InterpState &S, CodePtr &PC, int32_t Offset) {
return true;
}
-static bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK) {
- if (Ptr.isInitialized())
- return true;
- if (!S.checkingPotentialConstantExpression()) {
- const SourceInfo &Loc = S.Current->getSource(OpPC);
- S.FFDiag(Loc, diag::note_constexpr_access_uninit) << AK << false;
- }
- return false;
-}
-
static bool CheckActive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK) {
if (Ptr.isActive())
@@ -124,7 +68,7 @@ static bool CheckActive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
}
// Find the active field of the union.
- Record *R = U.getRecord();
+ const Record *R = U.getRecord();
assert(R && R->isUnion() && "Not a union");
const FieldDecl *ActiveField = nullptr;
for (unsigned I = 0, N = R->getNumFields(); I < N; ++I) {
@@ -183,7 +127,7 @@ bool CheckExtern(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
return true;
if (!S.checkingPotentialConstantExpression()) {
- auto *VD = Ptr.getDeclDesc()->asValueDecl();
+ const auto *VD = Ptr.getDeclDesc()->asValueDecl();
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.FFDiag(Loc, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
S.Note(VD->getLocation(), diag::note_declared_at);
@@ -258,7 +202,14 @@ bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
assert(Ptr.isLive() && "Pointer is not live");
- if (!Ptr.isConst()) {
+ if (!Ptr.isConst())
+ return true;
+
+ // The This pointer is writable in constructors and destructors,
+ // even if isConst() returns true.
+ if (const Function *Func = S.Current->getFunction();
+ Func && (Func->isConstructor() || Func->isDestructor()) &&
+ Ptr.block() == S.Current->getThis().block()) {
return true;
}
@@ -281,6 +232,19 @@ bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
return false;
}
+bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK) {
+ if (Ptr.isInitialized())
+ return true;
+
+ if (!S.checkingPotentialConstantExpression()) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_access_uninit)
+ << AK << /*uninitialized=*/true;
+ }
+ return false;
+}
+
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (!CheckLive(S, OpPC, Ptr, AK_Read))
return false;
@@ -333,24 +297,27 @@ bool CheckInit(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) {
- if (F->isVirtual()) {
- if (!S.getLangOpts().CPlusPlus20) {
- const SourceLocation &Loc = S.Current->getLocation(OpPC);
- S.CCEDiag(Loc, diag::note_constexpr_virtual_call);
- return false;
- }
+ if (F->isVirtual() && !S.getLangOpts().CPlusPlus20) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.CCEDiag(Loc, diag::note_constexpr_virtual_call);
+ return false;
}
if (!F->isConstexpr()) {
+ // Don't emit anything if we're checking for a potential constant
+ // expression. That will happen later when actually executing.
+ if (S.checkingPotentialConstantExpression())
+ return false;
+
const SourceLocation &Loc = S.Current->getLocation(OpPC);
if (S.getLangOpts().CPlusPlus11) {
const FunctionDecl *DiagDecl = F->getDecl();
// If this function is not constexpr because it is an inherited
// non-constexpr constructor, diagnose that directly.
- auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl);
+ const auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl);
if (CD && CD->isInheritingConstructor()) {
- auto *Inherited = CD->getInheritedConstructor().getConstructor();
+ const auto *Inherited = CD->getInheritedConstructor().getConstructor();
if (!Inherited->isConstexpr())
DiagDecl = CD = Inherited;
}
@@ -374,6 +341,17 @@ bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) {
return true;
}
+bool CheckCallDepth(InterpState &S, CodePtr OpPC) {
+ if ((S.Current->getDepth() + 1) > S.getLangOpts().ConstexprCallDepth) {
+ S.FFDiag(S.Current->getSource(OpPC),
+ diag::note_constexpr_depth_limit_exceeded)
+ << S.getLangOpts().ConstexprCallDepth;
+ return false;
+ }
+
+ return true;
+}
+
bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This) {
if (!This.isZero())
return true;
@@ -381,7 +359,7 @@ bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
bool IsImplicit = false;
- if (auto *E = dyn_cast_or_null<CXXThisExpr>(Loc.asExpr()))
+ if (const auto *E = dyn_cast_if_present<CXXThisExpr>(Loc.asExpr()))
IsImplicit = E->isImplicit();
if (S.getLangOpts().CPlusPlus11)
@@ -402,11 +380,11 @@ bool CheckPure(InterpState &S, CodePtr OpPC, const CXXMethodDecl *MD) {
}
static void DiagnoseUninitializedSubobject(InterpState &S, const SourceInfo &SI,
- QualType SubObjType,
- SourceLocation SubObjLoc) {
- S.FFDiag(SI, diag::note_constexpr_uninitialized) << true << SubObjType;
- if (SubObjLoc.isValid())
- S.Note(SubObjLoc, diag::note_constexpr_subobject_declared_here);
+ const FieldDecl *SubObjDecl) {
+ assert(SubObjDecl && "Subobject declaration does not exist");
+ S.FFDiag(SI, diag::note_constexpr_uninitialized) << SubObjDecl;
+ S.Note(SubObjDecl->getLocation(),
+ diag::note_constexpr_subobject_declared_here);
}
static bool CheckFieldsInitialized(InterpState &S, CodePtr OpPC,
@@ -419,13 +397,13 @@ static bool CheckArrayInitialized(InterpState &S, CodePtr OpPC,
size_t NumElems = CAT->getSize().getZExtValue();
QualType ElemType = CAT->getElementType();
- if (isa<RecordType>(ElemType.getTypePtr())) {
+ if (ElemType->isRecordType()) {
const Record *R = BasePtr.getElemRecord();
for (size_t I = 0; I != NumElems; ++I) {
Pointer ElemPtr = BasePtr.atIndex(I).narrow();
Result &= CheckFieldsInitialized(S, OpPC, ElemPtr, R);
}
- } else if (auto *ElemCAT = dyn_cast<ConstantArrayType>(ElemType)) {
+ } else if (const auto *ElemCAT = dyn_cast<ConstantArrayType>(ElemType)) {
for (size_t I = 0; I != NumElems; ++I) {
Pointer ElemPtr = BasePtr.atIndex(I).narrow();
Result &= CheckArrayInitialized(S, OpPC, ElemPtr, ElemCAT);
@@ -433,8 +411,8 @@ static bool CheckArrayInitialized(InterpState &S, CodePtr OpPC,
} else {
for (size_t I = 0; I != NumElems; ++I) {
if (!BasePtr.atIndex(I).isInitialized()) {
- DiagnoseUninitializedSubobject(S, S.Current->getSource(OpPC), ElemType,
- BasePtr.getFieldDesc()->getLocation());
+ DiagnoseUninitializedSubobject(S, S.Current->getSource(OpPC),
+ BasePtr.getField());
Result = false;
}
}
@@ -459,18 +437,64 @@ static bool CheckFieldsInitialized(InterpState &S, CodePtr OpPC,
cast<ConstantArrayType>(FieldType->getAsArrayTypeUnsafe());
Result &= CheckArrayInitialized(S, OpPC, FieldPtr, CAT);
} else if (!FieldPtr.isInitialized()) {
- DiagnoseUninitializedSubobject(S, S.Current->getSource(OpPC),
- F.Decl->getType(), F.Decl->getLocation());
+ DiagnoseUninitializedSubobject(S, S.Current->getSource(OpPC), F.Decl);
Result = false;
}
}
+
+ // Check Fields in all bases
+ for (const Record::Base &B : R->bases()) {
+ Pointer P = BasePtr.atField(B.Offset);
+ Result &= CheckFieldsInitialized(S, OpPC, P, B.R);
+ }
+
+ // TODO: Virtual bases
+
return Result;
}
bool CheckCtorCall(InterpState &S, CodePtr OpPC, const Pointer &This) {
assert(!This.isZero());
- const Record *R = This.getRecord();
- return CheckFieldsInitialized(S, OpPC, This, R);
+ if (const Record *R = This.getRecord())
+ return CheckFieldsInitialized(S, OpPC, This, R);
+ const auto *CAT =
+ cast<ConstantArrayType>(This.getType()->getAsArrayTypeUnsafe());
+ return CheckArrayInitialized(S, OpPC, This, CAT);
+}
+
+bool CheckFloatResult(InterpState &S, CodePtr OpPC, APFloat::opStatus Status) {
+ // In a constant context, assume that any dynamic rounding mode or FP
+ // exception state matches the default floating-point environment.
+ if (S.inConstantContext())
+ return true;
+
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ FPOptions FPO = E.asExpr()->getFPFeaturesInEffect(S.Ctx.getLangOpts());
+
+ if ((Status & APFloat::opInexact) &&
+ FPO.getRoundingMode() == llvm::RoundingMode::Dynamic) {
+ // Inexact result means that it depends on rounding mode. If the requested
+ // mode is dynamic, the evaluation cannot be made in compile time.
+ S.FFDiag(E, diag::note_constexpr_dynamic_rounding);
+ return false;
+ }
+
+ if ((Status != APFloat::opOK) &&
+ (FPO.getRoundingMode() == llvm::RoundingMode::Dynamic ||
+ FPO.getExceptionMode() != LangOptions::FPE_Ignore ||
+ FPO.getAllowFEnvAccess())) {
+ S.FFDiag(E, diag::note_constexpr_float_arithmetic_strict);
+ return false;
+ }
+
+ if ((Status & APFloat::opStatus::opInvalidOp) &&
+ FPO.getExceptionMode() != LangOptions::FPE_Ignore) {
+ // There is no usefully definable result.
+ S.FFDiag(E);
+ return false;
+ }
+
+ return true;
}
bool Interpret(InterpState &S, APValue &Result) {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Interp.h b/contrib/llvm-project/clang/lib/AST/Interp/Interp.h
index ed3accd98a90..ff67e873a084 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Interp.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Interp.h
@@ -14,7 +14,9 @@
#define LLVM_CLANG_AST_INTERP_INTERP_H
#include "Boolean.h"
+#include "Floating.h"
#include "Function.h"
+#include "FunctionPointer.h"
#include "InterpFrame.h"
#include "InterpStack.h"
#include "InterpState.h"
@@ -74,6 +76,9 @@ bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
/// Checks if a value can be loaded from a block.
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK);
+
/// Checks if a value can be stored in a block.
bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
@@ -86,6 +91,10 @@ bool CheckInit(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
/// Checks if a method can be called.
bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F);
+/// Checks if calling the currently active function would exceed
+/// the allowed call depth.
+bool CheckCallDepth(InterpState &S, CodePtr OpPC);
+
/// Checks the 'this' pointer.
bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This);
@@ -96,8 +105,9 @@ bool CheckPure(InterpState &S, CodePtr OpPC, const CXXMethodDecl *MD);
bool CheckCtorCall(InterpState &S, CodePtr OpPC, const Pointer &This);
/// Checks if the shift operation is legal.
-template <typename RT>
-bool CheckShift(InterpState &S, CodePtr OpPC, const RT &RHS, unsigned Bits) {
+template <typename LT, typename RT>
+bool CheckShift(InterpState &S, CodePtr OpPC, const LT &LHS, const RT &RHS,
+ unsigned Bits) {
if (RHS.isNegative()) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.CCEDiag(Loc, diag::note_constexpr_negative_shift) << RHS.toAPSInt();
@@ -113,6 +123,20 @@ bool CheckShift(InterpState &S, CodePtr OpPC, const RT &RHS, unsigned Bits) {
S.CCEDiag(E, diag::note_constexpr_large_shift) << Val << Ty << Bits;
return false;
}
+
+ if (LHS.isSigned() && !S.getLangOpts().CPlusPlus20) {
+ const Expr *E = S.Current->getExpr(OpPC);
+ // C++11 [expr.shift]p2: A signed left shift must have a non-negative
+ // operand, and must not overflow the corresponding unsigned type.
+ if (LHS.isNegative())
+ S.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS.toAPSInt();
+ else if (LHS.toUnsigned().countLeadingZeros() < static_cast<unsigned>(RHS))
+ S.CCEDiag(E, diag::note_constexpr_lshift_discards);
+ }
+
+ // C++2a [expr.shift]p2: [P0907R4]:
+ // E1 << E2 is the unique value congruent to
+ // E1 x 2^E2 module 2^N.
return true;
}
@@ -137,9 +161,62 @@ bool CheckDivRem(InterpState &S, CodePtr OpPC, const T &LHS, const T &RHS) {
return true;
}
+/// Checks if the result is a floating-point operation is valid
+/// in the current context.
+bool CheckFloatResult(InterpState &S, CodePtr OpPC, APFloat::opStatus Status);
+
/// Interpreter entry point.
bool Interpret(InterpState &S, APValue &Result);
+/// Interpret a builtin function.
+bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F);
+
+enum class ArithOp { Add, Sub };
+
+//===----------------------------------------------------------------------===//
+// Returning values
+//===----------------------------------------------------------------------===//
+
+template <PrimType Name, bool Builtin = false,
+ class T = typename PrimConv<Name>::T>
+bool Ret(InterpState &S, CodePtr &PC, APValue &Result) {
+ const T &Ret = S.Stk.pop<T>();
+
+ assert(S.Current->getFrameOffset() == S.Stk.size() && "Invalid frame");
+ if (Builtin || !S.checkingPotentialConstantExpression())
+ S.Current->popArgs();
+
+ if (InterpFrame *Caller = S.Current->Caller) {
+ PC = S.Current->getRetPC();
+ delete S.Current;
+ S.Current = Caller;
+ S.Stk.push<T>(Ret);
+ } else {
+ delete S.Current;
+ S.Current = nullptr;
+ if (!ReturnValue<T>(Ret, Result))
+ return false;
+ }
+ return true;
+}
+
+template <bool Builtin = false>
+inline bool RetVoid(InterpState &S, CodePtr &PC, APValue &Result) {
+ assert(S.Current->getFrameOffset() == S.Stk.size() && "Invalid frame");
+ if (Builtin || !S.checkingPotentialConstantExpression())
+ S.Current->popArgs();
+
+ if (InterpFrame *Caller = S.Current->Caller) {
+ PC = S.Current->getRetPC();
+ delete S.Current;
+ S.Current = Caller;
+ } else {
+ delete S.Current;
+ S.Current = nullptr;
+ }
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// Add, Sub, Mul
//===----------------------------------------------------------------------===//
@@ -184,6 +261,16 @@ bool Add(InterpState &S, CodePtr OpPC) {
return AddSubMulHelper<T, T::add, std::plus>(S, OpPC, Bits, LHS, RHS);
}
+inline bool Addf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Floating &RHS = S.Stk.pop<Floating>();
+ const Floating &LHS = S.Stk.pop<Floating>();
+
+ Floating Result;
+ auto Status = Floating::add(LHS, RHS, RM, &Result);
+ S.Stk.push<Floating>(Result);
+ return CheckFloatResult(S, OpPC, Status);
+}
+
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Sub(InterpState &S, CodePtr OpPC) {
const T &RHS = S.Stk.pop<T>();
@@ -192,6 +279,16 @@ bool Sub(InterpState &S, CodePtr OpPC) {
return AddSubMulHelper<T, T::sub, std::minus>(S, OpPC, Bits, LHS, RHS);
}
+inline bool Subf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Floating &RHS = S.Stk.pop<Floating>();
+ const Floating &LHS = S.Stk.pop<Floating>();
+
+ Floating Result;
+ auto Status = Floating::sub(LHS, RHS, RM, &Result);
+ S.Stk.push<Floating>(Result);
+ return CheckFloatResult(S, OpPC, Status);
+}
+
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Mul(InterpState &S, CodePtr OpPC) {
const T &RHS = S.Stk.pop<T>();
@@ -200,6 +297,15 @@ bool Mul(InterpState &S, CodePtr OpPC) {
return AddSubMulHelper<T, T::mul, std::multiplies>(S, OpPC, Bits, LHS, RHS);
}
+inline bool Mulf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Floating &RHS = S.Stk.pop<Floating>();
+ const Floating &LHS = S.Stk.pop<Floating>();
+
+ Floating Result;
+ auto Status = Floating::mul(LHS, RHS, RM, &Result);
+ S.Stk.push<Floating>(Result);
+ return CheckFloatResult(S, OpPC, Status);
+}
/// 1) Pops the RHS from the stack.
/// 2) Pops the LHS from the stack.
/// 3) Pushes 'LHS & RHS' on the stack
@@ -291,6 +397,19 @@ bool Div(InterpState &S, CodePtr OpPC) {
return false;
}
+inline bool Divf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Floating &RHS = S.Stk.pop<Floating>();
+ const Floating &LHS = S.Stk.pop<Floating>();
+
+ if (!CheckDivRem(S, OpPC, LHS, RHS))
+ return false;
+
+ Floating Result;
+ auto Status = Floating::div(LHS, RHS, RM, &Result);
+ S.Stk.push<Floating>(Result);
+ return CheckFloatResult(S, OpPC, Status);
+}
+
//===----------------------------------------------------------------------===//
// Inv
//===----------------------------------------------------------------------===//
@@ -313,12 +432,32 @@ bool Inv(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Neg(InterpState &S, CodePtr OpPC) {
- const T &Val = S.Stk.pop<T>();
+ const T &Value = S.Stk.pop<T>();
T Result;
- T::neg(Val, &Result);
+ if (!T::neg(Value, &Result)) {
+ S.Stk.push<T>(Result);
+ return true;
+ }
+
+ assert(isIntegralType(Name) &&
+ "don't expect other types to fail at constexpr negation");
S.Stk.push<T>(Result);
- return true;
+
+ APSInt NegatedValue = -Value.toAPSInt(Value.bitWidth() + 1);
+ const Expr *E = S.Current->getExpr(OpPC);
+ QualType Type = E->getType();
+
+ if (S.checkingForUndefinedBehavior()) {
+ SmallString<32> Trunc;
+ NegatedValue.trunc(Result.bitWidth()).toString(Trunc, 10);
+ auto Loc = E->getExprLoc();
+ S.report(Loc, diag::warn_integer_constant_overflow) << Trunc << Type;
+ return true;
+ }
+
+ S.CCEDiag(E, diag::note_constexpr_overflow) << NegatedValue << Type;
+ return S.noteUndefinedBehavior();
}
enum class PushVal : bool {
@@ -336,7 +475,7 @@ bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
T Result;
if constexpr (DoPush == PushVal::Yes)
- S.Stk.push<T>(Result);
+ S.Stk.push<T>(Value);
if constexpr (Op == IncDecOp::Inc) {
if (!T::increment(Value, &Result)) {
@@ -380,9 +519,11 @@ bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
/// 4) Pushes the original (pre-inc) value on the stack.
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Inc(InterpState &S, CodePtr OpPC) {
- // FIXME: Check initialization of Ptr
const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
+ return false;
+
return IncDecHelper<T, IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr);
}
@@ -391,9 +532,11 @@ bool Inc(InterpState &S, CodePtr OpPC) {
/// 3) Writes the value increased by one back to the pointer
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool IncPop(InterpState &S, CodePtr OpPC) {
- // FIXME: Check initialization of Ptr
const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
+ return false;
+
return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr);
}
@@ -403,9 +546,11 @@ bool IncPop(InterpState &S, CodePtr OpPC) {
/// 4) Pushes the original (pre-dec) value on the stack.
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Dec(InterpState &S, CodePtr OpPC) {
- // FIXME: Check initialization of Ptr
const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
+ return false;
+
return IncDecHelper<T, IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr);
}
@@ -414,12 +559,70 @@ bool Dec(InterpState &S, CodePtr OpPC) {
/// 3) Writes the value decreased by one back to the pointer
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool DecPop(InterpState &S, CodePtr OpPC) {
- // FIXME: Check initialization of Ptr
const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
+ return false;
+
return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr);
}
+template <IncDecOp Op, PushVal DoPush>
+bool IncDecFloatHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ llvm::RoundingMode RM) {
+ Floating Value = Ptr.deref<Floating>();
+ Floating Result;
+
+ if constexpr (DoPush == PushVal::Yes)
+ S.Stk.push<Floating>(Value);
+
+ llvm::APFloat::opStatus Status;
+ if constexpr (Op == IncDecOp::Inc)
+ Status = Floating::increment(Value, RM, &Result);
+ else
+ Status = Floating::decrement(Value, RM, &Result);
+
+ Ptr.deref<Floating>() = Result;
+
+ return CheckFloatResult(S, OpPC, Status);
+}
+
+inline bool Incf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
+ return false;
+
+ return IncDecFloatHelper<IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr, RM);
+}
+
+inline bool IncfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
+ return false;
+
+ return IncDecFloatHelper<IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, RM);
+}
+
+inline bool Decf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
+ return false;
+
+ return IncDecFloatHelper<IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr, RM);
+}
+
+inline bool DecfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
+ return false;
+
+ return IncDecFloatHelper<IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, RM);
+}
+
/// 1) Pops the value from the stack.
/// 2) Pushes the bitwise complemented value on the stack (~V).
template <PrimType Name, class T = typename PrimConv<Name>::T>
@@ -454,6 +657,29 @@ bool CmpHelperEQ(InterpState &S, CodePtr OpPC, CompareFn Fn) {
return CmpHelper<T>(S, OpPC, Fn);
}
+/// Function pointers cannot be compared in an ordered way.
+template <>
+inline bool CmpHelper<FunctionPointer>(InterpState &S, CodePtr OpPC,
+ CompareFn Fn) {
+ const auto &RHS = S.Stk.pop<FunctionPointer>();
+ const auto &LHS = S.Stk.pop<FunctionPointer>();
+
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_unspecified)
+ << LHS.toDiagnosticString(S.getCtx())
+ << RHS.toDiagnosticString(S.getCtx());
+ return false;
+}
+
+template <>
+inline bool CmpHelperEQ<FunctionPointer>(InterpState &S, CodePtr OpPC,
+ CompareFn Fn) {
+ const auto &RHS = S.Stk.pop<FunctionPointer>();
+ const auto &LHS = S.Stk.pop<FunctionPointer>();
+ S.Stk.push<Boolean>(Boolean::from(Fn(LHS.compare(RHS))));
+ return true;
+}
+
template <>
inline bool CmpHelper<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
using BoolT = PrimConv<PT_Bool>::T;
@@ -601,6 +827,9 @@ bool GetLocal(InterpState &S, CodePtr OpPC, uint32_t I) {
return true;
}
+/// 1) Pops the value from the stack.
+/// 2) Writes the value to the local variable with the
+/// given offset.
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool SetLocal(InterpState &S, CodePtr OpPC, uint32_t I) {
S.Current->setLocal<T>(I, S.Stk.pop<T>());
@@ -649,6 +878,7 @@ bool SetField(InterpState &S, CodePtr OpPC, uint32_t I) {
const Pointer &Field = Obj.atField(I);
if (!CheckStore(S, OpPC, Field))
return false;
+ Field.initialize();
Field.deref<T>() = Value;
return true;
}
@@ -719,6 +949,22 @@ bool InitGlobal(InterpState &S, CodePtr OpPC, uint32_t I) {
return true;
}
+/// 1) Converts the value on top of the stack to an APValue
+/// 2) Sets that APValue on \Temp
+/// 3) Initialized global with index \I with that
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool InitGlobalTemp(InterpState &S, CodePtr OpPC, uint32_t I,
+ const LifetimeExtendedTemporaryDecl *Temp) {
+ assert(Temp);
+ const T Value = S.Stk.peek<T>();
+ APValue APV = Value.toAPValue();
+ APValue *Cached = Temp->getOrCreateValue(true);
+ *Cached = APV;
+
+ S.P.getGlobal(I)->deref<T>() = S.Stk.pop<T>();
+ return true;
+}
+
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitThisField(InterpState &S, CodePtr OpPC, uint32_t I) {
if (S.checkingPotentialConstantExpression())
@@ -867,6 +1113,14 @@ inline bool GetPtrActiveThisField(InterpState &S, CodePtr OpPC, uint32_t Off) {
}
inline bool GetPtrBase(InterpState &S, CodePtr OpPC, uint32_t Off) {
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+ if (!CheckNull(S, OpPC, Ptr, CSK_Base))
+ return false;
+ S.Stk.push<Pointer>(Ptr.atField(Off));
+ return true;
+}
+
+inline bool GetPtrBasePop(InterpState &S, CodePtr OpPC, uint32_t Off) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckNull(S, OpPC, Ptr, CSK_Base))
return false;
@@ -1031,11 +1285,9 @@ bool InitElemPop(InterpState &S, CodePtr OpPC, uint32_t Idx) {
// AddOffset, SubOffset
//===----------------------------------------------------------------------===//
-template <class T, bool Add> bool OffsetHelper(InterpState &S, CodePtr OpPC) {
- // Fetch the pointer and the offset.
- const T &Offset = S.Stk.pop<T>();
- const Pointer &Ptr = S.Stk.pop<Pointer>();
-
+template <class T, ArithOp Op>
+bool OffsetHelper(InterpState &S, CodePtr OpPC, const T &Offset,
+ const Pointer &Ptr) {
if (!CheckRange(S, OpPC, Ptr, CSK_ArrayToPointer))
return false;
@@ -1062,7 +1314,8 @@ template <class T, bool Add> bool OffsetHelper(InterpState &S, CodePtr OpPC) {
const unsigned Bits = Offset.bitWidth();
APSInt APOffset(Offset.toAPSInt().extend(Bits + 2), false);
APSInt APIndex(Index.toAPSInt().extend(Bits + 2), false);
- APSInt NewIndex = Add ? (APIndex + APOffset) : (APIndex - APOffset);
+ APSInt NewIndex =
+ (Op == ArithOp::Add) ? (APIndex + APOffset) : (APIndex - APOffset);
S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_array_index)
<< NewIndex
<< /*array*/ static_cast<int>(!Ptr.inArray())
@@ -1071,7 +1324,7 @@ template <class T, bool Add> bool OffsetHelper(InterpState &S, CodePtr OpPC) {
};
unsigned MaxOffset = MaxIndex - Ptr.getIndex();
- if constexpr (Add) {
+ if constexpr (Op == ArithOp::Add) {
// If the new offset would be negative, bail out.
if (Offset.isNegative() && (Offset.isMin() || -Offset > Index))
return InvalidOffset();
@@ -1093,7 +1346,7 @@ template <class T, bool Add> bool OffsetHelper(InterpState &S, CodePtr OpPC) {
int64_t WideIndex = static_cast<int64_t>(Index);
int64_t WideOffset = static_cast<int64_t>(Offset);
int64_t Result;
- if constexpr (Add)
+ if constexpr (Op == ArithOp::Add)
Result = WideIndex + WideOffset;
else
Result = WideIndex - WideOffset;
@@ -1104,12 +1357,43 @@ template <class T, bool Add> bool OffsetHelper(InterpState &S, CodePtr OpPC) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool AddOffset(InterpState &S, CodePtr OpPC) {
- return OffsetHelper<T, true>(S, OpPC);
+ const T &Offset = S.Stk.pop<T>();
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ return OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr);
}
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool SubOffset(InterpState &S, CodePtr OpPC) {
- return OffsetHelper<T, false>(S, OpPC);
+ const T &Offset = S.Stk.pop<T>();
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ return OffsetHelper<T, ArithOp::Sub>(S, OpPC, Offset, Ptr);
+}
+
+template <ArithOp Op>
+static inline bool IncDecPtrHelper(InterpState &S, CodePtr OpPC) {
+ using OneT = Integral<8, false>;
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ // Get the current value on the stack.
+ S.Stk.push<Pointer>(Ptr.deref<Pointer>());
+
+ // Now the current Ptr again and a constant 1.
+ Pointer P = Ptr.deref<Pointer>();
+ OneT One = OneT::from(1);
+ if (!OffsetHelper<OneT, Op>(S, OpPC, One, P))
+ return false;
+
+ // Store the new value.
+ Ptr.deref<Pointer>() = S.Stk.pop<Pointer>();
+ return true;
+}
+
+static inline bool IncPtr(InterpState &S, CodePtr OpPC) {
+ return IncDecPtrHelper<ArithOp::Add>(S, OpPC);
+}
+
+static inline bool DecPtr(InterpState &S, CodePtr OpPC) {
+ return IncDecPtrHelper<ArithOp::Sub>(S, OpPC);
}
/// 1) Pops a Pointer from the stack.
@@ -1150,6 +1434,56 @@ template <PrimType TIn, PrimType TOut> bool Cast(InterpState &S, CodePtr OpPC) {
return true;
}
+/// 1) Pops a Floating from the stack.
+/// 2) Pushes a new floating on the stack that uses the given semantics.
+inline bool CastFP(InterpState &S, CodePtr OpPC, const llvm::fltSemantics *Sem,
+ llvm::RoundingMode RM) {
+ Floating F = S.Stk.pop<Floating>();
+ Floating Result = F.toSemantics(Sem, RM);
+ S.Stk.push<Floating>(Result);
+ return true;
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool CastIntegralFloating(InterpState &S, CodePtr OpPC,
+ const llvm::fltSemantics *Sem,
+ llvm::RoundingMode RM) {
+ const T &From = S.Stk.pop<T>();
+ APSInt FromAP = From.toAPSInt();
+ Floating Result;
+
+ auto Status = Floating::fromIntegral(FromAP, *Sem, RM, Result);
+ S.Stk.push<Floating>(Result);
+
+ return CheckFloatResult(S, OpPC, Status);
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool CastFloatingIntegral(InterpState &S, CodePtr OpPC) {
+ const Floating &F = S.Stk.pop<Floating>();
+
+ if constexpr (std::is_same_v<T, Boolean>) {
+ S.Stk.push<T>(T(F.isNonZero()));
+ return true;
+ } else {
+ APSInt Result(std::max(8u, T::bitWidth() + 1),
+ /*IsUnsigned=*/!T::isSigned());
+ auto Status = F.convertToInteger(Result);
+
+ // Float-to-Integral overflow check.
+ if ((Status & APFloat::opStatus::opInvalidOp) && F.isFinite()) {
+ const Expr *E = S.Current->getExpr(OpPC);
+ QualType Type = E->getType();
+
+ S.CCEDiag(E, diag::note_constexpr_overflow) << F.getAPFloat() << Type;
+ return S.noteUndefinedBehavior();
+ }
+
+ S.Stk.push<T>(T(Result));
+ return CheckFloatResult(S, OpPC, Status);
+ }
+}
+
//===----------------------------------------------------------------------===//
// Zero, Nullptr
//===----------------------------------------------------------------------===//
@@ -1186,6 +1520,8 @@ inline bool This(InterpState &S, CodePtr OpPC) {
inline bool RVOPtr(InterpState &S, CodePtr OpPC) {
assert(S.Current->getFunction()->hasRVO());
+ if (S.checkingPotentialConstantExpression())
+ return false;
S.Stk.push<Pointer>(S.Current->getRVOPtr());
return true;
}
@@ -1202,11 +1538,12 @@ inline bool Shr(InterpState &S, CodePtr OpPC) {
const auto &LHS = S.Stk.pop<LT>();
const unsigned Bits = LHS.bitWidth();
- if (!CheckShift<RT>(S, OpPC, RHS, Bits))
+ if (!CheckShift(S, OpPC, LHS, RHS, Bits))
return false;
- unsigned URHS = static_cast<unsigned>(RHS);
- S.Stk.push<LT>(LT::from(static_cast<unsigned>(LHS) >> URHS, LHS.bitWidth()));
+ Integral<LT::bitWidth(), false> R;
+ Integral<LT::bitWidth(), false>::shiftRight(LHS.toUnsigned(), RHS, Bits, &R);
+ S.Stk.push<LT>(R);
return true;
}
@@ -1218,12 +1555,12 @@ inline bool Shl(InterpState &S, CodePtr OpPC) {
const auto &LHS = S.Stk.pop<LT>();
const unsigned Bits = LHS.bitWidth();
- if (!CheckShift<RT>(S, OpPC, RHS, Bits))
+ if (!CheckShift(S, OpPC, LHS, RHS, Bits))
return false;
- unsigned URHS = static_cast<unsigned>(RHS);
- S.Stk.push<LT>(LT::from(static_cast<unsigned>(LHS) << URHS, LHS.bitWidth()));
-
+ Integral<LT::bitWidth(), false> R;
+ Integral<LT::bitWidth(), false>::shiftLeft(LHS.toUnsigned(), RHS, Bits, &R);
+ S.Stk.push<LT>(R);
return true;
}
@@ -1253,19 +1590,62 @@ inline bool ExpandPtr(InterpState &S, CodePtr OpPC) {
return true;
}
-inline bool Call(InterpState &S, CodePtr &PC, const Function *Func) {
- auto NewFrame = std::make_unique<InterpFrame>(S, Func, PC);
- Pointer ThisPtr;
+// 1) Pops an integral value from the stack
+// 2) Peeks a pointer
+// 3) Pushes a new pointer that's a narrowed array
+// element of the peeked pointer with the value
+// from 1) added as offset.
+//
+// This leaves the original pointer on the stack and pushes a new one
+// with the offset applied and narrowed.
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool ArrayElemPtr(InterpState &S, CodePtr OpPC) {
+ const T &Offset = S.Stk.pop<T>();
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+
+ if (!OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr))
+ return false;
+
+ return NarrowPtr(S, OpPC);
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool ArrayElemPtrPop(InterpState &S, CodePtr OpPC) {
+ const T &Offset = S.Stk.pop<T>();
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr))
+ return false;
+
+ return NarrowPtr(S, OpPC);
+}
+
+inline bool CheckGlobalCtor(InterpState &S, CodePtr OpPC) {
+ const Pointer &Obj = S.Stk.peek<Pointer>();
+ return CheckCtorCall(S, OpPC, Obj);
+}
+
+inline bool Call(InterpState &S, CodePtr OpPC, const Function *Func) {
if (Func->hasThisPointer()) {
- ThisPtr = NewFrame->getThis();
- if (!CheckInvoke(S, PC, ThisPtr)) {
+ size_t ThisOffset =
+ Func->getArgSize() + (Func->hasRVO() ? primSize(PT_Ptr) : 0);
+
+ const Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
+
+ if (!CheckInvoke(S, OpPC, ThisPtr))
+ return false;
+
+ if (S.checkingPotentialConstantExpression())
return false;
- }
}
- if (!CheckCallable(S, PC, Func))
+ if (!CheckCallable(S, OpPC, Func))
+ return false;
+
+ if (!CheckCallDepth(S, OpPC))
return false;
+ auto NewFrame = std::make_unique<InterpFrame>(S, Func, OpPC);
InterpFrame *FrameBefore = S.Current;
S.Current = NewFrame.get();
@@ -1276,11 +1656,6 @@ inline bool Call(InterpState &S, CodePtr &PC, const Function *Func) {
if (Interpret(S, CallResult)) {
NewFrame.release(); // Frame was delete'd already.
assert(S.Current == FrameBefore);
-
- // For constructors, check that all fields have been initialized.
- if (Func->isConstructor() && !CheckCtorCall(S, PC, ThisPtr))
- return false;
-
return true;
}
@@ -1290,6 +1665,66 @@ inline bool Call(InterpState &S, CodePtr &PC, const Function *Func) {
return false;
}
+inline bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func) {
+ assert(Func->hasThisPointer());
+ assert(Func->isVirtual());
+ size_t ThisOffset =
+ Func->getArgSize() + (Func->hasRVO() ? primSize(PT_Ptr) : 0);
+ Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
+
+ const CXXRecordDecl *DynamicDecl =
+ ThisPtr.getDeclDesc()->getType()->getAsCXXRecordDecl();
+ const auto *StaticDecl = cast<CXXRecordDecl>(Func->getParentDecl());
+ const auto *InitialFunction = cast<CXXMethodDecl>(Func->getDecl());
+ const CXXMethodDecl *Overrider = S.getContext().getOverridingFunction(
+ DynamicDecl, StaticDecl, InitialFunction);
+
+ if (Overrider != InitialFunction) {
+ Func = S.P.getFunction(Overrider);
+
+ const CXXRecordDecl *ThisFieldDecl =
+ ThisPtr.getFieldDesc()->getType()->getAsCXXRecordDecl();
+ if (Func->getParentDecl()->isDerivedFrom(ThisFieldDecl)) {
+ // If the function we call is further DOWN the hierarchy than the
+ // FieldDesc of our pointer, just get the DeclDesc instead, which
+ // is the furthest we might go up in the hierarchy.
+ ThisPtr = ThisPtr.getDeclPtr();
+ }
+ }
+
+ return Call(S, OpPC, Func);
+}
+
+inline bool CallBI(InterpState &S, CodePtr &PC, const Function *Func) {
+ auto NewFrame = std::make_unique<InterpFrame>(S, Func, PC);
+
+ InterpFrame *FrameBefore = S.Current;
+ S.Current = NewFrame.get();
+
+ if (InterpretBuiltin(S, PC, Func)) {
+ NewFrame.release();
+ return true;
+ }
+ S.Current = FrameBefore;
+ return false;
+}
+
+inline bool CallPtr(InterpState &S, CodePtr OpPC) {
+ const FunctionPointer &FuncPtr = S.Stk.pop<FunctionPointer>();
+
+ const Function *F = FuncPtr.getFunction();
+ if (!F || !F->isConstexpr())
+ return false;
+
+ return Call(S, OpPC, F);
+}
+
+inline bool GetFnPtr(InterpState &S, CodePtr OpPC, const Function *Func) {
+ assert(Func);
+ S.Stk.push<FunctionPointer>(Func);
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// Read opcode arguments
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp
index ed6e8910194d..a62128d9cfae 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp
@@ -16,11 +16,16 @@
using namespace clang;
using namespace clang::interp;
-
-
void Block::addPointer(Pointer *P) {
- if (IsStatic)
+ assert(P);
+ if (IsStatic) {
+ assert(!Pointers);
return;
+ }
+
+#ifndef NDEBUG
+ assert(!hasPointer(P));
+#endif
if (Pointers)
Pointers->Prev = P;
P->Next = Pointers;
@@ -29,10 +34,19 @@ void Block::addPointer(Pointer *P) {
}
void Block::removePointer(Pointer *P) {
- if (IsStatic)
+ assert(P);
+ if (IsStatic) {
+ assert(!Pointers);
return;
+ }
+
+#ifndef NDEBUG
+ assert(hasPointer(P));
+#endif
+
if (Pointers == P)
Pointers = P->Next;
+
if (P->Prev)
P->Prev->Next = P->Next;
if (P->Next)
@@ -44,21 +58,38 @@ void Block::cleanup() {
(reinterpret_cast<DeadBlock *>(this + 1) - 1)->free();
}
-void Block::movePointer(Pointer *From, Pointer *To) {
- if (IsStatic)
+void Block::replacePointer(Pointer *Old, Pointer *New) {
+ assert(Old);
+ assert(New);
+ if (IsStatic) {
+ assert(!Pointers);
return;
- To->Prev = From->Prev;
- if (To->Prev)
- To->Prev->Next = To;
- To->Next = From->Next;
- if (To->Next)
- To->Next->Prev = To;
- if (Pointers == From)
- Pointers = To;
-
- From->Prev = nullptr;
- From->Next = nullptr;
+ }
+
+#ifndef NDEBUG
+ assert(hasPointer(Old));
+#endif
+
+ removePointer(Old);
+ addPointer(New);
+
+ Old->Pointee = nullptr;
+
+#ifndef NDEBUG
+ assert(!hasPointer(Old));
+ assert(hasPointer(New));
+#endif
+}
+
+#ifndef NDEBUG
+bool Block::hasPointer(const Pointer *P) const {
+ for (const Pointer *C = Pointers; C; C = C->Next) {
+ if (C == P)
+ return true;
+ }
+ return false;
}
+#endif
DeadBlock::DeadBlock(DeadBlock *&Root, Block *Blk)
: Root(Root), B(Blk->Desc, Blk->IsStatic, Blk->IsExtern, /*isDead=*/true) {
@@ -83,5 +114,5 @@ void DeadBlock::free() {
Next->Prev = Prev;
if (Root == this)
Root = Next;
- ::free(this);
+ std::free(this);
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
index f790c50a9123..0080dad718ed 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
@@ -48,7 +48,7 @@ enum PrimType : unsigned;
///
class Block final {
public:
- // Creates a new block.
+ /// Creates a new block.
Block(const std::optional<unsigned> &DeclID, Descriptor *Desc,
bool IsStatic = false, bool IsExtern = false)
: DeclID(DeclID), IsStatic(IsStatic), IsExtern(IsExtern), Desc(Desc) {}
@@ -58,7 +58,7 @@ public:
Desc(Desc) {}
/// Returns the block's descriptor.
- Descriptor *getDescriptor() const { return Desc; }
+ const Descriptor *getDescriptor() const { return Desc; }
/// Checks if the block has any live pointers.
bool hasPointers() const { return Pointers; }
/// Checks if the block is extern.
@@ -68,7 +68,7 @@ public:
/// Checks if the block is temporary.
bool isTemporary() const { return Desc->IsTemporary; }
/// Returns the size of the block.
- InterpSize getSize() const { return Desc->getAllocSize(); }
+ unsigned getSize() const { return Desc->getAllocSize(); }
/// Returns the declaration ID.
std::optional<unsigned> getDeclID() const { return DeclID; }
@@ -104,7 +104,7 @@ public:
/*isActive=*/true, Desc);
}
- // Invokes the Destructor.
+ /// Invokes the Destructor.
void invokeDtor() {
if (Desc->DtorFn)
Desc->DtorFn(this, data(), Desc);
@@ -118,13 +118,16 @@ protected:
Block(Descriptor *Desc, bool IsExtern, bool IsStatic, bool IsDead)
: IsStatic(IsStatic), IsExtern(IsExtern), IsDead(true), Desc(Desc) {}
- // Deletes a dead block at the end of its lifetime.
+ /// Deletes a dead block at the end of its lifetime.
void cleanup();
- // Pointer chain management.
+ /// Pointer chain management.
void addPointer(Pointer *P);
void removePointer(Pointer *P);
- void movePointer(Pointer *From, Pointer *To);
+ void replacePointer(Pointer *Old, Pointer *New);
+#ifndef NDEBUG
+ bool hasPointer(const Pointer *P) const;
+#endif
/// Start of the chain of pointers.
Pointer *Pointers = nullptr;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp
new file mode 100644
index 000000000000..c11f22aa94ca
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp
@@ -0,0 +1,82 @@
+//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "Boolean.h"
+#include "Interp.h"
+#include "PrimType.h"
+#include "clang/Basic/Builtins.h"
+
+namespace clang {
+namespace interp {
+
+template <typename T> T getParam(InterpFrame *Frame, unsigned Index) {
+ unsigned Offset = Frame->getFunction()->getParamOffset(Index);
+ return Frame->getParam<T>(Offset);
+}
+
+static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
+ InterpFrame *Frame) {
+ const Pointer &A = getParam<Pointer>(Frame, 0);
+ const Pointer &B = getParam<Pointer>(Frame, 1);
+
+ if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
+ return false;
+
+ assert(A.getFieldDesc()->isPrimitiveArray());
+ assert(B.getFieldDesc()->isPrimitiveArray());
+
+ unsigned IndexA = A.getIndex();
+ unsigned IndexB = B.getIndex();
+ int32_t Result = 0;
+ for (;; ++IndexA, ++IndexB) {
+ const Pointer &PA = A.atIndex(IndexA);
+ const Pointer &PB = B.atIndex(IndexB);
+ if (!CheckRange(S, OpPC, PA, AK_Read) ||
+ !CheckRange(S, OpPC, PB, AK_Read)) {
+ return false;
+ }
+ uint8_t CA = PA.deref<uint8_t>();
+ uint8_t CB = PB.deref<uint8_t>();
+
+ if (CA > CB) {
+ Result = 1;
+ break;
+ } else if (CA < CB) {
+ Result = -1;
+ break;
+ }
+ if (CA == 0 || CB == 0)
+ break;
+ }
+
+ S.Stk.push<Integral<32, true>>(Integral<32, true>::from(Result));
+ return true;
+}
+
+bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F) {
+ InterpFrame *Frame = S.Current;
+ APValue Dummy;
+
+ switch (F->getBuiltinID()) {
+ case Builtin::BI__builtin_is_constant_evaluated:
+ S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
+ return Ret<PT_Bool, true>(S, OpPC, Dummy);
+ case Builtin::BI__builtin_assume:
+ return RetVoid<true>(S, OpPC, Dummy);
+ case Builtin::BI__builtin_strcmp:
+ if (interp__builtin_strcmp(S, OpPC, Frame))
+ return Ret<PT_Sint32, true>(S, OpPC, Dummy);
+ return false;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+} // namespace interp
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp
index 40644c538c6a..2229aa7c08f6 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp
@@ -8,6 +8,7 @@
#include "InterpFrame.h"
#include "Boolean.h"
+#include "Floating.h"
#include "Function.h"
#include "InterpStack.h"
#include "InterpState.h"
@@ -22,8 +23,8 @@ using namespace clang::interp;
InterpFrame::InterpFrame(InterpState &S, const Function *Func,
InterpFrame *Caller, CodePtr RetPC)
- : Caller(Caller), S(S), Func(Func), RetPC(RetPC),
- ArgSize(Func ? Func->getArgSize() : 0),
+ : Caller(Caller), S(S), Depth(Caller ? Caller->Depth + 1 : 0), Func(Func),
+ RetPC(RetPC), ArgSize(Func ? Func->getArgSize() : 0),
Args(static_cast<char *>(S.Stk.top())), FrameOffset(S.Stk.size()) {
if (!Func)
return;
@@ -75,7 +76,7 @@ InterpFrame::~InterpFrame() {
void InterpFrame::destroy(unsigned Idx) {
for (auto &Local : Func->getScope(Idx).locals()) {
- S.deallocate(reinterpret_cast<Block *>(localBlock(Local.Offset)));
+ S.deallocate(localBlock(Local.Offset));
}
}
@@ -97,20 +98,19 @@ void print(llvm::raw_ostream &OS, const Pointer &P, ASTContext &Ctx,
return;
}
- auto printDesc = [&OS, &Ctx](Descriptor *Desc) {
- if (auto *D = Desc->asDecl()) {
+ auto printDesc = [&OS, &Ctx](const Descriptor *Desc) {
+ if (const auto *D = Desc->asDecl()) {
// Subfields or named values.
- if (auto *VD = dyn_cast<ValueDecl>(D)) {
+ if (const auto *VD = dyn_cast<ValueDecl>(D)) {
OS << *VD;
return;
}
// Base classes.
- if (isa<RecordDecl>(D)) {
+ if (isa<RecordDecl>(D))
return;
- }
}
// Temporary expression.
- if (auto *E = Desc->asExpr()) {
+ if (const auto *E = Desc->asExpr()) {
E->printPretty(OS, nullptr, Ctx.getPrintingPolicy());
return;
}
@@ -125,6 +125,10 @@ void print(llvm::raw_ostream &OS, const Pointer &P, ASTContext &Ctx,
F = F.isArrayElement() ? F.getArray().expand() : F.getBase();
}
+ // Drop the first pointer since we print it unconditionally anyway.
+ if (!Levels.empty())
+ Levels.erase(Levels.begin());
+
printDesc(P.getDeclDesc());
for (const auto &It : Levels) {
if (It.inArray()) {
@@ -140,10 +144,10 @@ void print(llvm::raw_ostream &OS, const Pointer &P, ASTContext &Ctx,
}
}
-void InterpFrame::describe(llvm::raw_ostream &OS) {
+void InterpFrame::describe(llvm::raw_ostream &OS) const {
const FunctionDecl *F = getCallee();
- auto *M = dyn_cast<CXXMethodDecl>(F);
- if (M && M->isInstance() && !isa<CXXConstructorDecl>(F)) {
+ if (const auto *M = dyn_cast<CXXMethodDecl>(F);
+ M && M->isInstance() && !isa<CXXConstructorDecl>(F)) {
print(OS, This, S.getCtx(), S.getCtx().getRecordType(M->getParent()));
OS << "->";
}
@@ -184,8 +188,7 @@ const FunctionDecl *InterpFrame::getCallee() const {
Pointer InterpFrame::getLocalPointer(unsigned Offset) const {
assert(Offset < Func->getFrameSize() && "Invalid local offset.");
- return Pointer(reinterpret_cast<Block *>(localBlock(Offset)),
- sizeof(InlineDescriptor));
+ return Pointer(localBlock(Offset), sizeof(InlineDescriptor));
}
Pointer InterpFrame::getParamPointer(unsigned Off) {
@@ -210,6 +213,11 @@ Pointer InterpFrame::getParamPointer(unsigned Off) {
}
SourceInfo InterpFrame::getSource(CodePtr PC) const {
+ // Implicitly created functions don't have any code we could point at,
+ // so return the call site.
+ if (Func && Func->getDecl()->isImplicit() && Caller)
+ return Caller->getSource(RetPC);
+
return S.getSource(Func, PC);
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h
index bfa02c90ebec..ce58fb8d3f84 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h
@@ -15,7 +15,6 @@
#include "Frame.h"
#include "Program.h"
-#include "State.h"
#include <cstdint>
#include <vector>
@@ -51,7 +50,7 @@ public:
void popArgs();
/// Describes the frame with arguments for diagnostic purposes.
- void describe(llvm::raw_ostream &OS) override;
+ void describe(llvm::raw_ostream &OS) const override;
/// Returns the parent frame object.
Frame *getCaller() const override;
@@ -120,6 +119,8 @@ public:
const Expr *getExpr(CodePtr PC) const;
SourceLocation getLocation(CodePtr PC) const;
+ unsigned getDepth() const { return Depth; }
+
private:
/// Returns an original argument from the stack.
template <typename T> const T &stackRef(unsigned Offset) const {
@@ -133,8 +134,8 @@ private:
}
/// Returns a pointer to a local's block.
- void *localBlock(unsigned Offset) const {
- return Locals.get() + Offset - sizeof(Block);
+ Block *localBlock(unsigned Offset) const {
+ return reinterpret_cast<Block *>(Locals.get() + Offset - sizeof(Block));
}
// Returns the inline descriptor of the local.
@@ -145,6 +146,8 @@ private:
private:
/// Reference to the interpreter state.
InterpState &S;
+ /// Depth of this frame.
+ unsigned Depth;
/// Reference to the function being executed.
const Function *Func;
/// Current object pointer for methods.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp
index 7fe678e62192..da4b36f8c1bf 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp
@@ -6,9 +6,12 @@
//
//===----------------------------------------------------------------------===//
+#include "InterpStack.h"
+#include "Boolean.h"
+#include "Floating.h"
+#include "Integral.h"
#include <cassert>
#include <cstdlib>
-#include "InterpStack.h"
using namespace clang;
using namespace clang::interp;
@@ -19,11 +22,14 @@ InterpStack::~InterpStack() {
void InterpStack::clear() {
if (Chunk && Chunk->Next)
- free(Chunk->Next);
+ std::free(Chunk->Next);
if (Chunk)
- free(Chunk);
+ std::free(Chunk);
Chunk = nullptr;
StackSize = 0;
+#ifndef NDEBUG
+ ItemTypes.clear();
+#endif
}
void *InterpStack::grow(size_t Size) {
@@ -33,7 +39,7 @@ void *InterpStack::grow(size_t Size) {
if (Chunk && Chunk->Next) {
Chunk = Chunk->Next;
} else {
- StackChunk *Next = new (malloc(ChunkSize)) StackChunk(Chunk);
+ StackChunk *Next = new (std::malloc(ChunkSize)) StackChunk(Chunk);
if (Chunk)
Chunk->Next = Next;
Chunk = Next;
@@ -46,7 +52,7 @@ void *InterpStack::grow(size_t Size) {
return Object;
}
-void *InterpStack::peek(size_t Size) const {
+void *InterpStack::peekData(size_t Size) const {
assert(Chunk && "Stack is empty!");
StackChunk *Ptr = Chunk;
@@ -65,7 +71,7 @@ void InterpStack::shrink(size_t Size) {
while (Size > Chunk->size()) {
Size -= Chunk->size();
if (Chunk->Next) {
- free(Chunk->Next);
+ std::free(Chunk->Next);
Chunk->Next = nullptr;
}
Chunk->End = Chunk->start();
@@ -76,3 +82,24 @@ void InterpStack::shrink(size_t Size) {
Chunk->End -= Size;
StackSize -= Size;
}
+
+void InterpStack::dump() const {
+#ifndef NDEBUG
+ llvm::errs() << "Items: " << ItemTypes.size() << ". Size: " << size() << "\n";
+ if (ItemTypes.empty())
+ return;
+
+ size_t Index = 0;
+ size_t Offset = align(primSize(ItemTypes[0]));
+ for (PrimType Ty : ItemTypes) {
+ llvm::errs() << Index << "/" << Offset << ": ";
+ TYPE_SWITCH(Ty, {
+ const T &V = peek<T>(Offset);
+ llvm::errs() << V;
+ });
+ llvm::errs() << "\n";
+ Offset += align(primSize(Ty));
+ ++Index;
+ }
+#endif
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h
index 3adaad96515e..ab4351a6dc67 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h
@@ -13,6 +13,7 @@
#ifndef LLVM_CLANG_AST_INTERP_INTERPSTACK_H
#define LLVM_CLANG_AST_INTERP_INTERPSTACK_H
+#include "FunctionPointer.h"
#include "PrimType.h"
#include <memory>
#include <vector>
@@ -43,8 +44,8 @@ public:
assert(ItemTypes.back() == toPrimType<T>());
ItemTypes.pop_back();
#endif
- auto *Ptr = &peek<T>();
- auto Value = std::move(*Ptr);
+ T *Ptr = &peekInternal<T>();
+ T Value = std::move(*Ptr);
Ptr->~T();
shrink(aligned_size<T>());
return Value;
@@ -53,21 +54,31 @@ public:
/// Discards the top value from the stack.
template <typename T> void discard() {
#ifndef NDEBUG
+ assert(!ItemTypes.empty());
assert(ItemTypes.back() == toPrimType<T>());
ItemTypes.pop_back();
#endif
- auto *Ptr = &peek<T>();
+ T *Ptr = &peekInternal<T>();
Ptr->~T();
shrink(aligned_size<T>());
}
/// Returns a reference to the value on the top of the stack.
template <typename T> T &peek() const {
- return *reinterpret_cast<T *>(peek(aligned_size<T>()));
+#ifndef NDEBUG
+ assert(!ItemTypes.empty());
+ assert(ItemTypes.back() == toPrimType<T>());
+#endif
+ return peekInternal<T>();
+ }
+
+ template <typename T> T &peek(size_t Offset) const {
+ assert(aligned(Offset));
+ return *reinterpret_cast<T *>(peekData(Offset));
}
/// Returns a pointer to the top object.
- void *top() const { return Chunk ? peek(0) : nullptr; }
+ void *top() const { return Chunk ? peekData(0) : nullptr; }
/// Returns the size of the stack in bytes.
size_t size() const { return StackSize; }
@@ -75,9 +86,12 @@ public:
/// Clears the stack without calling any destructors.
void clear();
- // Returns whether the stack is empty.
+ /// Returns whether the stack is empty.
bool empty() const { return StackSize == 0; }
+ /// Dumps the stack contents to stderr.
+ void dump() const;
+
private:
/// All stack slots are aligned to the native pointer alignment for storage.
/// The size of an object is rounded up to a pointer alignment multiple.
@@ -86,10 +100,15 @@ private:
return ((sizeof(T) + PtrAlign - 1) / PtrAlign) * PtrAlign;
}
+ /// Like the public peek(), but without the debug type checks.
+ template <typename T> T &peekInternal() const {
+ return *reinterpret_cast<T *>(peekData(aligned_size<T>()));
+ }
+
/// Grows the stack to accommodate a value and returns a pointer to it.
void *grow(size_t Size);
/// Returns a pointer from the top of the stack.
- void *peek(size_t Size) const;
+ void *peekData(size_t Size) const;
/// Shrinks the stack.
void shrink(size_t Size);
@@ -160,6 +179,10 @@ private:
else if constexpr (std::is_same_v<T, uint64_t> ||
std::is_same_v<T, Integral<64, false>>)
return PT_Uint64;
+ else if constexpr (std::is_same_v<T, Floating>)
+ return PT_Float;
+ else if constexpr (std::is_same_v<T, FunctionPointer>)
+ return PT_FnPtr;
llvm_unreachable("unknown type push()'ed into InterpStack");
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp
index 25684f3c0939..2596c56b4e09 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp
@@ -7,24 +7,17 @@
//===----------------------------------------------------------------------===//
#include "InterpState.h"
-#include <limits>
-#include "Function.h"
#include "InterpFrame.h"
#include "InterpStack.h"
-#include "Opcode.h"
-#include "PrimType.h"
#include "Program.h"
#include "State.h"
using namespace clang;
using namespace clang::interp;
-using APSInt = llvm::APSInt;
-
InterpState::InterpState(State &Parent, Program &P, InterpStack &Stk,
Context &Ctx, SourceMapper *M)
- : Parent(Parent), M(M), P(P), Stk(Stk), Ctx(Ctx), Current(nullptr),
- CallStackDepth(Parent.getCallStackDepth() + 1) {}
+ : Parent(Parent), M(M), P(P), Stk(Stk), Ctx(Ctx), Current(nullptr) {}
InterpState::~InterpState() {
while (Current) {
@@ -35,17 +28,15 @@ InterpState::~InterpState() {
while (DeadBlocks) {
DeadBlock *Next = DeadBlocks->Next;
- free(DeadBlocks);
+ std::free(DeadBlocks);
DeadBlocks = Next;
}
}
Frame *InterpState::getCurrentFrame() {
- if (Current && Current->Caller) {
+ if (Current && Current->Caller)
return Current;
- } else {
- return Parent.getCurrentFrame();
- }
+ return Parent.getCurrentFrame();
}
bool InterpState::reportOverflow(const Expr *E, const llvm::APSInt &Value) {
@@ -55,12 +46,16 @@ bool InterpState::reportOverflow(const Expr *E, const llvm::APSInt &Value) {
}
void InterpState::deallocate(Block *B) {
- Descriptor *Desc = B->getDescriptor();
+ assert(B);
+ const Descriptor *Desc = B->getDescriptor();
+ assert(Desc);
+
if (B->hasPointers()) {
size_t Size = B->getSize();
// Allocate a new block, transferring over pointers.
- char *Memory = reinterpret_cast<char *>(malloc(sizeof(DeadBlock) + Size));
+ char *Memory =
+ reinterpret_cast<char *>(std::malloc(sizeof(DeadBlock) + Size));
auto *D = new (Memory) DeadBlock(DeadBlocks, B);
// Move data from one block to another.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h
index 033080637385..fc28c74002d9 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h
@@ -15,6 +15,7 @@
#include "Context.h"
#include "Function.h"
+#include "InterpFrame.h"
#include "InterpStack.h"
#include "State.h"
#include "clang/AST/APValue.h"
@@ -41,7 +42,9 @@ public:
// Stack frame accessors.
Frame *getSplitFrame() { return Parent.getCurrentFrame(); }
Frame *getCurrentFrame() override;
- unsigned getCallStackDepth() override { return CallStackDepth; }
+ unsigned getCallStackDepth() override {
+ return Current ? (Current->getDepth() + 1) : 1;
+ }
const Frame *getBottomFrame() const override {
return Parent.getBottomFrame();
}
@@ -86,6 +89,8 @@ public:
return M ? M->getSource(F, PC) : F->getSource(PC);
}
+ Context &getContext() const { return Ctx; }
+
private:
/// AST Walker state.
State &Parent;
@@ -103,8 +108,6 @@ public:
Context &Ctx;
/// The current frame.
InterpFrame *Current = nullptr;
- /// Call stack depth.
- unsigned CallStackDepth;
};
} // namespace interp
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td b/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td
index 058475b2d399..28074a350d05 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td
@@ -25,7 +25,9 @@ def Sint32 : Type;
def Uint32 : Type;
def Sint64 : Type;
def Uint64 : Type;
+def Float : Type;
def Ptr : Type;
+def FnPtr : Type;
//===----------------------------------------------------------------------===//
// Types transferred to the interpreter.
@@ -40,11 +42,15 @@ def ArgSint32 : ArgType { let Name = "int32_t"; }
def ArgUint32 : ArgType { let Name = "uint32_t"; }
def ArgSint64 : ArgType { let Name = "int64_t"; }
def ArgUint64 : ArgType { let Name = "uint64_t"; }
+def ArgFloat : ArgType { let Name = "Floating"; }
def ArgBool : ArgType { let Name = "bool"; }
def ArgFunction : ArgType { let Name = "const Function *"; }
def ArgRecordDecl : ArgType { let Name = "const RecordDecl *"; }
def ArgRecordField : ArgType { let Name = "const Record::Field *"; }
+def ArgFltSemantics : ArgType { let Name = "const llvm::fltSemantics *"; }
+def ArgRoundingMode : ArgType { let Name = "llvm::RoundingMode"; }
+def ArgLETD: ArgType { let Name = "const LifetimeExtendedTemporaryDecl *"; }
//===----------------------------------------------------------------------===//
// Classes of types instructions operate on.
@@ -54,34 +60,41 @@ class TypeClass {
list<Type> Types;
}
-def NumberTypeClass : TypeClass {
+def IntegerTypeClass : TypeClass {
let Types = [Sint8, Uint8, Sint16, Uint16, Sint32,
Uint32, Sint64, Uint64];
}
-def IntegerTypeClass : TypeClass {
- let Types = [Sint8, Uint8, Sint16, Uint16, Sint32,
- Uint32, Sint64, Uint64];
+def NumberTypeClass : TypeClass {
+ let Types = !listconcat(IntegerTypeClass.Types, [Float]);
+}
+
+def FloatTypeClass : TypeClass {
+ let Types = [Float];
}
def AluTypeClass : TypeClass {
- let Types = !listconcat(NumberTypeClass.Types, [Bool]);
+ let Types = !listconcat(IntegerTypeClass.Types, [Bool]);
}
def PtrTypeClass : TypeClass {
- let Types = [Ptr];
+ let Types = [Ptr, FnPtr];
}
def BoolTypeClass : TypeClass {
let Types = [Bool];
}
+def NonPtrTypeClass : TypeClass {
+ let Types = !listconcat(IntegerTypeClass.Types, [Bool], [Float]);
+}
+
def AllTypeClass : TypeClass {
- let Types = !listconcat(AluTypeClass.Types, PtrTypeClass.Types);
+ let Types = !listconcat(AluTypeClass.Types, PtrTypeClass.Types, FloatTypeClass.Types);
}
def ComparableTypeClass : TypeClass {
- let Types = !listconcat(AluTypeClass.Types, [Ptr]);
+ let Types = !listconcat(AluTypeClass.Types, [Ptr], [Float], [FnPtr]);
}
class SingletonTypeClass<Type Ty> : TypeClass {
@@ -108,6 +121,11 @@ class AluOpcode : Opcode {
let HasGroup = 1;
}
+class FloatOpcode : Opcode {
+ let Types = [];
+ let Args = [ArgRoundingMode];
+}
+
class IntegerOpcode : Opcode {
let Types = [IntegerTypeClass];
let HasGroup = 1;
@@ -161,7 +179,21 @@ def NoRet : Opcode {}
def Call : Opcode {
let Args = [ArgFunction];
let Types = [];
- let ChangesPC = 1;
+}
+
+def CallVirt : Opcode {
+ let Args = [ArgFunction];
+ let Types = [];
+}
+
+def CallBI : Opcode {
+ let Args = [ArgFunction];
+ let Types = [];
+}
+
+def CallPtr : Opcode {
+ let Args = [];
+ let Types = [];
}
//===----------------------------------------------------------------------===//
@@ -193,6 +225,7 @@ def ConstSint32 : ConstOpcode<Sint32, ArgSint32>;
def ConstUint32 : ConstOpcode<Uint32, ArgUint32>;
def ConstSint64 : ConstOpcode<Sint64, ArgSint64>;
def ConstUint64 : ConstOpcode<Uint64, ArgUint64>;
+def ConstFloat : ConstOpcode<Float, ArgFloat>;
def ConstBool : ConstOpcode<Bool, ArgBool>;
// [] -> [Integer]
@@ -204,6 +237,7 @@ def Zero : Opcode {
// [] -> [Pointer]
def Null : Opcode {
let Types = [PtrTypeClass];
+ let HasGroup = 1;
}
//===----------------------------------------------------------------------===//
@@ -252,6 +286,12 @@ def GetPtrBase : Opcode {
let Args = [ArgUint32];
}
// [Pointer] -> [Pointer]
+def GetPtrBasePop : Opcode {
+ // Offset of field, which is a base.
+ let Args = [ArgUint32];
+}
+
+// [Pointer] -> [Pointer]
def GetPtrVirtBase : Opcode {
// RecordDecl of base class.
let Args = [ArgRecordDecl];
@@ -276,6 +316,9 @@ def RVOPtr : Opcode;
def NarrowPtr : Opcode;
// [Pointer] -> [Pointer]
def ExpandPtr : Opcode;
+// [Pointer, Offset] -> [Pointer]
+def ArrayElemPtr : AluOpcode;
+def ArrayElemPtrPop : AluOpcode;
//===----------------------------------------------------------------------===//
// Direct field accessors
@@ -298,11 +341,17 @@ def GetLocal : AccessOpcode { let HasCustomEval = 1; }
// [] -> [Pointer]
def SetLocal : AccessOpcode { let HasCustomEval = 1; }
+def CheckGlobalCtor : Opcode {}
+
// [] -> [Value]
def GetGlobal : AccessOpcode;
// [Value] -> []
def InitGlobal : AccessOpcode;
// [Value] -> []
+def InitGlobalTemp : AccessOpcode {
+ let Args = [ArgUint32, ArgLETD];
+}
+// [Value] -> []
def SetGlobal : AccessOpcode;
// [] -> [Value]
@@ -393,24 +442,47 @@ def AddOffset : AluOpcode;
// [Pointer, Integral] -> [Pointer]
def SubOffset : AluOpcode;
-// Pointer, Pointer] - [Integral]
+// [Pointer, Pointer] -> [Integral]
def SubPtr : Opcode {
let Types = [IntegerTypeClass];
let HasGroup = 1;
}
+// [Pointer] -> [Pointer]
+def IncPtr : Opcode {
+ let HasGroup = 0;
+}
+// [Pointer] -> [Pointer]
+def DecPtr : Opcode {
+ let HasGroup = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Function pointers.
+//===----------------------------------------------------------------------===//
+def GetFnPtr : Opcode {
+ let Args = [ArgFunction];
+}
+
+
//===----------------------------------------------------------------------===//
// Binary operators.
//===----------------------------------------------------------------------===//
// [Real, Real] -> [Real]
-def Sub : AluOpcode;
-def Add : AluOpcode;
-def Mul : AluOpcode;
-def Rem : Opcode {
- let Types = [NumberTypeClass];
- let HasGroup = 1;
-}
+def Add : AluOpcode;
+def Addf : FloatOpcode;
+def Sub : AluOpcode;
+def Subf : FloatOpcode;
+def Mul : AluOpcode;
+def Mulf : FloatOpcode;
+def Rem : IntegerOpcode;
+def Div : IntegerOpcode;
+def Divf : FloatOpcode;
+
+def BitAnd : IntegerOpcode;
+def BitOr : IntegerOpcode;
+def BitXor : IntegerOpcode;
def Shl : Opcode {
let Types = [IntegerTypeClass, IntegerTypeClass];
@@ -422,14 +494,6 @@ def Shr : Opcode {
let HasGroup = 1;
}
-def BitAnd : IntegerOpcode;
-def BitOr : IntegerOpcode;
-def Div : Opcode {
- let Types = [NumberTypeClass];
- let HasGroup = 1;
-}
-def BitXor : IntegerOpcode;
-
//===----------------------------------------------------------------------===//
// Unary operators.
//===----------------------------------------------------------------------===//
@@ -440,27 +504,33 @@ def Inv: Opcode {
let HasGroup = 1;
}
+// Increment and decrement.
def Inc: IntegerOpcode;
def IncPop : IntegerOpcode;
def Dec: IntegerOpcode;
def DecPop: IntegerOpcode;
+// Float increment and decrement.
+def Incf: FloatOpcode;
+def IncfPop : FloatOpcode;
+def Decf: FloatOpcode;
+def DecfPop : FloatOpcode;
+
// [Real] -> [Real]
def Neg: Opcode {
- let Types = [AluTypeClass];
+ let Types = [NonPtrTypeClass];
let HasGroup = 1;
}
// [Real] -> [Real]
def Comp: Opcode {
- let Types = [NumberTypeClass];
+ let Types = [IntegerTypeClass];
let HasGroup = 1;
}
//===----------------------------------------------------------------------===//
-// Cast.
+// Cast, CastFP.
//===----------------------------------------------------------------------===//
-// TODO: Expand this to handle casts between more types.
def FromCastTypeClass : TypeClass {
let Types = [Uint8, Sint8, Uint16, Sint16, Uint32, Sint32, Uint64, Sint64, Bool];
@@ -475,6 +545,25 @@ def Cast: Opcode {
let HasGroup = 1;
}
+def CastFP : Opcode {
+ let Types = [];
+ let Args = [ArgFltSemantics, ArgRoundingMode];
+}
+
+// Cast an integer to a floating type
+def CastIntegralFloating : Opcode {
+ let Types = [AluTypeClass];
+ let Args = [ArgFltSemantics, ArgRoundingMode];
+ let HasGroup = 1;
+}
+
+// Cast a floating to an integer type
+def CastFloatingIntegral : Opcode {
+ let Types = [AluTypeClass];
+ let Args = [];
+ let HasGroup = 1;
+}
+
//===----------------------------------------------------------------------===//
// Comparison opcodes.
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp
index fd8c98fae039..00943dc846d3 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp
@@ -24,7 +24,7 @@ Pointer::Pointer(const Pointer &P) : Pointer(P.Pointee, P.Base, P.Offset) {}
Pointer::Pointer(Pointer &&P)
: Pointee(P.Pointee), Base(P.Base), Offset(P.Offset) {
if (Pointee)
- Pointee->movePointer(&P, this);
+ Pointee->replacePointer(&P, this);
}
Pointer::Pointer(Block *Pointee, unsigned Base, unsigned Offset)
@@ -69,7 +69,7 @@ void Pointer::operator=(Pointer &&P) {
Pointee = P.Pointee;
if (Pointee)
- Pointee->movePointer(&P, this);
+ Pointee->replacePointer(&P, this);
if (Old)
Old->cleanup();
@@ -103,6 +103,10 @@ APValue Pointer::toAPValue() const {
if (isUnknownSizeArray()) {
IsOnePastEnd = false;
Offset = CharUnits::Zero();
+ } else if (Desc->asExpr()) {
+ // Pointer pointing to an expression.
+ IsOnePastEnd = false;
+ Offset = CharUnits::Zero();
} else {
// TODO: compute the offset into the object.
Offset = CharUnits::Zero();
@@ -143,7 +147,7 @@ APValue Pointer::toAPValue() const {
bool Pointer::isInitialized() const {
assert(Pointee && "Cannot check if null pointer was initialized");
- Descriptor *Desc = getFieldDesc();
+ const Descriptor *Desc = getFieldDesc();
assert(Desc);
if (Desc->isPrimitiveArray()) {
if (isStatic() && Base == 0)
@@ -155,39 +159,38 @@ bool Pointer::isInitialized() const {
if (Map == (InitMap *)-1)
return true;
return Map->isInitialized(getIndex());
- } else {
- // Field has its bit in an inline descriptor.
- return Base == 0 || getInlineDesc()->IsInitialized;
}
+
+ // Field has its bit in an inline descriptor.
+ return Base == 0 || getInlineDesc()->IsInitialized;
}
void Pointer::initialize() const {
assert(Pointee && "Cannot initialize null pointer");
- Descriptor *Desc = getFieldDesc();
+ const Descriptor *Desc = getFieldDesc();
assert(Desc);
- if (Desc->isArray()) {
- if (Desc->isPrimitiveArray()) {
- // Primitive global arrays don't have an initmap.
- if (isStatic() && Base == 0)
- return;
-
- // Primitive array initializer.
- InitMap *&Map = getInitMap();
- if (Map == (InitMap *)-1)
- return;
- if (Map == nullptr)
- Map = InitMap::allocate(Desc->getNumElems());
- if (Map->initialize(getIndex())) {
- free(Map);
- Map = (InitMap *)-1;
- }
+ if (Desc->isPrimitiveArray()) {
+ // Primitive global arrays don't have an initmap.
+ if (isStatic() && Base == 0)
+ return;
+
+ // Primitive array initializer.
+ InitMap *&Map = getInitMap();
+ if (Map == (InitMap *)-1)
+ return;
+ if (Map == nullptr)
+ Map = InitMap::allocate(Desc->getNumElems());
+ if (Map->initialize(getIndex())) {
+ free(Map);
+ Map = (InitMap *)-1;
}
- } else {
- // Field has its bit in an inline descriptor.
- assert(Base != 0 && "Only composite fields can be initialised");
- getInlineDesc()->IsInitialized = true;
+ return;
}
+
+ // Field has its bit in an inline descriptor.
+ assert(Base != 0 && "Only composite fields can be initialised");
+ getInlineDesc()->IsInitialized = true;
}
void Pointer::activate() const {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
index 1462d01c2412..f795466f1db4 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
@@ -200,6 +200,8 @@ public:
/// Returns the type of the innermost field.
QualType getType() const { return getFieldDesc()->getType(); }
+ Pointer getDeclPtr() const { return Pointer(Pointee); }
+
/// Returns the element size of the innermost field.
size_t elemSize() const {
if (Base == RootPtrMark)
@@ -225,6 +227,10 @@ public:
return Offset - Base - Adjust;
}
+ /// Whether this array refers to an array, but not
+ /// to the first element.
+ bool isArrayRoot() const { return inArray() && Offset == Base; }
+
/// Checks if the innermost field is an array.
bool inArray() const { return getFieldDesc()->IsArray; }
/// Checks if the structure is a primitive array.
@@ -241,9 +247,11 @@ public:
}
/// Returns the record descriptor of a class.
- Record *getRecord() const { return getFieldDesc()->ElemRecord; }
- // Returns the element record type, if this is a non-primive array.
- Record *getElemRecord() const { return getFieldDesc()->ElemDesc->ElemRecord; }
+ const Record *getRecord() const { return getFieldDesc()->ElemRecord; }
+ /// Returns the element record type, if this is a non-primive array.
+ const Record *getElemRecord() const {
+ return getFieldDesc()->ElemDesc->ElemRecord;
+ }
/// Returns the field information.
const FieldDecl *getField() const { return getFieldDesc()->asFieldDecl(); }
@@ -286,6 +294,8 @@ public:
/// Returns the number of elements.
unsigned getNumElems() const { return getSize() / elemSize(); }
+ const Block *block() const { return Pointee; }
+
/// Returns the index into an array.
int64_t getIndex() const {
if (isElementPastEnd())
@@ -306,12 +316,17 @@ public:
/// Dereferences the pointer, if it's live.
template <typename T> T &deref() const {
assert(isLive() && "Invalid pointer");
+ if (isArrayRoot())
+ return *reinterpret_cast<T *>(Pointee->rawData() + Base +
+ sizeof(InitMap *));
+
return *reinterpret_cast<T *>(Pointee->rawData() + Offset);
}
/// Dereferences a primitive element.
template <typename T> T &elem(unsigned I) const {
- return reinterpret_cast<T *>(Pointee->rawData())[I];
+ assert(I < getNumElems());
+ return reinterpret_cast<T *>(Pointee->data() + sizeof(InitMap *))[I];
}
/// Initializes a field.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp
index eda90e1c36c2..a9b5d8ea8cc8 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp
@@ -8,6 +8,8 @@
#include "PrimType.h"
#include "Boolean.h"
+#include "Floating.h"
+#include "FunctionPointer.h"
#include "Pointer.h"
using namespace clang;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h
index c8f2a600fb3c..693e57210608 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h
@@ -13,7 +13,6 @@
#ifndef LLVM_CLANG_AST_INTERP_TYPE_H
#define LLVM_CLANG_AST_INTERP_TYPE_H
-#include "Integral.h"
#include <climits>
#include <cstddef>
#include <cstdint>
@@ -23,6 +22,9 @@ namespace interp {
class Pointer;
class Boolean;
+class Floating;
+class FunctionPointer;
+template <unsigned Bits, bool Signed> class Integral;
/// Enumeration of the primitive types of the VM.
enum PrimType : unsigned {
@@ -35,9 +37,13 @@ enum PrimType : unsigned {
PT_Sint64,
PT_Uint64,
PT_Bool,
+ PT_Float,
PT_Ptr,
+ PT_FnPtr,
};
+constexpr bool isIntegralType(PrimType T) { return T <= PT_Uint64; }
+
/// Mapping from primitive types to their representation.
template <PrimType T> struct PrimConv;
template <> struct PrimConv<PT_Sint8> { using T = Integral<8, true>; };
@@ -48,8 +54,12 @@ template <> struct PrimConv<PT_Sint32> { using T = Integral<32, true>; };
template <> struct PrimConv<PT_Uint32> { using T = Integral<32, false>; };
template <> struct PrimConv<PT_Sint64> { using T = Integral<64, true>; };
template <> struct PrimConv<PT_Uint64> { using T = Integral<64, false>; };
+template <> struct PrimConv<PT_Float> { using T = Floating; };
template <> struct PrimConv<PT_Bool> { using T = Boolean; };
template <> struct PrimConv<PT_Ptr> { using T = Pointer; };
+template <> struct PrimConv<PT_FnPtr> {
+ using T = FunctionPointer;
+};
/// Returns the size of a primitive type in bytes.
size_t primSize(PrimType Type);
@@ -66,23 +76,6 @@ static inline bool aligned(const void *P) {
return aligned(reinterpret_cast<uintptr_t>(P));
}
-inline bool isPrimitiveIntegral(PrimType Type) {
- switch (Type) {
- case PT_Bool:
- case PT_Sint8:
- case PT_Uint8:
- case PT_Sint16:
- case PT_Uint16:
- case PT_Sint32:
- case PT_Uint32:
- case PT_Sint64:
- case PT_Uint64:
- return true;
- default:
- return false;
- }
-}
-
} // namespace interp
} // namespace clang
@@ -101,8 +94,10 @@ inline bool isPrimitiveIntegral(PrimType Type) {
TYPE_SWITCH_CASE(PT_Uint32, B) \
TYPE_SWITCH_CASE(PT_Sint64, B) \
TYPE_SWITCH_CASE(PT_Uint64, B) \
+ TYPE_SWITCH_CASE(PT_Float, B) \
TYPE_SWITCH_CASE(PT_Bool, B) \
TYPE_SWITCH_CASE(PT_Ptr, B) \
+ TYPE_SWITCH_CASE(PT_FnPtr, B) \
} \
} while (0)
#define COMPOSITE_TYPE_SWITCH(Expr, B, D) \
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Primitives.h b/contrib/llvm-project/clang/lib/AST/Interp/Primitives.h
new file mode 100644
index 000000000000..e935dbfd3691
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Primitives.h
@@ -0,0 +1,36 @@
+//===------ Primitives.h - Types for the constexpr VM -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Utilities and helper functions for all primitive types:
+// - Integral
+// - Floating
+// - Boolean
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_PRIMITIVES_H
+#define LLVM_CLANG_AST_INTERP_PRIMITIVES_H
+
+#include "clang/AST/ComparisonCategories.h"
+
+namespace clang {
+namespace interp {
+
+/// Helper to compare two comparable types.
+template <typename T> ComparisonCategoryResult Compare(const T &X, const T &Y) {
+ if (X < Y)
+ return ComparisonCategoryResult::Less;
+ if (X > Y)
+ return ComparisonCategoryResult::Greater;
+ return ComparisonCategoryResult::Equal;
+}
+
+} // namespace interp
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp
index 5305ddd8de18..c1697bb7fa6d 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp
@@ -10,6 +10,7 @@
#include "ByteCodeStmtGen.h"
#include "Context.h"
#include "Function.h"
+#include "Integral.h"
#include "Opcode.h"
#include "PrimType.h"
#include "clang/AST/Decl.h"
@@ -119,7 +120,7 @@ std::optional<unsigned> Program::getGlobal(const ValueDecl *VD) {
// Map the decl to the existing index.
if (Index) {
GlobalIndices[VD] = *Index;
- return {};
+ return std::nullopt;
}
return Index;
@@ -134,26 +135,26 @@ std::optional<unsigned> Program::getOrCreateGlobal(const ValueDecl *VD,
GlobalIndices[VD] = *Idx;
return Idx;
}
- return {};
+ return std::nullopt;
}
std::optional<unsigned> Program::getOrCreateDummy(const ParmVarDecl *PD) {
- auto &ASTCtx = Ctx.getASTContext();
+ // Dedup blocks since they are immutable and pointers cannot be compared.
+ if (auto It = DummyParams.find(PD);
+ It != DummyParams.end())
+ return It->second;
+
+ auto &ASTCtx = Ctx.getASTContext();
// Create a pointer to an incomplete array of the specified elements.
QualType ElemTy = PD->getType()->castAs<PointerType>()->getPointeeType();
QualType Ty = ASTCtx.getIncompleteArrayType(ElemTy, ArrayType::Normal, 0);
- // Dedup blocks since they are immutable and pointers cannot be compared.
- auto It = DummyParams.find(PD);
- if (It != DummyParams.end())
- return It->second;
-
if (auto Idx = createGlobal(PD, Ty, /*isStatic=*/true, /*isExtern=*/true)) {
DummyParams[PD] = *Idx;
return Idx;
}
- return {};
+ return std::nullopt;
}
std::optional<unsigned> Program::createGlobal(const ValueDecl *VD,
@@ -161,7 +162,7 @@ std::optional<unsigned> Program::createGlobal(const ValueDecl *VD,
assert(!getGlobal(VD));
bool IsStatic, IsExtern;
if (auto *Var = dyn_cast<VarDecl>(VD)) {
- IsStatic = !Var->hasLocalStorage();
+ IsStatic = Context::shouldBeGloballyIndexed(VD);
IsExtern = !Var->getAnyInitializer();
} else {
IsStatic = false;
@@ -172,7 +173,7 @@ std::optional<unsigned> Program::createGlobal(const ValueDecl *VD,
GlobalIndices[P] = *Idx;
return *Idx;
}
- return {};
+ return std::nullopt;
}
std::optional<unsigned> Program::createGlobal(const Expr *E) {
@@ -193,7 +194,7 @@ std::optional<unsigned> Program::createGlobal(const DeclTy &D, QualType Ty,
IsTemporary);
}
if (!Desc)
- return {};
+ return std::nullopt;
// Allocate a block for storage.
unsigned I = Globals.size();
@@ -221,10 +222,8 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) {
return nullptr;
// Deduplicate records.
- auto It = Records.find(RD);
- if (It != Records.end()) {
+ if (auto It = Records.find(RD); It != Records.end())
return It->second;
- }
// We insert nullptr now and replace that later, so recursive calls
// to this function with the same RecordDecl don't run into
@@ -340,7 +339,7 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
D, ElemTy.getTypePtr(), std::nullopt, IsConst, IsTemporary);
if (!ElemDesc)
return nullptr;
- InterpSize ElemSize =
+ unsigned ElemSize =
ElemDesc->getAllocSize() + sizeof(InlineDescriptor);
if (std::numeric_limits<unsigned>::max() / ElemSize <= NumElems)
return {};
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Program.h b/contrib/llvm-project/clang/lib/AST/Interp/Program.h
index 5a80dd1ed748..4547ca7ac69c 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Program.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Program.h
@@ -131,7 +131,9 @@ public:
/// Context to manage declaration lifetimes.
class DeclScope {
public:
- DeclScope(Program &P, const VarDecl *VD) : P(P) { P.startDeclaration(VD); }
+ DeclScope(Program &P, const ValueDecl *VD) : P(P) {
+ P.startDeclaration(VD);
+ }
~DeclScope() { P.endDeclaration(); }
private:
@@ -222,7 +224,7 @@ private:
unsigned CurrentDeclaration = NoDeclaration;
/// Starts evaluating a declaration.
- void startDeclaration(const VarDecl *Decl) {
+ void startDeclaration(const ValueDecl *Decl) {
LastDeclaration += 1;
CurrentDeclaration = LastDeclaration;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp
index f440c4705051..909416e6e1a1 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp
@@ -39,6 +39,14 @@ const Record::Base *Record::getBase(const RecordDecl *FD) const {
return It->second;
}
+const Record::Base *Record::getBase(QualType T) const {
+ if (!T->isRecordType())
+ return nullptr;
+
+ const RecordDecl *RD = T->getAs<RecordType>()->getDecl();
+ return BaseMap.lookup(RD);
+}
+
const Record::Base *Record::getVirtualBase(const RecordDecl *FD) const {
auto It = VirtualBaseMap.find(FD);
assert(It != VirtualBaseMap.end() && "Missing virtual base");
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Record.h b/contrib/llvm-project/clang/lib/AST/Interp/Record.h
index 1742cb1cc4ee..940b4c9ebf59 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Record.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Record.h
@@ -61,9 +61,11 @@ public:
const Field *getField(const FieldDecl *FD) const;
/// Returns a base descriptor.
const Base *getBase(const RecordDecl *FD) const;
+ /// Returns a base descriptor.
+ const Base *getBase(QualType T) const;
/// Returns a virtual base descriptor.
const Base *getVirtualBase(const RecordDecl *RD) const;
- // Returns the destructor of the record, if any.
+ /// Returns the destructor of the record, if any.
const CXXDestructorDecl *getDestructor() const {
if (const auto *CXXDecl = dyn_cast<CXXRecordDecl>(Decl))
return CXXDecl->getDestructor();
@@ -85,7 +87,7 @@ public:
}
unsigned getNumBases() const { return Bases.size(); }
- Base *getBase(unsigned I) { return &Bases[I]; }
+ const Base *getBase(unsigned I) const { return &Bases[I]; }
using const_virtual_iter = VirtualBaseList::const_iterator;
llvm::iterator_range<const_virtual_iter> virtual_bases() const {
@@ -93,7 +95,7 @@ public:
}
unsigned getNumVirtualBases() const { return VirtualBases.size(); }
- Base *getVirtualBase(unsigned I) { return &VirtualBases[I]; }
+ const Base *getVirtualBase(unsigned I) const { return &VirtualBases[I]; }
private:
/// Constructor used by Program to create record descriptors.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Source.h b/contrib/llvm-project/clang/lib/AST/Interp/Source.h
index 99ffce34c12f..89fca9ac80f2 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Source.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Source.h
@@ -56,14 +56,11 @@ public:
}
private:
- /// Constructor used by Function to generate pointers.
- CodePtr(const char *Ptr) : Ptr(Ptr) {}
-
-private:
friend class Function;
-
+ /// Constructor used by Function to generate pointers.
+ CodePtr(const std::byte *Ptr) : Ptr(Ptr) {}
/// Pointer into the code owned by a function.
- const char *Ptr;
+ const std::byte *Ptr;
};
/// Describes the statement/declaration an opcode was generated from.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/State.cpp b/contrib/llvm-project/clang/lib/AST/Interp/State.cpp
index 56774f88fb45..9a327fb810de 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/State.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/State.cpp
@@ -11,6 +11,7 @@
#include "Program.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/OptionalDiagnostic.h"
using namespace clang;
using namespace clang::interp;
@@ -125,9 +126,9 @@ void State::addCallStack(unsigned Limit) {
// Walk the call stack and add the diagnostics.
unsigned CallIdx = 0;
- Frame *Top = getCurrentFrame();
+ const Frame *Top = getCurrentFrame();
const Frame *Bottom = getBottomFrame();
- for (Frame *F = Top; F != Bottom; F = F->getCaller(), ++CallIdx) {
+ for (const Frame *F = Top; F != Bottom; F = F->getCaller(), ++CallIdx) {
SourceLocation CallLocation = F->getCallLocation();
// Skip this call?
@@ -142,12 +143,12 @@ void State::addCallStack(unsigned Limit) {
// Use a different note for an inheriting constructor, because from the
// user's perspective it's not really a function at all.
- if (auto *CD = dyn_cast_or_null<CXXConstructorDecl>(F->getCallee())) {
- if (CD->isInheritingConstructor()) {
- addDiag(CallLocation, diag::note_constexpr_inherited_ctor_call_here)
- << CD->getParent();
- continue;
- }
+ if (const auto *CD =
+ dyn_cast_if_present<CXXConstructorDecl>(F->getCallee());
+ CD && CD->isInheritingConstructor()) {
+ addDiag(CallLocation, diag::note_constexpr_inherited_ctor_call_here)
+ << CD->getParent();
+ continue;
}
SmallString<128> Buffer;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/State.h b/contrib/llvm-project/clang/lib/AST/Interp/State.h
index 131fbcf3cffc..d897b7c20275 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/State.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/State.h
@@ -15,9 +15,9 @@
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Expr.h"
-#include "clang/AST/OptionalDiagnostic.h"
namespace clang {
+class OptionalDiagnostic;
/// Kinds of access we can perform on an object, for diagnostics. Note that
/// we consider a member function call to be a kind of access, even though
@@ -36,7 +36,7 @@ enum AccessKinds {
AK_Destroy,
};
-// The order of this enum is important for diagnostics.
+/// The order of this enum is important for diagnostics.
enum CheckSubobjectKind {
CSK_Base,
CSK_Derived,
@@ -72,7 +72,7 @@ public:
public:
State() : InConstantContext(false) {}
- // Diagnose that the evaluation could not be folded (FF => FoldFailure)
+ /// Diagnose that the evaluation could not be folded (FF => FoldFailure)
OptionalDiagnostic
FFDiag(SourceLocation Loc,
diag::kind DiagId = diag::note_invalid_subexpr_in_const_expr,
diff --git a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
index b23bc5f8d881..f08286a0d4ba 100644
--- a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
@@ -35,6 +35,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
#include <optional>
using namespace clang;
@@ -109,8 +110,10 @@ public:
void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
const CXXRecordDecl *Type, raw_ostream &) override;
void mangleCXXRTTI(QualType T, raw_ostream &) override;
- void mangleCXXRTTIName(QualType T, raw_ostream &) override;
- void mangleTypeName(QualType T, raw_ostream &) override;
+ void mangleCXXRTTIName(QualType T, raw_ostream &,
+ bool NormalizeIntegers) override;
+ void mangleTypeName(QualType T, raw_ostream &,
+ bool NormalizeIntegers) override;
void mangleCXXCtorComdat(const CXXConstructorDecl *D, raw_ostream &) override;
void mangleCXXDtorComdat(const CXXDestructorDecl *D, raw_ostream &) override;
@@ -206,7 +209,6 @@ public:
}
bool isInternalLinkageDecl(const NamedDecl *ND);
- const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC);
/// @}
};
@@ -215,6 +217,10 @@ public:
class CXXNameMangler {
ItaniumMangleContextImpl &Context;
raw_ostream &Out;
+ /// Normalize integer types for cross-language CFI support with other
+ /// languages that can't represent and encode C/C++ integer types.
+ bool NormalizeIntegers = false;
+
bool NullOut = false;
/// In the "DisableDerivedAbiTags" mode derived ABI tags are not calculated.
/// This mode is used when mangler creates another mangler recursively to
@@ -390,7 +396,6 @@ class CXXNameMangler {
bool isStdNamespace(const DeclContext *DC);
const RecordDecl *GetLocalClassDecl(const Decl *D);
- const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC);
bool isSpecializedAs(QualType S, llvm::StringRef Name, QualType A);
bool isStdCharSpecialization(const ClassTemplateSpecializationDecl *SD,
llvm::StringRef Name, bool HasAllocator);
@@ -413,6 +418,10 @@ public:
: Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
AbiTagsRoot(AbiTags) {}
+ CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
+ bool NormalizeIntegers_)
+ : Context(C), Out(Out_), NormalizeIntegers(NormalizeIntegers_),
+ NullOut(false), Structor(nullptr), AbiTagsRoot(AbiTags) {}
CXXNameMangler(CXXNameMangler &Outer, raw_ostream &Out_)
: Context(Outer.Context), Out(Out_), Structor(Outer.Structor),
StructorType(Outer.StructorType), SeqID(Outer.SeqID),
@@ -553,6 +562,8 @@ private:
void mangleAArch64NeonVectorType(const DependentVectorType *T);
void mangleAArch64FixedSveVectorType(const VectorType *T);
void mangleAArch64FixedSveVectorType(const DependentVectorType *T);
+ void mangleRISCVFixedRVVVectorType(const VectorType *T);
+ void mangleRISCVFixedRVVVectorType(const DependentVectorType *T);
void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
void mangleFloatLiteral(QualType T, const llvm::APFloat &V);
@@ -2937,6 +2948,85 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
// ::= Dn # std::nullptr_t (i.e., decltype(nullptr))
// ::= u <source-name> # vendor extended type
std::string type_name;
+ // Normalize integer types as vendor extended types:
+ // u<length>i<type size>
+ // u<length>u<type size>
+ if (NormalizeIntegers && T->isInteger()) {
+ if (T->isSignedInteger()) {
+ switch (getASTContext().getTypeSize(T)) {
+ case 8:
+ // Pick a representative for each integer size in the substitution
+ // dictionary. (Its actual defined size is not relevant.)
+ if (mangleSubstitution(BuiltinType::SChar))
+ break;
+ Out << "u2i8";
+ addSubstitution(BuiltinType::SChar);
+ break;
+ case 16:
+ if (mangleSubstitution(BuiltinType::Short))
+ break;
+ Out << "u3i16";
+ addSubstitution(BuiltinType::Short);
+ break;
+ case 32:
+ if (mangleSubstitution(BuiltinType::Int))
+ break;
+ Out << "u3i32";
+ addSubstitution(BuiltinType::Int);
+ break;
+ case 64:
+ if (mangleSubstitution(BuiltinType::Long))
+ break;
+ Out << "u3i64";
+ addSubstitution(BuiltinType::Long);
+ break;
+ case 128:
+ if (mangleSubstitution(BuiltinType::Int128))
+ break;
+ Out << "u4i128";
+ addSubstitution(BuiltinType::Int128);
+ break;
+ default:
+ llvm_unreachable("Unknown integer size for normalization");
+ }
+ } else {
+ switch (getASTContext().getTypeSize(T)) {
+ case 8:
+ if (mangleSubstitution(BuiltinType::UChar))
+ break;
+ Out << "u2u8";
+ addSubstitution(BuiltinType::UChar);
+ break;
+ case 16:
+ if (mangleSubstitution(BuiltinType::UShort))
+ break;
+ Out << "u3u16";
+ addSubstitution(BuiltinType::UShort);
+ break;
+ case 32:
+ if (mangleSubstitution(BuiltinType::UInt))
+ break;
+ Out << "u3u32";
+ addSubstitution(BuiltinType::UInt);
+ break;
+ case 64:
+ if (mangleSubstitution(BuiltinType::ULong))
+ break;
+ Out << "u3u64";
+ addSubstitution(BuiltinType::ULong);
+ break;
+ case 128:
+ if (mangleSubstitution(BuiltinType::UInt128))
+ break;
+ Out << "u4u128";
+ addSubstitution(BuiltinType::UInt128);
+ break;
+ default:
+ llvm_unreachable("Unknown integer size for normalization");
+ }
+ }
+ return;
+ }
switch (T->getKind()) {
case BuiltinType::Void:
Out << 'v';
@@ -3035,23 +3125,30 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
Out << 'd';
break;
case BuiltinType::LongDouble: {
- const TargetInfo *TI = getASTContext().getLangOpts().OpenMP &&
- getASTContext().getLangOpts().OpenMPIsDevice
- ? getASTContext().getAuxTargetInfo()
- : &getASTContext().getTargetInfo();
+ const TargetInfo *TI =
+ getASTContext().getLangOpts().OpenMP &&
+ getASTContext().getLangOpts().OpenMPIsTargetDevice
+ ? getASTContext().getAuxTargetInfo()
+ : &getASTContext().getTargetInfo();
Out << TI->getLongDoubleMangling();
break;
}
case BuiltinType::Float128: {
- const TargetInfo *TI = getASTContext().getLangOpts().OpenMP &&
- getASTContext().getLangOpts().OpenMPIsDevice
- ? getASTContext().getAuxTargetInfo()
- : &getASTContext().getTargetInfo();
+ const TargetInfo *TI =
+ getASTContext().getLangOpts().OpenMP &&
+ getASTContext().getLangOpts().OpenMPIsTargetDevice
+ ? getASTContext().getAuxTargetInfo()
+ : &getASTContext().getTargetInfo();
Out << TI->getFloat128Mangling();
break;
}
case BuiltinType::BFloat16: {
- const TargetInfo *TI = &getASTContext().getTargetInfo();
+ const TargetInfo *TI =
+ ((getASTContext().getLangOpts().OpenMP &&
+ getASTContext().getLangOpts().OpenMPIsTargetDevice) ||
+ getASTContext().getLangOpts().SYCLIsDevice)
+ ? getASTContext().getAuxTargetInfo()
+ : &getASTContext().getTargetInfo();
Out << TI->getBFloat16Mangling();
break;
}
@@ -3124,6 +3221,12 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
Out << (type_name == InternalName ? "u" : "") << type_name.size() \
<< type_name; \
break;
+#define SVE_OPAQUE_TYPE(InternalName, MangledName, Id, SingletonId) \
+ case BuiltinType::Id: \
+ type_name = MangledName; \
+ Out << (type_name == InternalName ? "u" : "") << type_name.size() \
+ << type_name; \
+ break;
#include "clang/Basic/AArch64SVEACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id: \
@@ -3138,6 +3241,12 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
Out << 'u' << type_name.size() << type_name; \
break;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_REF_TYPE(InternalName, MangledName, Id, SingletonId, AS) \
+ case BuiltinType::Id: \
+ type_name = MangledName; \
+ Out << 'u' << type_name.size() << type_name; \
+ break;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
}
}
@@ -3707,6 +3816,82 @@ void CXXNameMangler::mangleAArch64FixedSveVectorType(
Diags.Report(T->getAttributeLoc(), DiagID);
}
+void CXXNameMangler::mangleRISCVFixedRVVVectorType(const VectorType *T) {
+ assert(T->getVectorKind() == VectorType::RVVFixedLengthDataVector &&
+ "expected fixed-length RVV vector!");
+
+ QualType EltType = T->getElementType();
+ assert(EltType->isBuiltinType() &&
+ "expected builtin type for fixed-length RVV vector!");
+
+ SmallString<20> TypeNameStr;
+ llvm::raw_svector_ostream TypeNameOS(TypeNameStr);
+ TypeNameOS << "__rvv_";
+ switch (cast<BuiltinType>(EltType)->getKind()) {
+ case BuiltinType::SChar:
+ TypeNameOS << "int8";
+ break;
+ case BuiltinType::UChar:
+ TypeNameOS << "uint8";
+ break;
+ case BuiltinType::Short:
+ TypeNameOS << "int16";
+ break;
+ case BuiltinType::UShort:
+ TypeNameOS << "uint16";
+ break;
+ case BuiltinType::Int:
+ TypeNameOS << "int32";
+ break;
+ case BuiltinType::UInt:
+ TypeNameOS << "uint32";
+ break;
+ case BuiltinType::Long:
+ TypeNameOS << "int64";
+ break;
+ case BuiltinType::ULong:
+ TypeNameOS << "uint64";
+ break;
+ case BuiltinType::Half:
+ TypeNameOS << "float16";
+ break;
+ case BuiltinType::Float:
+ TypeNameOS << "float32";
+ break;
+ case BuiltinType::Double:
+ TypeNameOS << "float64";
+ break;
+ default:
+ llvm_unreachable("unexpected element type for fixed-length RVV vector!");
+ }
+
+ unsigned VecSizeInBits = getASTContext().getTypeInfo(T).Width;
+
+ // Apend the LMUL suffix.
+ auto VScale = getASTContext().getTargetInfo().getVScaleRange(
+ getASTContext().getLangOpts());
+ unsigned VLen = VScale->first * llvm::RISCV::RVVBitsPerBlock;
+ TypeNameOS << 'm';
+ if (VecSizeInBits >= VLen)
+ TypeNameOS << (VecSizeInBits / VLen);
+ else
+ TypeNameOS << 'f' << (VLen / VecSizeInBits);
+
+ TypeNameOS << "_t";
+
+ Out << "9__RVV_VLSI" << 'u' << TypeNameStr.size() << TypeNameStr << "Lj"
+ << VecSizeInBits << "EE";
+}
+
+void CXXNameMangler::mangleRISCVFixedRVVVectorType(
+ const DependentVectorType *T) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "cannot mangle this dependent fixed-length RVV vector type yet");
+ Diags.Report(T->getAttributeLoc(), DiagID);
+}
+
// GNU extension: vector types
// <type> ::= <vector-type>
// <vector-type> ::= Dv <positive dimension number> _
@@ -3731,6 +3916,9 @@ void CXXNameMangler::mangleType(const VectorType *T) {
T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
mangleAArch64FixedSveVectorType(T);
return;
+ } else if (T->getVectorKind() == VectorType::RVVFixedLengthDataVector) {
+ mangleRISCVFixedRVVVectorType(T);
+ return;
}
Out << "Dv" << T->getNumElements() << '_';
if (T->getVectorKind() == VectorType::AltiVecPixel)
@@ -3757,6 +3945,9 @@ void CXXNameMangler::mangleType(const DependentVectorType *T) {
T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
mangleAArch64FixedSveVectorType(T);
return;
+ } else if (T->getVectorKind() == VectorType::RVVFixedLengthDataVector) {
+ mangleRISCVFixedRVVVectorType(T);
+ return;
}
Out << "Dv";
@@ -6519,16 +6710,17 @@ void ItaniumMangleContextImpl::mangleCXXRTTI(QualType Ty, raw_ostream &Out) {
Mangler.mangleType(Ty);
}
-void ItaniumMangleContextImpl::mangleCXXRTTIName(QualType Ty,
- raw_ostream &Out) {
+void ItaniumMangleContextImpl::mangleCXXRTTIName(
+ QualType Ty, raw_ostream &Out, bool NormalizeIntegers = false) {
// <special-name> ::= TS <type> # typeinfo name (null terminated byte string)
- CXXNameMangler Mangler(*this, Out);
+ CXXNameMangler Mangler(*this, Out, NormalizeIntegers);
Mangler.getStream() << "_ZTS";
Mangler.mangleType(Ty);
}
-void ItaniumMangleContextImpl::mangleTypeName(QualType Ty, raw_ostream &Out) {
- mangleCXXRTTIName(Ty, Out);
+void ItaniumMangleContextImpl::mangleTypeName(QualType Ty, raw_ostream &Out,
+ bool NormalizeIntegers = false) {
+ mangleCXXRTTIName(Ty, Out, NormalizeIntegers);
}
void ItaniumMangleContextImpl::mangleStringLiteral(const StringLiteral *, raw_ostream &) {
diff --git a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
index 83b097daf8ab..958b0e6cf2ef 100644
--- a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
@@ -3,6 +3,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/StringExtras.h"
#include <optional>
using namespace clang;
@@ -662,6 +663,9 @@ void JSONNodeDumper::VisitVectorType(const VectorType *VT) {
case VectorType::SveFixedLengthPredicateVector:
JOS.attribute("vectorKind", "fixed-length sve predicate vector");
break;
+ case VectorType::RVVFixedLengthDataVector:
+ JOS.attribute("vectorKind", "fixed-length rvv data vector");
+ break;
}
}
@@ -770,6 +774,12 @@ void JSONNodeDumper::VisitNamedDecl(const NamedDecl *ND) {
if (isa<RequiresExprBodyDecl>(ND->getDeclContext()))
return;
+ // If the declaration is dependent or is in a dependent context, then the
+ // mangling is unlikely to be meaningful (and in some cases may cause
+ // "don't know how to mangle this" assertion failures.
+ if (ND->isTemplated())
+ return;
+
// Mangled names are not meaningful for locals, and may not be well-defined
// in the case of VLAs.
auto *VD = dyn_cast<VarDecl>(ND);
@@ -880,6 +890,7 @@ void JSONNodeDumper::VisitFunctionDecl(const FunctionDecl *FD) {
attributeOnlyIfTrue("explicitlyDeleted", FD->isDeletedAsWritten());
attributeOnlyIfTrue("constexpr", FD->isConstexpr());
attributeOnlyIfTrue("variadic", FD->isVariadic());
+ attributeOnlyIfTrue("immediate", FD->isImmediateFunction());
if (FD->isDefaulted())
JOS.attribute("explicitlyDefaulted",
@@ -1240,6 +1251,7 @@ void JSONNodeDumper::VisitDeclRefExpr(const DeclRefExpr *DRE) {
case NOUR_Constant: JOS.attribute("nonOdrUseReason", "constant"); break;
case NOUR_Discarded: JOS.attribute("nonOdrUseReason", "discarded"); break;
}
+ attributeOnlyIfTrue("isImmediateEscalating", DRE->isImmediateEscalating());
}
void JSONNodeDumper::VisitSYCLUniqueStableNameExpr(
@@ -1399,6 +1411,7 @@ void JSONNodeDumper::VisitCXXConstructExpr(const CXXConstructExpr *CE) {
attributeOnlyIfTrue("initializer_list", CE->isStdInitListInitialization());
attributeOnlyIfTrue("zeroing", CE->requiresZeroInitialization());
attributeOnlyIfTrue("hadMultipleCandidates", CE->hadMultipleCandidates());
+ attributeOnlyIfTrue("isImmediateEscalating", CE->isImmediateEscalating());
switch (CE->getConstructionKind()) {
case CXXConstructExpr::CK_Complete:
diff --git a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
index cdd2c93c4b14..3306d90dc856 100644
--- a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
@@ -29,12 +29,14 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CRC.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/xxhash.h"
+#include <functional>
#include <optional>
using namespace clang;
@@ -180,7 +182,8 @@ public:
int32_t VBPtrOffset, uint32_t VBIndex,
raw_ostream &Out) override;
void mangleCXXRTTI(QualType T, raw_ostream &Out) override;
- void mangleCXXRTTIName(QualType T, raw_ostream &Out) override;
+ void mangleCXXRTTIName(QualType T, raw_ostream &Out,
+ bool NormalizeIntegers) override;
void mangleCXXRTTIBaseClassDescriptor(const CXXRecordDecl *Derived,
uint32_t NVOffset, int32_t VBPtrOffset,
uint32_t VBTableOffset, uint32_t Flags,
@@ -193,7 +196,8 @@ public:
mangleCXXRTTICompleteObjectLocator(const CXXRecordDecl *Derived,
ArrayRef<const CXXRecordDecl *> BasePath,
raw_ostream &Out) override;
- void mangleTypeName(QualType T, raw_ostream &) override;
+ void mangleTypeName(QualType T, raw_ostream &,
+ bool NormalizeIntegers) override;
void mangleReferenceTemporary(const VarDecl *, unsigned ManglingNumber,
raw_ostream &) override;
void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out) override;
@@ -286,12 +290,8 @@ public:
assert(!RD->isExternallyVisible() && "RD must not be visible!");
assert(RD->getLambdaManglingNumber() == 0 &&
"RD must not have a mangling number!");
- llvm::DenseMap<const CXXRecordDecl *, unsigned>::iterator Result =
- LambdaIds.find(RD);
// The lambda should exist, but return 0 in case it doesn't.
- if (Result == LambdaIds.end())
- return 0;
- return Result->second;
+ return LambdaIds.lookup(RD);
}
/// Return a character sequence that is (somewhat) unique to the TU suitable
@@ -325,8 +325,8 @@ class MicrosoftCXXNameMangler {
typedef llvm::DenseMap<const void *, StringRef> TemplateArgStringMap;
TemplateArgStringMap TemplateArgStrings;
- llvm::StringSaver TemplateArgStringStorage;
llvm::BumpPtrAllocator TemplateArgStringStorageAlloc;
+ llvm::StringSaver TemplateArgStringStorage;
typedef std::set<std::pair<int, bool>> PassObjectSizeArgsSet;
PassObjectSizeArgsSet PassObjectSizeArgs;
@@ -366,9 +366,13 @@ public:
void mangleVariableEncoding(const VarDecl *VD);
void mangleMemberDataPointer(const CXXRecordDecl *RD, const ValueDecl *VD,
StringRef Prefix = "$");
+ void mangleMemberDataPointerInClassNTTP(const CXXRecordDecl *,
+ const ValueDecl *);
void mangleMemberFunctionPointer(const CXXRecordDecl *RD,
const CXXMethodDecl *MD,
StringRef Prefix = "$");
+ void mangleMemberFunctionPointerInClassNTTP(const CXXRecordDecl *RD,
+ const CXXMethodDecl *MD);
void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
const MethodVFTableLocation &ML);
void mangleNumber(int64_t Number);
@@ -481,7 +485,7 @@ MicrosoftMangleContextImpl::MicrosoftMangleContextImpl(ASTContext &Context,
SourceManager &SM = Context.getSourceManager();
if (const FileEntry *FE = SM.getFileEntryForID(SM.getMainFileID())) {
// Truncate the hash so we get 8 characters of hexadecimal.
- uint32_t TruncatedHash = uint32_t(xxHash64(FE->getName()));
+ uint32_t TruncatedHash = uint32_t(xxh3_64bits(FE->getName()));
AnonymousNamespaceHash = llvm::utohexstr(TruncatedHash);
} else {
// If we don't have a path to the main file, we'll just use 0.
@@ -709,6 +713,28 @@ void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD,
mangleNumber(VBTableOffset);
}
+void MicrosoftCXXNameMangler::mangleMemberDataPointerInClassNTTP(
+ const CXXRecordDecl *RD, const ValueDecl *VD) {
+ MSInheritanceModel IM = RD->getMSInheritanceModel();
+ // <nttp-class-member-data-pointer> ::= <member-data-pointer>
+ // ::= N
+ // ::= 8 <postfix> @ <unqualified-name> @
+
+ if (IM != MSInheritanceModel::Single && IM != MSInheritanceModel::Multiple)
+ return mangleMemberDataPointer(RD, VD, "");
+
+ if (!VD) {
+ Out << 'N';
+ return;
+ }
+
+ Out << '8';
+ mangleNestedName(VD);
+ Out << '@';
+ mangleUnqualifiedName(VD);
+ Out << '@';
+}
+
void
MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
const CXXMethodDecl *MD,
@@ -773,6 +799,34 @@ MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
mangleNumber(VBTableOffset);
}
+void MicrosoftCXXNameMangler::mangleMemberFunctionPointerInClassNTTP(
+ const CXXRecordDecl *RD, const CXXMethodDecl *MD) {
+ // <nttp-class-member-function-pointer> ::= <member-function-pointer>
+ // ::= N
+ // ::= E? <virtual-mem-ptr-thunk>
+ // ::= E? <mangled-name> <type-encoding>
+
+ if (!MD) {
+ if (RD->getMSInheritanceModel() != MSInheritanceModel::Single)
+ return mangleMemberFunctionPointer(RD, MD, "");
+
+ Out << 'N';
+ return;
+ }
+
+ Out << "E?";
+ if (MD->isVirtual()) {
+ MicrosoftVTableContext *VTContext =
+ cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
+ MethodVFTableLocation ML =
+ VTContext->getMethodVFTableLocation(GlobalDecl(MD));
+ mangleVirtualMemPtrThunk(MD, ML);
+ } else {
+ mangleName(MD);
+ mangleFunctionEncoding(MD, /*ShouldMangle=*/true);
+ }
+}
+
void MicrosoftCXXNameMangler::mangleVirtualMemPtrThunk(
const CXXMethodDecl *MD, const MethodVFTableLocation &ML) {
// Get the vftable offset.
@@ -841,6 +895,10 @@ void MicrosoftCXXNameMangler::mangleFloat(llvm::APFloat Number) {
case APFloat::S_PPCDoubleDouble: Out << 'Z'; break;
case APFloat::S_Float8E5M2:
case APFloat::S_Float8E4M3FN:
+ case APFloat::S_Float8E5M2FNUZ:
+ case APFloat::S_Float8E4M3FNUZ:
+ case APFloat::S_Float8E4M3B11FNUZ:
+ case APFloat::S_FloatTF32:
llvm_unreachable("Tried to mangle unexpected APFloat semantics");
}
@@ -1183,6 +1241,11 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
// ::= <substitution> [<postfix>]
void MicrosoftCXXNameMangler::mangleNestedName(GlobalDecl GD) {
const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
+
+ if (const auto *ID = dyn_cast<IndirectFieldDecl>(ND))
+ for (unsigned I = 1, IE = ID->getChainingSize(); I < IE; ++I)
+ mangleSourceName("<unnamed-tag>");
+
const DeclContext *DC = getEffectiveDeclContext(ND);
while (!DC->isTranslationUnit()) {
if (isa<TagDecl>(ND) || isa<VarDecl>(ND)) {
@@ -1565,7 +1628,6 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
// ::= 8 <class> <unqualified-name> @
// ::= A <type> <non-negative integer> # float
// ::= B <type> <non-negative integer> # double
- // ::= E <mangled-name> # reference to D
// # pointer to member, by component value
// ::= F <number> <number>
// ::= G <number> <number> <number>
@@ -1610,7 +1672,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
mangleTemplateArgValue(TPO->getType().getUnqualifiedType(),
TPO->getValue());
} else {
- mangle(ND, TA.getParamTypeForDecl()->isReferenceType() ? "$E?" : "$1?");
+ mangle(ND, "$1?");
}
break;
}
@@ -1739,46 +1801,62 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
// FIXME: This can only happen as an extension. Invent a mangling.
break;
} else if (auto *VD = Base.dyn_cast<const ValueDecl*>()) {
- Out << (T->isReferenceType() ? "E" : "1");
+ Out << "E";
mangle(VD);
} else {
break;
}
} else {
- unsigned NumAts = 0;
- if (T->isPointerType()) {
+ if (T->isPointerType())
Out << "5";
- ++NumAts;
- }
- QualType T = Base.getType();
+ SmallVector<char, 2> EntryTypes;
+ SmallVector<std::function<void()>, 2> EntryManglers;
+ QualType ET = Base.getType();
for (APValue::LValuePathEntry E : V.getLValuePath()) {
- // We don't know how to mangle array subscripting yet.
- if (T->isArrayType())
- goto mangling_unknown;
+ if (auto *AT = ET->getAsArrayTypeUnsafe()) {
+ EntryTypes.push_back('C');
+ EntryManglers.push_back([this, I = E.getAsArrayIndex()] {
+ Out << '0';
+ mangleNumber(I);
+ Out << '@';
+ });
+ ET = AT->getElementType();
+ continue;
+ }
const Decl *D = E.getAsBaseOrMember().getPointer();
- auto *FD = dyn_cast<FieldDecl>(D);
- // We don't know how to mangle derived-to-base conversions yet.
- if (!FD)
- goto mangling_unknown;
-
- Out << "6";
- ++NumAts;
- T = FD->getType();
+ if (auto *FD = dyn_cast<FieldDecl>(D)) {
+ ET = FD->getType();
+ if (const auto *RD = ET->getAsRecordDecl())
+ if (RD->isAnonymousStructOrUnion())
+ continue;
+ } else {
+ ET = getASTContext().getRecordType(cast<CXXRecordDecl>(D));
+ // Bug in MSVC: fully qualified name of base class should be used for
+ // mangling to prevent collisions e.g. on base classes with same names
+ // in different namespaces.
+ }
+
+ EntryTypes.push_back('6');
+ EntryManglers.push_back([this, D] {
+ mangleUnqualifiedName(cast<NamedDecl>(D));
+ Out << '@';
+ });
}
+ for (auto I = EntryTypes.rbegin(), E = EntryTypes.rend(); I != E; ++I)
+ Out << *I;
+
auto *VD = Base.dyn_cast<const ValueDecl*>();
if (!VD)
break;
Out << "E";
mangle(VD);
- for (APValue::LValuePathEntry E : V.getLValuePath()) {
- const Decl *D = E.getAsBaseOrMember().getPointer();
- mangleUnqualifiedName(cast<FieldDecl>(D));
- }
- for (unsigned I = 0; I != NumAts; ++I)
+ for (const std::function<void()> &Mangler : EntryManglers)
+ Mangler();
+ if (T->isPointerType())
Out << '@';
}
@@ -1789,20 +1867,14 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
if (WithScalarType)
mangleType(T, SourceRange(), QMM_Escape);
- // FIXME: The below manglings don't include a conversion, so bail if there
- // would be one. MSVC mangles the (possibly converted) value of the
- // pointer-to-member object as if it were a struct, leading to collisions
- // in some cases.
- if (!V.getMemberPointerPath().empty())
- break;
-
const CXXRecordDecl *RD =
T->castAs<MemberPointerType>()->getMostRecentCXXRecordDecl();
const ValueDecl *D = V.getMemberPointerDecl();
if (T->isMemberDataPointerType())
- mangleMemberDataPointer(RD, D, "");
+ mangleMemberDataPointerInClassNTTP(RD, D);
else
- mangleMemberFunctionPointer(RD, cast_or_null<CXXMethodDecl>(D), "");
+ mangleMemberFunctionPointerInClassNTTP(RD,
+ cast_or_null<CXXMethodDecl>(D));
return;
}
@@ -1890,7 +1962,6 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
break;
}
-mangling_unknown:
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "cannot mangle this template argument yet");
@@ -2477,6 +2548,13 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
mangleArtificialTagType(TTK_Struct, "__bf16", {"__clang"});
break;
+#define WASM_REF_TYPE(InternalName, MangledName, Id, SingletonId, AS) \
+ case BuiltinType::Id: \
+ mangleArtificialTagType(TTK_Struct, MangledName); \
+ mangleArtificialTagType(TTK_Struct, MangledName, {"__clang"}); \
+ break;
+
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define SVE_TYPE(Name, Id, SingletonId) \
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
@@ -2612,7 +2690,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
// Copy constructor closure always takes an unqualified reference.
mangleFunctionArgumentType(getASTContext().getLValueReferenceType(
Proto->getParamType(0)
- ->getAs<LValueReferenceType>()
+ ->castAs<LValueReferenceType>()
->getPointeeType(),
/*SpelledAsLValue=*/true),
Range);
@@ -3586,8 +3664,8 @@ void MicrosoftMangleContextImpl::mangleCXXRTTI(QualType T, raw_ostream &Out) {
Mangler.getStream() << "@8";
}
-void MicrosoftMangleContextImpl::mangleCXXRTTIName(QualType T,
- raw_ostream &Out) {
+void MicrosoftMangleContextImpl::mangleCXXRTTIName(
+ QualType T, raw_ostream &Out, bool NormalizeIntegers = false) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << '.';
Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
@@ -3754,7 +3832,8 @@ void MicrosoftMangleContextImpl::mangleSEHFinallyBlock(
Mangler.mangleName(EnclosingDecl);
}
-void MicrosoftMangleContextImpl::mangleTypeName(QualType T, raw_ostream &Out) {
+void MicrosoftMangleContextImpl::mangleTypeName(
+ QualType T, raw_ostream &Out, bool NormalizeIntegers = false) {
// This is just a made up unique string for the purposes of tbaa. undname
// does *not* know how to demangle it.
MicrosoftCXXNameMangler Mangler(*this, Out);
diff --git a/contrib/llvm-project/clang/lib/AST/NSAPI.cpp b/contrib/llvm-project/clang/lib/AST/NSAPI.cpp
index 3621a2eaa573..86dee540e9e2 100644
--- a/contrib/llvm-project/clang/lib/AST/NSAPI.cpp
+++ b/contrib/llvm-project/clang/lib/AST/NSAPI.cpp
@@ -481,6 +481,8 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
case BuiltinType::BoundMember:
case BuiltinType::Dependent:
case BuiltinType::Overload:
diff --git a/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp b/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp
index b3fe070889c5..0189a5de625e 100644
--- a/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp
@@ -994,40 +994,43 @@ bool ODRDiagsEmitter::diagnoseMismatch(
return true;
}
- const StringLiteral *FirstStr = FirstSA->getMessage();
- const StringLiteral *SecondStr = SecondSA->getMessage();
- assert((FirstStr || SecondStr) && "Both messages cannot be empty");
- if ((FirstStr && !SecondStr) || (!FirstStr && SecondStr)) {
+ const Expr *FirstMessage = FirstSA->getMessage();
+ const Expr *SecondMessage = SecondSA->getMessage();
+ assert((FirstMessage || SecondMessage) && "Both messages cannot be empty");
+ if ((FirstMessage && !SecondMessage) || (!FirstMessage && SecondMessage)) {
SourceLocation FirstLoc, SecondLoc;
SourceRange FirstRange, SecondRange;
- if (FirstStr) {
- FirstLoc = FirstStr->getBeginLoc();
- FirstRange = FirstStr->getSourceRange();
+ if (FirstMessage) {
+ FirstLoc = FirstMessage->getBeginLoc();
+ FirstRange = FirstMessage->getSourceRange();
} else {
FirstLoc = FirstSA->getBeginLoc();
FirstRange = FirstSA->getSourceRange();
}
- if (SecondStr) {
- SecondLoc = SecondStr->getBeginLoc();
- SecondRange = SecondStr->getSourceRange();
+ if (SecondMessage) {
+ SecondLoc = SecondMessage->getBeginLoc();
+ SecondRange = SecondMessage->getSourceRange();
} else {
SecondLoc = SecondSA->getBeginLoc();
SecondRange = SecondSA->getSourceRange();
}
DiagError(FirstLoc, FirstRange, StaticAssertOnlyMessage)
- << (FirstStr == nullptr);
+ << (FirstMessage == nullptr);
DiagNote(SecondLoc, SecondRange, StaticAssertOnlyMessage)
- << (SecondStr == nullptr);
+ << (SecondMessage == nullptr);
return true;
}
- if (FirstStr && SecondStr &&
- FirstStr->getString() != SecondStr->getString()) {
- DiagError(FirstStr->getBeginLoc(), FirstStr->getSourceRange(),
- StaticAssertMessage);
- DiagNote(SecondStr->getBeginLoc(), SecondStr->getSourceRange(),
- StaticAssertMessage);
- return true;
+ if (FirstMessage && SecondMessage) {
+ unsigned FirstMessageODRHash = computeODRHash(FirstMessage);
+ unsigned SecondMessageODRHash = computeODRHash(SecondMessage);
+ if (FirstMessageODRHash != SecondMessageODRHash) {
+ DiagError(FirstMessage->getBeginLoc(), FirstMessage->getSourceRange(),
+ StaticAssertMessage);
+ DiagNote(SecondMessage->getBeginLoc(), SecondMessage->getSourceRange(),
+ StaticAssertMessage);
+ return true;
+ }
}
break;
}
@@ -1742,6 +1745,7 @@ bool ODRDiagsEmitter::diagnoseMismatch(
return true;
}
+ // Note, these calls can trigger deserialization.
const Expr *FirstInit = FirstParam->getInit();
const Expr *SecondInit = SecondParam->getInit();
if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
@@ -2095,7 +2099,8 @@ bool ODRDiagsEmitter::diagnoseMismatch(
<< FirstDecl->getSourceRange();
Diag(SecondDecl->getLocation(),
diag::note_module_odr_violation_mismatch_decl_unknown)
- << SecondModule << FirstDiffType << SecondDecl->getSourceRange();
+ << SecondModule.empty() << SecondModule << FirstDiffType
+ << SecondDecl->getSourceRange();
return true;
}
diff --git a/contrib/llvm-project/clang/lib/AST/ODRHash.cpp b/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
index 3374b49f5d8e..507fb0b49f8a 100644
--- a/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
@@ -373,11 +373,9 @@ public:
void VisitObjCMethodDecl(const ObjCMethodDecl *Method) {
ID.AddInteger(Method->getDeclKind());
Hash.AddBoolean(Method->isInstanceMethod()); // false if class method
- Hash.AddBoolean(Method->isPropertyAccessor());
Hash.AddBoolean(Method->isVariadic());
Hash.AddBoolean(Method->isSynthesizedAccessorStub());
Hash.AddBoolean(Method->isDefined());
- Hash.AddBoolean(Method->isOverriding());
Hash.AddBoolean(Method->isDirectMethod());
Hash.AddBoolean(Method->isThisDeclarationADesignatedInitializer());
Hash.AddBoolean(Method->hasSkippedBody());
@@ -594,7 +592,7 @@ void ODRHash::AddCXXRecordDecl(const CXXRecordDecl *Record) {
ID.AddInteger(Record->getNumBases());
auto Bases = Record->bases();
- for (auto Base : Bases) {
+ for (const auto &Base : Bases) {
AddQualType(Base.getType());
ID.AddInteger(Base.isVirtual());
ID.AddInteger(Base.getAccessSpecifierAsWritten());
diff --git a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
index 2e88c08ae789..4c895822ffdf 100644
--- a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
+++ b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
@@ -1669,6 +1669,52 @@ OMPBindClause::Create(const ASTContext &C, OpenMPBindClauseKind K,
OMPBindClause *OMPBindClause::CreateEmpty(const ASTContext &C) {
return new (C) OMPBindClause();
}
+
+OMPDoacrossClause *
+OMPDoacrossClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ OpenMPDoacrossClauseModifier DepType,
+ SourceLocation DepLoc, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VL, unsigned NumLoops) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size() + NumLoops),
+ alignof(OMPDoacrossClause));
+ OMPDoacrossClause *Clause = new (Mem)
+ OMPDoacrossClause(StartLoc, LParenLoc, EndLoc, VL.size(), NumLoops);
+ Clause->setDependenceType(DepType);
+ Clause->setDependenceLoc(DepLoc);
+ Clause->setColonLoc(ColonLoc);
+ Clause->setVarRefs(VL);
+ for (unsigned I = 0; I < NumLoops; ++I)
+ Clause->setLoopData(I, nullptr);
+ return Clause;
+}
+
+OMPDoacrossClause *OMPDoacrossClause::CreateEmpty(const ASTContext &C,
+ unsigned N,
+ unsigned NumLoops) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N + NumLoops),
+ alignof(OMPDoacrossClause));
+ return new (Mem) OMPDoacrossClause(N, NumLoops);
+}
+
+void OMPDoacrossClause::setLoopData(unsigned NumLoop, Expr *Cnt) {
+ assert(NumLoop < NumLoops && "Loop index must be less number of loops.");
+ auto *It = std::next(getVarRefs().end(), NumLoop);
+ *It = Cnt;
+}
+
+Expr *OMPDoacrossClause::getLoopData(unsigned NumLoop) {
+ assert(NumLoop < NumLoops && "Loop index must be less number of loops.");
+ auto *It = std::next(getVarRefs().end(), NumLoop);
+ return *It;
+}
+
+const Expr *OMPDoacrossClause::getLoopData(unsigned NumLoop) const {
+ assert(NumLoop < NumLoops && "Loop index must be less number of loops.");
+ const auto *It = std::next(getVarRefs().end(), NumLoop);
+ return *It;
+}
+
//===----------------------------------------------------------------------===//
// OpenMP clauses printing methods
//===----------------------------------------------------------------------===//
@@ -2464,6 +2510,30 @@ void OMPClausePrinter::VisitOMPXDynCGroupMemClause(
OS << ")";
}
+void OMPClausePrinter::VisitOMPDoacrossClause(OMPDoacrossClause *Node) {
+ OS << "doacross(";
+ OpenMPDoacrossClauseModifier DepType = Node->getDependenceType();
+
+ switch (DepType) {
+ case OMPC_DOACROSS_source:
+ OS << "source:";
+ break;
+ case OMPC_DOACROSS_sink:
+ OS << "sink:";
+ break;
+ case OMPC_DOACROSS_source_omp_cur_iteration:
+ OS << "source: omp_cur_iteration";
+ break;
+ case OMPC_DOACROSS_sink_omp_cur_iteration:
+ OS << "sink: omp_cur_iteration - 1";
+ break;
+ default:
+ llvm_unreachable("unknown docaross modifier");
+ }
+ VisitOMPClauseList(Node, ' ');
+ OS << ")";
+}
+
void OMPTraitInfo::getAsVariantMatchInfo(ASTContext &ASTCtx,
VariantMatchInfo &VMI) const {
for (const OMPTraitSet &Set : Sets) {
@@ -2642,7 +2712,7 @@ TargetOMPContext::TargetOMPContext(
ASTContext &ASTCtx, std::function<void(StringRef)> &&DiagUnknownTrait,
const FunctionDecl *CurrentFunctionDecl,
ArrayRef<llvm::omp::TraitProperty> ConstructTraits)
- : OMPContext(ASTCtx.getLangOpts().OpenMPIsDevice,
+ : OMPContext(ASTCtx.getLangOpts().OpenMPIsTargetDevice,
ASTCtx.getTargetInfo().getTriple()),
FeatureValidityCheck([&](StringRef FeatureName) {
return ASTCtx.getTargetInfo().isValidFeatureName(FeatureName);
diff --git a/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp b/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
index bcebf92bf763..c450e2239fee 100644
--- a/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
+++ b/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
@@ -800,6 +800,8 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define SIGNED_TYPE(Id, SingletonId)
#define UNSIGNED_TYPE(Id, SingletonId)
#define FLOATING_TYPE(Id, SingletonId)
diff --git a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
index 2f546398338c..3f836cb96be5 100644
--- a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -1853,9 +1853,8 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
bool InsertExtraPadding) {
auto *FieldClass = D->getType()->getAsCXXRecordDecl();
- bool PotentiallyOverlapping = D->hasAttr<NoUniqueAddressAttr>() && FieldClass;
bool IsOverlappingEmptyField =
- PotentiallyOverlapping && FieldClass->isEmpty();
+ D->isPotentiallyOverlapping() && FieldClass->isEmpty();
CharUnits FieldOffset =
(IsUnion || IsOverlappingEmptyField) ? CharUnits::Zero() : getDataSize();
@@ -1916,7 +1915,7 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
// A potentially-overlapping field occupies its dsize or nvsize, whichever
// is larger.
- if (PotentiallyOverlapping) {
+ if (D->isPotentiallyOverlapping()) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(FieldClass);
EffectiveFieldSize =
std::max(Layout.getNonVirtualSize(), Layout.getDataSize());
@@ -2199,11 +2198,19 @@ void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
<< (InBits ? 1 : 0); // (byte|bit)
}
+ const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
+
// Warn if we packed it unnecessarily, when the unpacked alignment is not
// greater than the one after packing, the size in bits doesn't change and
// the offset of each field is identical.
+ // Unless the type is non-POD (for Clang ABI > 15), where the packed
+ // attribute on such a type does allow the type to be packed into other
+ // structures that use the packed attribute.
if (Packed && UnpackedAlignment <= Alignment &&
- UnpackedSizeInBits == getSizeInBits() && !HasPackedField)
+ UnpackedSizeInBits == getSizeInBits() && !HasPackedField &&
+ (!CXXRD || CXXRD->isPOD() ||
+ Context.getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver15))
Diag(D->getLocation(), diag::warn_unnecessary_packed)
<< Context.getTypeDeclType(RD);
}
@@ -2919,8 +2926,7 @@ void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase(
bool FoundBase = false;
if (UseExternalLayout) {
FoundBase = External.getExternalNVBaseOffset(BaseDecl, BaseOffset);
- if (FoundBase) {
- assert(BaseOffset >= Size && "base offset already allocated");
+ if (BaseOffset > Size) {
Size = BaseOffset;
}
}
@@ -3716,6 +3722,28 @@ void ASTContext::DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS,
if (Target->defaultsToAIXPowerAlignment())
OS << " PreferredAlignment:" << toBits(Info.getPreferredAlignment())
<< "\n";
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ OS << " BaseOffsets: [";
+ const CXXRecordDecl *Base = nullptr;
+ for (auto I : CXXRD->bases()) {
+ if (I.isVirtual())
+ continue;
+ if (Base)
+ OS << ", ";
+ Base = I.getType()->getAsCXXRecordDecl();
+ OS << Info.CXXInfo->BaseOffsets[Base].getQuantity();
+ }
+ OS << "]>\n";
+ OS << " VBaseOffsets: [";
+ const CXXRecordDecl *VBase = nullptr;
+ for (auto I : CXXRD->vbases()) {
+ if (VBase)
+ OS << ", ";
+ VBase = I.getType()->getAsCXXRecordDecl();
+ OS << Info.CXXInfo->VBaseOffsets[VBase].VBaseOffset.getQuantity();
+ }
+ OS << "]>\n";
+ }
OS << " FieldOffsets: [";
for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) {
if (i)
diff --git a/contrib/llvm-project/clang/lib/AST/Stmt.cpp b/contrib/llvm-project/clang/lib/AST/Stmt.cpp
index 8744bba6c6d9..c31fb48a2add 100644
--- a/contrib/llvm-project/clang/lib/AST/Stmt.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Stmt.cpp
@@ -1345,6 +1345,11 @@ CapturedStmt::CapturedStmt(EmptyShell Empty, unsigned NumCaptures)
: Stmt(CapturedStmtClass, Empty), NumCaptures(NumCaptures),
CapDeclAndKind(nullptr, CR_Default) {
getStoredStmts()[NumCaptures] = nullptr;
+
+ // Construct default capture objects.
+ Capture *Buffer = getStoredCaptures();
+ for (unsigned I = 0, N = NumCaptures; I != N; ++I)
+ new (Buffer++) Capture();
}
CapturedStmt *CapturedStmt::Create(const ASTContext &Context, Stmt *S,
diff --git a/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp b/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp
index 33b0421ad101..0d6fc848f739 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp
@@ -23,7 +23,8 @@ QualType CXXCatchStmt::getCaughtType() const {
}
CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, SourceLocation tryLoc,
- Stmt *tryBlock, ArrayRef<Stmt *> handlers) {
+ CompoundStmt *tryBlock,
+ ArrayRef<Stmt *> handlers) {
const size_t Size = totalSizeToAlloc<Stmt *>(handlers.size() + 1);
void *Mem = C.Allocate(Size, alignof(CXXTryStmt));
return new (Mem) CXXTryStmt(tryLoc, tryBlock, handlers);
@@ -36,7 +37,7 @@ CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, EmptyShell Empty,
return new (Mem) CXXTryStmt(Empty, numHandlers);
}
-CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock,
+CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, CompoundStmt *tryBlock,
ArrayRef<Stmt *> handlers)
: Stmt(CXXTryStmtClass), TryLoc(tryLoc), NumHandlers(handlers.size()) {
Stmt **Stmts = getStmts();
@@ -117,6 +118,7 @@ CoroutineBodyStmt::CoroutineBodyStmt(CoroutineBodyStmt::CtorArgs const &Args)
SubStmts[CoroutineBodyStmt::OnFallthrough] = Args.OnFallthrough;
SubStmts[CoroutineBodyStmt::Allocate] = Args.Allocate;
SubStmts[CoroutineBodyStmt::Deallocate] = Args.Deallocate;
+ SubStmts[CoroutineBodyStmt::ResultDecl] = Args.ResultDecl;
SubStmts[CoroutineBodyStmt::ReturnValue] = Args.ReturnValue;
SubStmts[CoroutineBodyStmt::ReturnStmt] = Args.ReturnStmt;
SubStmts[CoroutineBodyStmt::ReturnStmtOnAllocFailure] =
diff --git a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
index 7c5b9f23fc26..a544732bb4c2 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
@@ -2374,6 +2374,10 @@ OMPTeamsGenericLoopDirective *OMPTeamsGenericLoopDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
+ Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -2383,6 +2387,15 @@ OMPTeamsGenericLoopDirective *OMPTeamsGenericLoopDirective::Create(
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setCombinedLowerBoundVariable(Exprs.DistCombinedFields.LB);
+ Dir->setCombinedUpperBoundVariable(Exprs.DistCombinedFields.UB);
+ Dir->setCombinedEnsureUpperBound(Exprs.DistCombinedFields.EUB);
+ Dir->setCombinedInit(Exprs.DistCombinedFields.Init);
+ Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
+ Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
+ Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
return Dir;
}
@@ -2418,6 +2431,10 @@ OMPTargetTeamsGenericLoopDirective *OMPTargetTeamsGenericLoopDirective::Create(
Dir->setNextLowerBound(Exprs.NLB);
Dir->setNextUpperBound(Exprs.NUB);
Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
+ Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -2427,6 +2444,15 @@ OMPTargetTeamsGenericLoopDirective *OMPTargetTeamsGenericLoopDirective::Create(
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setCombinedLowerBoundVariable(Exprs.DistCombinedFields.LB);
+ Dir->setCombinedUpperBoundVariable(Exprs.DistCombinedFields.UB);
+ Dir->setCombinedEnsureUpperBound(Exprs.DistCombinedFields.EUB);
+ Dir->setCombinedInit(Exprs.DistCombinedFields.Init);
+ Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
+ Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
+ Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
return Dir;
}
diff --git a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
index 0a879bb6df2a..c3db500d8a8d 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
@@ -400,7 +400,9 @@ void StmtPrinter::VisitForStmt(ForStmt *Node) {
PrintInitStmt(Node->getInit(), 5);
else
OS << (Node->getCond() ? "; " : ";");
- if (Node->getCond())
+ if (const DeclStmt *DS = Node->getConditionVariableDeclStmt())
+ PrintRawDeclStmt(DS);
+ else if (Node->getCond())
PrintExpr(Node->getCond());
OS << ";";
if (Node->getInc()) {
@@ -1458,8 +1460,12 @@ void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(
void StmtPrinter::VisitGenericSelectionExpr(GenericSelectionExpr *Node) {
OS << "_Generic(";
- PrintExpr(Node->getControllingExpr());
- for (const GenericSelectionExpr::Association Assoc : Node->associations()) {
+ if (Node->isExprPredicate())
+ PrintExpr(Node->getControllingExpr());
+ else
+ Node->getControllingType()->getType().print(OS, Policy);
+
+ for (const GenericSelectionExpr::Association &Assoc : Node->associations()) {
OS << ", ";
QualType T = Assoc.getType();
if (T.isNull())
@@ -1740,7 +1746,7 @@ void StmtPrinter::VisitDesignatedInitExpr(DesignatedInitExpr *Node) {
for (const DesignatedInitExpr::Designator &D : Node->designators()) {
if (D.isFieldDesignator()) {
if (D.getDotLoc().isInvalid()) {
- if (IdentifierInfo *II = D.getFieldName()) {
+ if (const IdentifierInfo *II = D.getFieldName()) {
OS << II->getName() << ":";
NeedsEquals = false;
}
@@ -2539,7 +2545,7 @@ void StmtPrinter::VisitRequiresExpr(RequiresExpr *E) {
OS << "}";
}
-// C++ Coroutines TS
+// C++ Coroutines
void StmtPrinter::VisitCoroutineBodyStmt(CoroutineBodyStmt *S) {
Visit(S->getBody());
diff --git a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
index 960cc4f4fc27..d8a667b2d0fd 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
@@ -29,10 +29,12 @@ namespace {
protected:
llvm::FoldingSetNodeID &ID;
bool Canonical;
+ bool ProfileLambdaExpr;
public:
- StmtProfiler(llvm::FoldingSetNodeID &ID, bool Canonical)
- : ID(ID), Canonical(Canonical) {}
+ StmtProfiler(llvm::FoldingSetNodeID &ID, bool Canonical,
+ bool ProfileLambdaExpr)
+ : ID(ID), Canonical(Canonical), ProfileLambdaExpr(ProfileLambdaExpr) {}
virtual ~StmtProfiler() {}
@@ -83,8 +85,10 @@ namespace {
public:
StmtProfilerWithPointers(llvm::FoldingSetNodeID &ID,
- const ASTContext &Context, bool Canonical)
- : StmtProfiler(ID, Canonical), Context(Context) {}
+ const ASTContext &Context, bool Canonical,
+ bool ProfileLambdaExpr)
+ : StmtProfiler(ID, Canonical, ProfileLambdaExpr), Context(Context) {}
+
private:
void HandleStmtClass(Stmt::StmtClass SC) override {
ID.AddInteger(SC);
@@ -99,7 +103,15 @@ namespace {
ID.AddInteger(NTTP->getDepth());
ID.AddInteger(NTTP->getIndex());
ID.AddBoolean(NTTP->isParameterPack());
- VisitType(NTTP->getType());
+ // C++20 [temp.over.link]p6:
+ // Two template-parameters are equivalent under the following
+ // conditions: [...] if they declare non-type template parameters,
+ // they have equivalent types ignoring the use of type-constraints
+ // for placeholder types
+ //
+ // TODO: Why do we need to include the type in the profile? It's not
+ // part of the mangling.
+ VisitType(Context.getUnconstrainedType(NTTP->getType()));
return;
}
@@ -111,6 +123,9 @@ namespace {
// definition of "equivalent" (per C++ [temp.over.link]) is at
// least as strong as the definition of "equivalent" used for
// name mangling.
+ //
+ // TODO: The Itanium C++ ABI only uses the top-level cv-qualifiers,
+ // not the entirety of the type.
VisitType(Parm->getType());
ID.AddInteger(Parm->getFunctionScopeDepth());
ID.AddInteger(Parm->getFunctionScopeIndex());
@@ -170,7 +185,8 @@ namespace {
ODRHash &Hash;
public:
StmtProfilerWithoutPointers(llvm::FoldingSetNodeID &ID, ODRHash &Hash)
- : StmtProfiler(ID, false), Hash(Hash) {}
+ : StmtProfiler(ID, /*Canonical=*/false, /*ProfileLambdaExpr=*/false),
+ Hash(Hash) {}
private:
void HandleStmtClass(Stmt::StmtClass SC) override {
@@ -909,6 +925,9 @@ void OMPClauseProfiler::VisitOMPXDynCGroupMemClause(
if (Expr *Size = C->getSize())
Profiler->VisitStmt(Size);
}
+void OMPClauseProfiler::VisitOMPDoacrossClause(const OMPDoacrossClause *C) {
+ VisitOMPClauseList(C);
+}
} // namespace
void
@@ -1519,7 +1538,7 @@ void StmtProfiler::VisitDesignatedInitExpr(const DesignatedInitExpr *S) {
assert(D.isArrayRangeDesignator());
ID.AddInteger(2);
}
- ID.AddInteger(D.getFirstExprIndex());
+ ID.AddInteger(D.getArrayIndex());
}
}
@@ -1637,7 +1656,8 @@ void StmtProfiler::VisitRequiresExpr(const RequiresExpr *S) {
static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
UnaryOperatorKind &UnaryOp,
- BinaryOperatorKind &BinaryOp) {
+ BinaryOperatorKind &BinaryOp,
+ unsigned &NumArgs) {
switch (S->getOperator()) {
case OO_None:
case OO_New:
@@ -1650,7 +1670,7 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
llvm_unreachable("Invalid operator call kind");
case OO_Plus:
- if (S->getNumArgs() == 1) {
+ if (NumArgs == 1) {
UnaryOp = UO_Plus;
return Stmt::UnaryOperatorClass;
}
@@ -1659,7 +1679,7 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
return Stmt::BinaryOperatorClass;
case OO_Minus:
- if (S->getNumArgs() == 1) {
+ if (NumArgs == 1) {
UnaryOp = UO_Minus;
return Stmt::UnaryOperatorClass;
}
@@ -1668,7 +1688,7 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
return Stmt::BinaryOperatorClass;
case OO_Star:
- if (S->getNumArgs() == 1) {
+ if (NumArgs == 1) {
UnaryOp = UO_Deref;
return Stmt::UnaryOperatorClass;
}
@@ -1689,7 +1709,7 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
return Stmt::BinaryOperatorClass;
case OO_Amp:
- if (S->getNumArgs() == 1) {
+ if (NumArgs == 1) {
UnaryOp = UO_AddrOf;
return Stmt::UnaryOperatorClass;
}
@@ -1798,13 +1818,13 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
return Stmt::BinaryOperatorClass;
case OO_PlusPlus:
- UnaryOp = S->getNumArgs() == 1? UO_PreInc
- : UO_PostInc;
+ UnaryOp = NumArgs == 1 ? UO_PreInc : UO_PostInc;
+ NumArgs = 1;
return Stmt::UnaryOperatorClass;
case OO_MinusMinus:
- UnaryOp = S->getNumArgs() == 1? UO_PreDec
- : UO_PostDec;
+ UnaryOp = NumArgs == 1 ? UO_PreDec : UO_PostDec;
+ NumArgs = 1;
return Stmt::UnaryOperatorClass;
case OO_Comma:
@@ -1850,10 +1870,11 @@ void StmtProfiler::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) {
UnaryOperatorKind UnaryOp = UO_Extension;
BinaryOperatorKind BinaryOp = BO_Comma;
- Stmt::StmtClass SC = DecodeOperatorCall(S, UnaryOp, BinaryOp);
+ unsigned NumArgs = S->getNumArgs();
+ Stmt::StmtClass SC = DecodeOperatorCall(S, UnaryOp, BinaryOp, NumArgs);
ID.AddInteger(SC);
- for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
+ for (unsigned I = 0; I != NumArgs; ++I)
Visit(S->getArg(I));
if (SC == Stmt::UnaryOperatorClass)
ID.AddInteger(UnaryOp);
@@ -2016,14 +2037,27 @@ StmtProfiler::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) {
void
StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) {
- // Do not recursively visit the children of this expression. Profiling the
- // body would result in unnecessary work, and is not safe to do during
- // deserialization.
- VisitStmtNoChildren(S);
+ if (!ProfileLambdaExpr) {
+ // Do not recursively visit the children of this expression. Profiling the
+ // body would result in unnecessary work, and is not safe to do during
+ // deserialization.
+ VisitStmtNoChildren(S);
+
+ // C++20 [temp.over.link]p5:
+ // Two lambda-expressions are never considered equivalent.
+ VisitDecl(S->getLambdaClass());
- // C++20 [temp.over.link]p5:
- // Two lambda-expressions are never considered equivalent.
- VisitDecl(S->getLambdaClass());
+ return;
+ }
+
+ CXXRecordDecl *Lambda = S->getLambdaClass();
+ ID.AddInteger(Lambda->getODRHash());
+
+ for (const auto &Capture : Lambda->captures()) {
+ ID.AddInteger(Capture.getCaptureKind());
+ if (Capture.capturesVariable())
+ VisitDecl(Capture.getCapturedVar());
+ }
}
void
@@ -2377,8 +2411,8 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
}
void Stmt::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- bool Canonical) const {
- StmtProfilerWithPointers Profiler(ID, Context, Canonical);
+ bool Canonical, bool ProfileLambdaExpr) const {
+ StmtProfilerWithPointers Profiler(ID, Context, Canonical, ProfileLambdaExpr);
Profiler.Visit(this);
}
diff --git a/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp b/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
index ceff7a313716..c46b3e3d0c50 100644
--- a/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
@@ -161,8 +161,9 @@ static bool needsAmpersandOnTemplateArg(QualType paramType, QualType argType) {
//===----------------------------------------------------------------------===//
TemplateArgument::TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value,
- QualType Type) {
+ QualType Type, bool IsDefaulted) {
Integer.Kind = Integral;
+ Integer.IsDefaulted = IsDefaulted;
// Copy the APSInt value into our decomposed form.
Integer.BitWidth = Value.getBitWidth();
Integer.IsUnsigned = Value.isUnsigned();
@@ -326,9 +327,9 @@ void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID,
case TemplateExpansion:
ID.AddInteger(TemplateArg.NumExpansions);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Template:
- getAsTemplateOrTemplatePattern().Profile(ID);
+ ID.AddPointer(TemplateArg.Name);
break;
case Integral:
diff --git a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
index a6dd0fad9331..2f0e4181e940 100644
--- a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
@@ -281,6 +281,15 @@ bool TemplateName::containsUnexpandedParameterPack() const {
return getDependence() & TemplateNameDependence::UnexpandedPack;
}
+void TemplateName::Profile(llvm::FoldingSetNodeID &ID) {
+ if (const auto* USD = getAsUsingShadowDecl())
+ ID.AddPointer(USD->getCanonicalDecl());
+ else if (const auto *TD = getAsTemplateDecl())
+ ID.AddPointer(TD->getCanonicalDecl());
+ else
+ ID.AddPointer(Storage.getOpaqueValue());
+}
+
void TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
Qualified Qual) const {
auto Kind = getKind();
diff --git a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
index a5573c117e62..a174faa6635e 100644
--- a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
@@ -283,6 +283,8 @@ void TextNodeDumper::Visit(const Decl *D) {
OS << " constexpr";
if (FD->isConsteval())
OS << " consteval";
+ else if (FD->isImmediateFunction())
+ OS << " immediate";
if (FD->isMultiVersion())
OS << " multiversion";
}
@@ -1046,6 +1048,8 @@ void TextNodeDumper::VisitDeclRefExpr(const DeclRefExpr *Node) {
case NOUR_Constant: OS << " non_odr_use_constant"; break;
case NOUR_Discarded: OS << " non_odr_use_discarded"; break;
}
+ if (Node->isImmediateEscalating())
+ OS << " immediate-escalating";
}
void TextNodeDumper::VisitUnresolvedLookupExpr(
@@ -1227,6 +1231,8 @@ void TextNodeDumper::VisitCXXConstructExpr(const CXXConstructExpr *Node) {
OS << " std::initializer_list";
if (Node->requiresZeroInitialization())
OS << " zeroing";
+ if (Node->isImmediateEscalating())
+ OS << " immediate-escalating";
}
void TextNodeDumper::VisitCXXBindTemporaryExpr(
@@ -1495,6 +1501,9 @@ void TextNodeDumper::VisitVectorType(const VectorType *T) {
case VectorType::SveFixedLengthPredicateVector:
OS << " fixed-length sve predicate vector";
break;
+ case VectorType::RVVFixedLengthDataVector:
+ OS << " fixed-length rvv data vector";
+ break;
}
OS << " " << T->getNumElements();
}
@@ -1818,7 +1827,8 @@ void TextNodeDumper::VisitVarDecl(const VarDecl *D) {
if (D->hasInit()) {
const Expr *E = D->getInit();
// Only dump the value of constexpr VarDecls for now.
- if (E && !E->isValueDependent() && D->isConstexpr()) {
+ if (E && !E->isValueDependent() && D->isConstexpr() &&
+ !D->getType()->isDependentType()) {
const APValue *Value = D->evaluateValue();
if (Value)
AddChild("value", [=] { Visit(*Value, E->getType()); });
diff --git a/contrib/llvm-project/clang/lib/AST/Type.cpp b/contrib/llvm-project/clang/lib/AST/Type.cpp
index 54e62a1939f7..99c859034423 100644
--- a/contrib/llvm-project/clang/lib/AST/Type.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Type.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DependenceFlags.h"
@@ -46,6 +47,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
@@ -158,7 +160,7 @@ unsigned ConstantArrayType::getNumAddressingBits(const ASTContext &Context,
if ((ElementSize >> 32) == 0 && NumElements.getBitWidth() <= 64 &&
(NumElements.getZExtValue() >> 32) == 0) {
uint64_t TotalSize = NumElements.getZExtValue() * ElementSize;
- return 64 - llvm::countLeadingZeros(TotalSize);
+ return llvm::bit_width(TotalSize);
}
// Otherwise, use APSInt to handle arbitrary sized values.
@@ -1486,7 +1488,13 @@ struct StripObjCKindOfTypeVisitor
bool QualType::UseExcessPrecision(const ASTContext &Ctx) {
const BuiltinType *BT = getTypePtr()->getAs<BuiltinType>();
- if (BT) {
+ if (!BT) {
+ const VectorType *VT = getTypePtr()->getAs<VectorType>();
+ if (VT) {
+ QualType ElementType = VT->getElementType();
+ return ElementType.UseExcessPrecision(Ctx);
+ }
+ } else {
switch (BT->getKind()) {
case BuiltinType::Kind::Float16: {
const TargetInfo &TI = Ctx.getTargetInfo();
@@ -1495,7 +1503,15 @@ bool QualType::UseExcessPrecision(const ASTContext &Ctx) {
Ctx.getLangOpts().ExcessPrecisionKind::FPP_None)
return true;
return false;
- }
+ } break;
+ case BuiltinType::Kind::BFloat16: {
+ const TargetInfo &TI = Ctx.getTargetInfo();
+ if (TI.hasBFloat16Type() && !TI.hasFullBFloat16Type() &&
+ Ctx.getLangOpts().getBFloat16ExcessPrecision() !=
+ Ctx.getLangOpts().ExcessPrecisionKind::FPP_None)
+ return true;
+ return false;
+ } break;
default:
return false;
}
@@ -1928,6 +1944,11 @@ bool Type::hasIntegerRepresentation() const {
(VT->getKind() >= BuiltinType::SveInt8 &&
VT->getKind() <= BuiltinType::SveUint64);
}
+ if (CanonicalType->isRVVVLSBuiltinType()) {
+ const auto *VT = cast<BuiltinType>(CanonicalType);
+ return (VT->getKind() >= BuiltinType::RvvInt8mf8 &&
+ VT->getKind() <= BuiltinType::RvvUint64m8);
+ }
return isIntegerType();
}
@@ -2154,8 +2175,9 @@ bool Type::isFloatingType() const {
bool Type::hasFloatingRepresentation() const {
if (const auto *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isFloatingType();
- else
- return isFloatingType();
+ if (const auto *MT = dyn_cast<MatrixType>(CanonicalType))
+ return MT->getElementType()->isFloatingType();
+ return isFloatingType();
}
bool Type::isRealFloatingType() const {
@@ -2176,8 +2198,7 @@ bool Type::isRealType() const {
bool Type::isArithmeticType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
- BT->getKind() <= BuiltinType::Ibm128 &&
- BT->getKind() != BuiltinType::BFloat16;
+ BT->getKind() <= BuiltinType::Ibm128;
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
// GCC allows forward declaration of enum types (forbid by C99 6.7.2.3p2).
// If a body isn't seen by the time we get here, return false.
@@ -2335,6 +2356,10 @@ bool Type::isSizelessBuiltinType() const {
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
return true;
+ // WebAssembly reference types
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+ return true;
default:
return false;
}
@@ -2342,6 +2367,22 @@ bool Type::isSizelessBuiltinType() const {
return false;
}
+bool Type::isWebAssemblyExternrefType() const {
+ if (const auto *BT = getAs<BuiltinType>())
+ return BT->getKind() == BuiltinType::WasmExternRef;
+ return false;
+}
+
+bool Type::isWebAssemblyTableType() const {
+ if (const auto *ATy = dyn_cast<ArrayType>(this))
+ return ATy->getElementType().isWebAssemblyReferenceType();
+
+ if (const auto *PTy = dyn_cast<PointerType>(this))
+ return PTy->getPointeeType().isWebAssemblyReferenceType();
+
+ return false;
+}
+
bool Type::isSizelessType() const { return isSizelessBuiltinType(); }
bool Type::isSVESizelessBuiltinType() const {
@@ -2358,6 +2399,19 @@ bool Type::isSVESizelessBuiltinType() const {
return false;
}
+bool Type::isRVVSizelessBuiltinType() const {
+ if (const BuiltinType *BT = getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
bool Type::isVLSTBuiltinType() const {
if (const BuiltinType *BT = getAs<BuiltinType>()) {
switch (BT->getKind()) {
@@ -2374,6 +2428,8 @@ bool Type::isVLSTBuiltinType() const {
case BuiltinType::SveFloat64:
case BuiltinType::SveBFloat16:
case BuiltinType::SveBool:
+ case BuiltinType::SveBoolx2:
+ case BuiltinType::SveBoolx4:
return true;
default:
return false;
@@ -2385,7 +2441,7 @@ bool Type::isVLSTBuiltinType() const {
QualType Type::getSveEltType(const ASTContext &Ctx) const {
assert(isVLSTBuiltinType() && "unsupported type!");
- const BuiltinType *BTy = getAs<BuiltinType>();
+ const BuiltinType *BTy = castAs<BuiltinType>();
if (BTy->getKind() == BuiltinType::SveBool)
// Represent predicates as i8 rather than i1 to avoid any layout issues.
// The type is bitcasted to a scalable predicate type when casting between
@@ -2395,6 +2451,27 @@ QualType Type::getSveEltType(const ASTContext &Ctx) const {
return Ctx.getBuiltinVectorTypeInfo(BTy).ElementType;
}
+bool Type::isRVVVLSBuiltinType() const {
+ if (const BuiltinType *BT = getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, IsFP) \
+ case BuiltinType::Id: \
+ return NF == 1;
+#include "clang/Basic/RISCVVTypes.def"
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+QualType Type::getRVVEltType(const ASTContext &Ctx) const {
+ assert(isRVVVLSBuiltinType() && "unsupported type!");
+
+ const BuiltinType *BTy = castAs<BuiltinType>();
+ return Ctx.getBuiltinVectorTypeInfo(BTy).ElementType;
+}
+
bool QualType::isPODType(const ASTContext &Context) const {
// C++11 has a more relaxed definition of POD.
if (Context.getLangOpts().CPlusPlus11)
@@ -2564,6 +2641,64 @@ bool QualType::isTriviallyRelocatableType(const ASTContext &Context) const {
}
}
+static bool
+HasNonDeletedDefaultedEqualityComparison(const CXXRecordDecl *Decl) {
+ if (Decl->isUnion())
+ return false;
+
+ auto IsDefaultedOperatorEqualEqual = [&](const FunctionDecl *Function) {
+ return Function->getOverloadedOperator() ==
+ OverloadedOperatorKind::OO_EqualEqual &&
+ Function->isDefaulted() && Function->getNumParams() > 0 &&
+ (Function->getParamDecl(0)->getType()->isReferenceType() ||
+ Decl->isTriviallyCopyable());
+ };
+
+ if (llvm::none_of(Decl->methods(), IsDefaultedOperatorEqualEqual) &&
+ llvm::none_of(Decl->friends(), [&](const FriendDecl *Friend) {
+ if (NamedDecl *ND = Friend->getFriendDecl()) {
+ return ND->isFunctionOrFunctionTemplate() &&
+ IsDefaultedOperatorEqualEqual(ND->getAsFunction());
+ }
+ return false;
+ }))
+ return false;
+
+ return llvm::all_of(Decl->bases(),
+ [](const CXXBaseSpecifier &BS) {
+ if (const auto *RD = BS.getType()->getAsCXXRecordDecl())
+ return HasNonDeletedDefaultedEqualityComparison(RD);
+ return true;
+ }) &&
+ llvm::all_of(Decl->fields(), [](const FieldDecl *FD) {
+ auto Type = FD->getType();
+ if (Type->isArrayType())
+ Type = Type->getBaseElementTypeUnsafe()->getCanonicalTypeUnqualified();
+
+ if (Type->isReferenceType() || Type->isEnumeralType())
+ return false;
+ if (const auto *RD = Type->getAsCXXRecordDecl())
+ return HasNonDeletedDefaultedEqualityComparison(RD);
+ return true;
+ });
+}
+
+bool QualType::isTriviallyEqualityComparableType(
+ const ASTContext &Context) const {
+ QualType CanonicalType = getCanonicalType();
+ if (CanonicalType->isIncompleteType() || CanonicalType->isDependentType() ||
+ CanonicalType->isEnumeralType() || CanonicalType->isArrayType())
+ return false;
+
+ if (const auto *RD = CanonicalType->getAsCXXRecordDecl()) {
+ if (!HasNonDeletedDefaultedEqualityComparison(RD))
+ return false;
+ }
+
+ return Context.hasUniqueObjectRepresentations(
+ CanonicalType, /*CheckIfTriviallyCopyable=*/false);
+}
+
bool QualType::isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const {
return !Context.getLangOpts().ObjCAutoRefCount &&
Context.getLangOpts().ObjCWeak &&
@@ -2582,6 +2717,19 @@ bool QualType::hasNonTrivialToPrimitiveCopyCUnion(const RecordDecl *RD) {
return RD->hasNonTrivialToPrimitiveCopyCUnion();
}
+bool QualType::isWebAssemblyReferenceType() const {
+ return isWebAssemblyExternrefType() || isWebAssemblyFuncrefType();
+}
+
+bool QualType::isWebAssemblyExternrefType() const {
+ return getTypePtr()->isWebAssemblyExternrefType();
+}
+
+bool QualType::isWebAssemblyFuncrefType() const {
+ return getTypePtr()->isFunctionPointerType() &&
+ getAddressSpace() == LangAS::wasm_funcref;
+}
+
QualType::PrimitiveDefaultInitializeKind
QualType::isNonTrivialToPrimitiveDefaultInitialize() const {
if (const auto *RT =
@@ -3150,6 +3298,10 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
case Id: \
return Name;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) \
+ case Id: \
+ return Name;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
}
llvm_unreachable("Invalid builtin type.");
@@ -3240,7 +3392,10 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
// Fill in the exception type array if present.
if (getExceptionSpecType() == EST_Dynamic) {
auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>();
- ExtraBits.NumExceptionType = epi.ExceptionSpec.Exceptions.size();
+ size_t NumExceptions = epi.ExceptionSpec.Exceptions.size();
+ assert(NumExceptions <= UINT16_MAX &&
+ "Not enough bits to encode exceptions");
+ ExtraBits.NumExceptionType = NumExceptions;
assert(hasExtraBitfields() && "missing trailing extra bitfields!");
auto *exnSlot =
@@ -3640,6 +3795,10 @@ bool AttributedType::isMSTypeSpec() const {
llvm_unreachable("invalid attr kind");
}
+bool AttributedType::isWebAssemblyFuncrefSpec() const {
+ return getAttrKind() == attr::WebAssemblyFuncref;
+}
+
bool AttributedType::isCallingConv() const {
// FIXME: Generate this with TableGen.
switch (getAttrKind()) {
@@ -4279,6 +4438,8 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
case BuiltinType::BuiltinFn:
case BuiltinType::NullPtr:
case BuiltinType::IncompleteMatrixIdx:
@@ -4546,10 +4707,13 @@ AutoType::AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword,
AutoTypeBits.Keyword = (unsigned)Keyword;
AutoTypeBits.NumArgs = TypeConstraintArgs.size();
this->TypeConstraintConcept = TypeConstraintConcept;
+ assert(TypeConstraintConcept || AutoTypeBits.NumArgs == 0);
if (TypeConstraintConcept) {
auto *ArgBuffer =
const_cast<TemplateArgument *>(getTypeConstraintArguments().data());
for (const TemplateArgument &Arg : TypeConstraintArgs) {
+ // We only syntactically depend on the constraint arguments. They don't
+ // affect the deduced type, only its validity.
addDependence(
toSyntacticDependence(toTypeDependence(Arg.getDependence())));
diff --git a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
index bcc5a223e6f7..69efa6fecbc4 100644
--- a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
@@ -424,6 +424,8 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
case BuiltinType::BuiltinFn:
case BuiltinType::IncompleteMatrixIdx:
case BuiltinType::OMPArraySection:
diff --git a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
index 2d06faeca182..1b62f6630928 100644
--- a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
@@ -692,6 +692,20 @@ void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) {
// Multiply by 8 for the number of bits.
OS << ") * 8))) ";
printBefore(T->getElementType(), OS);
+ break;
+ case VectorType::RVVFixedLengthDataVector:
+ // FIXME: We prefer to print the size directly here, but have no way
+ // to get the size of the type.
+ OS << "__attribute__((__riscv_rvv_vector_bits__(";
+
+ OS << T->getNumElements();
+
+ OS << " * sizeof(";
+ print(T->getElementType(), OS, StringRef());
+ // Multiply by 8 for the number of bits.
+ OS << ") * 8))) ";
+ printBefore(T->getElementType(), OS);
+ break;
}
}
@@ -757,6 +771,21 @@ void TypePrinter::printDependentVectorBefore(
}
OS << "))) ";
printBefore(T->getElementType(), OS);
+ break;
+ case VectorType::RVVFixedLengthDataVector:
+ // FIXME: We prefer to print the size directly here, but have no way
+ // to get the size of the type.
+ OS << "__attribute__((__riscv_rvv_vector_bits__(";
+ if (T->getSizeExpr()) {
+ T->getSizeExpr()->printPretty(OS, nullptr, Policy);
+ OS << " * sizeof(";
+ print(T->getElementType(), OS, StringRef());
+ // Multiply by 8 for the number of bits.
+ OS << ") * 8";
+ }
+ OS << "))) ";
+ printBefore(T->getElementType(), OS);
+ break;
}
}
@@ -1356,11 +1385,20 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
if (PLoc.isValid()) {
OS << " at ";
StringRef File = PLoc.getFilename();
+ llvm::SmallString<1024> WrittenFile(File);
if (auto *Callbacks = Policy.Callbacks)
- OS << Callbacks->remapPath(File);
- else
- OS << File;
- OS << ':' << PLoc.getLine() << ':' << PLoc.getColumn();
+ WrittenFile = Callbacks->remapPath(File);
+ // Fix inconsistent path separator created by
+ // clang::DirectoryLookup::LookupFile when the file path is relative
+ // path.
+ llvm::sys::path::Style Style =
+ llvm::sys::path::is_absolute(WrittenFile)
+ ? llvm::sys::path::Style::native
+ : (Policy.MSVCFormatting
+ ? llvm::sys::path::Style::windows_backslash
+ : llvm::sys::path::Style::posix);
+ llvm::sys::path::native(WrittenFile, Style);
+ OS << WrittenFile << ':' << PLoc.getLine() << ':' << PLoc.getColumn();
}
}
@@ -1538,6 +1576,11 @@ void TypePrinter::printElaboratedBefore(const ElaboratedType *T,
return;
}
+ if (Policy.SuppressElaboration) {
+ printBefore(T->getNamedType(), OS);
+ return;
+ }
+
// The tag definition will take care of these.
if (!Policy.IncludeTagDefinition)
{
@@ -1557,6 +1600,12 @@ void TypePrinter::printElaboratedAfter(const ElaboratedType *T,
raw_ostream &OS) {
if (Policy.IncludeTagDefinition && T->getOwnedTagDecl())
return;
+
+ if (Policy.SuppressElaboration) {
+ printAfter(T->getNamedType(), OS);
+ return;
+ }
+
ElaboratedTypePolicyRAII PolicyRAII(Policy);
printAfter(T->getNamedType(), OS);
}
@@ -1649,6 +1698,9 @@ void TypePrinter::printAttributedBefore(const AttributedType *T,
spaceBeforePlaceHolder(OS);
}
+ if (T->isWebAssemblyFuncrefSpec())
+ OS << "__funcref";
+
// Print nullability type specifiers.
if (T->getImmediateNullability()) {
if (T->getAttrKind() == attr::TypeNonNull)
@@ -1682,8 +1734,8 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
// Some attributes are printed as qualifiers before the type, so we have
// nothing left to do.
- if (T->getAttrKind() == attr::ObjCKindOf ||
- T->isMSTypeSpec() || T->getImmediateNullability())
+ if (T->getAttrKind() == attr::ObjCKindOf || T->isMSTypeSpec() ||
+ T->getImmediateNullability() || T->isWebAssemblyFuncrefSpec())
return;
// Don't print the inert __unsafe_unretained attribute at all.
@@ -1716,6 +1768,11 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
return;
}
+ if (T->getAttrKind() == attr::ArmStreaming) {
+ OS << "__arm_streaming";
+ return;
+ }
+
OS << " __attribute__((";
switch (T->getAttrKind()) {
#define TYPE_ATTR(NAME)
@@ -1755,6 +1812,8 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::AddressSpace:
case attr::CmseNSCall:
case attr::AnnotateType:
+ case attr::WebAssemblyFuncref:
+ case attr::ArmStreaming:
llvm_unreachable("This attribute should have been handled already");
case attr::NSReturnsRetained:
@@ -2007,6 +2066,36 @@ static bool isSubstitutedType(ASTContext &Ctx, QualType T, QualType Pattern,
return false;
}
+/// Evaluates the expression template argument 'Pattern' and returns true
+/// if 'Arg' evaluates to the same result.
+static bool templateArgumentExpressionsEqual(ASTContext const &Ctx,
+ TemplateArgument const &Pattern,
+ TemplateArgument const &Arg) {
+ if (Pattern.getKind() != TemplateArgument::Expression)
+ return false;
+
+ // Can't evaluate value-dependent expressions so bail early
+ Expr const *pattern_expr = Pattern.getAsExpr();
+ if (pattern_expr->isValueDependent() ||
+ !pattern_expr->isIntegerConstantExpr(Ctx))
+ return false;
+
+ if (Arg.getKind() == TemplateArgument::Integral)
+ return llvm::APSInt::isSameValue(pattern_expr->EvaluateKnownConstInt(Ctx),
+ Arg.getAsIntegral());
+
+ if (Arg.getKind() == TemplateArgument::Expression) {
+ Expr const *args_expr = Arg.getAsExpr();
+ if (args_expr->isValueDependent() || !args_expr->isIntegerConstantExpr(Ctx))
+ return false;
+
+ return llvm::APSInt::isSameValue(args_expr->EvaluateKnownConstInt(Ctx),
+ pattern_expr->EvaluateKnownConstInt(Ctx));
+ }
+
+ return false;
+}
+
static bool isSubstitutedTemplateArgument(ASTContext &Ctx, TemplateArgument Arg,
TemplateArgument Pattern,
ArrayRef<TemplateArgument> Args,
@@ -2025,15 +2114,8 @@ static bool isSubstitutedTemplateArgument(ASTContext &Ctx, TemplateArgument Arg,
}
}
- if (Arg.getKind() == TemplateArgument::Integral &&
- Pattern.getKind() == TemplateArgument::Expression) {
- Expr const *expr = Pattern.getAsExpr();
-
- if (!expr->isValueDependent() && expr->isIntegerConstantExpr(Ctx)) {
- return llvm::APSInt::isSameValue(expr->EvaluateKnownConstInt(Ctx),
- Arg.getAsIntegral());
- }
- }
+ if (templateArgumentExpressionsEqual(Ctx, Pattern, Arg))
+ return true;
if (Arg.getKind() != Pattern.getKind())
return false;
@@ -2086,14 +2168,10 @@ printTo(raw_ostream &OS, ArrayRef<TA> Args, const PrintingPolicy &Policy,
if (TPL && Policy.SuppressDefaultTemplateArgs &&
!Policy.PrintCanonicalTypes && !Args.empty() && !IsPack &&
Args.size() <= TPL->size()) {
- ASTContext &Ctx = TPL->getParam(0)->getASTContext();
llvm::SmallVector<TemplateArgument, 8> OrigArgs;
for (const TA &A : Args)
OrigArgs.push_back(getArgument(A));
- while (!Args.empty() &&
- isSubstitutedDefaultArgument(Ctx, getArgument(Args.back()),
- TPL->getParam(Args.size() - 1),
- OrigArgs, TPL->getDepth()))
+ while (!Args.empty() && getArgument(Args.back()).getIsDefaulted())
Args = Args.drop_back();
}
@@ -2238,6 +2316,8 @@ std::string Qualifiers::getAddrSpaceAsString(LangAS AS) {
return "__uptr __ptr32";
case LangAS::ptr64:
return "__ptr64";
+ case LangAS::wasm_funcref:
+ return "__funcref";
case LangAS::hlsl_groupshared:
return "groupshared";
default:
diff --git a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
index bc9a83bde8a0..2a6f1e20202f 100644
--- a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
@@ -2259,7 +2259,7 @@ VTableLayout::VTableLayout(ArrayRef<size_t> VTableIndices,
VTableLayout::~VTableLayout() { }
bool VTableContextBase::hasVtableSlot(const CXXMethodDecl *MD) {
- return MD->isVirtual() && !MD->isConsteval();
+ return MD->isVirtual() && !MD->isImmediateFunction();
}
ItaniumVTableContext::ItaniumVTableContext(
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
index f1f73fc42075..3470467112dd 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -800,6 +800,7 @@ const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
+const internal::VariadicDynCastAllOfMatcher<Decl, ConceptDecl> conceptDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
@@ -882,6 +883,10 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, ArrayInitIndexExpr>
+ arrayInitIndexExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, ArrayInitLoopExpr>
+ arrayInitLoopExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
@@ -910,6 +915,8 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt;
+const internal::VariadicDynCastAllOfMatcher<Stmt, CoroutineBodyStmt>
+ coroutineBodyStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr;
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
index 1b099ec3a314..c76ddf17b719 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -1008,7 +1008,7 @@ public:
Diagnostics *) const override {
std::vector<ASTNodeKind> NodeKinds;
- for (auto Arg : Args) {
+ for (const auto &Arg : Args) {
if (!Arg.Value.isNodeKind())
return {};
NodeKinds.push_back(Arg.Value.getNodeKind());
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index 0d436fa29a79..1098df032a64 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -60,7 +60,7 @@ private:
void RegistryMaps::registerMatcher(
StringRef MatcherName, std::unique_ptr<MatcherDescriptor> Callback) {
- assert(Constructors.find(MatcherName) == Constructors.end());
+ assert(!Constructors.contains(MatcherName));
Constructors[MatcherName] = std::move(Callback);
}
@@ -134,7 +134,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(allOf);
REGISTER_MATCHER(anyOf);
REGISTER_MATCHER(anything);
+ REGISTER_MATCHER(arrayInitIndexExpr);
+ REGISTER_MATCHER(arrayInitLoopExpr);
REGISTER_MATCHER(argumentCountIs);
+ REGISTER_MATCHER(argumentCountAtLeast);
REGISTER_MATCHER(arraySubscriptExpr);
REGISTER_MATCHER(arrayType);
REGISTER_MATCHER(asString);
@@ -169,12 +172,14 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(compoundLiteralExpr);
REGISTER_MATCHER(compoundStmt);
REGISTER_MATCHER(coawaitExpr);
+ REGISTER_MATCHER(conceptDecl);
REGISTER_MATCHER(conditionalOperator);
REGISTER_MATCHER(constantArrayType);
REGISTER_MATCHER(constantExpr);
REGISTER_MATCHER(containsDeclaration);
REGISTER_MATCHER(continueStmt);
REGISTER_MATCHER(coreturnStmt);
+ REGISTER_MATCHER(coroutineBodyStmt);
REGISTER_MATCHER(coyieldExpr);
REGISTER_MATCHER(cudaKernelCallExpr);
REGISTER_MATCHER(cxxBaseSpecifier);
diff --git a/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp b/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp
index c05534886cb5..b989b8422cfc 100644
--- a/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp
@@ -717,6 +717,7 @@ Stmt *BodyFarm::getBody(const FunctionDecl *D) {
switch (BuiltinID) {
case Builtin::BIas_const:
case Builtin::BIforward:
+ case Builtin::BIforward_like:
case Builtin::BImove:
case Builtin::BImove_if_noexcept:
FF = create_std_move_forward;
diff --git a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
index ea8b73e81ea2..64a4fffaea5d 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
@@ -300,6 +300,7 @@ public:
int distance(const_iterator L);
const_iterator shared_parent(const_iterator L);
bool pointsToFirstDeclaredVar() { return VarIter == 1; }
+ bool inSameLocalScope(const_iterator rhs) { return Scope == rhs.Scope; }
};
private:
@@ -349,18 +350,33 @@ int LocalScope::const_iterator::distance(LocalScope::const_iterator L) {
/// between this and shared_parent(L) end.
LocalScope::const_iterator
LocalScope::const_iterator::shared_parent(LocalScope::const_iterator L) {
- llvm::SmallPtrSet<const LocalScope *, 4> ScopesOfL;
+ // one of iterators is not valid (we are not in scope), so common
+ // parent is const_iterator() (i.e. sentinel).
+ if ((*this == const_iterator()) || (L == const_iterator())) {
+ return const_iterator();
+ }
+
+ const_iterator F = *this;
+ if (F.inSameLocalScope(L)) {
+ // Iterators are in the same scope, get common subset of variables.
+ F.VarIter = std::min(F.VarIter, L.VarIter);
+ return F;
+ }
+
+ llvm::SmallDenseMap<const LocalScope *, unsigned, 4> ScopesOfL;
while (true) {
- ScopesOfL.insert(L.Scope);
+ ScopesOfL.try_emplace(L.Scope, L.VarIter);
if (L == const_iterator())
break;
L = L.Scope->Prev;
}
- const_iterator F = *this;
while (true) {
- if (ScopesOfL.count(F.Scope))
+ if (auto LIt = ScopesOfL.find(F.Scope); LIt != ScopesOfL.end()) {
+ // Get common subset of variables in given scope
+ F.VarIter = std::min(F.VarIter, LIt->getSecond());
return F;
+ }
assert(F != const_iterator() &&
"L iterator is not reachable from F iterator.");
F = F.Scope->Prev;
@@ -513,9 +529,6 @@ class CFGBuilder {
llvm::DenseMap<Expr *, const ConstructionContextLayer *>
ConstructionContextMap;
- using DeclsWithEndedScopeSetTy = llvm::SmallSetVector<VarDecl *, 16>;
- DeclsWithEndedScopeSetTy DeclsWithEndedScope;
-
bool badCFG = false;
const CFG::BuildOptions &BuildOpts;
@@ -756,18 +769,20 @@ private:
CFGBlock *addInitializer(CXXCtorInitializer *I);
void addLoopExit(const Stmt *LoopStmt);
- void addAutomaticObjDtors(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S);
- void addLifetimeEnds(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S);
void addAutomaticObjHandling(LocalScope::const_iterator B,
LocalScope::const_iterator E, Stmt *S);
+ void addAutomaticObjDestruction(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S);
+ void addScopeExitHandling(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S);
void addImplicitDtorsForDestructor(const CXXDestructorDecl *DD);
- void addScopesEnd(LocalScope::const_iterator B, LocalScope::const_iterator E,
- Stmt *S);
-
- void getDeclsWithEndedScope(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S);
+ void addScopeChangesHandling(LocalScope::const_iterator SrcPos,
+ LocalScope::const_iterator DstPos,
+ Stmt *S);
+ CFGBlock *createScopeChangesHandlingBlock(LocalScope::const_iterator SrcPos,
+ CFGBlock *SrcBlk,
+ LocalScope::const_iterator DstPost,
+ CFGBlock *DstBlk);
// Local scopes creation.
LocalScope* createOrReuseLocalScope(LocalScope* Scope);
@@ -878,18 +893,6 @@ private:
B->appendDeleteDtor(RD, DE, cfg->getBumpVectorContext());
}
- void prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk,
- LocalScope::const_iterator B, LocalScope::const_iterator E);
-
- void prependAutomaticObjLifetimeWithTerminator(CFGBlock *Blk,
- LocalScope::const_iterator B,
- LocalScope::const_iterator E);
-
- const VarDecl *
- prependAutomaticObjScopeEndWithTerminator(CFGBlock *Blk,
- LocalScope::const_iterator B,
- LocalScope::const_iterator E);
-
void addSuccessor(CFGBlock *B, CFGBlock *S, bool IsReachable = true) {
B->addSuccessor(CFGBlock::AdjacentBlock(S, IsReachable),
cfg->getBumpVectorContext());
@@ -907,21 +910,11 @@ private:
B->appendScopeBegin(VD, S, cfg->getBumpVectorContext());
}
- void prependScopeBegin(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
- if (BuildOpts.AddScopes)
- B->prependScopeBegin(VD, S, cfg->getBumpVectorContext());
- }
-
void appendScopeEnd(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
if (BuildOpts.AddScopes)
B->appendScopeEnd(VD, S, cfg->getBumpVectorContext());
}
- void prependScopeEnd(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
- if (BuildOpts.AddScopes)
- B->prependScopeEnd(VD, S, cfg->getBumpVectorContext());
- }
-
/// Find a relational comparison with an expression evaluating to a
/// boolean and a constant other than 0 and 1.
/// e.g. if ((x < y) == 10)
@@ -1538,7 +1531,6 @@ void CFGBuilder::cleanupConstructionContext(Expr *E) {
ConstructionContextMap.erase(E);
}
-
/// BuildCFG - Constructs a CFG from an AST (a Stmt*). The AST can represent an
/// arbitrary statement. Examples include a single expression or a function
/// body (compound statement). The ownership of the returned CFG is
@@ -1556,9 +1548,6 @@ std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
assert(Succ == &cfg->getExit());
Block = nullptr; // the EXIT block is empty. Create all other blocks lazily.
- assert(!(BuildOpts.AddImplicitDtors && BuildOpts.AddLifetime) &&
- "AddImplicitDtors and AddLifetime cannot be used at the same time");
-
if (BuildOpts.AddImplicitDtors)
if (const CXXDestructorDecl *DD = dyn_cast_or_null<CXXDestructorDecl>(D))
addImplicitDtorsForDestructor(DD);
@@ -1622,16 +1611,11 @@ std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
if (LI == LabelMap.end())
continue;
JumpTarget JT = LI->second;
- prependAutomaticObjLifetimeWithTerminator(B, I->scopePosition,
- JT.scopePosition);
- prependAutomaticObjDtorsWithTerminator(B, I->scopePosition,
- JT.scopePosition);
- const VarDecl *VD = prependAutomaticObjScopeEndWithTerminator(
- B, I->scopePosition, JT.scopePosition);
- appendScopeBegin(JT.block, VD, G);
- addSuccessor(B, JT.block);
- };
- if (auto *G = dyn_cast<GCCAsmStmt>(B->getTerminator())) {
+
+ CFGBlock *SuccBlk = createScopeChangesHandlingBlock(
+ I->scopePosition, B, JT.scopePosition, JT.block);
+ addSuccessor(B, SuccBlk);
+ } else if (auto *G = dyn_cast<GCCAsmStmt>(B->getTerminator())) {
CFGBlock *Successor = (I+1)->block;
for (auto *L : G->labels()) {
LabelMapTy::iterator LI = LabelMap.find(L->getLabel());
@@ -1798,143 +1782,195 @@ void CFGBuilder::addLoopExit(const Stmt *LoopStmt){
appendLoopExit(Block, LoopStmt);
}
-void CFGBuilder::getDeclsWithEndedScope(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S) {
- if (!BuildOpts.AddScopes)
+/// Adds the CFG elements for leaving the scope of automatic objects in
+/// range [B, E). This include following:
+/// * AutomaticObjectDtor for variables with non-trivial destructor
+/// * LifetimeEnds for all variables
+/// * ScopeEnd for each scope left
+void CFGBuilder::addAutomaticObjHandling(LocalScope::const_iterator B,
+ LocalScope::const_iterator E,
+ Stmt *S) {
+ if (!BuildOpts.AddScopes && !BuildOpts.AddImplicitDtors &&
+ !BuildOpts.AddLifetime)
return;
if (B == E)
return;
- // To go from B to E, one first goes up the scopes from B to P
- // then sideways in one scope from P to P' and then down
- // the scopes from P' to E.
- // The lifetime of all objects between B and P end.
- LocalScope::const_iterator P = B.shared_parent(E);
- int Dist = B.distance(P);
- if (Dist <= 0)
+ // Not leaving the scope, only need to handle destruction and lifetime
+ if (B.inSameLocalScope(E)) {
+ addAutomaticObjDestruction(B, E, S);
return;
+ }
- for (LocalScope::const_iterator I = B; I != P; ++I)
- if (I.pointsToFirstDeclaredVar())
- DeclsWithEndedScope.insert(*I);
-}
+ // Extract information about all local scopes that are left
+ SmallVector<LocalScope::const_iterator, 10> LocalScopeEndMarkers;
+ LocalScopeEndMarkers.push_back(B);
+ for (LocalScope::const_iterator I = B; I != E; ++I) {
+ if (!I.inSameLocalScope(LocalScopeEndMarkers.back()))
+ LocalScopeEndMarkers.push_back(I);
+ }
+ LocalScopeEndMarkers.push_back(E);
+
+ // We need to leave the scope in reverse order, so we reverse the end
+ // markers
+ std::reverse(LocalScopeEndMarkers.begin(), LocalScopeEndMarkers.end());
+ auto Pairwise =
+ llvm::zip(LocalScopeEndMarkers, llvm::drop_begin(LocalScopeEndMarkers));
+ for (auto [E, B] : Pairwise) {
+ if (!B.inSameLocalScope(E))
+ addScopeExitHandling(B, E, S);
+ addAutomaticObjDestruction(B, E, S);
+ }
+}
+
+/// Add CFG elements corresponding to call destructor and end of lifetime
+/// of all automatic variables with non-trivial destructor in range [B, E).
+/// This include AutomaticObjectDtor and LifetimeEnds elements.
+void CFGBuilder::addAutomaticObjDestruction(LocalScope::const_iterator B,
+ LocalScope::const_iterator E,
+ Stmt *S) {
+ if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime)
+ return;
-void CFGBuilder::addAutomaticObjHandling(LocalScope::const_iterator B,
- LocalScope::const_iterator E,
- Stmt *S) {
- getDeclsWithEndedScope(B, E, S);
- if (BuildOpts.AddScopes)
- addScopesEnd(B, E, S);
- if (BuildOpts.AddImplicitDtors)
- addAutomaticObjDtors(B, E, S);
- if (BuildOpts.AddLifetime)
- addLifetimeEnds(B, E, S);
+ if (B == E)
+ return;
+
+ SmallVector<VarDecl *, 10> DeclsNonTrivial;
+ DeclsNonTrivial.reserve(B.distance(E));
+
+ for (VarDecl* D : llvm::make_range(B, E))
+ if (!hasTrivialDestructor(D))
+ DeclsNonTrivial.push_back(D);
+
+ for (VarDecl *VD : llvm::reverse(DeclsNonTrivial)) {
+ if (BuildOpts.AddImplicitDtors) {
+ // If this destructor is marked as a no-return destructor, we need to
+ // create a new block for the destructor which does not have as a
+ // successor anything built thus far: control won't flow out of this
+ // block.
+ QualType Ty = VD->getType();
+ if (Ty->isReferenceType())
+ Ty = getReferenceInitTemporaryType(VD->getInit());
+ Ty = Context->getBaseElementType(Ty);
+
+ if (Ty->getAsCXXRecordDecl()->isAnyDestructorNoReturn())
+ Block = createNoReturnBlock();
+ }
+
+ autoCreateBlock();
+
+ // Add LifetimeEnd after automatic obj with non-trivial destructors,
+ // as they end their lifetime when the destructor returns. For trivial
+ // objects, we end lifetime with scope end.
+ if (BuildOpts.AddLifetime)
+ appendLifetimeEnds(Block, VD, S);
+ if (BuildOpts.AddImplicitDtors)
+ appendAutomaticObjDtor(Block, VD, S);
+ }
}
-/// Add to current block automatic objects that leave the scope.
-void CFGBuilder::addLifetimeEnds(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S) {
- if (!BuildOpts.AddLifetime)
+/// Add CFG elements corresponding to leaving a scope.
+/// Assumes that range [B, E) corresponds to single scope.
+/// This add following elements:
+/// * LifetimeEnds for all variables with non-trivial destructor
+/// * ScopeEnd for each scope left
+void CFGBuilder::addScopeExitHandling(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S) {
+ assert(!B.inSameLocalScope(E));
+ if (!BuildOpts.AddLifetime && !BuildOpts.AddScopes)
return;
- if (B == E)
- return;
+ if (BuildOpts.AddScopes) {
+ autoCreateBlock();
+ appendScopeEnd(Block, B.getFirstVarInScope(), S);
+ }
- // To go from B to E, one first goes up the scopes from B to P
- // then sideways in one scope from P to P' and then down
- // the scopes from P' to E.
- // The lifetime of all objects between B and P end.
- LocalScope::const_iterator P = B.shared_parent(E);
- int dist = B.distance(P);
- if (dist <= 0)
+ if (!BuildOpts.AddLifetime)
return;
// We need to perform the scope leaving in reverse order
SmallVector<VarDecl *, 10> DeclsTrivial;
- SmallVector<VarDecl *, 10> DeclsNonTrivial;
- DeclsTrivial.reserve(dist);
- DeclsNonTrivial.reserve(dist);
+ DeclsTrivial.reserve(B.distance(E));
- for (LocalScope::const_iterator I = B; I != P; ++I)
- if (hasTrivialDestructor(*I))
- DeclsTrivial.push_back(*I);
- else
- DeclsNonTrivial.push_back(*I);
+ // Objects with trivial destructor ends their lifetime when their storage
+ // is destroyed, for automatic variables, this happens when the end of the
+ // scope is added.
+ for (VarDecl* D : llvm::make_range(B, E))
+ if (hasTrivialDestructor(D))
+ DeclsTrivial.push_back(D);
+
+ if (DeclsTrivial.empty())
+ return;
autoCreateBlock();
- // object with trivial destructor end their lifetime last (when storage
- // duration ends)
for (VarDecl *VD : llvm::reverse(DeclsTrivial))
appendLifetimeEnds(Block, VD, S);
-
- for (VarDecl *VD : llvm::reverse(DeclsNonTrivial))
- appendLifetimeEnds(Block, VD, S);
}
-/// Add to current block markers for ending scopes.
-void CFGBuilder::addScopesEnd(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S) {
- // If implicit destructors are enabled, we'll add scope ends in
- // addAutomaticObjDtors.
- if (BuildOpts.AddImplicitDtors)
+/// addScopeChangesHandling - appends information about destruction, lifetime
+/// and cfgScopeEnd for variables in the scope that was left by the jump, and
+/// appends cfgScopeBegin for all scopes that where entered.
+/// We insert the cfgScopeBegin at the end of the jump node, as depending on
+/// the sourceBlock, each goto, may enter different amount of scopes.
+void CFGBuilder::addScopeChangesHandling(LocalScope::const_iterator SrcPos,
+ LocalScope::const_iterator DstPos,
+ Stmt *S) {
+ assert(Block && "Source block should be always crated");
+ if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime &&
+ !BuildOpts.AddScopes) {
return;
+ }
- autoCreateBlock();
+ if (SrcPos == DstPos)
+ return;
+
+ // Get common scope, the jump leaves all scopes [SrcPos, BasePos), and
+ // enter all scopes between [DstPos, BasePos)
+ LocalScope::const_iterator BasePos = SrcPos.shared_parent(DstPos);
+
+ // Append scope begins for scopes entered by goto
+ if (BuildOpts.AddScopes && !DstPos.inSameLocalScope(BasePos)) {
+ for (LocalScope::const_iterator I = DstPos; I != BasePos; ++I)
+ if (I.pointsToFirstDeclaredVar())
+ appendScopeBegin(Block, *I, S);
+ }
- for (VarDecl *VD : llvm::reverse(DeclsWithEndedScope))
- appendScopeEnd(Block, VD, S);
+ // Append scopeEnds, destructor and lifetime with the terminator for
+ // block left by goto.
+ addAutomaticObjHandling(SrcPos, BasePos, S);
}
-/// addAutomaticObjDtors - Add to current block automatic objects destructors
-/// for objects in range of local scope positions. Use S as trigger statement
-/// for destructors.
-void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S) {
- if (!BuildOpts.AddImplicitDtors)
- return;
+/// createScopeChangesHandlingBlock - Creates a block with cfgElements
+/// corresponding to changing the scope from the source scope of the GotoStmt,
+/// to destination scope. Add destructor, lifetime and cfgScopeEnd
+/// CFGElements to newly created CFGBlock, that will have the CFG terminator
+/// transferred.
+CFGBlock *CFGBuilder::createScopeChangesHandlingBlock(
+ LocalScope::const_iterator SrcPos, CFGBlock *SrcBlk,
+ LocalScope::const_iterator DstPos, CFGBlock *DstBlk) {
+ if (SrcPos == DstPos)
+ return DstBlk;
- if (B == E)
- return;
+ if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime &&
+ (!BuildOpts.AddScopes || SrcPos.inSameLocalScope(DstPos)))
+ return DstBlk;
- // We need to append the destructors in reverse order, but any one of them
- // may be a no-return destructor which changes the CFG. As a result, buffer
- // this sequence up and replay them in reverse order when appending onto the
- // CFGBlock(s).
- SmallVector<VarDecl*, 10> Decls;
- Decls.reserve(B.distance(E));
- for (LocalScope::const_iterator I = B; I != E; ++I)
- Decls.push_back(*I);
-
- for (VarDecl *VD : llvm::reverse(Decls)) {
- if (hasTrivialDestructor(VD)) {
- // If AddScopes is enabled and *I is a first variable in a scope, add a
- // ScopeEnd marker in a Block.
- if (BuildOpts.AddScopes && DeclsWithEndedScope.count(VD)) {
- autoCreateBlock();
- appendScopeEnd(Block, VD, S);
- }
- continue;
- }
- // If this destructor is marked as a no-return destructor, we need to
- // create a new block for the destructor which does not have as a successor
- // anything built thus far: control won't flow out of this block.
- QualType Ty = VD->getType();
- if (Ty->isReferenceType()) {
- Ty = getReferenceInitTemporaryType(VD->getInit());
- }
- Ty = Context->getBaseElementType(Ty);
+ // We will update CFBBuilder when creating new block, restore the
+ // previous state at exit.
+ SaveAndRestore save_Block(Block), save_Succ(Succ);
- if (Ty->getAsCXXRecordDecl()->isAnyDestructorNoReturn())
- Block = createNoReturnBlock();
- else
- autoCreateBlock();
+ // Create a new block, and transfer terminator
+ Block = createBlock(false);
+ Block->setTerminator(SrcBlk->getTerminator());
+ SrcBlk->setTerminator(CFGTerminator());
+ addSuccessor(Block, DstBlk);
- // Add ScopeEnd just after automatic obj destructor.
- if (BuildOpts.AddScopes && DeclsWithEndedScope.count(VD))
- appendScopeEnd(Block, VD, S);
- appendAutomaticObjDtor(Block, VD, S);
- }
+ // Fill the created Block with the required elements.
+ addScopeChangesHandling(SrcPos, DstPos, Block->getTerminatorStmt());
+
+ assert(Block && "There should be at least one scope changing Block");
+ return Block;
}
/// addImplicitDtorsForDestructor - Add implicit destructors generated for
@@ -1992,8 +2028,7 @@ LocalScope* CFGBuilder::createOrReuseLocalScope(LocalScope* Scope) {
if (Scope)
return Scope;
llvm::BumpPtrAllocator &alloc = cfg->getAllocator();
- return new (alloc.Allocate<LocalScope>())
- LocalScope(BumpVectorContext(alloc), ScopePos);
+ return new (alloc) LocalScope(BumpVectorContext(alloc), ScopePos);
}
/// addLocalScopeForStmt - Add LocalScope to local scopes tree for statement
@@ -2080,8 +2115,6 @@ bool CFGBuilder::hasTrivialDestructor(VarDecl *VD) {
/// const reference. Will reuse Scope if not NULL.
LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
LocalScope* Scope) {
- assert(!(BuildOpts.AddImplicitDtors && BuildOpts.AddLifetime) &&
- "AddImplicitDtors and AddLifetime cannot be used at the same time");
if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime &&
!BuildOpts.AddScopes)
return Scope;
@@ -2090,17 +2123,12 @@ LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
if (!VD->hasLocalStorage())
return Scope;
- if (BuildOpts.AddImplicitDtors) {
- if (!hasTrivialDestructor(VD) || BuildOpts.AddScopes) {
- // Add the variable to scope
- Scope = createOrReuseLocalScope(Scope);
- Scope->addVar(VD);
- ScopePos = Scope->begin();
- }
+ if (!BuildOpts.AddLifetime && !BuildOpts.AddScopes &&
+ hasTrivialDestructor(VD)) {
+ assert(BuildOpts.AddImplicitDtors);
return Scope;
}
- assert(BuildOpts.AddLifetime);
// Add the variable to scope
Scope = createOrReuseLocalScope(Scope);
Scope->addVar(VD);
@@ -2116,63 +2144,6 @@ void CFGBuilder::addLocalScopeAndDtors(Stmt *S) {
addAutomaticObjHandling(ScopePos, scopeBeginPos, S);
}
-/// prependAutomaticObjDtorsWithTerminator - Prepend destructor CFGElements for
-/// variables with automatic storage duration to CFGBlock's elements vector.
-/// Elements will be prepended to physical beginning of the vector which
-/// happens to be logical end. Use blocks terminator as statement that specifies
-/// destructors call site.
-/// FIXME: This mechanism for adding automatic destructors doesn't handle
-/// no-return destructors properly.
-void CFGBuilder::prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk,
- LocalScope::const_iterator B, LocalScope::const_iterator E) {
- if (!BuildOpts.AddImplicitDtors)
- return;
- BumpVectorContext &C = cfg->getBumpVectorContext();
- CFGBlock::iterator InsertPos
- = Blk->beginAutomaticObjDtorsInsert(Blk->end(), B.distance(E), C);
- for (LocalScope::const_iterator I = B; I != E; ++I)
- InsertPos = Blk->insertAutomaticObjDtor(InsertPos, *I,
- Blk->getTerminatorStmt());
-}
-
-/// prependAutomaticObjLifetimeWithTerminator - Prepend lifetime CFGElements for
-/// variables with automatic storage duration to CFGBlock's elements vector.
-/// Elements will be prepended to physical beginning of the vector which
-/// happens to be logical end. Use blocks terminator as statement that specifies
-/// where lifetime ends.
-void CFGBuilder::prependAutomaticObjLifetimeWithTerminator(
- CFGBlock *Blk, LocalScope::const_iterator B, LocalScope::const_iterator E) {
- if (!BuildOpts.AddLifetime)
- return;
- BumpVectorContext &C = cfg->getBumpVectorContext();
- CFGBlock::iterator InsertPos =
- Blk->beginLifetimeEndsInsert(Blk->end(), B.distance(E), C);
- for (LocalScope::const_iterator I = B; I != E; ++I) {
- InsertPos =
- Blk->insertLifetimeEnds(InsertPos, *I, Blk->getTerminatorStmt());
- }
-}
-
-/// prependAutomaticObjScopeEndWithTerminator - Prepend scope end CFGElements for
-/// variables with automatic storage duration to CFGBlock's elements vector.
-/// Elements will be prepended to physical beginning of the vector which
-/// happens to be logical end. Use blocks terminator as statement that specifies
-/// where scope ends.
-const VarDecl *
-CFGBuilder::prependAutomaticObjScopeEndWithTerminator(
- CFGBlock *Blk, LocalScope::const_iterator B, LocalScope::const_iterator E) {
- if (!BuildOpts.AddScopes)
- return nullptr;
- BumpVectorContext &C = cfg->getBumpVectorContext();
- CFGBlock::iterator InsertPos =
- Blk->beginScopeEndInsert(Blk->end(), 1, C);
- LocalScope::const_iterator PlaceToInsert = B;
- for (LocalScope::const_iterator I = B; I != E; ++I)
- PlaceToInsert = I;
- Blk->insertScopeEnd(InsertPos, *PlaceToInsert, Blk->getTerminatorStmt());
- return *PlaceToInsert;
-}
-
/// Visit - Walk the subtree of a statement and add extra
/// blocks for ternary operators, &&, and ||. We also process "," and
/// DeclStmts (which may contain nested control-flow).
@@ -3387,8 +3358,7 @@ CFGBlock *CFGBuilder::VisitLabelStmt(LabelStmt *L) {
if (!LabelBlock) // This can happen when the body is empty, i.e.
LabelBlock = createBlock(); // scopes that only contains NullStmts.
- assert(LabelMap.find(L->getDecl()) == LabelMap.end() &&
- "label already in map");
+ assert(!LabelMap.contains(L->getDecl()) && "label already in map");
LabelMap[L->getDecl()] = JumpTarget(LabelBlock, ScopePos);
// Labels partition blocks, so this is the end of the basic block we were
@@ -3460,8 +3430,8 @@ CFGBlock *CFGBuilder::VisitGotoStmt(GotoStmt *G) {
BackpatchBlocks.push_back(JumpSource(Block, ScopePos));
else {
JumpTarget JT = I->second;
- addAutomaticObjHandling(ScopePos, JT.scopePosition, G);
addSuccessor(Block, JT.block);
+ addScopeChangesHandling(ScopePos, JT.scopePosition, G);
}
return Block;
@@ -3541,6 +3511,11 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
Block = Succ = TransitionBlock = createBlock(false);
TransitionBlock->setLoopTarget(F);
+
+ // Loop iteration (after increment) should end with destructor of Condition
+ // variable (if any).
+ addAutomaticObjHandling(ScopePos, LoopBeginScopePos, F);
+
if (Stmt *I = F->getInc()) {
// Generate increment code in its own basic block. This is the target of
// continue statements.
@@ -3560,8 +3535,6 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
ContinueJumpTarget = JumpTarget(Succ, ContinueScopePos);
ContinueJumpTarget.block->setLoopTarget(F);
- // Loop body should end with destructor of Condition variable (if any).
- addAutomaticObjHandling(ScopePos, LoopBeginScopePos, F);
// If body is not a compound statement create implicit scope
// and add destructors.
@@ -4155,7 +4128,7 @@ CFGBlock *CFGBuilder::VisitCXXTypeidExpr(CXXTypeidExpr *S, AddStmtChoice asc) {
// operand. [...]
// We add only potentially evaluated statements to the block to avoid
// CFG generation for unevaluated operands.
- if (S && !S->isTypeDependent() && S->isPotentiallyEvaluated())
+ if (!S->isTypeDependent() && S->isPotentiallyEvaluated())
return VisitChildren(S);
// Return block without CFG for unevaluated operands.
@@ -5215,8 +5188,7 @@ CFGBlock *CFG::createBlock() {
bool first_block = begin() == end();
// Create the block.
- CFGBlock *Mem = getAllocator().Allocate<CFGBlock>();
- new (Mem) CFGBlock(NumBlockIDs++, BlkBVC, this);
+ CFGBlock *Mem = new (getAllocator()) CFGBlock(NumBlockIDs++, BlkBVC, this);
Blocks.push_back(Mem, BlkBVC);
// If this is the first block, set it as the Entry and Exit.
@@ -5748,7 +5720,8 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
const CFGElement &E);
void CFGElement::dumpToStream(llvm::raw_ostream &OS) const {
- StmtPrinterHelper Helper(nullptr, {});
+ LangOptions LangOpts;
+ StmtPrinterHelper Helper(nullptr, LangOpts);
print_elem(OS, Helper, *this);
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
index c876eaa6358a..90803830ff41 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -102,6 +102,8 @@ AST_MATCHER(CXXTypeidExpr, isPotentiallyEvaluated) {
AST_MATCHER_P(GenericSelectionExpr, hasControllingExpr,
ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
+ if (Node.isTypePredicate())
+ return false;
return InnerMatcher.matches(*Node.getControllingExpr(), Finder, Builder);
}
@@ -605,7 +607,7 @@ FunctionParmMutationAnalyzer::FunctionParmMutationAnalyzer(
for (const CXXCtorInitializer *Init : Ctor->inits()) {
ExprMutationAnalyzer InitAnalyzer(*Init->getInit(), Context);
for (const ParmVarDecl *Parm : Ctor->parameters()) {
- if (Results.find(Parm) != Results.end())
+ if (Results.contains(Parm))
continue;
if (const Stmt *S = InitAnalyzer.findMutation(Parm))
Results[Parm] = S;
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Arena.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Arena.cpp
new file mode 100644
index 000000000000..a12da2d9b555
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Arena.cpp
@@ -0,0 +1,98 @@
+//===-- Arena.cpp ---------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Arena.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+
+namespace clang::dataflow {
+
+static std::pair<const Formula *, const Formula *>
+canonicalFormulaPair(const Formula &LHS, const Formula &RHS) {
+ auto Res = std::make_pair(&LHS, &RHS);
+ if (&RHS < &LHS) // FIXME: use a deterministic order instead
+ std::swap(Res.first, Res.second);
+ return Res;
+}
+
+const Formula &Arena::makeAtomRef(Atom A) {
+ auto [It, Inserted] = AtomRefs.try_emplace(A);
+ if (Inserted)
+ It->second =
+ &Formula::create(Alloc, Formula::AtomRef, {}, static_cast<unsigned>(A));
+ return *It->second;
+}
+
+const Formula &Arena::makeAnd(const Formula &LHS, const Formula &RHS) {
+ if (&LHS == &RHS)
+ return LHS;
+
+ auto [It, Inserted] =
+ Ands.try_emplace(canonicalFormulaPair(LHS, RHS), nullptr);
+ if (Inserted)
+ It->second = &Formula::create(Alloc, Formula::And, {&LHS, &RHS});
+ return *It->second;
+}
+
+const Formula &Arena::makeOr(const Formula &LHS, const Formula &RHS) {
+ if (&LHS == &RHS)
+ return LHS;
+
+ auto [It, Inserted] =
+ Ors.try_emplace(canonicalFormulaPair(LHS, RHS), nullptr);
+ if (Inserted)
+ It->second = &Formula::create(Alloc, Formula::Or, {&LHS, &RHS});
+ return *It->second;
+}
+
+const Formula &Arena::makeNot(const Formula &Val) {
+ auto [It, Inserted] = Nots.try_emplace(&Val, nullptr);
+ if (Inserted)
+ It->second = &Formula::create(Alloc, Formula::Not, {&Val});
+ return *It->second;
+}
+
+const Formula &Arena::makeImplies(const Formula &LHS, const Formula &RHS) {
+ if (&LHS == &RHS)
+ return makeLiteral(true);
+
+ auto [It, Inserted] =
+ Implies.try_emplace(std::make_pair(&LHS, &RHS), nullptr);
+ if (Inserted)
+ It->second = &Formula::create(Alloc, Formula::Implies, {&LHS, &RHS});
+ return *It->second;
+}
+
+const Formula &Arena::makeEquals(const Formula &LHS, const Formula &RHS) {
+ if (&LHS == &RHS)
+ return makeLiteral(true);
+
+ auto [It, Inserted] =
+ Equals.try_emplace(canonicalFormulaPair(LHS, RHS), nullptr);
+ if (Inserted)
+ It->second = &Formula::create(Alloc, Formula::Equal, {&LHS, &RHS});
+ return *It->second;
+}
+
+IntegerValue &Arena::makeIntLiteral(llvm::APInt Value) {
+ auto [It, Inserted] = IntegerLiterals.try_emplace(Value, nullptr);
+
+ if (Inserted)
+ It->second = &create<IntegerValue>();
+ return *It->second;
+}
+
+BoolValue &Arena::makeBoolValue(const Formula &F) {
+ auto [It, Inserted] = FormulaValues.try_emplace(&F);
+ if (Inserted)
+ It->second = (F.kind() == Formula::AtomRef)
+ ? (BoolValue *)&create<AtomicBoolValue>(F)
+ : &create<FormulaBoolValue>(F);
+ return *It->second;
+}
+
+} // namespace clang::dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
index 2492b5203724..c80525dc4f34 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Stmt.h"
#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Error.h"
#include <utility>
@@ -44,10 +45,47 @@ buildStmtToBasicBlockMap(const CFG &Cfg) {
return StmtToBlock;
}
+static llvm::BitVector findReachableBlocks(const CFG &Cfg) {
+ llvm::BitVector BlockReachable(Cfg.getNumBlockIDs(), false);
+
+ llvm::SmallVector<const CFGBlock *> BlocksToVisit;
+ BlocksToVisit.push_back(&Cfg.getEntry());
+ while (!BlocksToVisit.empty()) {
+ const CFGBlock *Block = BlocksToVisit.back();
+ BlocksToVisit.pop_back();
+
+ if (BlockReachable[Block->getBlockID()])
+ continue;
+
+ BlockReachable[Block->getBlockID()] = true;
+
+ for (const CFGBlock *Succ : Block->succs())
+ if (Succ)
+ BlocksToVisit.push_back(Succ);
+ }
+
+ return BlockReachable;
+}
+
llvm::Expected<ControlFlowContext>
-ControlFlowContext::build(const Decl *D, Stmt &S, ASTContext &C) {
+ControlFlowContext::build(const FunctionDecl &Func) {
+ if (!Func.hasBody())
+ return llvm::createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot analyze function without a body");
+
+ return build(Func, *Func.getBody(), Func.getASTContext());
+}
+
+llvm::Expected<ControlFlowContext>
+ControlFlowContext::build(const Decl &D, Stmt &S, ASTContext &C) {
+ if (D.isTemplated())
+ return llvm::createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot analyze templated declarations");
+
CFG::BuildOptions Options;
- Options.PruneTriviallyFalseEdges = false;
+ Options.PruneTriviallyFalseEdges = true;
Options.AddImplicitDtors = true;
Options.AddTemporaryDtors = true;
Options.AddInitializers = true;
@@ -56,7 +94,7 @@ ControlFlowContext::build(const Decl *D, Stmt &S, ASTContext &C) {
// Ensure that all sub-expressions in basic blocks are evaluated.
Options.setAllAlwaysAdd();
- auto Cfg = CFG::buildCFG(D, &S, &C, Options);
+ auto Cfg = CFG::buildCFG(&D, &S, &C, Options);
if (Cfg == nullptr)
return llvm::createStringError(
std::make_error_code(std::errc::invalid_argument),
@@ -64,7 +102,21 @@ ControlFlowContext::build(const Decl *D, Stmt &S, ASTContext &C) {
llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock =
buildStmtToBasicBlockMap(*Cfg);
- return ControlFlowContext(D, std::move(Cfg), std::move(StmtToBlock));
+
+ llvm::BitVector BlockReachable = findReachableBlocks(*Cfg);
+
+ return ControlFlowContext(&D, std::move(Cfg), std::move(StmtToBlock),
+ std::move(BlockReachable));
+}
+
+llvm::Expected<ControlFlowContext>
+ControlFlowContext::build(const Decl *D, Stmt &S, ASTContext &C) {
+ if (D == nullptr)
+ return llvm::createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Declaration must not be null");
+
+ return build(*D, S, C);
}
} // namespace dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
index 480606bdac8d..9f72dc8f6ab3 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
@@ -15,54 +15,68 @@
#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Analysis/FlowSensitive/DebugSupport.h"
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Analysis/FlowSensitive/Logger.h"
#include "clang/Analysis/FlowSensitive/Value.h"
#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <memory>
+#include <string>
#include <utility>
+#include <vector>
+
+static llvm::cl::opt<std::string> DataflowLog(
+ "dataflow-log", llvm::cl::Hidden, llvm::cl::ValueOptional,
+ llvm::cl::desc("Emit log of dataflow analysis. With no arg, writes textual "
+ "log to stderr. With an arg, writes HTML logs under the "
+ "specified directory (one per analyzed function)."));
namespace clang {
namespace dataflow {
-void DataflowAnalysisContext::addModeledFields(
- const llvm::DenseSet<const FieldDecl *> &Fields) {
- llvm::set_union(ModeledFields, Fields);
+FieldSet DataflowAnalysisContext::getModeledFields(QualType Type) {
+ // During context-sensitive analysis, a struct may be allocated in one
+ // function, but its field accessed in a function lower in the stack than
+ // the allocation. Since we only collect fields used in the function where
+ // the allocation occurs, we can't apply that filter when performing
+ // context-sensitive analysis. But, this only applies to storage locations,
+ // since field access it not allowed to fail. In contrast, field *values*
+ // don't need this allowance, since the API allows for uninitialized fields.
+ if (Opts.ContextSensitiveOpts)
+ return getObjectFields(Type);
+
+ return llvm::set_intersection(getObjectFields(Type), ModeledFields);
}
-llvm::DenseSet<const FieldDecl *>
-DataflowAnalysisContext::getReferencedFields(QualType Type) {
- llvm::DenseSet<const FieldDecl *> Fields = getObjectFields(Type);
- llvm::set_intersect(Fields, ModeledFields);
- return Fields;
+void DataflowAnalysisContext::addModeledFields(const FieldSet &Fields) {
+ ModeledFields.set_union(Fields);
}
StorageLocation &DataflowAnalysisContext::createStorageLocation(QualType Type) {
- if (!Type.isNull() &&
- (Type->isStructureOrClassType() || Type->isUnionType())) {
+ if (!Type.isNull() && Type->isRecordType()) {
llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
- // During context-sensitive analysis, a struct may be allocated in one
- // function, but its field accessed in a function lower in the stack than
- // the allocation. Since we only collect fields used in the function where
- // the allocation occurs, we can't apply that filter when performing
- // context-sensitive analysis. But, this only applies to storage locations,
- // since field access it not allowed to fail. In contrast, field *values*
- // don't need this allowance, since the API allows for uninitialized fields.
- auto Fields = Opts.ContextSensitiveOpts ? getObjectFields(Type)
- : getReferencedFields(Type);
- for (const FieldDecl *Field : Fields)
- FieldLocs.insert({Field, &createStorageLocation(Field->getType())});
- return takeOwnership(
- std::make_unique<AggregateStorageLocation>(Type, std::move(FieldLocs)));
+ for (const FieldDecl *Field : getModeledFields(Type))
+ if (Field->getType()->isReferenceType())
+ FieldLocs.insert({Field, nullptr});
+ else
+ FieldLocs.insert({Field, &createStorageLocation(
+ Field->getType().getNonReferenceType())});
+ return arena().create<AggregateStorageLocation>(Type, std::move(FieldLocs));
}
- return takeOwnership(std::make_unique<ScalarStorageLocation>(Type));
+ return arena().create<ScalarStorageLocation>(Type);
}
StorageLocation &
DataflowAnalysisContext::getStableStorageLocation(const VarDecl &D) {
if (auto *Loc = getStorageLocation(D))
return *Loc;
- auto &Loc = createStorageLocation(D.getType());
+ auto &Loc = createStorageLocation(D.getType().getNonReferenceType());
setStorageLocation(D, Loc);
return Loc;
}
@@ -83,275 +97,120 @@ DataflowAnalysisContext::getOrCreateNullPointerValue(QualType PointeeType) {
auto Res = NullPointerVals.try_emplace(CanonicalPointeeType, nullptr);
if (Res.second) {
auto &PointeeLoc = createStorageLocation(CanonicalPointeeType);
- Res.first->second =
- &takeOwnership(std::make_unique<PointerValue>(PointeeLoc));
+ Res.first->second = &arena().create<PointerValue>(PointeeLoc);
}
return *Res.first->second;
}
-static std::pair<BoolValue *, BoolValue *>
-makeCanonicalBoolValuePair(BoolValue &LHS, BoolValue &RHS) {
- auto Res = std::make_pair(&LHS, &RHS);
- if (&RHS < &LHS)
- std::swap(Res.first, Res.second);
- return Res;
-}
-
-BoolValue &DataflowAnalysisContext::getOrCreateConjunction(BoolValue &LHS,
- BoolValue &RHS) {
- if (&LHS == &RHS)
- return LHS;
-
- auto Res = ConjunctionVals.try_emplace(makeCanonicalBoolValuePair(LHS, RHS),
- nullptr);
- if (Res.second)
- Res.first->second =
- &takeOwnership(std::make_unique<ConjunctionValue>(LHS, RHS));
- return *Res.first->second;
-}
-
-BoolValue &DataflowAnalysisContext::getOrCreateDisjunction(BoolValue &LHS,
- BoolValue &RHS) {
- if (&LHS == &RHS)
- return LHS;
-
- auto Res = DisjunctionVals.try_emplace(makeCanonicalBoolValuePair(LHS, RHS),
- nullptr);
- if (Res.second)
- Res.first->second =
- &takeOwnership(std::make_unique<DisjunctionValue>(LHS, RHS));
- return *Res.first->second;
-}
-
-BoolValue &DataflowAnalysisContext::getOrCreateNegation(BoolValue &Val) {
- auto Res = NegationVals.try_emplace(&Val, nullptr);
- if (Res.second)
- Res.first->second = &takeOwnership(std::make_unique<NegationValue>(Val));
- return *Res.first->second;
-}
-
-BoolValue &DataflowAnalysisContext::getOrCreateImplication(BoolValue &LHS,
- BoolValue &RHS) {
- if (&LHS == &RHS)
- return getBoolLiteralValue(true);
-
- auto Res = ImplicationVals.try_emplace(std::make_pair(&LHS, &RHS), nullptr);
- if (Res.second)
- Res.first->second =
- &takeOwnership(std::make_unique<ImplicationValue>(LHS, RHS));
- return *Res.first->second;
-}
-
-BoolValue &DataflowAnalysisContext::getOrCreateIff(BoolValue &LHS,
- BoolValue &RHS) {
- if (&LHS == &RHS)
- return getBoolLiteralValue(true);
-
- auto Res = BiconditionalVals.try_emplace(makeCanonicalBoolValuePair(LHS, RHS),
- nullptr);
- if (Res.second)
- Res.first->second =
- &takeOwnership(std::make_unique<BiconditionalValue>(LHS, RHS));
- return *Res.first->second;
-}
-
-AtomicBoolValue &DataflowAnalysisContext::makeFlowConditionToken() {
- return createAtomicBoolValue();
-}
-
void DataflowAnalysisContext::addFlowConditionConstraint(
- AtomicBoolValue &Token, BoolValue &Constraint) {
- auto Res = FlowConditionConstraints.try_emplace(&Token, &Constraint);
+ Atom Token, const Formula &Constraint) {
+ auto Res = FlowConditionConstraints.try_emplace(Token, &Constraint);
if (!Res.second) {
- Res.first->second = &getOrCreateConjunction(*Res.first->second, Constraint);
+ Res.first->second =
+ &arena().makeAnd(*Res.first->second, Constraint);
}
}
-AtomicBoolValue &
-DataflowAnalysisContext::forkFlowCondition(AtomicBoolValue &Token) {
- auto &ForkToken = makeFlowConditionToken();
- FlowConditionDeps[&ForkToken].insert(&Token);
- addFlowConditionConstraint(ForkToken, Token);
+Atom DataflowAnalysisContext::forkFlowCondition(Atom Token) {
+ Atom ForkToken = arena().makeFlowConditionToken();
+ FlowConditionDeps[ForkToken].insert(Token);
+ addFlowConditionConstraint(ForkToken, arena().makeAtomRef(Token));
return ForkToken;
}
-AtomicBoolValue &
-DataflowAnalysisContext::joinFlowConditions(AtomicBoolValue &FirstToken,
- AtomicBoolValue &SecondToken) {
- auto &Token = makeFlowConditionToken();
- FlowConditionDeps[&Token].insert(&FirstToken);
- FlowConditionDeps[&Token].insert(&SecondToken);
+Atom
+DataflowAnalysisContext::joinFlowConditions(Atom FirstToken,
+ Atom SecondToken) {
+ Atom Token = arena().makeFlowConditionToken();
+ FlowConditionDeps[Token].insert(FirstToken);
+ FlowConditionDeps[Token].insert(SecondToken);
addFlowConditionConstraint(Token,
- getOrCreateDisjunction(FirstToken, SecondToken));
+ arena().makeOr(arena().makeAtomRef(FirstToken),
+ arena().makeAtomRef(SecondToken)));
return Token;
}
-Solver::Result
-DataflowAnalysisContext::querySolver(llvm::DenseSet<BoolValue *> Constraints) {
- Constraints.insert(&getBoolLiteralValue(true));
- Constraints.insert(&getOrCreateNegation(getBoolLiteralValue(false)));
- return S->solve(std::move(Constraints));
+Solver::Result DataflowAnalysisContext::querySolver(
+ llvm::SetVector<const Formula *> Constraints) {
+ Constraints.insert(&arena().makeLiteral(true));
+ Constraints.insert(&arena().makeNot(arena().makeLiteral(false)));
+ return S->solve(Constraints.getArrayRef());
}
-bool DataflowAnalysisContext::flowConditionImplies(AtomicBoolValue &Token,
- BoolValue &Val) {
+bool DataflowAnalysisContext::flowConditionImplies(Atom Token,
+ const Formula &Val) {
// Returns true if and only if truth assignment of the flow condition implies
// that `Val` is also true. We prove whether or not this property holds by
// reducing the problem to satisfiability checking. In other words, we attempt
// to show that assuming `Val` is false makes the constraints induced by the
// flow condition unsatisfiable.
- llvm::DenseSet<BoolValue *> Constraints = {&Token, &getOrCreateNegation(Val)};
- llvm::DenseSet<AtomicBoolValue *> VisitedTokens;
+ llvm::SetVector<const Formula *> Constraints;
+ Constraints.insert(&arena().makeAtomRef(Token));
+ Constraints.insert(&arena().makeNot(Val));
+ llvm::DenseSet<Atom> VisitedTokens;
addTransitiveFlowConditionConstraints(Token, Constraints, VisitedTokens);
return isUnsatisfiable(std::move(Constraints));
}
-bool DataflowAnalysisContext::flowConditionIsTautology(AtomicBoolValue &Token) {
+bool DataflowAnalysisContext::flowConditionIsTautology(Atom Token) {
// Returns true if and only if we cannot prove that the flow condition can
// ever be false.
- llvm::DenseSet<BoolValue *> Constraints = {&getOrCreateNegation(Token)};
- llvm::DenseSet<AtomicBoolValue *> VisitedTokens;
+ llvm::SetVector<const Formula *> Constraints;
+ Constraints.insert(&arena().makeNot(arena().makeAtomRef(Token)));
+ llvm::DenseSet<Atom> VisitedTokens;
addTransitiveFlowConditionConstraints(Token, Constraints, VisitedTokens);
return isUnsatisfiable(std::move(Constraints));
}
-bool DataflowAnalysisContext::equivalentBoolValues(BoolValue &Val1,
- BoolValue &Val2) {
- llvm::DenseSet<BoolValue *> Constraints = {
- &getOrCreateNegation(getOrCreateIff(Val1, Val2))};
- return isUnsatisfiable(Constraints);
+bool DataflowAnalysisContext::equivalentFormulas(const Formula &Val1,
+ const Formula &Val2) {
+ llvm::SetVector<const Formula *> Constraints;
+ Constraints.insert(&arena().makeNot(arena().makeEquals(Val1, Val2)));
+ return isUnsatisfiable(std::move(Constraints));
}
void DataflowAnalysisContext::addTransitiveFlowConditionConstraints(
- AtomicBoolValue &Token, llvm::DenseSet<BoolValue *> &Constraints,
- llvm::DenseSet<AtomicBoolValue *> &VisitedTokens) {
- auto Res = VisitedTokens.insert(&Token);
+ Atom Token, llvm::SetVector<const Formula *> &Constraints,
+ llvm::DenseSet<Atom> &VisitedTokens) {
+ auto Res = VisitedTokens.insert(Token);
if (!Res.second)
return;
- auto ConstraintsIt = FlowConditionConstraints.find(&Token);
+ auto ConstraintsIt = FlowConditionConstraints.find(Token);
if (ConstraintsIt == FlowConditionConstraints.end()) {
- Constraints.insert(&Token);
+ Constraints.insert(&arena().makeAtomRef(Token));
} else {
// Bind flow condition token via `iff` to its set of constraints:
// FC <=> (C1 ^ C2 ^ ...), where Ci are constraints
- Constraints.insert(&getOrCreateIff(Token, *ConstraintsIt->second));
+ Constraints.insert(&arena().makeEquals(arena().makeAtomRef(Token),
+ *ConstraintsIt->second));
}
- auto DepsIt = FlowConditionDeps.find(&Token);
+ auto DepsIt = FlowConditionDeps.find(Token);
if (DepsIt != FlowConditionDeps.end()) {
- for (AtomicBoolValue *DepToken : DepsIt->second) {
- addTransitiveFlowConditionConstraints(*DepToken, Constraints,
+ for (Atom DepToken : DepsIt->second) {
+ addTransitiveFlowConditionConstraints(DepToken, Constraints,
VisitedTokens);
}
}
}
-BoolValue &DataflowAnalysisContext::substituteBoolValue(
- BoolValue &Val,
- llvm::DenseMap<BoolValue *, BoolValue *> &SubstitutionsCache) {
- auto It = SubstitutionsCache.find(&Val);
- if (It != SubstitutionsCache.end()) {
- // Return memoized result of substituting this boolean value.
- return *It->second;
- }
-
- // Handle substitution on the boolean value (and its subvalues), saving the
- // result into `SubstitutionsCache`.
- BoolValue *Result;
- switch (Val.getKind()) {
- case Value::Kind::AtomicBool: {
- Result = &Val;
- break;
- }
- case Value::Kind::Negation: {
- auto &Negation = *cast<NegationValue>(&Val);
- auto &Sub = substituteBoolValue(Negation.getSubVal(), SubstitutionsCache);
- Result = &getOrCreateNegation(Sub);
- break;
- }
- case Value::Kind::Disjunction: {
- auto &Disjunct = *cast<DisjunctionValue>(&Val);
- auto &LeftSub =
- substituteBoolValue(Disjunct.getLeftSubValue(), SubstitutionsCache);
- auto &RightSub =
- substituteBoolValue(Disjunct.getRightSubValue(), SubstitutionsCache);
- Result = &getOrCreateDisjunction(LeftSub, RightSub);
- break;
- }
- case Value::Kind::Conjunction: {
- auto &Conjunct = *cast<ConjunctionValue>(&Val);
- auto &LeftSub =
- substituteBoolValue(Conjunct.getLeftSubValue(), SubstitutionsCache);
- auto &RightSub =
- substituteBoolValue(Conjunct.getRightSubValue(), SubstitutionsCache);
- Result = &getOrCreateConjunction(LeftSub, RightSub);
- break;
- }
- case Value::Kind::Implication: {
- auto &IV = *cast<ImplicationValue>(&Val);
- auto &LeftSub =
- substituteBoolValue(IV.getLeftSubValue(), SubstitutionsCache);
- auto &RightSub =
- substituteBoolValue(IV.getRightSubValue(), SubstitutionsCache);
- Result = &getOrCreateImplication(LeftSub, RightSub);
- break;
- }
- case Value::Kind::Biconditional: {
- auto &BV = *cast<BiconditionalValue>(&Val);
- auto &LeftSub =
- substituteBoolValue(BV.getLeftSubValue(), SubstitutionsCache);
- auto &RightSub =
- substituteBoolValue(BV.getRightSubValue(), SubstitutionsCache);
- Result = &getOrCreateIff(LeftSub, RightSub);
- break;
- }
- default:
- llvm_unreachable("Unhandled Value Kind");
- }
- SubstitutionsCache[&Val] = Result;
- return *Result;
-}
+void DataflowAnalysisContext::dumpFlowCondition(Atom Token,
+ llvm::raw_ostream &OS) {
+ llvm::SetVector<const Formula *> Constraints;
+ Constraints.insert(&arena().makeAtomRef(Token));
+ llvm::DenseSet<Atom> VisitedTokens;
+ addTransitiveFlowConditionConstraints(Token, Constraints, VisitedTokens);
-BoolValue &DataflowAnalysisContext::buildAndSubstituteFlowCondition(
- AtomicBoolValue &Token,
- llvm::DenseMap<AtomicBoolValue *, BoolValue *> Substitutions) {
- assert(
- Substitutions.find(&getBoolLiteralValue(true)) == Substitutions.end() &&
- Substitutions.find(&getBoolLiteralValue(false)) == Substitutions.end() &&
- "Do not substitute true/false boolean literals");
- llvm::DenseMap<BoolValue *, BoolValue *> SubstitutionsCache(
- Substitutions.begin(), Substitutions.end());
- return buildAndSubstituteFlowConditionWithCache(Token, SubstitutionsCache);
-}
+ // TODO: have formulas know about true/false directly instead
+ Atom True = arena().makeLiteral(true).getAtom();
+ Atom False = arena().makeLiteral(false).getAtom();
+ Formula::AtomNames Names = {{False, "false"}, {True, "true"}};
-BoolValue &DataflowAnalysisContext::buildAndSubstituteFlowConditionWithCache(
- AtomicBoolValue &Token,
- llvm::DenseMap<BoolValue *, BoolValue *> &SubstitutionsCache) {
- auto ConstraintsIt = FlowConditionConstraints.find(&Token);
- if (ConstraintsIt == FlowConditionConstraints.end()) {
- return getBoolLiteralValue(true);
+ for (const auto *Constraint : Constraints) {
+ Constraint->print(OS, &Names);
+ OS << "\n";
}
- auto DepsIt = FlowConditionDeps.find(&Token);
- if (DepsIt != FlowConditionDeps.end()) {
- for (AtomicBoolValue *DepToken : DepsIt->second) {
- auto &NewDep = buildAndSubstituteFlowConditionWithCache(
- *DepToken, SubstitutionsCache);
- SubstitutionsCache[DepToken] = &NewDep;
- }
- }
- return substituteBoolValue(*ConstraintsIt->second, SubstitutionsCache);
-}
-
-void DataflowAnalysisContext::dumpFlowCondition(AtomicBoolValue &Token) {
- llvm::DenseSet<BoolValue *> Constraints = {&Token};
- llvm::DenseSet<AtomicBoolValue *> VisitedTokens;
- addTransitiveFlowConditionConstraints(Token, Constraints, VisitedTokens);
-
- llvm::DenseMap<const AtomicBoolValue *, std::string> AtomNames = {
- {&getBoolLiteralValue(false), "False"},
- {&getBoolLiteralValue(true), "True"}};
- llvm::dbgs() << debugString(Constraints, AtomNames);
}
const ControlFlowContext *
@@ -364,8 +223,8 @@ DataflowAnalysisContext::getControlFlowContext(const FunctionDecl *F) {
if (It != FunctionContexts.end())
return &It->second;
- if (Stmt *Body = F->getBody()) {
- auto CFCtx = ControlFlowContext::build(F, *Body, F->getASTContext());
+ if (F->hasBody()) {
+ auto CFCtx = ControlFlowContext::build(*F);
// FIXME: Handle errors.
assert(CFCtx);
auto Result = FunctionContexts.insert({F, std::move(*CFCtx)});
@@ -375,6 +234,54 @@ DataflowAnalysisContext::getControlFlowContext(const FunctionDecl *F) {
return nullptr;
}
+static std::unique_ptr<Logger> makeLoggerFromCommandLine() {
+ if (DataflowLog.empty())
+ return Logger::textual(llvm::errs());
+
+ llvm::StringRef Dir = DataflowLog;
+ if (auto EC = llvm::sys::fs::create_directories(Dir))
+ llvm::errs() << "Failed to create log dir: " << EC.message() << "\n";
+ // All analysis runs within a process will log to the same directory.
+ // Share a counter so they don't all overwrite each other's 0.html.
+ // (Don't share a logger, it's not threadsafe).
+ static std::atomic<unsigned> Counter = {0};
+ auto StreamFactory =
+ [Dir(Dir.str())]() mutable -> std::unique_ptr<llvm::raw_ostream> {
+ llvm::SmallString<256> File(Dir);
+ llvm::sys::path::append(File,
+ std::to_string(Counter.fetch_add(1)) + ".html");
+ std::error_code EC;
+ auto OS = std::make_unique<llvm::raw_fd_ostream>(File, EC);
+ if (EC) {
+ llvm::errs() << "Failed to create log " << File << ": " << EC.message()
+ << "\n";
+ return std::make_unique<llvm::raw_null_ostream>();
+ }
+ return OS;
+ };
+ return Logger::html(std::move(StreamFactory));
+}
+
+DataflowAnalysisContext::DataflowAnalysisContext(std::unique_ptr<Solver> S,
+ Options Opts)
+ : S(std::move(S)), A(std::make_unique<Arena>()), Opts(Opts) {
+ assert(this->S != nullptr);
+ // If the -dataflow-log command-line flag was set, synthesize a logger.
+ // This is ugly but provides a uniform method for ad-hoc debugging dataflow-
+ // based tools.
+ if (Opts.Log == nullptr) {
+ if (DataflowLog.getNumOccurrences() > 0) {
+ LogOwner = makeLoggerFromCommandLine();
+ this->Opts.Log = LogOwner.get();
+ // FIXME: if the flag is given a value, write an HTML log to a file.
+ } else {
+ this->Opts.Log = &Logger::null();
+ }
+ }
+}
+
+DataflowAnalysisContext::~DataflowAnalysisContext() = default;
+
} // namespace dataflow
} // namespace clang
@@ -399,9 +306,8 @@ const Stmt &clang::dataflow::ignoreCFGOmittedNodes(const Stmt &S) {
// FIXME: Does not precisely handle non-virtual diamond inheritance. A single
// field decl will be modeled for all instances of the inherited field.
-static void
-getFieldsFromClassHierarchy(QualType Type,
- llvm::DenseSet<const FieldDecl *> &Fields) {
+static void getFieldsFromClassHierarchy(QualType Type,
+ clang::dataflow::FieldSet &Fields) {
if (Type->isIncompleteType() || Type->isDependentType() ||
!Type->isRecordType())
return;
@@ -414,9 +320,8 @@ getFieldsFromClassHierarchy(QualType Type,
}
/// Gets the set of all fields in the type.
-llvm::DenseSet<const FieldDecl *>
-clang::dataflow::getObjectFields(QualType Type) {
- llvm::DenseSet<const FieldDecl *> Fields;
+clang::dataflow::FieldSet clang::dataflow::getObjectFields(QualType Type) {
+ FieldSet Fields;
getFieldsFromClassHierarchy(Type, Fields);
return Fields;
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
index cc3992805cc7..3a91025df6e1 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
@@ -20,6 +20,7 @@
#include "clang/Analysis/FlowSensitive/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
@@ -107,22 +108,44 @@ static Value *mergeDistinctValues(QualType Type, Value &Val1,
// if (o.has_value())
// x = o.value();
// ```
- auto *Expr1 = cast<BoolValue>(&Val1);
- auto *Expr2 = cast<BoolValue>(&Val2);
- auto &MergedVal = MergedEnv.makeAtomicBoolValue();
- MergedEnv.addToFlowCondition(MergedEnv.makeOr(
- MergedEnv.makeAnd(Env1.getFlowConditionToken(),
- MergedEnv.makeIff(MergedVal, *Expr1)),
- MergedEnv.makeAnd(Env2.getFlowConditionToken(),
- MergedEnv.makeIff(MergedVal, *Expr2))));
- return &MergedVal;
+ auto &Expr1 = cast<BoolValue>(Val1).formula();
+ auto &Expr2 = cast<BoolValue>(Val2).formula();
+ auto &A = MergedEnv.arena();
+ auto &MergedVal = A.makeAtomRef(A.makeAtom());
+ MergedEnv.addToFlowCondition(
+ A.makeOr(A.makeAnd(A.makeAtomRef(Env1.getFlowConditionToken()),
+ A.makeEquals(MergedVal, Expr1)),
+ A.makeAnd(A.makeAtomRef(Env2.getFlowConditionToken()),
+ A.makeEquals(MergedVal, Expr2))));
+ return &A.makeBoolValue(MergedVal);
+ }
+
+ Value *MergedVal = nullptr;
+ if (auto *StructVal1 = dyn_cast<StructValue>(&Val1)) {
+ [[maybe_unused]] auto *StructVal2 = cast<StructValue>(&Val2);
+
+ // Values to be merged are always associated with the same location in
+ // `LocToVal`. The location stored in `StructVal` should therefore also
+ // be the same.
+ assert(&StructVal1->getAggregateLoc() == &StructVal2->getAggregateLoc());
+
+ // `StructVal1` and `StructVal2` may have different properties associated
+ // with them. Create a new `StructValue` without any properties so that we
+ // soundly approximate both values. If a particular analysis needs to merge
+ // properties, it should do so in `DataflowAnalysis::merge()`.
+ MergedVal = &MergedEnv.create<StructValue>(StructVal1->getAggregateLoc());
+ } else {
+ MergedVal = MergedEnv.createValue(Type);
}
// FIXME: Consider destroying `MergedValue` immediately if `ValueModel::merge`
// returns false to avoid storing unneeded values in `DACtx`.
// FIXME: Creating the value based on the type alone creates misshapen values
// for lvalues, since the type does not reflect the need for `ReferenceValue`.
- if (Value *MergedVal = MergedEnv.createValue(Type))
+ // This issue will be resolved when `ReferenceValue` is eliminated as part
+ // of the ongoing migration to strict handling of value categories (see
+ // https://discourse.llvm.org/t/70086 for details).
+ if (MergedVal)
if (Model.merge(Type, Val1, Env1, Val2, Env2, *MergedVal, MergedEnv))
return MergedVal;
@@ -156,17 +179,24 @@ static Value &widenDistinctValues(QualType Type, Value &Prev,
/// Initializes a global storage value.
static void insertIfGlobal(const Decl &D,
- llvm::DenseSet<const FieldDecl *> &Fields,
llvm::DenseSet<const VarDecl *> &Vars) {
if (auto *V = dyn_cast<VarDecl>(&D))
if (V->hasGlobalStorage())
Vars.insert(V);
}
-static void getFieldsAndGlobalVars(const Decl &D,
- llvm::DenseSet<const FieldDecl *> &Fields,
- llvm::DenseSet<const VarDecl *> &Vars) {
- insertIfGlobal(D, Fields, Vars);
+static void insertIfFunction(const Decl &D,
+ llvm::DenseSet<const FunctionDecl *> &Funcs) {
+ if (auto *FD = dyn_cast<FunctionDecl>(&D))
+ Funcs.insert(FD);
+}
+
+static void
+getFieldsGlobalsAndFuncs(const Decl &D, FieldSet &Fields,
+ llvm::DenseSet<const VarDecl *> &Vars,
+ llvm::DenseSet<const FunctionDecl *> &Funcs) {
+ insertIfGlobal(D, Vars);
+ insertIfFunction(D, Funcs);
if (const auto *Decomp = dyn_cast<DecompositionDecl>(&D))
for (const auto *B : Decomp->bindings())
if (auto *ME = dyn_cast_or_null<MemberExpr>(B->getBinding()))
@@ -175,60 +205,99 @@ static void getFieldsAndGlobalVars(const Decl &D,
Fields.insert(FD);
}
-/// Traverses `S` and inserts into `Vars` any global storage values that are
-/// declared in or referenced from sub-statements.
-static void getFieldsAndGlobalVars(const Stmt &S,
- llvm::DenseSet<const FieldDecl *> &Fields,
- llvm::DenseSet<const VarDecl *> &Vars) {
+/// Traverses `S` and inserts into `Fields`, `Vars` and `Funcs` any fields,
+/// global variables and functions that are declared in or referenced from
+/// sub-statements.
+static void
+getFieldsGlobalsAndFuncs(const Stmt &S, FieldSet &Fields,
+ llvm::DenseSet<const VarDecl *> &Vars,
+ llvm::DenseSet<const FunctionDecl *> &Funcs) {
for (auto *Child : S.children())
if (Child != nullptr)
- getFieldsAndGlobalVars(*Child, Fields, Vars);
+ getFieldsGlobalsAndFuncs(*Child, Fields, Vars, Funcs);
+ if (const auto *DefaultInit = dyn_cast<CXXDefaultInitExpr>(&S))
+ getFieldsGlobalsAndFuncs(*DefaultInit->getExpr(), Fields, Vars, Funcs);
if (auto *DS = dyn_cast<DeclStmt>(&S)) {
if (DS->isSingleDecl())
- getFieldsAndGlobalVars(*DS->getSingleDecl(), Fields, Vars);
+ getFieldsGlobalsAndFuncs(*DS->getSingleDecl(), Fields, Vars, Funcs);
else
for (auto *D : DS->getDeclGroup())
- getFieldsAndGlobalVars(*D, Fields, Vars);
+ getFieldsGlobalsAndFuncs(*D, Fields, Vars, Funcs);
} else if (auto *E = dyn_cast<DeclRefExpr>(&S)) {
- insertIfGlobal(*E->getDecl(), Fields, Vars);
+ insertIfGlobal(*E->getDecl(), Vars);
+ insertIfFunction(*E->getDecl(), Funcs);
} else if (auto *E = dyn_cast<MemberExpr>(&S)) {
// FIXME: should we be using `E->getFoundDecl()`?
const ValueDecl *VD = E->getMemberDecl();
- insertIfGlobal(*VD, Fields, Vars);
+ insertIfGlobal(*VD, Vars);
+ insertIfFunction(*VD, Funcs);
if (const auto *FD = dyn_cast<FieldDecl>(VD))
Fields.insert(FD);
+ } else if (auto *InitList = dyn_cast<InitListExpr>(&S)) {
+ if (RecordDecl *RD = InitList->getType()->getAsRecordDecl())
+ for (const auto *FD : getFieldsForInitListExpr(RD))
+ Fields.insert(FD);
}
}
// FIXME: Add support for resetting globals after function calls to enable
// the implementation of sound analyses.
-void Environment::initVars(llvm::DenseSet<const VarDecl *> Vars) {
+void Environment::initFieldsGlobalsAndFuncs(const FunctionDecl *FuncDecl) {
+ assert(FuncDecl->getBody() != nullptr);
+
+ FieldSet Fields;
+ llvm::DenseSet<const VarDecl *> Vars;
+ llvm::DenseSet<const FunctionDecl *> Funcs;
+
+ // Look for global variable and field references in the
+ // constructor-initializers.
+ if (const auto *CtorDecl = dyn_cast<CXXConstructorDecl>(FuncDecl)) {
+ for (const auto *Init : CtorDecl->inits()) {
+ if (Init->isMemberInitializer()) {
+ Fields.insert(Init->getMember());
+ } else if (Init->isIndirectMemberInitializer()) {
+ for (const auto *I : Init->getIndirectMember()->chain())
+ Fields.insert(cast<FieldDecl>(I));
+ }
+ const Expr *E = Init->getInit();
+ assert(E != nullptr);
+ getFieldsGlobalsAndFuncs(*E, Fields, Vars, Funcs);
+ }
+ // Add all fields mentioned in default member initializers.
+ for (const FieldDecl *F : CtorDecl->getParent()->fields())
+ if (const auto *I = F->getInClassInitializer())
+ getFieldsGlobalsAndFuncs(*I, Fields, Vars, Funcs);
+ }
+ getFieldsGlobalsAndFuncs(*FuncDecl->getBody(), Fields, Vars, Funcs);
+
+ // These have to be added before the lines that follow to ensure that
+ // `create*` work correctly for structs.
+ DACtx->addModeledFields(Fields);
+
for (const VarDecl *D : Vars) {
- if (getStorageLocation(*D, SkipPast::None) != nullptr)
+ if (getStorageLocation(*D) != nullptr)
continue;
- auto &Loc = createStorageLocation(*D);
- setStorageLocation(*D, Loc);
- if (auto *Val = createValue(D->getType()))
- setValue(Loc, *Val);
+
+ setStorageLocation(*D, createObject(*D));
+ }
+
+ for (const FunctionDecl *FD : Funcs) {
+ if (getStorageLocation(*FD) != nullptr)
+ continue;
+ auto &Loc = createStorageLocation(FD->getType());
+ setStorageLocation(*FD, Loc);
}
}
Environment::Environment(DataflowAnalysisContext &DACtx)
- : DACtx(&DACtx), FlowConditionToken(&DACtx.makeFlowConditionToken()) {}
-
-Environment::Environment(const Environment &Other)
- : DACtx(Other.DACtx), CallStack(Other.CallStack),
- ReturnLoc(Other.ReturnLoc), ThisPointeeLoc(Other.ThisPointeeLoc),
- DeclToLoc(Other.DeclToLoc), ExprToLoc(Other.ExprToLoc),
- LocToVal(Other.LocToVal), MemberLocToStruct(Other.MemberLocToStruct),
- FlowConditionToken(&DACtx->forkFlowCondition(*Other.FlowConditionToken)) {
-}
+ : DACtx(&DACtx),
+ FlowConditionToken(DACtx.arena().makeFlowConditionToken()) {}
-Environment &Environment::operator=(const Environment &Other) {
- Environment Copy(Other);
- *this = std::move(Copy);
- return *this;
+Environment Environment::fork() const {
+ Environment Copy(*this);
+ Copy.FlowConditionToken = DACtx->forkFlowCondition(FlowConditionToken);
+ return Copy;
}
Environment::Environment(DataflowAnalysisContext &DACtx,
@@ -239,37 +308,12 @@ Environment::Environment(DataflowAnalysisContext &DACtx,
if (const auto *FuncDecl = dyn_cast<FunctionDecl>(&DeclCtx)) {
assert(FuncDecl->getBody() != nullptr);
- llvm::DenseSet<const FieldDecl *> Fields;
- llvm::DenseSet<const VarDecl *> Vars;
-
- // Look for global variable references in the constructor-initializers.
- if (const auto *CtorDecl = dyn_cast<CXXConstructorDecl>(&DeclCtx)) {
- for (const auto *Init : CtorDecl->inits()) {
- if (const auto *M = Init->getAnyMember())
- Fields.insert(M);
- const Expr *E = Init->getInit();
- assert(E != nullptr);
- getFieldsAndGlobalVars(*E, Fields, Vars);
- }
- }
- getFieldsAndGlobalVars(*FuncDecl->getBody(), Fields, Vars);
-
- // These have to be added before the lines that follow to ensure that
- // `create*` work correctly for structs.
- DACtx.addModeledFields(Fields);
-
- initVars(Vars);
+ initFieldsGlobalsAndFuncs(FuncDecl);
for (const auto *ParamDecl : FuncDecl->parameters()) {
assert(ParamDecl != nullptr);
- auto &ParamLoc = createStorageLocation(*ParamDecl);
- setStorageLocation(*ParamDecl, ParamLoc);
- if (Value *ParamVal = createValue(ParamDecl->getType()))
- setValue(ParamLoc, *ParamVal);
+ setStorageLocation(*ParamDecl, createObject(*ParamDecl, nullptr));
}
-
- QualType ReturnType = FuncDecl->getReturnType();
- ReturnLoc = &createStorageLocation(ReturnType);
}
if (const auto *MethodDecl = dyn_cast<CXXMethodDecl>(&DeclCtx)) {
@@ -281,9 +325,8 @@ Environment::Environment(DataflowAnalysisContext &DACtx,
// FIXME: Initialize the ThisPointeeLoc of lambdas too.
if (MethodDecl && !MethodDecl->isStatic()) {
QualType ThisPointeeType = MethodDecl->getThisObjectType();
- ThisPointeeLoc = &createStorageLocation(ThisPointeeType);
- if (Value *ThisPointeeVal = createValue(ThisPointeeType))
- setValue(*ThisPointeeLoc, *ThisPointeeVal);
+ ThisPointeeLoc =
+ &cast<StructValue>(createValue(ThisPointeeType))->getAggregateLoc();
}
}
}
@@ -296,13 +339,11 @@ bool Environment::canDescend(unsigned MaxDepth,
Environment Environment::pushCall(const CallExpr *Call) const {
Environment Env(*this);
- // FIXME: Support references here.
- Env.ReturnLoc = getStorageLocation(*Call, SkipPast::Reference);
-
if (const auto *MethodCall = dyn_cast<CXXMemberCallExpr>(Call)) {
if (const Expr *Arg = MethodCall->getImplicitObjectArgument()) {
if (!isa<CXXThisExpr>(Arg))
- Env.ThisPointeeLoc = getStorageLocation(*Arg, SkipPast::Reference);
+ Env.ThisPointeeLoc = cast<AggregateStorageLocation>(
+ getStorageLocation(*Arg, SkipPast::Reference));
// Otherwise (when the argument is `this`), retain the current
// environment's `ThisPointeeLoc`.
}
@@ -317,10 +358,7 @@ Environment Environment::pushCall(const CallExpr *Call) const {
Environment Environment::pushCall(const CXXConstructExpr *Call) const {
Environment Env(*this);
- // FIXME: Support references here.
- Env.ReturnLoc = getStorageLocation(*Call, SkipPast::Reference);
-
- Env.ThisPointeeLoc = Env.ReturnLoc;
+ Env.ThisPointeeLoc = &Env.getResultObjectLocation(*Call);
Env.pushCallInternal(Call->getConstructor(),
llvm::ArrayRef(Call->getArgs(), Call->getNumArgs()));
@@ -330,28 +368,15 @@ Environment Environment::pushCall(const CXXConstructExpr *Call) const {
void Environment::pushCallInternal(const FunctionDecl *FuncDecl,
ArrayRef<const Expr *> Args) {
- CallStack.push_back(FuncDecl);
+ // Canonicalize to the definition of the function. This ensures that we're
+ // putting arguments into the same `ParamVarDecl`s` that the callee will later
+ // be retrieving them from.
+ assert(FuncDecl->getDefinition() != nullptr);
+ FuncDecl = FuncDecl->getDefinition();
- // FIXME: Share this code with the constructor, rather than duplicating it.
- llvm::DenseSet<const FieldDecl *> Fields;
- llvm::DenseSet<const VarDecl *> Vars;
- // Look for global variable references in the constructor-initializers.
- if (const auto *CtorDecl = dyn_cast<CXXConstructorDecl>(FuncDecl)) {
- for (const auto *Init : CtorDecl->inits()) {
- if (const auto *M = Init->getAnyMember())
- Fields.insert(M);
- const Expr *E = Init->getInit();
- assert(E != nullptr);
- getFieldsAndGlobalVars(*E, Fields, Vars);
- }
- }
- getFieldsAndGlobalVars(*FuncDecl->getBody(), Fields, Vars);
-
- // These have to be added before the lines that follow to ensure that
- // `create*` work correctly for structs.
- DACtx->addModeledFields(Fields);
+ CallStack.push_back(FuncDecl);
- initVars(Vars);
+ initFieldsGlobalsAndFuncs(FuncDecl);
const auto *ParamIt = FuncDecl->param_begin();
@@ -359,45 +384,49 @@ void Environment::pushCallInternal(const FunctionDecl *FuncDecl,
// overloaded operators implemented as member functions, and parameter packs.
for (unsigned ArgIndex = 0; ArgIndex < Args.size(); ++ParamIt, ++ArgIndex) {
assert(ParamIt != FuncDecl->param_end());
-
- const Expr *Arg = Args[ArgIndex];
- auto *ArgLoc = getStorageLocation(*Arg, SkipPast::Reference);
- if (ArgLoc == nullptr)
- continue;
-
const VarDecl *Param = *ParamIt;
- auto &Loc = createStorageLocation(*Param);
- setStorageLocation(*Param, Loc);
-
- QualType ParamType = Param->getType();
- if (ParamType->isReferenceType()) {
- auto &Val = takeOwnership(std::make_unique<ReferenceValue>(*ArgLoc));
- setValue(Loc, Val);
- } else if (auto *ArgVal = getValue(*ArgLoc)) {
- setValue(Loc, *ArgVal);
- } else if (Value *Val = createValue(ParamType)) {
- setValue(Loc, *Val);
- }
+ setStorageLocation(*Param, createObject(*Param, Args[ArgIndex]));
}
}
-void Environment::popCall(const Environment &CalleeEnv) {
+void Environment::popCall(const CallExpr *Call, const Environment &CalleeEnv) {
// We ignore `DACtx` because it's already the same in both. We don't want the
- // callee's `DeclCtx`, `ReturnLoc` or `ThisPointeeLoc`. We don't bring back
- // `DeclToLoc` and `ExprToLoc` because we want to be able to later analyze the
- // same callee in a different context, and `setStorageLocation` requires there
- // to not already be a storage location assigned. Conceptually, these maps
- // capture information from the local scope, so when popping that scope, we do
- // not propagate the maps.
+ // callee's `DeclCtx`, `ReturnVal`, `ReturnLoc` or `ThisPointeeLoc`. We don't
+ // bring back `DeclToLoc` and `ExprToLoc` because we want to be able to later
+ // analyze the same callee in a different context, and `setStorageLocation`
+ // requires there to not already be a storage location assigned. Conceptually,
+ // these maps capture information from the local scope, so when popping that
+ // scope, we do not propagate the maps.
this->LocToVal = std::move(CalleeEnv.LocToVal);
- this->MemberLocToStruct = std::move(CalleeEnv.MemberLocToStruct);
this->FlowConditionToken = std::move(CalleeEnv.FlowConditionToken);
+
+ if (Call->isGLValue()) {
+ if (CalleeEnv.ReturnLoc != nullptr)
+ setStorageLocationStrict(*Call, *CalleeEnv.ReturnLoc);
+ } else if (!Call->getType()->isVoidType()) {
+ if (CalleeEnv.ReturnVal != nullptr)
+ setValueStrict(*Call, *CalleeEnv.ReturnVal);
+ }
+}
+
+void Environment::popCall(const CXXConstructExpr *Call,
+ const Environment &CalleeEnv) {
+ // See also comment in `popCall(const CallExpr *, const Environment &)` above.
+ this->LocToVal = std::move(CalleeEnv.LocToVal);
+ this->FlowConditionToken = std::move(CalleeEnv.FlowConditionToken);
+
+ if (Value *Val = CalleeEnv.getValue(*CalleeEnv.ThisPointeeLoc)) {
+ setValueStrict(*Call, *Val);
+ }
}
bool Environment::equivalentTo(const Environment &Other,
Environment::ValueModel &Model) const {
assert(DACtx == Other.DACtx);
+ if (ReturnVal != Other.ReturnVal)
+ return false;
+
if (ReturnLoc != Other.ReturnLoc)
return false;
@@ -435,6 +464,7 @@ bool Environment::equivalentTo(const Environment &Other,
LatticeJoinEffect Environment::widen(const Environment &PrevEnv,
Environment::ValueModel &Model) {
assert(DACtx == PrevEnv.DACtx);
+ assert(ReturnVal == PrevEnv.ReturnVal);
assert(ReturnLoc == PrevEnv.ReturnLoc);
assert(ThisPointeeLoc == PrevEnv.ThisPointeeLoc);
assert(CallStack == PrevEnv.CallStack);
@@ -447,17 +477,10 @@ LatticeJoinEffect Environment::widen(const Environment &PrevEnv,
// block. For `DeclToLoc` and `ExprToLoc`, join guarantees that these maps are
// subsets of the maps in `PrevEnv`. So, as long as we maintain this property
// here, we don't need change their current values to widen.
- //
- // FIXME: `MemberLocToStruct` does not share the above property, because
- // `join` can cause the map size to increase (when we add fresh data in places
- // of conflict). Once this issue with join is resolved, re-enable the
- // assertion below or replace with something that captures the desired
- // invariant.
assert(DeclToLoc.size() <= PrevEnv.DeclToLoc.size());
assert(ExprToLoc.size() <= PrevEnv.ExprToLoc.size());
- // assert(MemberLocToStruct.size() <= PrevEnv.MemberLocToStruct.size());
- llvm::DenseMap<const StorageLocation *, Value *> WidenedLocToVal;
+ llvm::MapVector<const StorageLocation *, Value *> WidenedLocToVal;
for (auto &Entry : LocToVal) {
const StorageLocation *Loc = Entry.first;
assert(Loc != nullptr);
@@ -482,60 +505,75 @@ LatticeJoinEffect Environment::widen(const Environment &PrevEnv,
Effect = LatticeJoinEffect::Changed;
}
LocToVal = std::move(WidenedLocToVal);
- // FIXME: update the equivalence calculation for `MemberLocToStruct`, once we
- // have a systematic way of soundly comparing this map.
if (DeclToLoc.size() != PrevEnv.DeclToLoc.size() ||
ExprToLoc.size() != PrevEnv.ExprToLoc.size() ||
- LocToVal.size() != PrevEnv.LocToVal.size() ||
- MemberLocToStruct.size() != PrevEnv.MemberLocToStruct.size())
+ LocToVal.size() != PrevEnv.LocToVal.size())
Effect = LatticeJoinEffect::Changed;
return Effect;
}
-LatticeJoinEffect Environment::join(const Environment &Other,
- Environment::ValueModel &Model) {
- assert(DACtx == Other.DACtx);
- assert(ReturnLoc == Other.ReturnLoc);
- assert(ThisPointeeLoc == Other.ThisPointeeLoc);
- assert(CallStack == Other.CallStack);
-
- auto Effect = LatticeJoinEffect::Unchanged;
-
- Environment JoinedEnv(*DACtx);
+Environment Environment::join(const Environment &EnvA, const Environment &EnvB,
+ Environment::ValueModel &Model) {
+ assert(EnvA.DACtx == EnvB.DACtx);
+ assert(EnvA.ThisPointeeLoc == EnvB.ThisPointeeLoc);
+ assert(EnvA.CallStack == EnvB.CallStack);
+
+ Environment JoinedEnv(*EnvA.DACtx);
+
+ JoinedEnv.CallStack = EnvA.CallStack;
+ JoinedEnv.ThisPointeeLoc = EnvA.ThisPointeeLoc;
+
+ if (EnvA.ReturnVal == nullptr || EnvB.ReturnVal == nullptr) {
+ // `ReturnVal` might not always get set -- for example if we have a return
+ // statement of the form `return some_other_func()` and we decide not to
+ // analyze `some_other_func()`.
+ // In this case, we can't say anything about the joined return value -- we
+ // don't simply want to propagate the return value that we do have, because
+ // it might not be the correct one.
+ // This occurs for example in the test `ContextSensitiveMutualRecursion`.
+ JoinedEnv.ReturnVal = nullptr;
+ } else if (areEquivalentValues(*EnvA.ReturnVal, *EnvB.ReturnVal)) {
+ JoinedEnv.ReturnVal = EnvA.ReturnVal;
+ } else {
+ assert(!EnvA.CallStack.empty());
+ // FIXME: Make `CallStack` a vector of `FunctionDecl` so we don't need this
+ // cast.
+ auto *Func = dyn_cast<FunctionDecl>(EnvA.CallStack.back());
+ assert(Func != nullptr);
+ if (Value *MergedVal =
+ mergeDistinctValues(Func->getReturnType(), *EnvA.ReturnVal, EnvA,
+ *EnvB.ReturnVal, EnvB, JoinedEnv, Model))
+ JoinedEnv.ReturnVal = MergedVal;
+ }
- JoinedEnv.CallStack = CallStack;
- JoinedEnv.ReturnLoc = ReturnLoc;
- JoinedEnv.ThisPointeeLoc = ThisPointeeLoc;
+ if (EnvA.ReturnLoc == EnvB.ReturnLoc)
+ JoinedEnv.ReturnLoc = EnvA.ReturnLoc;
+ else
+ JoinedEnv.ReturnLoc = nullptr;
- JoinedEnv.DeclToLoc = intersectDenseMaps(DeclToLoc, Other.DeclToLoc);
- if (DeclToLoc.size() != JoinedEnv.DeclToLoc.size())
- Effect = LatticeJoinEffect::Changed;
+ // FIXME: Once we're able to remove declarations from `DeclToLoc` when their
+ // lifetime ends, add an assertion that there aren't any entries in
+ // `DeclToLoc` and `Other.DeclToLoc` that map the same declaration to
+ // different storage locations.
+ JoinedEnv.DeclToLoc = intersectDenseMaps(EnvA.DeclToLoc, EnvB.DeclToLoc);
- JoinedEnv.ExprToLoc = intersectDenseMaps(ExprToLoc, Other.ExprToLoc);
- if (ExprToLoc.size() != JoinedEnv.ExprToLoc.size())
- Effect = LatticeJoinEffect::Changed;
+ JoinedEnv.ExprToLoc = intersectDenseMaps(EnvA.ExprToLoc, EnvB.ExprToLoc);
- JoinedEnv.MemberLocToStruct =
- intersectDenseMaps(MemberLocToStruct, Other.MemberLocToStruct);
- if (MemberLocToStruct.size() != JoinedEnv.MemberLocToStruct.size())
- Effect = LatticeJoinEffect::Changed;
-
- // FIXME: set `Effect` as needed.
// FIXME: update join to detect backedges and simplify the flow condition
// accordingly.
- JoinedEnv.FlowConditionToken = &DACtx->joinFlowConditions(
- *FlowConditionToken, *Other.FlowConditionToken);
+ JoinedEnv.FlowConditionToken = EnvA.DACtx->joinFlowConditions(
+ EnvA.FlowConditionToken, EnvB.FlowConditionToken);
- for (auto &Entry : LocToVal) {
+ for (auto &Entry : EnvA.LocToVal) {
const StorageLocation *Loc = Entry.first;
assert(Loc != nullptr);
Value *Val = Entry.second;
assert(Val != nullptr);
- auto It = Other.LocToVal.find(Loc);
- if (It == Other.LocToVal.end())
+ auto It = EnvB.LocToVal.find(Loc);
+ if (It == EnvB.LocToVal.end())
continue;
assert(It->second != nullptr);
@@ -544,19 +582,13 @@ LatticeJoinEffect Environment::join(const Environment &Other,
continue;
}
- if (Value *MergedVal =
- mergeDistinctValues(Loc->getType(), *Val, *this, *It->second, Other,
- JoinedEnv, Model)) {
+ if (Value *MergedVal = mergeDistinctValues(
+ Loc->getType(), *Val, EnvA, *It->second, EnvB, JoinedEnv, Model)) {
JoinedEnv.LocToVal.insert({Loc, MergedVal});
- Effect = LatticeJoinEffect::Changed;
}
}
- if (LocToVal.size() != JoinedEnv.LocToVal.size())
- Effect = LatticeJoinEffect::Changed;
- *this = std::move(JoinedEnv);
-
- return Effect;
+ return JoinedEnv;
}
StorageLocation &Environment::createStorageLocation(QualType Type) {
@@ -578,22 +610,39 @@ StorageLocation &Environment::createStorageLocation(const Expr &E) {
}
void Environment::setStorageLocation(const ValueDecl &D, StorageLocation &Loc) {
- assert(DeclToLoc.find(&D) == DeclToLoc.end());
+ assert(!DeclToLoc.contains(&D));
+ assert(!isa_and_nonnull<ReferenceValue>(getValue(Loc)));
DeclToLoc[&D] = &Loc;
}
-StorageLocation *Environment::getStorageLocation(const ValueDecl &D,
- SkipPast SP) const {
+StorageLocation *Environment::getStorageLocation(const ValueDecl &D) const {
auto It = DeclToLoc.find(&D);
- return It == DeclToLoc.end() ? nullptr : &skip(*It->second, SP);
+ if (It == DeclToLoc.end())
+ return nullptr;
+
+ StorageLocation *Loc = It->second;
+
+ assert(!isa_and_nonnull<ReferenceValue>(getValue(*Loc)));
+
+ return Loc;
}
void Environment::setStorageLocation(const Expr &E, StorageLocation &Loc) {
const Expr &CanonE = ignoreCFGOmittedNodes(E);
- assert(ExprToLoc.find(&CanonE) == ExprToLoc.end());
+ assert(!ExprToLoc.contains(&CanonE));
ExprToLoc[&CanonE] = &Loc;
}
+void Environment::setStorageLocationStrict(const Expr &E,
+ StorageLocation &Loc) {
+ // `DeclRefExpr`s to builtin function types aren't glvalues, for some reason,
+ // but we still want to be able to associate a `StorageLocation` with them,
+ // so allow these as an exception.
+ assert(E.isGLValue() ||
+ E.getType()->isSpecificBuiltinType(BuiltinType::BuiltinFn));
+ setStorageLocation(E, Loc);
+}
+
StorageLocation *Environment::getStorageLocation(const Expr &E,
SkipPast SP) const {
// FIXME: Add a test with parens.
@@ -601,12 +650,37 @@ StorageLocation *Environment::getStorageLocation(const Expr &E,
return It == ExprToLoc.end() ? nullptr : &skip(*It->second, SP);
}
-StorageLocation *Environment::getThisPointeeStorageLocation() const {
+StorageLocation *Environment::getStorageLocationStrict(const Expr &E) const {
+ // See comment in `setStorageLocationStrict()`.
+ assert(E.isGLValue() ||
+ E.getType()->isSpecificBuiltinType(BuiltinType::BuiltinFn));
+ StorageLocation *Loc = getStorageLocation(E, SkipPast::None);
+
+ if (Loc == nullptr)
+ return nullptr;
+
+ if (auto *RefVal = dyn_cast_or_null<ReferenceValue>(getValue(*Loc)))
+ return &RefVal->getReferentLoc();
+
+ return Loc;
+}
+
+AggregateStorageLocation *Environment::getThisPointeeStorageLocation() const {
return ThisPointeeLoc;
}
-StorageLocation *Environment::getReturnStorageLocation() const {
- return ReturnLoc;
+AggregateStorageLocation &
+Environment::getResultObjectLocation(const Expr &RecordPRValue) {
+ assert(RecordPRValue.getType()->isRecordType());
+ assert(RecordPRValue.isPRValue());
+
+ if (StorageLocation *ExistingLoc =
+ getStorageLocation(RecordPRValue, SkipPast::None))
+ return *cast<AggregateStorageLocation>(ExistingLoc);
+ auto &Loc = cast<AggregateStorageLocation>(
+ DACtx->getStableStorageLocation(RecordPRValue));
+ setStorageLocation(RecordPRValue, Loc);
+ return Loc;
}
PointerValue &Environment::getOrCreateNullPointerValue(QualType PointeeType) {
@@ -614,45 +688,41 @@ PointerValue &Environment::getOrCreateNullPointerValue(QualType PointeeType) {
}
void Environment::setValue(const StorageLocation &Loc, Value &Val) {
- LocToVal[&Loc] = &Val;
+ assert(!isa<StructValue>(&Val) ||
+ &cast<StructValue>(&Val)->getAggregateLoc() == &Loc);
- if (auto *StructVal = dyn_cast<StructValue>(&Val)) {
- auto &AggregateLoc = *cast<AggregateStorageLocation>(&Loc);
+ LocToVal[&Loc] = &Val;
+}
- const QualType Type = AggregateLoc.getType();
- assert(Type->isStructureOrClassType() || Type->isUnionType());
+void Environment::setValueStrict(const Expr &E, Value &Val) {
+ assert(E.isPRValue());
+ assert(!isa<ReferenceValue>(Val));
- for (const FieldDecl *Field : DACtx->getReferencedFields(Type)) {
- assert(Field != nullptr);
- StorageLocation &FieldLoc = AggregateLoc.getChild(*Field);
- MemberLocToStruct[&FieldLoc] = std::make_pair(StructVal, Field);
- if (auto *FieldVal = StructVal->getChild(*Field))
- setValue(FieldLoc, *FieldVal);
- }
+ if (auto *StructVal = dyn_cast<StructValue>(&Val)) {
+ if (auto *ExistingVal = cast_or_null<StructValue>(getValueStrict(E)))
+ assert(&ExistingVal->getAggregateLoc() == &StructVal->getAggregateLoc());
+ if (StorageLocation *ExistingLoc = getStorageLocation(E, SkipPast::None))
+ assert(ExistingLoc == &StructVal->getAggregateLoc());
+ else
+ setStorageLocation(E, StructVal->getAggregateLoc());
+ setValue(StructVal->getAggregateLoc(), Val);
+ return;
}
- auto It = MemberLocToStruct.find(&Loc);
- if (It != MemberLocToStruct.end()) {
- // `Loc` is the location of a struct member so we need to also update the
- // value of the member in the corresponding `StructValue`.
-
- assert(It->second.first != nullptr);
- StructValue &StructVal = *It->second.first;
-
- assert(It->second.second != nullptr);
- const ValueDecl &Member = *It->second.second;
-
- StructVal.setChild(Member, Val);
+ StorageLocation *Loc = getStorageLocation(E, SkipPast::None);
+ if (Loc == nullptr) {
+ Loc = &createStorageLocation(E);
+ setStorageLocation(E, *Loc);
}
+ setValue(*Loc, Val);
}
Value *Environment::getValue(const StorageLocation &Loc) const {
- auto It = LocToVal.find(&Loc);
- return It == LocToVal.end() ? nullptr : It->second;
+ return LocToVal.lookup(&Loc);
}
-Value *Environment::getValue(const ValueDecl &D, SkipPast SP) const {
- auto *Loc = getStorageLocation(D, SP);
+Value *Environment::getValue(const ValueDecl &D) const {
+ auto *Loc = getStorageLocation(D);
if (Loc == nullptr)
return nullptr;
return getValue(*Loc);
@@ -665,6 +735,15 @@ Value *Environment::getValue(const Expr &E, SkipPast SP) const {
return getValue(*Loc);
}
+Value *Environment::getValueStrict(const Expr &E) const {
+ assert(E.isPRValue());
+ Value *Val = getValue(E, SkipPast::None);
+
+ assert(Val == nullptr || !isa<ReferenceValue>(Val));
+
+ return Val;
+}
+
Value *Environment::createValue(QualType Type) {
llvm::DenseSet<QualType> Visited;
int CreatedValuesCount = 0;
@@ -697,65 +776,120 @@ Value *Environment::createValueUnlessSelfReferential(
// with integers, and so distinguishing them serves no purpose, but could
// prevent convergence.
CreatedValuesCount++;
- return &takeOwnership(std::make_unique<IntegerValue>());
+ return &arena().create<IntegerValue>();
}
- if (Type->isReferenceType()) {
+ if (Type->isReferenceType() || Type->isPointerType()) {
CreatedValuesCount++;
- QualType PointeeType = Type->castAs<ReferenceType>()->getPointeeType();
- auto &PointeeLoc = createStorageLocation(PointeeType);
+ QualType PointeeType = Type->getPointeeType();
+ StorageLocation &PointeeLoc =
+ createLocAndMaybeValue(PointeeType, Visited, Depth, CreatedValuesCount);
- if (Visited.insert(PointeeType.getCanonicalType()).second) {
- Value *PointeeVal = createValueUnlessSelfReferential(
- PointeeType, Visited, Depth, CreatedValuesCount);
- Visited.erase(PointeeType.getCanonicalType());
-
- if (PointeeVal != nullptr)
- setValue(PointeeLoc, *PointeeVal);
- }
-
- return &takeOwnership(std::make_unique<ReferenceValue>(PointeeLoc));
+ if (Type->isReferenceType())
+ return &arena().create<ReferenceValue>(PointeeLoc);
+ else
+ return &arena().create<PointerValue>(PointeeLoc);
}
- if (Type->isPointerType()) {
+ if (Type->isRecordType()) {
CreatedValuesCount++;
- QualType PointeeType = Type->castAs<PointerType>()->getPointeeType();
- auto &PointeeLoc = createStorageLocation(PointeeType);
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
+ for (const FieldDecl *Field : DACtx->getModeledFields(Type)) {
+ assert(Field != nullptr);
- if (Visited.insert(PointeeType.getCanonicalType()).second) {
- Value *PointeeVal = createValueUnlessSelfReferential(
- PointeeType, Visited, Depth, CreatedValuesCount);
- Visited.erase(PointeeType.getCanonicalType());
+ QualType FieldType = Field->getType();
- if (PointeeVal != nullptr)
- setValue(PointeeLoc, *PointeeVal);
+ FieldLocs.insert(
+ {Field, &createLocAndMaybeValue(FieldType, Visited, Depth + 1,
+ CreatedValuesCount)});
}
- return &takeOwnership(std::make_unique<PointerValue>(PointeeLoc));
+ AggregateStorageLocation &Loc =
+ arena().create<AggregateStorageLocation>(Type, std::move(FieldLocs));
+ StructValue &StructVal = create<StructValue>(Loc);
+
+ // As we already have a storage location for the `StructValue`, we can and
+ // should associate them in the environment.
+ setValue(Loc, StructVal);
+
+ return &StructVal;
}
- if (Type->isStructureOrClassType() || Type->isUnionType()) {
- CreatedValuesCount++;
- llvm::DenseMap<const ValueDecl *, Value *> FieldValues;
- for (const FieldDecl *Field : DACtx->getReferencedFields(Type)) {
- assert(Field != nullptr);
+ return nullptr;
+}
- QualType FieldType = Field->getType();
- if (Visited.contains(FieldType.getCanonicalType()))
- continue;
-
- Visited.insert(FieldType.getCanonicalType());
- if (auto *FieldValue = createValueUnlessSelfReferential(
- FieldType, Visited, Depth + 1, CreatedValuesCount))
- FieldValues.insert({Field, FieldValue});
- Visited.erase(FieldType.getCanonicalType());
+StorageLocation &
+Environment::createLocAndMaybeValue(QualType Ty,
+ llvm::DenseSet<QualType> &Visited,
+ int Depth, int &CreatedValuesCount) {
+ if (!Visited.insert(Ty.getCanonicalType()).second)
+ return createStorageLocation(Ty.getNonReferenceType());
+ Value *Val = createValueUnlessSelfReferential(
+ Ty.getNonReferenceType(), Visited, Depth, CreatedValuesCount);
+ Visited.erase(Ty.getCanonicalType());
+
+ Ty = Ty.getNonReferenceType();
+
+ if (Val == nullptr)
+ return createStorageLocation(Ty);
+
+ if (Ty->isRecordType())
+ return cast<StructValue>(Val)->getAggregateLoc();
+
+ StorageLocation &Loc = createStorageLocation(Ty);
+ setValue(Loc, *Val);
+ return Loc;
+}
+
+StorageLocation &Environment::createObjectInternal(const VarDecl *D,
+ QualType Ty,
+ const Expr *InitExpr) {
+ if (Ty->isReferenceType()) {
+ // Although variables of reference type always need to be initialized, it
+ // can happen that we can't see the initializer, so `InitExpr` may still
+ // be null.
+ if (InitExpr) {
+ if (auto *InitExprLoc =
+ getStorageLocation(*InitExpr, SkipPast::Reference))
+ return *InitExprLoc;
}
- return &takeOwnership(
- std::make_unique<StructValue>(std::move(FieldValues)));
+ // Even though we have an initializer, we might not get an
+ // InitExprLoc, for example if the InitExpr is a CallExpr for which we
+ // don't have a function body. In this case, we just invent a storage
+ // location and value -- it's the best we can do.
+ return createObjectInternal(D, Ty.getNonReferenceType(), nullptr);
}
- return nullptr;
+ Value *Val = nullptr;
+ if (InitExpr)
+ // In the (few) cases where an expression is intentionally
+ // "uninterpreted", `InitExpr` is not associated with a value. There are
+ // two ways to handle this situation: propagate the status, so that
+ // uninterpreted initializers result in uninterpreted variables, or
+ // provide a default value. We choose the latter so that later refinements
+ // of the variable can be used for reasoning about the surrounding code.
+ // For this reason, we let this case be handled by the `createValue()`
+ // call below.
+ //
+ // FIXME. If and when we interpret all language cases, change this to
+ // assert that `InitExpr` is interpreted, rather than supplying a
+ // default value (assuming we don't update the environment API to return
+ // references).
+ Val = getValueStrict(*InitExpr);
+ if (!Val)
+ Val = createValue(Ty);
+
+ if (Ty->isRecordType())
+ return cast<StructValue>(Val)->getAggregateLoc();
+
+ StorageLocation &Loc =
+ D ? createStorageLocation(*D) : createStorageLocation(Ty);
+
+ if (Val)
+ setValue(Loc, *Val);
+
+ return Loc;
}
StorageLocation &Environment::skip(StorageLocation &Loc, SkipPast SP) const {
@@ -768,11 +902,6 @@ StorageLocation &Environment::skip(StorageLocation &Loc, SkipPast SP) const {
if (auto *Val = dyn_cast_or_null<ReferenceValue>(getValue(Loc)))
return Val->getReferentLoc();
return Loc;
- case SkipPast::ReferenceThenPointer:
- StorageLocation &LocPastRef = skip(Loc, SkipPast::Reference);
- if (auto *Val = dyn_cast_or_null<PointerValue>(getValue(LocPastRef)))
- return Val->getPointeeLoc();
- return LocPastRef;
}
llvm_unreachable("bad SkipPast kind");
}
@@ -782,12 +911,12 @@ const StorageLocation &Environment::skip(const StorageLocation &Loc,
return skip(*const_cast<StorageLocation *>(&Loc), SP);
}
-void Environment::addToFlowCondition(BoolValue &Val) {
- DACtx->addFlowConditionConstraint(*FlowConditionToken, Val);
+void Environment::addToFlowCondition(const Formula &Val) {
+ DACtx->addFlowConditionConstraint(FlowConditionToken, Val);
}
-bool Environment::flowConditionImplies(BoolValue &Val) const {
- return DACtx->flowConditionImplies(*FlowConditionToken, Val);
+bool Environment::flowConditionImplies(const Formula &Val) const {
+ return DACtx->flowConditionImplies(FlowConditionToken, Val);
}
void Environment::dump(raw_ostream &OS) const {
@@ -795,7 +924,7 @@ void Environment::dump(raw_ostream &OS) const {
// fields are printed.
OS << "DeclToLoc:\n";
for (auto [D, L] : DeclToLoc)
- OS << " [" << D->getName() << ", " << L << "]\n";
+ OS << " [" << D->getNameAsString() << ", " << L << "]\n";
OS << "ExprToLoc:\n";
for (auto [E, L] : ExprToLoc)
@@ -807,12 +936,93 @@ void Environment::dump(raw_ostream &OS) const {
}
OS << "FlowConditionToken:\n";
- DACtx->dumpFlowCondition(*FlowConditionToken);
+ DACtx->dumpFlowCondition(FlowConditionToken, OS);
}
void Environment::dump() const {
dump(llvm::dbgs());
}
+AggregateStorageLocation *
+getImplicitObjectLocation(const CXXMemberCallExpr &MCE,
+ const Environment &Env) {
+ Expr *ImplicitObject = MCE.getImplicitObjectArgument();
+ if (ImplicitObject == nullptr)
+ return nullptr;
+ StorageLocation *Loc =
+ Env.getStorageLocation(*ImplicitObject, SkipPast::Reference);
+ if (Loc == nullptr)
+ return nullptr;
+ if (ImplicitObject->getType()->isPointerType()) {
+ if (auto *Val = cast_or_null<PointerValue>(Env.getValue(*Loc)))
+ return &cast<AggregateStorageLocation>(Val->getPointeeLoc());
+ return nullptr;
+ }
+ return cast<AggregateStorageLocation>(Loc);
+}
+
+AggregateStorageLocation *getBaseObjectLocation(const MemberExpr &ME,
+ const Environment &Env) {
+ Expr *Base = ME.getBase();
+ if (Base == nullptr)
+ return nullptr;
+ StorageLocation *Loc = Env.getStorageLocation(*Base, SkipPast::Reference);
+ if (Loc == nullptr)
+ return nullptr;
+ if (ME.isArrow()) {
+ if (auto *Val = cast_or_null<PointerValue>(Env.getValue(*Loc)))
+ return &cast<AggregateStorageLocation>(Val->getPointeeLoc());
+ return nullptr;
+ }
+ return cast<AggregateStorageLocation>(Loc);
+}
+
+std::vector<FieldDecl *> getFieldsForInitListExpr(const RecordDecl *RD) {
+ // Unnamed bitfields are only used for padding and do not appear in
+ // `InitListExpr`'s inits. However, those fields do appear in `RecordDecl`'s
+ // field list, and we thus need to remove them before mapping inits to
+ // fields to avoid mapping inits to the wrongs fields.
+ std::vector<FieldDecl *> Fields;
+ llvm::copy_if(
+ RD->fields(), std::back_inserter(Fields),
+ [](const FieldDecl *Field) { return !Field->isUnnamedBitfield(); });
+ return Fields;
+}
+
+StructValue &refreshStructValue(AggregateStorageLocation &Loc,
+ Environment &Env) {
+ auto &NewVal = Env.create<StructValue>(Loc);
+ Env.setValue(Loc, NewVal);
+ return NewVal;
+}
+
+StructValue &refreshStructValue(const Expr &Expr, Environment &Env) {
+ assert(Expr.getType()->isRecordType());
+
+ if (Expr.isPRValue()) {
+ if (auto *ExistingVal =
+ cast_or_null<StructValue>(Env.getValueStrict(Expr))) {
+ auto &NewVal = Env.create<StructValue>(ExistingVal->getAggregateLoc());
+ Env.setValueStrict(Expr, NewVal);
+ return NewVal;
+ }
+
+ auto &NewVal = *cast<StructValue>(Env.createValue(Expr.getType()));
+ Env.setValueStrict(Expr, NewVal);
+ return NewVal;
+ }
+
+ if (auto *Loc = cast_or_null<AggregateStorageLocation>(
+ Env.getStorageLocationStrict(Expr))) {
+ auto &NewVal = Env.create<StructValue>(*Loc);
+ Env.setValue(*Loc, NewVal);
+ return NewVal;
+ }
+
+ auto &NewVal = *cast<StructValue>(Env.createValue(Expr.getType()));
+ Env.setStorageLocationStrict(Expr, NewVal.getAggregateLoc());
+ return NewVal;
+}
+
} // namespace dataflow
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp
index d4886f154b33..f8a049adea5e 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp
@@ -16,22 +16,12 @@
#include "clang/Analysis/FlowSensitive/DebugSupport.h"
#include "clang/Analysis/FlowSensitive/Solver.h"
#include "clang/Analysis/FlowSensitive/Value.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/StringSet.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FormatAdapters.h"
-#include "llvm/Support/FormatCommon.h"
-#include "llvm/Support/FormatVariadic.h"
namespace clang {
namespace dataflow {
-using llvm::AlignStyle;
-using llvm::fmt_pad;
-using llvm::formatv;
-
llvm::StringRef debugString(Value::Kind Kind) {
switch (Kind) {
case Value::Kind::Integer:
@@ -46,26 +36,19 @@ llvm::StringRef debugString(Value::Kind Kind) {
return "AtomicBool";
case Value::Kind::TopBool:
return "TopBool";
- case Value::Kind::Conjunction:
- return "Conjunction";
- case Value::Kind::Disjunction:
- return "Disjunction";
- case Value::Kind::Negation:
- return "Negation";
- case Value::Kind::Implication:
- return "Implication";
- case Value::Kind::Biconditional:
- return "Biconditional";
+ case Value::Kind::FormulaBool:
+ return "FormulaBool";
}
llvm_unreachable("Unhandled value kind");
}
-llvm::StringRef debugString(Solver::Result::Assignment Assignment) {
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ Solver::Result::Assignment Assignment) {
switch (Assignment) {
case Solver::Result::Assignment::AssignedFalse:
- return "False";
+ return OS << "False";
case Solver::Result::Assignment::AssignedTrue:
- return "True";
+ return OS << "True";
}
llvm_unreachable("Booleans can only be assigned true/false");
}
@@ -82,177 +65,16 @@ llvm::StringRef debugString(Solver::Result::Status Status) {
llvm_unreachable("Unhandled SAT check result status");
}
-namespace {
-
-class DebugStringGenerator {
-public:
- explicit DebugStringGenerator(
- llvm::DenseMap<const AtomicBoolValue *, std::string> AtomNamesArg)
- : Counter(0), AtomNames(std::move(AtomNamesArg)) {
-#ifndef NDEBUG
- llvm::StringSet<> Names;
- for (auto &N : AtomNames) {
- assert(Names.insert(N.second).second &&
- "The same name must not assigned to different atoms");
- }
-#endif
- }
-
- /// Returns a string representation of a boolean value `B`.
- std::string debugString(const BoolValue &B, size_t Depth = 0) {
- std::string S;
- switch (B.getKind()) {
- case Value::Kind::AtomicBool: {
- S = getAtomName(&cast<AtomicBoolValue>(B));
- break;
- }
- case Value::Kind::Conjunction: {
- auto &C = cast<ConjunctionValue>(B);
- auto L = debugString(C.getLeftSubValue(), Depth + 1);
- auto R = debugString(C.getRightSubValue(), Depth + 1);
- S = formatv("(and\n{0}\n{1})", L, R);
- break;
- }
- case Value::Kind::Disjunction: {
- auto &D = cast<DisjunctionValue>(B);
- auto L = debugString(D.getLeftSubValue(), Depth + 1);
- auto R = debugString(D.getRightSubValue(), Depth + 1);
- S = formatv("(or\n{0}\n{1})", L, R);
- break;
- }
- case Value::Kind::Negation: {
- auto &N = cast<NegationValue>(B);
- S = formatv("(not\n{0})", debugString(N.getSubVal(), Depth + 1));
- break;
- }
- case Value::Kind::Implication: {
- auto &IV = cast<ImplicationValue>(B);
- auto L = debugString(IV.getLeftSubValue(), Depth + 1);
- auto R = debugString(IV.getRightSubValue(), Depth + 1);
- S = formatv("(=>\n{0}\n{1})", L, R);
- break;
- }
- case Value::Kind::Biconditional: {
- auto &BV = cast<BiconditionalValue>(B);
- auto L = debugString(BV.getLeftSubValue(), Depth + 1);
- auto R = debugString(BV.getRightSubValue(), Depth + 1);
- S = formatv("(=\n{0}\n{1})", L, R);
- break;
- }
- default:
- llvm_unreachable("Unhandled value kind");
- }
- auto Indent = Depth * 4;
- return formatv("{0}", fmt_pad(S, Indent, 0));
- }
-
- std::string debugString(const llvm::DenseSet<BoolValue *> &Constraints) {
- std::vector<std::string> ConstraintsStrings;
- ConstraintsStrings.reserve(Constraints.size());
- for (BoolValue *Constraint : Constraints) {
- ConstraintsStrings.push_back(debugString(*Constraint));
- }
- llvm::sort(ConstraintsStrings);
-
- std::string Result;
- for (const std::string &S : ConstraintsStrings) {
- Result += S;
- Result += '\n';
- }
- return Result;
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Solver::Result &R) {
+ OS << debugString(R.getStatus()) << "\n";
+ if (auto Solution = R.getSolution()) {
+ std::vector<std::pair<Atom, Solver::Result::Assignment>> Sorted = {
+ Solution->begin(), Solution->end()};
+ llvm::sort(Sorted);
+ for (const auto &Entry : Sorted)
+ OS << Entry.first << " = " << Entry.second << "\n";
}
-
- /// Returns a string representation of a set of boolean `Constraints` and the
- /// `Result` of satisfiability checking on the `Constraints`.
- std::string debugString(ArrayRef<BoolValue *> &Constraints,
- const Solver::Result &Result) {
- auto Template = R"(
-Constraints
-------------
-{0:$[
-
-]}
-------------
-{1}.
-{2}
-)";
-
- std::vector<std::string> ConstraintsStrings;
- ConstraintsStrings.reserve(Constraints.size());
- for (auto &Constraint : Constraints) {
- ConstraintsStrings.push_back(debugString(*Constraint));
- }
-
- auto StatusString = clang::dataflow::debugString(Result.getStatus());
- auto Solution = Result.getSolution();
- auto SolutionString = Solution ? "\n" + debugString(*Solution) : "";
-
- return formatv(
- Template,
- llvm::make_range(ConstraintsStrings.begin(), ConstraintsStrings.end()),
- StatusString, SolutionString);
- }
-
-private:
- /// Returns a string representation of a truth assignment to atom booleans.
- std::string debugString(
- const llvm::DenseMap<AtomicBoolValue *, Solver::Result::Assignment>
- &AtomAssignments) {
- size_t MaxNameLength = 0;
- for (auto &AtomName : AtomNames) {
- MaxNameLength = std::max(MaxNameLength, AtomName.second.size());
- }
-
- std::vector<std::string> Lines;
- for (auto &AtomAssignment : AtomAssignments) {
- auto Line = formatv("{0} = {1}",
- fmt_align(getAtomName(AtomAssignment.first),
- AlignStyle::Left, MaxNameLength),
- clang::dataflow::debugString(AtomAssignment.second));
- Lines.push_back(Line);
- }
- llvm::sort(Lines);
-
- return formatv("{0:$[\n]}", llvm::make_range(Lines.begin(), Lines.end()));
- }
-
- /// Returns the name assigned to `Atom`, either user-specified or created by
- /// default rules (B0, B1, ...).
- std::string getAtomName(const AtomicBoolValue *Atom) {
- auto Entry = AtomNames.try_emplace(Atom, formatv("B{0}", Counter));
- if (Entry.second) {
- Counter++;
- }
- return Entry.first->second;
- }
-
- // Keep track of number of atoms without a user-specified name, used to assign
- // non-repeating default names to such atoms.
- size_t Counter;
-
- // Keep track of names assigned to atoms.
- llvm::DenseMap<const AtomicBoolValue *, std::string> AtomNames;
-};
-
-} // namespace
-
-std::string
-debugString(const BoolValue &B,
- llvm::DenseMap<const AtomicBoolValue *, std::string> AtomNames) {
- return DebugStringGenerator(std::move(AtomNames)).debugString(B);
-}
-
-std::string
-debugString(const llvm::DenseSet<BoolValue *> &Constraints,
- llvm::DenseMap<const AtomicBoolValue *, std::string> AtomNames) {
- return DebugStringGenerator(std::move(AtomNames)).debugString(Constraints);
-}
-
-std::string
-debugString(ArrayRef<BoolValue *> Constraints, const Solver::Result &Result,
- llvm::DenseMap<const AtomicBoolValue *, std::string> AtomNames) {
- return DebugStringGenerator(std::move(AtomNames))
- .debugString(Constraints, Result);
+ return OS;
}
} // namespace dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Formula.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Formula.cpp
new file mode 100644
index 000000000000..504ad2fb7938
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Formula.cpp
@@ -0,0 +1,82 @@
+//===- Formula.cpp ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+
+namespace clang::dataflow {
+
+Formula &Formula::create(llvm::BumpPtrAllocator &Alloc, Kind K,
+ ArrayRef<const Formula *> Operands, unsigned Value) {
+ assert(Operands.size() == numOperands(K));
+ if (Value != 0) // Currently, formulas have values or operands, not both.
+ assert(numOperands(K) == 0);
+ void *Mem = Alloc.Allocate(sizeof(Formula) +
+ Operands.size() * sizeof(Operands.front()),
+ alignof(Formula));
+ Formula *Result = new (Mem) Formula();
+ Result->FormulaKind = K;
+ Result->Value = Value;
+ // Operands are stored as `const Formula *`s after the formula itself.
+ // We don't need to construct an object as pointers are trivial types.
+ // Formula is alignas(const Formula *), so alignment is satisfied.
+ llvm::copy(Operands, reinterpret_cast<const Formula **>(Result + 1));
+ return *Result;
+}
+
+static llvm::StringLiteral sigil(Formula::Kind K) {
+ switch (K) {
+ case Formula::AtomRef:
+ return "";
+ case Formula::Not:
+ return "!";
+ case Formula::And:
+ return " & ";
+ case Formula::Or:
+ return " | ";
+ case Formula::Implies:
+ return " => ";
+ case Formula::Equal:
+ return " = ";
+ }
+ llvm_unreachable("unhandled formula kind");
+}
+
+void Formula::print(llvm::raw_ostream &OS, const AtomNames *Names) const {
+ if (Names && kind() == AtomRef)
+ if (auto It = Names->find(getAtom()); It != Names->end()) {
+ OS << It->second;
+ return;
+ }
+
+ switch (numOperands(kind())) {
+ case 0:
+ OS << getAtom();
+ break;
+ case 1:
+ OS << sigil(kind());
+ operands()[0]->print(OS, Names);
+ break;
+ case 2:
+ OS << '(';
+ operands()[0]->print(OS, Names);
+ OS << sigil(kind());
+ operands()[1]->print(OS, Names);
+ OS << ')';
+ break;
+ default:
+ llvm_unreachable("unhandled formula arity");
+ }
+}
+
+} // namespace clang::dataflow \ No newline at end of file
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.cpp
new file mode 100644
index 000000000000..ee89e074f846
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.cpp
@@ -0,0 +1,536 @@
+//===-- HTMLLogger.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the HTML logger. Given a directory dir/, we write
+// dir/0.html for the first analysis, etc.
+// These files contain a visualization that allows inspecting the CFG and the
+// state of the analysis at each point.
+// Static assets (HTMLLogger.js, HTMLLogger.css) and SVG graphs etc are embedded
+// so each output file is self-contained.
+//
+// VIEWS
+//
+// The timeline and function view are always shown. These allow selecting basic
+// blocks, statements within them, and processing iterations (BBs are visited
+// multiple times when e.g. loops are involved).
+// These are written directly into the HTML body.
+//
+// There are also listings of particular basic blocks, and dumps of the state
+// at particular analysis points (i.e. BB2 iteration 3 statement 2).
+// These are only shown when the relevant BB/analysis point is *selected*.
+//
+// DATA AND TEMPLATES
+//
+// The HTML proper is mostly static.
+// The analysis data is in a JSON object HTMLLoggerData which is embedded as
+// a <script> in the <head>.
+// This gets rendered into DOM by a simple template processor which substitutes
+// the data into <template> tags embedded in the HTML. (see inflate() in JS).
+//
+// SELECTION
+//
+// This is the only real interactive mechanism.
+//
+// At any given time, there are several named selections, e.g.:
+// bb: B2 (basic block 0 is selected)
+// elt: B2.4 (statement 4 is selected)
+// iter: B2:1 (iteration 1 of the basic block is selected)
+// hover: B3 (hovering over basic block 3)
+//
+// The selection is updated by mouse events: hover by moving the mouse and
+// others by clicking. Elements that are click targets generally have attributes
+// (id or data-foo) that define what they should select.
+// See watchSelection() in JS for the exact logic.
+//
+// When the "bb" selection is set to "B2":
+// - sections <section data-selection="bb"> get shown
+// - templates under such sections get re-rendered
+// - elements with class/id "B2" get class "bb-select"
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/Analysis/FlowSensitive/DebugSupport.h"
+#include "clang/Analysis/FlowSensitive/Logger.h"
+#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ScopeExit.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/ScopedPrinter.h"
+#include "llvm/Support/raw_ostream.h"
+// Defines assets: HTMLLogger_{html,js,css}
+#include "HTMLLogger.inc"
+
+namespace clang::dataflow {
+namespace {
+
+// Render a graphviz graph specification to SVG using the `dot` tool.
+llvm::Expected<std::string> renderSVG(llvm::StringRef DotGraph);
+
+using StreamFactory = std::function<std::unique_ptr<llvm::raw_ostream>()>;
+
+// Recursively dumps Values/StorageLocations as JSON
+class ModelDumper {
+public:
+ ModelDumper(llvm::json::OStream &JOS, const Environment &Env)
+ : JOS(JOS), Env(Env) {}
+
+ void dump(Value &V) {
+ JOS.attribute("value_id", llvm::to_string(&V));
+ if (!Visited.insert(&V).second)
+ return;
+
+ JOS.attribute("kind", debugString(V.getKind()));
+
+ switch (V.getKind()) {
+ case Value::Kind::Integer:
+ case Value::Kind::TopBool:
+ case Value::Kind::AtomicBool:
+ case Value::Kind::FormulaBool:
+ break;
+ case Value::Kind::Reference:
+ JOS.attributeObject(
+ "referent", [&] { dump(cast<ReferenceValue>(V).getReferentLoc()); });
+ break;
+ case Value::Kind::Pointer:
+ JOS.attributeObject(
+ "pointee", [&] { dump(cast<PointerValue>(V).getPointeeLoc()); });
+ break;
+ case Value::Kind::Struct:
+ for (const auto &Child :
+ cast<StructValue>(V).getAggregateLoc().children())
+ JOS.attributeObject("f:" + Child.first->getNameAsString(), [&] {
+ if (Child.second)
+ if (Value *Val = Env.getValue(*Child.second))
+ dump(*Val);
+ });
+ break;
+ }
+
+ for (const auto& Prop : V.properties())
+ JOS.attributeObject(("p:" + Prop.first()).str(),
+ [&] { dump(*Prop.second); });
+
+ // Running the SAT solver is expensive, but knowing which booleans are
+ // guaranteed true/false here is valuable and hard to determine by hand.
+ if (auto *B = llvm::dyn_cast<BoolValue>(&V)) {
+ JOS.attribute("formula", llvm::to_string(B->formula()));
+ JOS.attribute(
+ "truth", Env.flowConditionImplies(B->formula()) ? "true"
+ : Env.flowConditionImplies(Env.arena().makeNot(B->formula()))
+ ? "false"
+ : "unknown");
+ }
+ }
+ void dump(const StorageLocation &L) {
+ JOS.attribute("location", llvm::to_string(&L));
+ if (!Visited.insert(&L).second)
+ return;
+
+ JOS.attribute("type", L.getType().getAsString());
+ if (auto *V = Env.getValue(L))
+ dump(*V);
+ }
+
+ llvm::DenseSet<const void*> Visited;
+ llvm::json::OStream &JOS;
+ const Environment &Env;
+};
+
+class HTMLLogger : public Logger {
+ StreamFactory Streams;
+ std::unique_ptr<llvm::raw_ostream> OS;
+ std::optional<llvm::json::OStream> JOS;
+
+ const ControlFlowContext *CFG;
+ // Timeline of iterations of CFG block visitation.
+ std::vector<std::pair<const CFGBlock *, unsigned>> Iters;
+ // Number of times each CFG block has been seen.
+ llvm::DenseMap<const CFGBlock *, unsigned> BlockIters;
+ // The messages logged in the current context but not yet written.
+ std::string ContextLogs;
+ // The number of elements we have visited within the current CFG block.
+ unsigned ElementIndex;
+
+public:
+ explicit HTMLLogger(StreamFactory Streams) : Streams(std::move(Streams)) {}
+ void beginAnalysis(const ControlFlowContext &CFG,
+ TypeErasedDataflowAnalysis &A) override {
+ OS = Streams();
+ this->CFG = &CFG;
+ *OS << llvm::StringRef(HTMLLogger_html).split("<?INJECT?>").first;
+
+ if (const auto *D = CFG.getDecl()) {
+ const auto &SM = A.getASTContext().getSourceManager();
+ *OS << "<title>";
+ if (const auto *ND = dyn_cast<NamedDecl>(D))
+ *OS << ND->getNameAsString() << " at ";
+ *OS << SM.getFilename(D->getLocation()) << ":"
+ << SM.getSpellingLineNumber(D->getLocation());
+ *OS << "</title>\n";
+ };
+
+ *OS << "<style>" << HTMLLogger_css << "</style>\n";
+ *OS << "<script>" << HTMLLogger_js << "</script>\n";
+
+ writeCode();
+ writeCFG();
+
+ *OS << "<script>var HTMLLoggerData = \n";
+ JOS.emplace(*OS, /*Indent=*/2);
+ JOS->objectBegin();
+ JOS->attributeBegin("states");
+ JOS->objectBegin();
+ }
+ // Between beginAnalysis() and endAnalysis() we write all the states for
+ // particular analysis points into the `timeline` array.
+ void endAnalysis() override {
+ JOS->objectEnd();
+ JOS->attributeEnd();
+
+ JOS->attributeArray("timeline", [&] {
+ for (const auto &E : Iters) {
+ JOS->object([&] {
+ JOS->attribute("block", blockID(E.first->getBlockID()));
+ JOS->attribute("iter", E.second);
+ });
+ }
+ });
+ JOS->attributeObject("cfg", [&] {
+ for (const auto &E : BlockIters)
+ writeBlock(*E.first, E.second);
+ });
+
+ JOS->objectEnd();
+ JOS.reset();
+ *OS << ";\n</script>\n";
+ *OS << llvm::StringRef(HTMLLogger_html).split("<?INJECT?>").second;
+ }
+
+ void enterBlock(const CFGBlock &B) override {
+ Iters.emplace_back(&B, ++BlockIters[&B]);
+ ElementIndex = 0;
+ }
+ void enterElement(const CFGElement &E) override {
+ ++ElementIndex;
+ }
+
+ static std::string blockID(unsigned Block) {
+ return llvm::formatv("B{0}", Block);
+ }
+ static std::string eltID(unsigned Block, unsigned Element) {
+ return llvm::formatv("B{0}.{1}", Block, Element);
+ }
+ static std::string iterID(unsigned Block, unsigned Iter) {
+ return llvm::formatv("B{0}:{1}", Block, Iter);
+ }
+ static std::string elementIterID(unsigned Block, unsigned Iter,
+ unsigned Element) {
+ return llvm::formatv("B{0}:{1}_B{0}.{2}", Block, Iter, Element);
+ }
+
+ // Write the analysis state associated with a particular analysis point.
+ // FIXME: this dump is fairly opaque. We should show:
+ // - values associated with the current Stmt
+ // - values associated with its children
+ // - meaningful names for values
+ // - which boolean values are implied true/false by the flow condition
+ void recordState(TypeErasedDataflowAnalysisState &State) override {
+ unsigned Block = Iters.back().first->getBlockID();
+ unsigned Iter = Iters.back().second;
+ JOS->attributeObject(elementIterID(Block, Iter, ElementIndex), [&] {
+ JOS->attribute("block", blockID(Block));
+ JOS->attribute("iter", Iter);
+ JOS->attribute("element", ElementIndex);
+
+ // If this state immediately follows an Expr, show its built-in model.
+ if (ElementIndex > 0) {
+ auto S =
+ Iters.back().first->Elements[ElementIndex - 1].getAs<CFGStmt>();
+ if (const Expr *E = S ? llvm::dyn_cast<Expr>(S->getStmt()) : nullptr)
+ if (auto *Loc = State.Env.getStorageLocation(*E, SkipPast::None))
+ JOS->attributeObject(
+ "value", [&] { ModelDumper(*JOS, State.Env).dump(*Loc); });
+ }
+ if (!ContextLogs.empty()) {
+ JOS->attribute("logs", ContextLogs);
+ ContextLogs.clear();
+ }
+ {
+ std::string BuiltinLattice;
+ llvm::raw_string_ostream BuiltinLatticeS(BuiltinLattice);
+ State.Env.dump(BuiltinLatticeS);
+ JOS->attribute("builtinLattice", BuiltinLattice);
+ }
+ });
+ }
+ void blockConverged() override { logText("Block converged"); }
+
+ void logText(llvm::StringRef S) override {
+ ContextLogs.append(S.begin(), S.end());
+ ContextLogs.push_back('\n');
+ }
+
+private:
+ // Write the CFG block details.
+ // Currently this is just the list of elements in execution order.
+ // FIXME: an AST dump would be a useful view, too.
+ void writeBlock(const CFGBlock &B, unsigned Iters) {
+ JOS->attributeObject(blockID(B.getBlockID()), [&] {
+ JOS->attribute("iters", Iters);
+ JOS->attributeArray("elements", [&] {
+ for (const auto &Elt : B.Elements) {
+ std::string Dump;
+ llvm::raw_string_ostream DumpS(Dump);
+ Elt.dumpToStream(DumpS);
+ JOS->value(Dump);
+ }
+ });
+ });
+ }
+
+ // Write the code of function being examined.
+ // We want to overlay the code with <span>s that mark which BB particular
+ // tokens are associated with, and even which BB element (so that clicking
+ // can select the right element).
+ void writeCode() {
+ if (!CFG->getDecl())
+ return;
+ const auto &AST = CFG->getDecl()->getASTContext();
+ bool Invalid = false;
+
+ // Extract the source code from the original file.
+ // Pretty-printing from the AST would probably be nicer (no macros or
+ // indentation to worry about), but we need the boundaries of particular
+ // AST nodes and the printer doesn't provide this.
+ auto Range = clang::Lexer::makeFileCharRange(
+ CharSourceRange::getTokenRange(CFG->getDecl()->getSourceRange()),
+ AST.getSourceManager(), AST.getLangOpts());
+ if (Range.isInvalid())
+ return;
+ llvm::StringRef Code = clang::Lexer::getSourceText(
+ Range, AST.getSourceManager(), AST.getLangOpts(), &Invalid);
+ if (Invalid)
+ return;
+
+ static constexpr unsigned Missing = -1;
+ // TokenInfo stores the BB and set of elements that a token is part of.
+ struct TokenInfo {
+ // The basic block this is part of.
+ // This is the BB of the stmt with the smallest containing range.
+ unsigned BB = Missing;
+ unsigned BBPriority = 0;
+ // The most specific stmt this is part of (smallest range).
+ unsigned Elt = Missing;
+ unsigned EltPriority = 0;
+ // All stmts this is part of.
+ SmallVector<unsigned> Elts;
+
+ // Mark this token as being part of BB.Elt.
+ // RangeLen is the character length of the element's range, used to
+ // distinguish inner vs outer statements.
+ // For example in `a==0`, token "a" is part of the stmts "a" and "a==0".
+ // However "a" has a smaller range, so is more specific. Clicking on the
+ // token "a" should select the stmt "a".
+ void assign(unsigned BB, unsigned Elt, unsigned RangeLen) {
+ // A worse BB (larger range) => ignore.
+ if (this->BB != Missing && BB != this->BB && BBPriority <= RangeLen)
+ return;
+ if (BB != this->BB) {
+ this->BB = BB;
+ Elts.clear();
+ BBPriority = RangeLen;
+ }
+ BBPriority = std::min(BBPriority, RangeLen);
+ Elts.push_back(Elt);
+ if (this->Elt == Missing || EltPriority > RangeLen)
+ this->Elt = Elt;
+ }
+ bool operator==(const TokenInfo &Other) const {
+ return std::tie(BB, Elt, Elts) ==
+ std::tie(Other.BB, Other.Elt, Other.Elts);
+ }
+ // Write the attributes for the <span> on this token.
+ void write(llvm::raw_ostream &OS) const {
+ OS << "class='c";
+ if (BB != Missing)
+ OS << " " << blockID(BB);
+ for (unsigned Elt : Elts)
+ OS << " " << eltID(BB, Elt);
+ OS << "'";
+
+ if (Elt != Missing)
+ OS << " data-elt='" << eltID(BB, Elt) << "'";
+ if (BB != Missing)
+ OS << " data-bb='" << blockID(BB) << "'";
+ }
+ };
+
+ // Construct one TokenInfo per character in a flat array.
+ // This is inefficient (chars in a token all have the same info) but simple.
+ std::vector<TokenInfo> State(Code.size());
+ for (const auto *Block : CFG->getCFG()) {
+ unsigned EltIndex = 0;
+ for (const auto& Elt : *Block) {
+ ++EltIndex;
+ if (const auto S = Elt.getAs<CFGStmt>()) {
+ auto EltRange = clang::Lexer::makeFileCharRange(
+ CharSourceRange::getTokenRange(S->getStmt()->getSourceRange()),
+ AST.getSourceManager(), AST.getLangOpts());
+ if (EltRange.isInvalid())
+ continue;
+ if (EltRange.getBegin() < Range.getBegin() ||
+ EltRange.getEnd() >= Range.getEnd() ||
+ EltRange.getEnd() < Range.getBegin() ||
+ EltRange.getEnd() >= Range.getEnd())
+ continue;
+ unsigned Off = EltRange.getBegin().getRawEncoding() -
+ Range.getBegin().getRawEncoding();
+ unsigned Len = EltRange.getEnd().getRawEncoding() -
+ EltRange.getBegin().getRawEncoding();
+ for (unsigned I = 0; I < Len; ++I)
+ State[Off + I].assign(Block->getBlockID(), EltIndex, Len);
+ }
+ }
+ }
+
+ // Finally, write the code with the correct <span>s.
+ unsigned Line =
+ AST.getSourceManager().getSpellingLineNumber(Range.getBegin());
+ *OS << "<template data-copy='code'>\n";
+ *OS << "<code class='filename'>";
+ llvm::printHTMLEscaped(
+ llvm::sys::path::filename(
+ AST.getSourceManager().getFilename(Range.getBegin())),
+ *OS);
+ *OS << "</code>";
+ *OS << "<code class='line' data-line='" << Line++ << "'>";
+ for (unsigned I = 0; I < Code.size(); ++I) {
+ // Don't actually write a <span> around each character, only break spans
+ // when the TokenInfo changes.
+ bool NeedOpen = I == 0 || !(State[I] == State[I-1]);
+ bool NeedClose = I + 1 == Code.size() || !(State[I] == State[I + 1]);
+ if (NeedOpen) {
+ *OS << "<span ";
+ State[I].write(*OS);
+ *OS << ">";
+ }
+ if (Code[I] == '\n')
+ *OS << "</code>\n<code class='line' data-line='" << Line++ << "'>";
+ else
+ llvm::printHTMLEscaped(Code.substr(I, 1), *OS);
+ if (NeedClose) *OS << "</span>";
+ }
+ *OS << "</code>\n";
+ *OS << "</template>";
+ }
+
+ // Write the CFG diagram, a graph of basic blocks.
+ // Laying out graphs is hard, so we construct a graphviz description and shell
+ // out to `dot` to turn it into an SVG.
+ void writeCFG() {
+ *OS << "<template data-copy='cfg'>\n";
+ if (auto SVG = renderSVG(buildCFGDot(CFG->getCFG())))
+ *OS << *SVG;
+ else
+ *OS << "Can't draw CFG: " << toString(SVG.takeError());
+ *OS << "</template>\n";
+ }
+
+ // Produce a graphviz description of a CFG.
+ static std::string buildCFGDot(const clang::CFG &CFG) {
+ std::string Graph;
+ llvm::raw_string_ostream GraphS(Graph);
+ // Graphviz likes to add unhelpful tooltips everywhere, " " suppresses.
+ GraphS << R"(digraph {
+ tooltip=" "
+ node[class=bb, shape=square, fontname="sans-serif", tooltip=" "]
+ edge[tooltip = " "]
+)";
+ for (unsigned I = 0; I < CFG.getNumBlockIDs(); ++I)
+ GraphS << " " << blockID(I) << " [id=" << blockID(I) << "]\n";
+ for (const auto *Block : CFG) {
+ for (const auto &Succ : Block->succs()) {
+ GraphS << " " << blockID(Block->getBlockID()) << " -> "
+ << blockID(Succ.getReachableBlock()->getBlockID()) << "\n";
+ }
+ }
+ GraphS << "}\n";
+ return Graph;
+ }
+};
+
+// Nothing interesting here, just subprocess/temp-file plumbing.
+llvm::Expected<std::string> renderSVG(llvm::StringRef DotGraph) {
+ std::string DotPath;
+ if (const auto *FromEnv = ::getenv("GRAPHVIZ_DOT"))
+ DotPath = FromEnv;
+ else {
+ auto FromPath = llvm::sys::findProgramByName("dot");
+ if (!FromPath)
+ return llvm::createStringError(FromPath.getError(),
+ "'dot' not found on PATH");
+ DotPath = FromPath.get();
+ }
+
+ // Create input and output files for `dot` subprocess.
+ // (We create the output file as empty, to reserve the temp filename).
+ llvm::SmallString<256> Input, Output;
+ int InputFD;
+ if (auto EC = llvm::sys::fs::createTemporaryFile("analysis", ".dot", InputFD,
+ Input))
+ return llvm::createStringError(EC, "failed to create `dot` temp input");
+ llvm::raw_fd_ostream(InputFD, /*shouldClose=*/true) << DotGraph;
+ auto DeleteInput =
+ llvm::make_scope_exit([&] { llvm::sys::fs::remove(Input); });
+ if (auto EC = llvm::sys::fs::createTemporaryFile("analysis", ".svg", Output))
+ return llvm::createStringError(EC, "failed to create `dot` temp output");
+ auto DeleteOutput =
+ llvm::make_scope_exit([&] { llvm::sys::fs::remove(Output); });
+
+ std::vector<std::optional<llvm::StringRef>> Redirects = {
+ Input, Output,
+ /*stderr=*/std::nullopt};
+ std::string ErrMsg;
+ int Code = llvm::sys::ExecuteAndWait(
+ DotPath, {"dot", "-Tsvg"}, /*Env=*/std::nullopt, Redirects,
+ /*SecondsToWait=*/0, /*MemoryLimit=*/0, &ErrMsg);
+ if (!ErrMsg.empty())
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "'dot' failed: " + ErrMsg);
+ if (Code != 0)
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "'dot' failed (" + llvm::Twine(Code) + ")");
+
+ auto Buf = llvm::MemoryBuffer::getFile(Output);
+ if (!Buf)
+ return llvm::createStringError(Buf.getError(), "Can't read `dot` output");
+
+ // Output has <?xml> prefix we don't want. Skip to <svg> tag.
+ llvm::StringRef Result = Buf.get()->getBuffer();
+ auto Pos = Result.find("<svg");
+ if (Pos == llvm::StringRef::npos)
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Can't find <svg> tag in `dot` output");
+ return Result.substr(Pos).str();
+}
+
+} // namespace
+
+std::unique_ptr<Logger>
+Logger::html(std::function<std::unique_ptr<llvm::raw_ostream>()> Streams) {
+ return std::make_unique<HTMLLogger>(std::move(Streams));
+}
+
+} // namespace clang::dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.css b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.css
new file mode 100644
index 000000000000..c8212df1f94b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.css
@@ -0,0 +1,142 @@
+/*===-- HTMLLogger.css ----------------------------------------------------===
+*
+* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+* See https://llvm.org/LICENSE.txt for license information.
+* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+*
+*===----------------------------------------------------------------------===*/
+html { font-family: sans-serif; }
+body { margin: 0; display: flex; justify-content: left; }
+body > * { box-sizing: border-box; }
+body > section {
+ border: 1px solid black;
+ min-width: 20em;
+ overflow: auto;
+ max-height: 100vh;
+}
+section header {
+ background-color: #008;
+ color: white;
+ font-weight: bold;
+ font-size: large;
+}
+section h2 {
+ font-size: medium;
+ margin-bottom: 0.5em;
+ padding-top: 0.5em;
+ border-top: 1px solid #aaa;
+}
+#timeline {
+ min-width: 0;
+}
+#timeline .entry.hover {
+ background-color: #aaa;
+}
+#timeline .entry.iter-select {
+ background-color: #aac;
+}
+
+#bb-elements {
+ font-family: monospace;
+ font-size: x-small;
+ border-collapse: collapse;
+}
+#bb-elements td:nth-child(1) {
+ text-align: right;
+ width: 4em;
+ border-right: 1px solid #008;
+ padding: 0.3em 0.5em;
+
+ font-weight: bold;
+ color: #888;
+};
+#bb-elements tr.hover {
+ background-color: #abc;
+}
+#bb-elements tr.elt-select {
+ background-color: #acf;
+}
+#iterations {
+ display: flex;
+}
+#iterations .chooser {
+ flex-grow: 1;
+ text-align: center;
+}
+#iterations .chooser:not(.iter-select).hover {
+ background-color: #aaa;
+}
+#iterations .iter-select {
+ font-weight: bold;
+ background-color: #ccc;
+}
+#iterations .chooser:not(.iter-select) {
+ text-decoration: underline;
+ color: blue;
+}
+
+code.filename {
+ font-weight: bold;
+ color: black;
+ background-color: #ccc;
+ display: block;
+ text-align: center;
+}
+code.line {
+ display: block;
+ white-space: pre;
+}
+code.line:before { /* line numbers */
+ content: attr(data-line);
+ display: inline-block;
+ width: 2em;
+ text-align: right;
+ padding-right: 2px;
+ background-color: #ccc;
+ border-right: 1px solid #888;
+ margin-right: 8px;
+}
+code.line:has(.bb-select):before {
+ border-right: 4px solid black;
+ margin-right: 5px;
+}
+.c.hover, .bb.hover {
+ filter: saturate(200%) brightness(90%);
+}
+.c.elt-select {
+ box-shadow: inset 0 -4px 2px -2px #a00;
+}
+.bb.bb-select polygon {
+ stroke-width: 4px;
+ filter: brightness(70%) saturate(150%);
+}
+.bb { user-select: none; }
+.bb polygon { fill: white; }
+#cfg {
+ position: relative;
+ margin-left: 0.5em;
+}
+
+.value {
+ border: 1px solid #888;
+ font-size: x-small;
+ flex-grow: 1;
+}
+.value summary {
+ background-color: #ace;
+ display: flex;
+ justify-content: space-between;
+}
+.value .address {
+ font-size: xx-small;
+ font-family: monospace;
+ color: #888;
+}
+.value .property {
+ display: flex;
+ margin-top: 0.5em;
+}
+.value .property .key {
+ font-weight: bold;
+ min-width: 5em;
+}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.html b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.html
new file mode 100644
index 000000000000..a60259a99cce
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.html
@@ -0,0 +1,107 @@
+<!doctype html>
+<html>
+<!-- HTMLLogger.cpp ----------------------------------------------------
+
+ Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ See https://llvm.org/LICENSE.txt for license information.
+ SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+//===------------------------------------------------------------------------>
+
+<head>
+<?INJECT?>
+
+<template id="value-template">
+ <details class="value" open>
+ <summary>
+ <span>{{v.kind}}
+ <template data-if="v.value_id"><span class="address">#{{v.value_id}}</span></template>
+ </span>
+ <template data-if="v.location">
+ <span>{{v.type}} <span class="address">@{{v.location}}</span></span>
+ </template>
+ </summary>
+ <template
+ data-for="kv in Object.entries(v)"
+ data-if="['kind', 'value_id', 'type', 'location'].indexOf(kv[0]) < 0">
+ <div class="property"><span class="key">{{kv[0]}}</span>
+ <template data-if="typeof(kv[1]) != 'object'">{{kv[1]}}</template>
+ <template data-if="typeof(kv[1]) == 'object'" data-let="v = kv[1]">
+ <template data-use="value-template"></template>
+ </template>
+ </div>
+ </template>
+ </details>
+</template>
+
+</head>
+
+<body>
+
+<section id="timeline" data-selection="">
+<header>Timeline</header>
+<template data-for="entry in timeline">
+ <div id="{{entry.block}}:{{entry.iter}}" data-bb="{{entry.block}}" class="entry">{{entry.block}} ({{entry.iter}})</div>
+</template>
+</section>
+
+<section id="function" data-selection="">
+<header>Function</header>
+<div id="code"></div>
+<div id="cfg"></div>
+</section>
+
+<section id="block" data-selection="bb">
+<header><template>Block {{selection.bb}}</template></header>
+<div id="iterations">
+ <template data-for="i in Array(cfg[selection.bb].iters).keys()">
+ <a class="chooser {{selection.bb}}:{{i+1}}" data-iter="{{selection.bb}}:{{i+1}}">Iteration {{i+1}}</a>
+ </template>
+</div>
+<table id="bb-elements">
+<template>
+ <tr id="{{selection.bb}}.0">
+ <td class="{{selection.bb}}">{{selection.bb}}.0</td>
+ <td>(initial state)</td>
+ </tr>
+</template>
+<template data-for="elt in cfg[selection.bb].elements">
+ <tr id="{{selection.bb}}.{{elt_index+1}}">
+ <td class="{{selection.bb}}">{{selection.bb}}.{{elt_index+1}}</td>
+ <td>{{elt}}</td>
+ </tr>
+</template>
+</table>
+</section>
+
+<section id="element" data-selection="iter,elt">
+<template data-let="state = states[selection.iter + '_' + selection.elt]">
+<header>
+ <template data-if="state.element == 0">{{state.block}} (iteration {{state.iter}}) initial state</template>
+ <template data-if="state.element != 0">Element {{selection.elt}} (iteration {{state.iter}})</template>
+</header>
+<template data-if="state.value" data-let="v = state.value">
+ <h2>Value</h2>
+ <template data-use="value-template"></template>
+</template>
+<template data-if="state.logs">
+ <h2>Logs</h2>
+ <pre>{{state.logs}}</pre>
+</template>
+<h2>Built-in lattice</h2>
+<pre>{{state.builtinLattice}}</pre>
+</template>
+</section>
+
+<script>
+addBBColors(Object.keys(HTMLLoggerData.cfg).length);
+watchSelection(HTMLLoggerData);
+updateSelection({}, HTMLLoggerData);
+// Copy code and cfg from <template>s into the body.
+for (tmpl of document.querySelectorAll('template[data-copy]'))
+ document.getElementById(tmpl.dataset.copy).replaceChildren(
+ ...tmpl.content.cloneNode(/*deep=*/true).childNodes);
+</script>
+
+</body>
+</html>
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.js b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.js
new file mode 100644
index 000000000000..6e04bc00f663
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.js
@@ -0,0 +1,219 @@
+//===-- HTMLLogger.js -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Based on selected objects, hide/show sections & populate data from templates.
+//
+// For example, if the selection is {bb="BB4", elt="BB4.6" iter="BB4:2"}:
+// - show the "block" and "element" sections
+// - re-render templates within these sections (if selection changed)
+// - apply "bb-select" to items with class class "BB4", etc
+let selection = {};
+function updateSelection(changes, data) {
+ Object.assign(selection, changes);
+
+ data = Object.create(data);
+ data.selection = selection;
+ for (root of document.querySelectorAll('[data-selection]'))
+ updateSection(root, data);
+
+ for (var k in changes)
+ applyClassIf(k + '-select', classSelector(changes[k]));
+}
+
+// Given <section data-selection="x,y">:
+// - hide section if selections x or y are null
+// - re-render templates if x or y have changed
+function updateSection(root, data) {
+ let changed = root.selection == null;
+ root.selection ||= {};
+ for (key of root.dataset.selection.split(',')) {
+ if (!key) continue;
+ if (data.selection[key] != root.selection[key]) {
+ root.selection[key] = data.selection[key];
+ changed = true;
+ }
+ if (data.selection[key] == null) {
+ root.hidden = true;
+ return;
+ }
+ }
+ if (changed) {
+ root.hidden = false;
+ for (tmpl of root.getElementsByTagName('template'))
+ reinflate(tmpl, data);
+ }
+}
+
+// Expands template `tmpl` based on input `data`:
+// - interpolates {{expressions}} in text and attributes
+// - <template> tags can modify expansion: if, for etc
+// Outputs to `parent` element, inserting before `next`.
+function inflate(tmpl, data, parent, next) {
+ // We use eval() as our expression language in templates!
+ // The templates are static and trusted.
+ let evalExpr = (expr, data) => eval('with (data) { ' + expr + ' }');
+ let interpolate = (str, data) =>
+ str.replace(/\{\{(.*?)\}\}/g, (_, expr) => evalExpr(expr, data))
+ // Anything other than <template> tag: copy, interpolate, recursively inflate.
+ if (tmpl.nodeName != 'TEMPLATE') {
+ let clone = tmpl.cloneNode();
+ clone.inflated = true;
+ if (clone instanceof Text)
+ clone.textContent = interpolate(clone.textContent, data);
+ if (clone instanceof Element) {
+ for (attr of clone.attributes)
+ attr.value = interpolate(attr.value, data);
+ for (c of tmpl.childNodes)
+ inflate(c, data, clone, /*next=*/null);
+ }
+ return parent.insertBefore(clone, next);
+ }
+ // data-use="xyz": use <template id="xyz"> instead. (Allows recursion.)
+ if ('use' in tmpl.dataset)
+ return inflate(document.getElementById(tmpl.dataset.use), data, parent, next);
+ // <template> tag handling. Base case: recursively inflate.
+ function handle(data) {
+ for (c of tmpl.content.childNodes)
+ inflate(c, data, parent, next);
+ }
+ // Directives on <template> tags modify behavior.
+ const directives = {
+ // data-for="x in expr": expr is enumerable, bind x to each in turn
+ 'for': (nameInExpr, data, proceed) => {
+ let [name, expr] = nameInExpr.split(' in ');
+ let newData = Object.create(data);
+ let index = 0;
+ for (val of evalExpr(expr, data) || []) {
+ newData[name] = val;
+ newData[name + '_index'] = index++;
+ proceed(newData);
+ }
+ },
+ // data-if="expr": only include contents if expression is truthy
+ 'if': (expr, data, proceed) => { if (evalExpr(expr, data)) proceed(data); },
+ // data-let="x = expr": bind x to value of expr
+ 'let': (nameEqExpr, data, proceed) => {
+ let [name, expr] = nameEqExpr.split(' = ');
+ let newData = Object.create(data);
+ newData[name] = evalExpr(expr, data);
+ proceed(newData);
+ },
+ }
+ // Compose directive handlers on top of the base handler.
+ for (let [dir, value] of Object.entries(tmpl.dataset).reverse()) {
+ if (dir in directives) {
+ let proceed = handle;
+ handle = (data) => directives[dir](value, data, proceed);
+ }
+ }
+ handle(data);
+}
+// Expand a template, after first removing any prior expansion of it.
+function reinflate(tmpl, data) {
+ // Clear previously rendered template contents.
+ while (tmpl.nextSibling && tmpl.nextSibling.inflated)
+ tmpl.parentNode.removeChild(tmpl.nextSibling);
+ inflate(tmpl, data, tmpl.parentNode, tmpl.nextSibling);
+}
+
+// Handle a mouse event on a region containing selectable items.
+// This might end up changing the hover state or the selection state.
+//
+// targetSelector describes what target HTML element is selectable.
+// targetToID specifies how to determine the selection from it:
+// hover: a function from target to the class name to highlight
+// bb: a function from target to the basic-block name to select (BB4)
+// elt: a function from target to the CFG element name to select (BB4.5)
+// iter: a function from target to the BB iteration to select (BB4:2)
+// If an entry is missing, the selection is unmodified.
+// If an entry is null, the selection is always cleared.
+function mouseEventHandler(event, targetSelector, targetToID, data) {
+ var target = event.type == "mouseout" ? null : event.target.closest(targetSelector);
+ let selTarget = k => (target && targetToID[k]) ? targetToID[k](target) : null;
+ if (event.type == "click") {
+ let newSel = {};
+ for (var k in targetToID) {
+ if (k == 'hover') continue;
+ let t = selTarget(k);
+ newSel[k] = t;
+ }
+ updateSelection(newSel, data);
+ } else if ("hover" in targetToID) {
+ applyClassIf("hover", classSelector(selTarget("hover")));
+ }
+}
+function watch(rootSelector, targetSelector, targetToID, data) {
+ var root = document.querySelector(rootSelector);
+ for (event of ['mouseout', 'mousemove', 'click'])
+ root.addEventListener(event, e => mouseEventHandler(e, targetSelector, targetToID, data));
+}
+function watchSelection(data) {
+ let lastIter = (bb) => `${bb}:${data.cfg[bb].iters}`;
+ watch('#code', '.c', {
+ hover: e => e.dataset.elt,
+ bb: e => e.dataset.bb,
+ elt: e => e.dataset.elt,
+ // If we're already viewing an iteration of this BB, stick with the same.
+ iter: e => (selection.iter && selection.bb == e.dataset.bb) ? selection.iter : lastIter(e.dataset.bb),
+ }, data);
+ watch('#cfg', '.bb', {
+ hover: e => e.id,
+ bb: e => e.id,
+ elt: e => e.id + ".0",
+ iter: e => lastIter(e.id),
+ }, data);
+ watch('#timeline', '.entry', {
+ hover: e => [e.id, e.dataset.bb],
+ bb: e => e.dataset.bb,
+ elt: e => e.dataset.bb + ".0",
+ iter: e => e.id,
+ }, data);
+ watch('#bb-elements', 'tr', {
+ hover: e => e.id,
+ elt: e => e.id,
+ }, data);
+ watch('#iterations', '.chooser', {
+ hover: e => e.dataset.iter,
+ iter: e => e.dataset.iter,
+ }, data);
+ updateSelection({}, data);
+}
+function applyClassIf(cls, query) {
+ document.querySelectorAll('.' + cls).forEach(elt => elt.classList.remove(cls));
+ document.querySelectorAll(query).forEach(elt => elt.classList.add(cls));
+}
+// Turns a class name into a CSS selector matching it, with some wrinkles:
+// - we treat id="foo" just like class="foo" to avoid repetition in the HTML
+// - cls can be an array of strings, we match them all
+function classSelector(cls) {
+ if (cls == null) return null;
+ if (Array.isArray(cls)) return cls.map(classSelector).join(', ');
+ var escaped = cls.replace('.', '\\.').replace(':', '\\:');
+ // don't require id="foo" class="foo"
+ return '.' + escaped + ", #" + escaped;
+}
+
+// Add a stylesheet defining colors for n basic blocks.
+function addBBColors(n) {
+ let sheet = new CSSStyleSheet();
+ // hex values to subtract from fff to get a base color
+ options = [0x001, 0x010, 0x011, 0x100, 0x101, 0x110, 0x111];
+ function color(hex) {
+ return "#" + hex.toString(16).padStart(3, "0");
+ }
+ function add(selector, property, hex) {
+ sheet.insertRule(`${selector} { ${property}: ${color(hex)}; }`)
+ }
+ for (var i = 0; i < n; ++i) {
+ let opt = options[i%options.length];
+ add(`.B${i}`, 'background-color', 0xfff - 2*opt);
+ add(`#B${i} polygon`, 'fill', 0xfff - 2*opt);
+ add(`#B${i} polygon`, 'stroke', 0x888 - 4*opt);
+ }
+ document.adoptedStyleSheets.push(sheet);
+}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Logger.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Logger.cpp
new file mode 100644
index 000000000000..469fea338e45
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Logger.cpp
@@ -0,0 +1,108 @@
+//===-- Logger.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Logger.h"
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
+#include "llvm/Support/WithColor.h"
+
+namespace clang::dataflow {
+
+Logger &Logger::null() {
+ struct NullLogger final : Logger {};
+ static auto *Instance = new NullLogger();
+ return *Instance;
+}
+
+namespace {
+struct TextualLogger final : Logger {
+ llvm::raw_ostream &OS;
+ const CFG *CurrentCFG;
+ const CFGBlock *CurrentBlock;
+ const CFGElement *CurrentElement;
+ unsigned CurrentElementIndex;
+ bool ShowColors;
+ llvm::DenseMap<const CFGBlock *, unsigned> VisitCount;
+ TypeErasedDataflowAnalysis *CurrentAnalysis;
+
+ TextualLogger(llvm::raw_ostream &OS)
+ : OS(OS), ShowColors(llvm::WithColor::defaultAutoDetectFunction()(OS)) {}
+
+ virtual void beginAnalysis(const ControlFlowContext &CFG,
+ TypeErasedDataflowAnalysis &Analysis) override {
+ {
+ llvm::WithColor Header(OS, llvm::raw_ostream::Colors::RED, /*Bold=*/true);
+ OS << "=== Beginning data flow analysis ===\n";
+ }
+ if (auto *D = CFG.getDecl()) {
+ D->print(OS);
+ OS << "\n";
+ D->dump(OS);
+ }
+ CurrentCFG = &CFG.getCFG();
+ CurrentCFG->print(OS, Analysis.getASTContext().getLangOpts(), ShowColors);
+ CurrentAnalysis = &Analysis;
+ }
+ virtual void endAnalysis() override {
+ llvm::WithColor Header(OS, llvm::raw_ostream::Colors::RED, /*Bold=*/true);
+ unsigned Blocks = 0, Steps = 0;
+ for (const auto &E : VisitCount) {
+ ++Blocks;
+ Steps += E.second;
+ }
+ llvm::errs() << "=== Finished analysis: " << Blocks << " blocks in "
+ << Steps << " total steps ===\n";
+ }
+ virtual void enterBlock(const CFGBlock &Block) override {
+ unsigned Count = ++VisitCount[&Block];
+ {
+ llvm::WithColor Header(OS, llvm::raw_ostream::Colors::RED, /*Bold=*/true);
+ OS << "=== Entering block B" << Block.getBlockID() << " (iteration "
+ << Count << ") ===\n";
+ }
+ Block.print(OS, CurrentCFG, CurrentAnalysis->getASTContext().getLangOpts(),
+ ShowColors);
+ CurrentBlock = &Block;
+ CurrentElement = nullptr;
+ CurrentElementIndex = 0;
+ }
+ virtual void enterElement(const CFGElement &Element) override {
+ ++CurrentElementIndex;
+ CurrentElement = &Element;
+ {
+ llvm::WithColor Subheader(OS, llvm::raw_ostream::Colors::CYAN,
+ /*Bold=*/true);
+ OS << "Processing element B" << CurrentBlock->getBlockID() << "."
+ << CurrentElementIndex << ": ";
+ Element.dumpToStream(OS);
+ }
+ }
+ void recordState(TypeErasedDataflowAnalysisState &State) override {
+ {
+ llvm::WithColor Subheader(OS, llvm::raw_ostream::Colors::CYAN,
+ /*Bold=*/true);
+ OS << "Computed state for B" << CurrentBlock->getBlockID() << "."
+ << CurrentElementIndex << ":\n";
+ }
+ // FIXME: currently the environment dump is verbose and unenlightening.
+ // FIXME: dump the user-defined lattice, too.
+ State.Env.dump(OS);
+ OS << "\n";
+ }
+ void blockConverged() override {
+ OS << "B" << CurrentBlock->getBlockID() << " has converged!\n";
+ }
+ virtual void logText(llvm::StringRef S) override { OS << S << "\n"; }
+};
+} // namespace
+
+std::unique_ptr<Logger> Logger::textual(llvm::raw_ostream &OS) {
+ return std::make_unique<TextualLogger>(OS);
+}
+
+} // namespace clang::dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/ChromiumCheckModel.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/ChromiumCheckModel.cpp
index f457964fb132..895f4ff04a17 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/ChromiumCheckModel.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/ChromiumCheckModel.cpp
@@ -50,8 +50,8 @@ bool isCheckLikeMethod(llvm::SmallDenseSet<const CXXMethodDecl *> &CheckDecls,
return CheckDecls.contains(&D);
}
-bool ChromiumCheckModel::transfer(const CFGElement *Element, Environment &Env) {
- auto CS = Element->getAs<CFGStmt>();
+bool ChromiumCheckModel::transfer(const CFGElement &Element, Environment &Env) {
+ auto CS = Element.getAs<CFGStmt>();
if (!CS)
return false;
auto Stmt = CS->getStmt();
@@ -59,7 +59,7 @@ bool ChromiumCheckModel::transfer(const CFGElement *Element, Environment &Env) {
if (const auto *M = dyn_cast<CXXMethodDecl>(Call->getDirectCallee())) {
if (isCheckLikeMethod(CheckDecls, *M)) {
// Mark this branch as unreachable.
- Env.addToFlowCondition(Env.getBoolLiteralValue(false));
+ Env.addToFlowCondition(Env.arena().makeLiteral(false));
return true;
}
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
index 308dc25dad1f..b0a8667f3fe5 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
@@ -18,9 +18,11 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Stmt.h"
#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/FlowSensitive/CFGMatchSwitch.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/Formula.h"
#include "clang/Analysis/FlowSensitive/NoopLattice.h"
#include "clang/Analysis/FlowSensitive/StorageLocation.h"
#include "clang/Analysis/FlowSensitive/Value.h"
@@ -36,16 +38,45 @@
namespace clang {
namespace dataflow {
+
+static bool isTopLevelNamespaceWithName(const NamespaceDecl &NS,
+ llvm::StringRef Name) {
+ return NS.getDeclName().isIdentifier() && NS.getName() == Name &&
+ NS.getParent() != nullptr && NS.getParent()->isTranslationUnit();
+}
+
+static bool hasOptionalClassName(const CXXRecordDecl &RD) {
+ if (!RD.getDeclName().isIdentifier())
+ return false;
+
+ if (RD.getName() == "optional") {
+ if (const auto *N = dyn_cast_or_null<NamespaceDecl>(RD.getDeclContext()))
+ return N->isStdNamespace() || isTopLevelNamespaceWithName(*N, "absl");
+ return false;
+ }
+
+ if (RD.getName() == "Optional") {
+ // Check whether namespace is "::base" or "::folly".
+ const auto *N = dyn_cast_or_null<NamespaceDecl>(RD.getDeclContext());
+ return N != nullptr && (isTopLevelNamespaceWithName(*N, "base") ||
+ isTopLevelNamespaceWithName(*N, "folly"));
+ }
+
+ return false;
+}
+
namespace {
using namespace ::clang::ast_matchers;
using LatticeTransferState = TransferState<NoopLattice>;
+AST_MATCHER(CXXRecordDecl, hasOptionalClassNameMatcher) {
+ return hasOptionalClassName(Node);
+}
+
DeclarationMatcher optionalClass() {
return classTemplateSpecializationDecl(
- anyOf(hasName("std::optional"), hasName("std::__optional_storage_base"),
- hasName("__optional_destruct_base"), hasName("absl::optional"),
- hasName("base::Optional")),
+ hasOptionalClassNameMatcher(),
hasTemplateArgument(0, refersToType(type().bind("T"))));
}
@@ -57,14 +88,16 @@ auto optionalOrAliasType() {
/// Matches any of the spellings of the optional types and sugar, aliases, etc.
auto hasOptionalType() { return hasType(optionalOrAliasType()); }
-auto isOptionalMemberCallWithName(
- llvm::StringRef MemberName,
+auto isOptionalMemberCallWithNameMatcher(
+ ast_matchers::internal::Matcher<NamedDecl> matcher,
const std::optional<StatementMatcher> &Ignorable = std::nullopt) {
auto Exception = unless(Ignorable ? expr(anyOf(*Ignorable, cxxThisExpr()))
: cxxThisExpr());
return cxxMemberCallExpr(
- on(expr(Exception)),
- callee(cxxMethodDecl(hasName(MemberName), ofClass(optionalClass()))));
+ on(expr(Exception,
+ anyOf(hasOptionalType(),
+ hasType(pointerType(pointee(optionalOrAliasType())))))),
+ callee(cxxMethodDecl(matcher)));
}
auto isOptionalOperatorCallWithName(
@@ -77,15 +110,15 @@ auto isOptionalOperatorCallWithName(
}
auto isMakeOptionalCall() {
- return callExpr(
- callee(functionDecl(hasAnyName(
- "std::make_optional", "base::make_optional", "absl::make_optional"))),
- hasOptionalType());
+ return callExpr(callee(functionDecl(hasAnyName(
+ "std::make_optional", "base::make_optional",
+ "absl::make_optional", "folly::make_optional"))),
+ hasOptionalType());
}
auto nulloptTypeDecl() {
- return namedDecl(
- hasAnyName("std::nullopt_t", "absl::nullopt_t", "base::nullopt_t"));
+ return namedDecl(hasAnyName("std::nullopt_t", "absl::nullopt_t",
+ "base::nullopt_t", "folly::None"));
}
auto hasNulloptType() { return hasType(nulloptTypeDecl()); }
@@ -96,10 +129,9 @@ auto hasAnyOptionalType() {
recordType(hasDeclaration(anyOf(nulloptTypeDecl(), optionalClass())))));
}
-
auto inPlaceClass() {
- return recordDecl(
- hasAnyName("std::in_place_t", "absl::in_place_t", "base::in_place_t"));
+ return recordDecl(hasAnyName("std::in_place_t", "absl::in_place_t",
+ "base::in_place_t", "folly::in_place_t"));
}
auto isOptionalNulloptConstructor() {
@@ -149,6 +181,11 @@ auto isStdSwapCall() {
hasArgument(1, hasOptionalType()));
}
+auto isStdForwardCall() {
+ return callExpr(callee(functionDecl(hasName("std::forward"))),
+ argumentCountIs(1), hasArgument(0, hasOptionalType()));
+}
+
constexpr llvm::StringLiteral ValueOrCallID = "ValueOrCall";
auto isValueOrStringEmptyCall() {
@@ -199,17 +236,17 @@ auto isComparisonOperatorCall(L lhs_arg_matcher, R rhs_arg_matcher) {
hasArgument(1, rhs_arg_matcher));
}
-// Ensures that `Expr` is mapped to a `BoolValue` and returns it.
-BoolValue &forceBoolValue(Environment &Env, const Expr &Expr) {
+/// Ensures that `Expr` is mapped to a `BoolValue` and returns its formula.
+const Formula &forceBoolValue(Environment &Env, const Expr &Expr) {
auto *Value = cast_or_null<BoolValue>(Env.getValue(Expr, SkipPast::None));
if (Value != nullptr)
- return *Value;
+ return Value->formula();
auto &Loc = Env.createStorageLocation(Expr);
Value = &Env.makeAtomicBoolValue();
Env.setValue(Loc, *Value);
Env.setStorageLocation(Expr, Loc);
- return *Value;
+ return Value->formula();
}
/// Sets `HasValueVal` as the symbolic value that represents the "has_value"
@@ -218,12 +255,15 @@ void setHasValue(Value &OptionalVal, BoolValue &HasValueVal) {
OptionalVal.setProperty("has_value", HasValueVal);
}
-/// Creates a symbolic value for an `optional` value using `HasValueVal` as the
-/// symbolic value of its "has_value" property.
-StructValue &createOptionalValue(Environment &Env, BoolValue &HasValueVal) {
- auto OptionalVal = std::make_unique<StructValue>();
- setHasValue(*OptionalVal, HasValueVal);
- return Env.takeOwnership(std::move(OptionalVal));
+/// Creates a symbolic value for an `optional` value at an existing storage
+/// location. Uses `HasValueVal` as the symbolic value of the "has_value"
+/// property.
+StructValue &createOptionalValue(AggregateStorageLocation &Loc,
+ BoolValue &HasValueVal, Environment &Env) {
+ auto &OptionalVal = Env.create<StructValue>(Loc);
+ Env.setValue(Loc, OptionalVal);
+ setHasValue(OptionalVal, HasValueVal);
+ return OptionalVal;
}
/// Returns the symbolic value that represents the "has_value" property of the
@@ -241,20 +281,12 @@ BoolValue *getHasValue(Environment &Env, Value *OptionalVal) {
return nullptr;
}
-/// If `Type` is a reference type, returns the type of its pointee. Otherwise,
-/// returns `Type` itself.
-QualType stripReference(QualType Type) {
- return Type->isReferenceType() ? Type->getPointeeType() : Type;
-}
-
/// Returns true if and only if `Type` is an optional type.
bool isOptionalType(QualType Type) {
if (!Type->isRecordType())
return false;
- // FIXME: Optimize this by avoiding the `getQualifiedNameAsString` call.
- auto TypeName = Type->getAsCXXRecordDecl()->getQualifiedNameAsString();
- return TypeName == "std::optional" || TypeName == "absl::optional" ||
- TypeName == "base::Optional";
+ const CXXRecordDecl *D = Type->getAsCXXRecordDecl();
+ return D != nullptr && hasOptionalClassName(*D);
}
/// Returns the number of optional wrappers in `Type`.
@@ -280,40 +312,91 @@ StorageLocation *maybeInitializeOptionalValueMember(QualType Q,
Environment &Env) {
// The "value" property represents a synthetic field. As such, it needs
// `StorageLocation`, like normal fields (and other variables). So, we model
- // it with a `ReferenceValue`, since that includes a storage location. Once
+ // it with a `PointerValue`, since that includes a storage location. Once
// the property is set, it will be shared by all environments that access the
// `Value` representing the optional (here, `OptionalVal`).
if (auto *ValueProp = OptionalVal.getProperty("value")) {
- auto *ValueRef = clang::cast<ReferenceValue>(ValueProp);
- auto &ValueLoc = ValueRef->getReferentLoc();
- if (Env.getValue(ValueLoc) == nullptr) {
- // The property was previously set, but the value has been lost. This can
- // happen, for example, because of an environment merge (where the two
- // environments mapped the property to different values, which resulted in
- // them both being discarded), or when two blocks in the CFG, with neither
- // a dominator of the other, visit the same optional value, or even when a
- // block is revisited during testing to collect per-statement state.
- // FIXME: This situation means that the optional contents are not shared
- // between branches and the like. Practically, this lack of sharing
- // reduces the precision of the model when the contents are relevant to
- // the check, like another optional or a boolean that influences control
- // flow.
+ auto *ValuePtr = clang::cast<PointerValue>(ValueProp);
+ auto &ValueLoc = ValuePtr->getPointeeLoc();
+ if (Env.getValue(ValueLoc) != nullptr)
+ return &ValueLoc;
+
+ // The property was previously set, but the value has been lost. This can
+ // happen in various situations, for example:
+ // - Because of an environment merge (where the two environments mapped the
+ // property to different values, which resulted in them both being
+ // discarded).
+ // - When two blocks in the CFG, with neither a dominator of the other,
+ // visit the same optional value. (FIXME: This is something we can and
+ // should fix -- see also the lengthy FIXME below.)
+ // - Or even when a block is revisited during testing to collect
+ // per-statement state.
+ // FIXME: This situation means that the optional contents are not shared
+ // between branches and the like. Practically, this lack of sharing
+ // reduces the precision of the model when the contents are relevant to
+ // the check, like another optional or a boolean that influences control
+ // flow.
+ if (ValueLoc.getType()->isRecordType()) {
+ refreshStructValue(cast<AggregateStorageLocation>(ValueLoc), Env);
+ return &ValueLoc;
+ } else {
auto *ValueVal = Env.createValue(ValueLoc.getType());
if (ValueVal == nullptr)
return nullptr;
Env.setValue(ValueLoc, *ValueVal);
+ return &ValueLoc;
}
- return &ValueLoc;
}
- auto Ty = stripReference(Q);
- auto *ValueVal = Env.createValue(Ty);
- if (ValueVal == nullptr)
- return nullptr;
- auto &ValueLoc = Env.createStorageLocation(Ty);
- Env.setValue(ValueLoc, *ValueVal);
- auto ValueRef = std::make_unique<ReferenceValue>(ValueLoc);
- OptionalVal.setProperty("value", Env.takeOwnership(std::move(ValueRef)));
+ auto Ty = Q.getNonReferenceType();
+ auto &ValueLoc = Env.createObject(Ty);
+ auto &ValuePtr = Env.create<PointerValue>(ValueLoc);
+ // FIXME:
+ // The change we make to the `value` property below may become visible to
+ // other blocks that aren't successors of the current block and therefore
+ // don't see the change we made above mapping `ValueLoc` to `ValueVal`. For
+ // example:
+ //
+ // void target(optional<int> oo, bool b) {
+ // // `oo` is associated with a `StructValue` here, which we will call
+ // // `OptionalVal`.
+ //
+ // // The `has_value` property is set on `OptionalVal` (but not the
+ // // `value` property yet).
+ // if (!oo.has_value()) return;
+ //
+ // if (b) {
+ // // Let's assume we transfer the `if` branch first.
+ // //
+ // // This causes us to call `maybeInitializeOptionalValueMember()`,
+ // // which causes us to set the `value` property on `OptionalVal`
+ // // (which had not been set until this point). This `value` property
+ // // refers to a `PointerValue`, which in turn refers to a
+ // // StorageLocation` that is associated to an `IntegerValue`.
+ // oo.value();
+ // } else {
+ // // Let's assume we transfer the `else` branch after the `if` branch.
+ // //
+ // // We see the `value` property that the `if` branch set on
+ // // `OptionalVal`, but in the environment for this block, the
+ // // `StorageLocation` in the `PointerValue` is not associated with any
+ // // `Value`.
+ // oo.value();
+ // }
+ // }
+ //
+ // This situation is currently "saved" by the code above that checks whether
+ // the `value` property is already set, and if, the `ValueLoc` is not
+ // associated with a `ValueVal`, creates a new `ValueVal`.
+ //
+ // However, what we should really do is to make sure that the change to the
+ // `value` property does not "leak" to other blocks that are not successors
+ // of this block. To do this, instead of simply setting the `value` property
+ // on the existing `OptionalVal`, we should create a new `Value` for the
+ // optional, set the property on that, and associate the storage location that
+ // is currently associated with the existing `OptionalVal` with the newly
+ // created `Value` instead.
+ OptionalVal.setProperty("value", ValuePtr);
return &ValueLoc;
}
@@ -329,26 +412,34 @@ void initializeOptionalReference(const Expr *OptionalExpr,
}
/// Returns true if and only if `OptionalVal` is initialized and known to be
-/// empty in `Env.
+/// empty in `Env`.
bool isEmptyOptional(const Value &OptionalVal, const Environment &Env) {
auto *HasValueVal =
cast_or_null<BoolValue>(OptionalVal.getProperty("has_value"));
return HasValueVal != nullptr &&
- Env.flowConditionImplies(Env.makeNot(*HasValueVal));
+ Env.flowConditionImplies(Env.arena().makeNot(HasValueVal->formula()));
}
/// Returns true if and only if `OptionalVal` is initialized and known to be
-/// non-empty in `Env.
+/// non-empty in `Env`.
bool isNonEmptyOptional(const Value &OptionalVal, const Environment &Env) {
auto *HasValueVal =
cast_or_null<BoolValue>(OptionalVal.getProperty("has_value"));
- return HasValueVal != nullptr && Env.flowConditionImplies(*HasValueVal);
+ return HasValueVal != nullptr &&
+ Env.flowConditionImplies(HasValueVal->formula());
+}
+
+Value *getValueBehindPossiblePointer(const Expr &E, const Environment &Env) {
+ Value *Val = Env.getValue(E, SkipPast::Reference);
+ if (auto *PointerVal = dyn_cast_or_null<PointerValue>(Val))
+ return Env.getValue(PointerVal->getPointeeLoc());
+ return Val;
}
void transferUnwrapCall(const Expr *UnwrapExpr, const Expr *ObjectExpr,
LatticeTransferState &State) {
if (auto *OptionalVal =
- State.Env.getValue(*ObjectExpr, SkipPast::ReferenceThenPointer)) {
+ getValueBehindPossiblePointer(*ObjectExpr, State.Env)) {
if (State.Env.getStorageLocation(*UnwrapExpr, SkipPast::None) == nullptr)
if (auto *Loc = maybeInitializeOptionalValueMember(
UnwrapExpr->getType(), *OptionalVal, State.Env))
@@ -356,21 +447,31 @@ void transferUnwrapCall(const Expr *UnwrapExpr, const Expr *ObjectExpr,
}
}
+void transferArrowOpCall(const Expr *UnwrapExpr, const Expr *ObjectExpr,
+ LatticeTransferState &State) {
+ if (auto *OptionalVal =
+ getValueBehindPossiblePointer(*ObjectExpr, State.Env)) {
+ if (auto *Loc = maybeInitializeOptionalValueMember(
+ UnwrapExpr->getType()->getPointeeType(), *OptionalVal, State.Env)) {
+ State.Env.setValueStrict(*UnwrapExpr,
+ State.Env.create<PointerValue>(*Loc));
+ }
+ }
+}
+
void transferMakeOptionalCall(const CallExpr *E,
const MatchFinder::MatchResult &,
LatticeTransferState &State) {
- auto &Loc = State.Env.createStorageLocation(*E);
- State.Env.setStorageLocation(*E, Loc);
- State.Env.setValue(
- Loc, createOptionalValue(State.Env, State.Env.getBoolLiteralValue(true)));
+ createOptionalValue(State.Env.getResultObjectLocation(*E),
+ State.Env.getBoolLiteralValue(true), State.Env);
}
void transferOptionalHasValueCall(const CXXMemberCallExpr *CallExpr,
const MatchFinder::MatchResult &,
LatticeTransferState &State) {
if (auto *HasValueVal = getHasValue(
- State.Env, State.Env.getValue(*CallExpr->getImplicitObjectArgument(),
- SkipPast::ReferenceThenPointer))) {
+ State.Env, getValueBehindPossiblePointer(
+ *CallExpr->getImplicitObjectArgument(), State.Env))) {
auto &CallExprLoc = State.Env.createStorageLocation(*CallExpr);
State.Env.setValue(CallExprLoc, *HasValueVal);
State.Env.setStorageLocation(*CallExpr, CallExprLoc);
@@ -379,12 +480,11 @@ void transferOptionalHasValueCall(const CXXMemberCallExpr *CallExpr,
/// `ModelPred` builds a logical formula relating the predicate in
/// `ValueOrPredExpr` to the optional's `has_value` property.
-void transferValueOrImpl(const clang::Expr *ValueOrPredExpr,
- const MatchFinder::MatchResult &Result,
- LatticeTransferState &State,
- BoolValue &(*ModelPred)(Environment &Env,
- BoolValue &ExprVal,
- BoolValue &HasValueVal)) {
+void transferValueOrImpl(
+ const clang::Expr *ValueOrPredExpr, const MatchFinder::MatchResult &Result,
+ LatticeTransferState &State,
+ const Formula &(*ModelPred)(Environment &Env, const Formula &ExprVal,
+ const Formula &HasValueVal)) {
auto &Env = State.Env;
const auto *ObjectArgumentExpr =
@@ -392,29 +492,29 @@ void transferValueOrImpl(const clang::Expr *ValueOrPredExpr,
->getImplicitObjectArgument();
auto *HasValueVal = getHasValue(
- State.Env,
- State.Env.getValue(*ObjectArgumentExpr, SkipPast::ReferenceThenPointer));
+ State.Env, getValueBehindPossiblePointer(*ObjectArgumentExpr, State.Env));
if (HasValueVal == nullptr)
return;
- Env.addToFlowCondition(
- ModelPred(Env, forceBoolValue(Env, *ValueOrPredExpr), *HasValueVal));
+ Env.addToFlowCondition(ModelPred(Env, forceBoolValue(Env, *ValueOrPredExpr),
+ HasValueVal->formula()));
}
void transferValueOrStringEmptyCall(const clang::Expr *ComparisonExpr,
const MatchFinder::MatchResult &Result,
LatticeTransferState &State) {
return transferValueOrImpl(ComparisonExpr, Result, State,
- [](Environment &Env, BoolValue &ExprVal,
- BoolValue &HasValueVal) -> BoolValue & {
+ [](Environment &Env, const Formula &ExprVal,
+ const Formula &HasValueVal) -> const Formula & {
+ auto &A = Env.arena();
// If the result is *not* empty, then we know the
// optional must have been holding a value. If
// `ExprVal` is true, though, we don't learn
// anything definite about `has_value`, so we
// don't add any corresponding implications to
// the flow condition.
- return Env.makeImplication(Env.makeNot(ExprVal),
- HasValueVal);
+ return A.makeImplies(A.makeNot(ExprVal),
+ HasValueVal);
});
}
@@ -422,12 +522,13 @@ void transferValueOrNotEqX(const Expr *ComparisonExpr,
const MatchFinder::MatchResult &Result,
LatticeTransferState &State) {
transferValueOrImpl(ComparisonExpr, Result, State,
- [](Environment &Env, BoolValue &ExprVal,
- BoolValue &HasValueVal) -> BoolValue & {
+ [](Environment &Env, const Formula &ExprVal,
+ const Formula &HasValueVal) -> const Formula & {
+ auto &A = Env.arena();
// We know that if `(opt.value_or(X) != X)` then
// `opt.hasValue()`, even without knowing further
// details about the contents of `opt`.
- return Env.makeImplication(ExprVal, HasValueVal);
+ return A.makeImplies(ExprVal, HasValueVal);
});
}
@@ -437,18 +538,21 @@ void transferCallReturningOptional(const CallExpr *E,
if (State.Env.getStorageLocation(*E, SkipPast::None) != nullptr)
return;
- auto &Loc = State.Env.createStorageLocation(*E);
- State.Env.setStorageLocation(*E, Loc);
- State.Env.setValue(
- Loc, createOptionalValue(State.Env, State.Env.makeAtomicBoolValue()));
+ AggregateStorageLocation *Loc = nullptr;
+ if (E->isPRValue()) {
+ Loc = &State.Env.getResultObjectLocation(*E);
+ } else {
+ Loc = &cast<AggregateStorageLocation>(State.Env.createStorageLocation(*E));
+ State.Env.setStorageLocationStrict(*E, *Loc);
+ }
+
+ createOptionalValue(*Loc, State.Env.makeAtomicBoolValue(), State.Env);
}
-void assignOptionalValue(const Expr &E, Environment &Env,
- BoolValue &HasValueVal) {
- if (auto *OptionalLoc =
- Env.getStorageLocation(E, SkipPast::ReferenceThenPointer)) {
- Env.setValue(*OptionalLoc, createOptionalValue(Env, HasValueVal));
- }
+void constructOptionalValue(const Expr &E, Environment &Env,
+ BoolValue &HasValueVal) {
+ AggregateStorageLocation &Loc = Env.getResultObjectLocation(E);
+ Env.setValueStrict(E, createOptionalValue(Loc, HasValueVal, Env));
}
/// Returns a symbolic value for the "has_value" property of an `optional<T>`
@@ -460,11 +564,13 @@ BoolValue &valueOrConversionHasValue(const FunctionDecl &F, const Expr &E,
assert(F.getTemplateSpecializationArgs() != nullptr);
assert(F.getTemplateSpecializationArgs()->size() > 0);
- const int TemplateParamOptionalWrappersCount = countOptionalWrappers(
- *MatchRes.Context,
- stripReference(F.getTemplateSpecializationArgs()->get(0).getAsType()));
- const int ArgTypeOptionalWrappersCount =
- countOptionalWrappers(*MatchRes.Context, stripReference(E.getType()));
+ const int TemplateParamOptionalWrappersCount =
+ countOptionalWrappers(*MatchRes.Context, F.getTemplateSpecializationArgs()
+ ->get(0)
+ .getAsType()
+ .getNonReferenceType());
+ const int ArgTypeOptionalWrappersCount = countOptionalWrappers(
+ *MatchRes.Context, E.getType().getNonReferenceType());
// Check if this is a constructor/assignment call for `optional<T>` with
// argument of type `U` such that `T` is constructible from `U`.
@@ -484,25 +590,23 @@ void transferValueOrConversionConstructor(
LatticeTransferState &State) {
assert(E->getNumArgs() > 0);
- assignOptionalValue(*E, State.Env,
- valueOrConversionHasValue(*E->getConstructor(),
- *E->getArg(0), MatchRes,
- State));
+ constructOptionalValue(*E, State.Env,
+ valueOrConversionHasValue(*E->getConstructor(),
+ *E->getArg(0), MatchRes,
+ State));
}
void transferAssignment(const CXXOperatorCallExpr *E, BoolValue &HasValueVal,
LatticeTransferState &State) {
assert(E->getNumArgs() > 0);
- auto *OptionalLoc =
- State.Env.getStorageLocation(*E->getArg(0), SkipPast::Reference);
- if (OptionalLoc == nullptr)
- return;
+ if (auto *Loc = cast<AggregateStorageLocation>(
+ State.Env.getStorageLocationStrict(*E->getArg(0)))) {
+ createOptionalValue(*Loc, HasValueVal, State.Env);
- State.Env.setValue(*OptionalLoc, createOptionalValue(State.Env, HasValueVal));
-
- // Assign a storage location for the whole expression.
- State.Env.setStorageLocation(*E, *OptionalLoc);
+ // Assign a storage location for the whole expression.
+ State.Env.setStorageLocationStrict(*E, *Loc);
+ }
}
void transferValueOrConversionAssignment(
@@ -521,52 +625,69 @@ void transferNulloptAssignment(const CXXOperatorCallExpr *E,
transferAssignment(E, State.Env.getBoolLiteralValue(false), State);
}
-void transferSwap(const StorageLocation &OptionalLoc1,
- const StorageLocation &OptionalLoc2,
- LatticeTransferState &State) {
- auto *OptionalVal1 = State.Env.getValue(OptionalLoc1);
- assert(OptionalVal1 != nullptr);
+void transferSwap(AggregateStorageLocation *Loc1,
+ AggregateStorageLocation *Loc2, Environment &Env) {
+ // We account for cases where one or both of the optionals are not modeled,
+ // either lacking associated storage locations, or lacking values associated
+ // to such storage locations.
- auto *OptionalVal2 = State.Env.getValue(OptionalLoc2);
- assert(OptionalVal2 != nullptr);
+ if (Loc1 == nullptr) {
+ if (Loc2 != nullptr)
+ createOptionalValue(*Loc2, Env.makeAtomicBoolValue(), Env);
+ return;
+ }
+ if (Loc2 == nullptr) {
+ createOptionalValue(*Loc1, Env.makeAtomicBoolValue(), Env);
+ return;
+ }
- State.Env.setValue(OptionalLoc1, *OptionalVal2);
- State.Env.setValue(OptionalLoc2, *OptionalVal1);
+ // Both expressions have locations, though they may not have corresponding
+ // values. In that case, we create a fresh value at this point. Note that if
+ // two branches both do this, they will not share the value, but it at least
+ // allows for local reasoning about the value. To avoid the above, we would
+ // need *lazy* value allocation.
+ // FIXME: allocate values lazily, instead of just creating a fresh value.
+ BoolValue *BoolVal1 = getHasValue(Env, Env.getValue(*Loc1));
+ if (BoolVal1 == nullptr)
+ BoolVal1 = &Env.makeAtomicBoolValue();
+
+ BoolValue *BoolVal2 = getHasValue(Env, Env.getValue(*Loc2));
+ if (BoolVal2 == nullptr)
+ BoolVal2 = &Env.makeAtomicBoolValue();
+
+ createOptionalValue(*Loc1, *BoolVal2, Env);
+ createOptionalValue(*Loc2, *BoolVal1, Env);
}
void transferSwapCall(const CXXMemberCallExpr *E,
const MatchFinder::MatchResult &,
LatticeTransferState &State) {
assert(E->getNumArgs() == 1);
-
- auto *OptionalLoc1 = State.Env.getStorageLocation(
- *E->getImplicitObjectArgument(), SkipPast::ReferenceThenPointer);
- assert(OptionalLoc1 != nullptr);
-
- auto *OptionalLoc2 =
- State.Env.getStorageLocation(*E->getArg(0), SkipPast::Reference);
- assert(OptionalLoc2 != nullptr);
-
- transferSwap(*OptionalLoc1, *OptionalLoc2, State);
+ auto *OtherLoc = cast_or_null<AggregateStorageLocation>(
+ State.Env.getStorageLocationStrict(*E->getArg(0)));
+ transferSwap(getImplicitObjectLocation(*E, State.Env), OtherLoc, State.Env);
}
void transferStdSwapCall(const CallExpr *E, const MatchFinder::MatchResult &,
LatticeTransferState &State) {
assert(E->getNumArgs() == 2);
+ auto *Arg0Loc = cast_or_null<AggregateStorageLocation>(
+ State.Env.getStorageLocationStrict(*E->getArg(0)));
+ auto *Arg1Loc = cast_or_null<AggregateStorageLocation>(
+ State.Env.getStorageLocationStrict(*E->getArg(1)));
+ transferSwap(Arg0Loc, Arg1Loc, State.Env);
+}
- auto *OptionalLoc1 =
- State.Env.getStorageLocation(*E->getArg(0), SkipPast::Reference);
- assert(OptionalLoc1 != nullptr);
-
- auto *OptionalLoc2 =
- State.Env.getStorageLocation(*E->getArg(1), SkipPast::Reference);
- assert(OptionalLoc2 != nullptr);
+void transferStdForwardCall(const CallExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ assert(E->getNumArgs() == 1);
- transferSwap(*OptionalLoc1, *OptionalLoc2, State);
+ if (auto *Loc = State.Env.getStorageLocationStrict(*E->getArg(0)))
+ State.Env.setStorageLocationStrict(*E, *Loc);
}
-BoolValue &evaluateEquality(Environment &Env, BoolValue &EqVal, BoolValue &LHS,
- BoolValue &RHS) {
+const Formula &evaluateEquality(Arena &A, const Formula &EqVal,
+ const Formula &LHS, const Formula &RHS) {
// Logically, an optional<T> object is composed of two values - a `has_value`
// bit and a value of type T. Equality of optional objects compares both
// values. Therefore, merely comparing the `has_value` bits isn't sufficient:
@@ -581,37 +702,38 @@ BoolValue &evaluateEquality(Environment &Env, BoolValue &EqVal, BoolValue &LHS,
// b) (!LHS & !RHS) => EqVal
// If neither is set, then they are equal.
// We rewrite b) as !EqVal => (LHS v RHS), for a more compact formula.
- return Env.makeAnd(
- Env.makeImplication(
- EqVal, Env.makeOr(Env.makeAnd(LHS, RHS),
- Env.makeAnd(Env.makeNot(LHS), Env.makeNot(RHS)))),
- Env.makeImplication(Env.makeNot(EqVal), Env.makeOr(LHS, RHS)));
+ return A.makeAnd(
+ A.makeImplies(EqVal, A.makeOr(A.makeAnd(LHS, RHS),
+ A.makeAnd(A.makeNot(LHS), A.makeNot(RHS)))),
+ A.makeImplies(A.makeNot(EqVal), A.makeOr(LHS, RHS)));
}
void transferOptionalAndOptionalCmp(const clang::CXXOperatorCallExpr *CmpExpr,
const MatchFinder::MatchResult &,
LatticeTransferState &State) {
Environment &Env = State.Env;
+ auto &A = Env.arena();
auto *CmpValue = &forceBoolValue(Env, *CmpExpr);
if (auto *LHasVal = getHasValue(
Env, Env.getValue(*CmpExpr->getArg(0), SkipPast::Reference)))
if (auto *RHasVal = getHasValue(
Env, Env.getValue(*CmpExpr->getArg(1), SkipPast::Reference))) {
if (CmpExpr->getOperator() == clang::OO_ExclaimEqual)
- CmpValue = &State.Env.makeNot(*CmpValue);
- Env.addToFlowCondition(
- evaluateEquality(Env, *CmpValue, *LHasVal, *RHasVal));
+ CmpValue = &A.makeNot(*CmpValue);
+ Env.addToFlowCondition(evaluateEquality(A, *CmpValue, LHasVal->formula(),
+ RHasVal->formula()));
}
}
void transferOptionalAndValueCmp(const clang::CXXOperatorCallExpr *CmpExpr,
const clang::Expr *E, Environment &Env) {
+ auto &A = Env.arena();
auto *CmpValue = &forceBoolValue(Env, *CmpExpr);
if (auto *HasVal = getHasValue(Env, Env.getValue(*E, SkipPast::Reference))) {
if (CmpExpr->getOperator() == clang::OO_ExclaimEqual)
- CmpValue = &Env.makeNot(*CmpValue);
- Env.addToFlowCondition(evaluateEquality(Env, *CmpValue, *HasVal,
- Env.getBoolLiteralValue(true)));
+ CmpValue = &A.makeNot(*CmpValue);
+ Env.addToFlowCondition(
+ evaluateEquality(A, *CmpValue, HasVal->formula(), A.makeLiteral(true)));
}
}
@@ -629,7 +751,8 @@ ignorableOptional(const UncheckedOptionalAccessModelOptions &Options) {
StatementMatcher
valueCall(const std::optional<StatementMatcher> &IgnorableOptional) {
- return isOptionalMemberCallWithName("value", IgnorableOptional);
+ return isOptionalMemberCallWithNameMatcher(hasName("value"),
+ IgnorableOptional);
}
StatementMatcher
@@ -657,30 +780,29 @@ auto buildTransferMatchSwitch() {
isOptionalInPlaceConstructor(),
[](const CXXConstructExpr *E, const MatchFinder::MatchResult &,
LatticeTransferState &State) {
- assignOptionalValue(*E, State.Env,
- State.Env.getBoolLiteralValue(true));
+ constructOptionalValue(*E, State.Env,
+ State.Env.getBoolLiteralValue(true));
})
// nullopt_t::nullopt_t
.CaseOfCFGStmt<CXXConstructExpr>(
isNulloptConstructor(),
[](const CXXConstructExpr *E, const MatchFinder::MatchResult &,
LatticeTransferState &State) {
- assignOptionalValue(*E, State.Env,
- State.Env.getBoolLiteralValue(false));
+ constructOptionalValue(*E, State.Env,
+ State.Env.getBoolLiteralValue(false));
})
// optional::optional(nullopt_t)
.CaseOfCFGStmt<CXXConstructExpr>(
isOptionalNulloptConstructor(),
[](const CXXConstructExpr *E, const MatchFinder::MatchResult &,
LatticeTransferState &State) {
- assignOptionalValue(*E, State.Env,
- State.Env.getBoolLiteralValue(false));
+ constructOptionalValue(*E, State.Env,
+ State.Env.getBoolLiteralValue(false));
})
// optional::optional (value/conversion)
.CaseOfCFGStmt<CXXConstructExpr>(isOptionalValueOrConversionConstructor(),
transferValueOrConversionConstructor)
-
// optional::operator=
.CaseOfCFGStmt<CXXOperatorCallExpr>(
isOptionalValueOrConversionAssignment(),
@@ -696,49 +818,70 @@ auto buildTransferMatchSwitch() {
transferUnwrapCall(E, E->getImplicitObjectArgument(), State);
})
- // optional::operator*, optional::operator->
- .CaseOfCFGStmt<CallExpr>(valueOperatorCall(std::nullopt),
+ // optional::operator*
+ .CaseOfCFGStmt<CallExpr>(isOptionalOperatorCallWithName("*"),
[](const CallExpr *E,
const MatchFinder::MatchResult &,
LatticeTransferState &State) {
transferUnwrapCall(E, E->getArg(0), State);
})
- // optional::has_value
+ // optional::operator->
+ .CaseOfCFGStmt<CallExpr>(isOptionalOperatorCallWithName("->"),
+ [](const CallExpr *E,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ transferArrowOpCall(E, E->getArg(0), State);
+ })
+
+ // optional::has_value, optional::hasValue
+ // Of the supported optionals only folly::Optional uses hasValue, but this
+ // will also pass for other types
.CaseOfCFGStmt<CXXMemberCallExpr>(
- isOptionalMemberCallWithName("has_value"),
+ isOptionalMemberCallWithNameMatcher(
+ hasAnyName("has_value", "hasValue")),
transferOptionalHasValueCall)
// optional::operator bool
.CaseOfCFGStmt<CXXMemberCallExpr>(
- isOptionalMemberCallWithName("operator bool"),
+ isOptionalMemberCallWithNameMatcher(hasName("operator bool")),
transferOptionalHasValueCall)
// optional::emplace
.CaseOfCFGStmt<CXXMemberCallExpr>(
- isOptionalMemberCallWithName("emplace"),
+ isOptionalMemberCallWithNameMatcher(hasName("emplace")),
[](const CXXMemberCallExpr *E, const MatchFinder::MatchResult &,
LatticeTransferState &State) {
- assignOptionalValue(*E->getImplicitObjectArgument(), State.Env,
- State.Env.getBoolLiteralValue(true));
+ if (AggregateStorageLocation *Loc =
+ getImplicitObjectLocation(*E, State.Env)) {
+ createOptionalValue(*Loc, State.Env.getBoolLiteralValue(true),
+ State.Env);
+ }
})
// optional::reset
.CaseOfCFGStmt<CXXMemberCallExpr>(
- isOptionalMemberCallWithName("reset"),
+ isOptionalMemberCallWithNameMatcher(hasName("reset")),
[](const CXXMemberCallExpr *E, const MatchFinder::MatchResult &,
LatticeTransferState &State) {
- assignOptionalValue(*E->getImplicitObjectArgument(), State.Env,
- State.Env.getBoolLiteralValue(false));
+ if (AggregateStorageLocation *Loc =
+ getImplicitObjectLocation(*E, State.Env)) {
+ createOptionalValue(*Loc, State.Env.getBoolLiteralValue(false),
+ State.Env);
+ }
})
// optional::swap
- .CaseOfCFGStmt<CXXMemberCallExpr>(isOptionalMemberCallWithName("swap"),
- transferSwapCall)
+ .CaseOfCFGStmt<CXXMemberCallExpr>(
+ isOptionalMemberCallWithNameMatcher(hasName("swap")),
+ transferSwapCall)
// std::swap
.CaseOfCFGStmt<CallExpr>(isStdSwapCall(), transferStdSwapCall)
+ // std::forward
+ .CaseOfCFGStmt<CallExpr>(isStdForwardCall(), transferStdForwardCall)
+
// opt.value_or("").empty()
.CaseOfCFGStmt<Expr>(isValueOrStringEmptyCall(),
transferValueOrStringEmptyCall)
@@ -772,14 +915,12 @@ auto buildTransferMatchSwitch() {
.Build();
}
-std::vector<SourceLocation> diagnoseUnwrapCall(const Expr *UnwrapExpr,
- const Expr *ObjectExpr,
+std::vector<SourceLocation> diagnoseUnwrapCall(const Expr *ObjectExpr,
const Environment &Env) {
- if (auto *OptionalVal =
- Env.getValue(*ObjectExpr, SkipPast::ReferenceThenPointer)) {
+ if (auto *OptionalVal = getValueBehindPossiblePointer(*ObjectExpr, Env)) {
auto *Prop = OptionalVal->getProperty("has_value");
if (auto *HasValueVal = cast_or_null<BoolValue>(Prop)) {
- if (Env.flowConditionImplies(*HasValueVal))
+ if (Env.flowConditionImplies(HasValueVal->formula()))
return {};
}
}
@@ -802,16 +943,16 @@ auto buildDiagnoseMatchSwitch(
valueCall(IgnorableOptional),
[](const CXXMemberCallExpr *E, const MatchFinder::MatchResult &,
const Environment &Env) {
- return diagnoseUnwrapCall(E, E->getImplicitObjectArgument(), Env);
+ return diagnoseUnwrapCall(E->getImplicitObjectArgument(), Env);
})
// optional::operator*, optional::operator->
- .CaseOfCFGStmt<CallExpr>(
- valueOperatorCall(IgnorableOptional),
- [](const CallExpr *E, const MatchFinder::MatchResult &,
- const Environment &Env) {
- return diagnoseUnwrapCall(E, E->getArg(0), Env);
- })
+ .CaseOfCFGStmt<CallExpr>(valueOperatorCall(IgnorableOptional),
+ [](const CallExpr *E,
+ const MatchFinder::MatchResult &,
+ const Environment &Env) {
+ return diagnoseUnwrapCall(E->getArg(0), Env);
+ })
.Build();
}
@@ -826,10 +967,10 @@ UncheckedOptionalAccessModel::UncheckedOptionalAccessModel(ASTContext &Ctx)
: DataflowAnalysis<UncheckedOptionalAccessModel, NoopLattice>(Ctx),
TransferMatchSwitch(buildTransferMatchSwitch()) {}
-void UncheckedOptionalAccessModel::transfer(const CFGElement *Elt,
+void UncheckedOptionalAccessModel::transfer(const CFGElement &Elt,
NoopLattice &L, Environment &Env) {
LatticeTransferState State(L, Env);
- TransferMatchSwitch(*Elt, getASTContext(), State);
+ TransferMatchSwitch(Elt, getASTContext(), State);
}
ComparisonResult UncheckedOptionalAccessModel::compare(
@@ -839,10 +980,12 @@ ComparisonResult UncheckedOptionalAccessModel::compare(
return ComparisonResult::Unknown;
bool MustNonEmpty1 = isNonEmptyOptional(Val1, Env1);
bool MustNonEmpty2 = isNonEmptyOptional(Val2, Env2);
- if (MustNonEmpty1 && MustNonEmpty2) return ComparisonResult::Same;
+ if (MustNonEmpty1 && MustNonEmpty2)
+ return ComparisonResult::Same;
// If exactly one is true, then they're different, no reason to check whether
// they're definitely empty.
- if (MustNonEmpty1 || MustNonEmpty2) return ComparisonResult::Different;
+ if (MustNonEmpty1 || MustNonEmpty2)
+ return ComparisonResult::Different;
// Check if they're both definitely empty.
return (isEmptyOptional(Val1, Env1) && isEmptyOptional(Val2, Env2))
? ComparisonResult::Same
@@ -863,13 +1006,14 @@ bool UncheckedOptionalAccessModel::merge(QualType Type, const Value &Val1,
bool MustNonEmpty1 = isNonEmptyOptional(Val1, Env1);
bool MustNonEmpty2 = isNonEmptyOptional(Val2, Env2);
if (MustNonEmpty1 && MustNonEmpty2)
- MergedEnv.addToFlowCondition(HasValueVal);
+ MergedEnv.addToFlowCondition(HasValueVal.formula());
else if (
// Only make the costly calls to `isEmptyOptional` if we got "unknown"
// (false) for both calls to `isNonEmptyOptional`.
!MustNonEmpty1 && !MustNonEmpty2 && isEmptyOptional(Val1, Env1) &&
isEmptyOptional(Val2, Env2))
- MergedEnv.addToFlowCondition(MergedEnv.makeNot(HasValueVal));
+ MergedEnv.addToFlowCondition(
+ MergedEnv.arena().makeNot(HasValueVal.formula()));
setHasValue(MergedVal, HasValueVal);
return true;
}
@@ -892,7 +1036,8 @@ Value *UncheckedOptionalAccessModel::widen(QualType Type, Value &Prev,
if (isa<TopBoolValue>(CurrentHasVal))
return &Current;
}
- return &createOptionalValue(CurrentEnv, CurrentEnv.makeTopBoolValue());
+ return &createOptionalValue(cast<StructValue>(Current).getAggregateLoc(),
+ CurrentEnv.makeTopBoolValue(), CurrentEnv);
case ComparisonResult::Unknown:
return nullptr;
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/RecordOps.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/RecordOps.cpp
new file mode 100644
index 000000000000..60144531c251
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/RecordOps.cpp
@@ -0,0 +1,117 @@
+//===-- RecordOps.cpp -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Operations on records (structs, classes, and unions).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/RecordOps.h"
+
+#define DEBUG_TYPE "dataflow"
+
+void clang::dataflow::copyRecord(AggregateStorageLocation &Src,
+ AggregateStorageLocation &Dst,
+ Environment &Env) {
+ LLVM_DEBUG({
+ if (Dst.getType().getCanonicalType().getUnqualifiedType() !=
+ Src.getType().getCanonicalType().getUnqualifiedType()) {
+ llvm::dbgs() << "Source type " << Src.getType() << "\n";
+ llvm::dbgs() << "Destination type " << Dst.getType() << "\n";
+ }
+ });
+ assert(Dst.getType().getCanonicalType().getUnqualifiedType() ==
+ Src.getType().getCanonicalType().getUnqualifiedType());
+
+ for (auto [Field, SrcFieldLoc] : Src.children()) {
+ StorageLocation *DstFieldLoc = Dst.getChild(*Field);
+
+ assert(Field->getType()->isReferenceType() ||
+ (SrcFieldLoc != nullptr && DstFieldLoc != nullptr));
+
+ if (Field->getType()->isRecordType()) {
+ copyRecord(cast<AggregateStorageLocation>(*SrcFieldLoc),
+ cast<AggregateStorageLocation>(*DstFieldLoc), Env);
+ } else if (Field->getType()->isReferenceType()) {
+ Dst.setChild(*Field, SrcFieldLoc);
+ } else {
+ if (Value *Val = Env.getValue(*SrcFieldLoc))
+ Env.setValue(*DstFieldLoc, *Val);
+ else
+ Env.clearValue(*DstFieldLoc);
+ }
+ }
+
+ StructValue *SrcVal = cast_or_null<StructValue>(Env.getValue(Src));
+ StructValue *DstVal = cast_or_null<StructValue>(Env.getValue(Dst));
+
+ DstVal = &Env.create<StructValue>(Dst);
+ Env.setValue(Dst, *DstVal);
+
+ if (SrcVal == nullptr)
+ return;
+
+ for (const auto &[Name, Value] : SrcVal->properties()) {
+ if (Value != nullptr)
+ DstVal->setProperty(Name, *Value);
+ }
+}
+
+bool clang::dataflow::recordsEqual(const AggregateStorageLocation &Loc1,
+ const Environment &Env1,
+ const AggregateStorageLocation &Loc2,
+ const Environment &Env2) {
+ LLVM_DEBUG({
+ if (Loc2.getType().getCanonicalType().getUnqualifiedType() !=
+ Loc1.getType().getCanonicalType().getUnqualifiedType()) {
+ llvm::dbgs() << "Loc1 type " << Loc1.getType() << "\n";
+ llvm::dbgs() << "Loc2 type " << Loc2.getType() << "\n";
+ }
+ });
+ assert(Loc2.getType().getCanonicalType().getUnqualifiedType() ==
+ Loc1.getType().getCanonicalType().getUnqualifiedType());
+
+ for (auto [Field, FieldLoc1] : Loc1.children()) {
+ StorageLocation *FieldLoc2 = Loc2.getChild(*Field);
+
+ assert(Field->getType()->isReferenceType() ||
+ (FieldLoc1 != nullptr && FieldLoc2 != nullptr));
+
+ if (Field->getType()->isRecordType()) {
+ if (!recordsEqual(cast<AggregateStorageLocation>(*FieldLoc1), Env1,
+ cast<AggregateStorageLocation>(*FieldLoc2), Env2))
+ return false;
+ } else if (Field->getType()->isReferenceType()) {
+ if (FieldLoc1 != FieldLoc2)
+ return false;
+ } else if (Env1.getValue(*FieldLoc1) != Env2.getValue(*FieldLoc2)) {
+ return false;
+ }
+ }
+
+ llvm::StringMap<Value *> Props1, Props2;
+
+ if (StructValue *Val1 = cast_or_null<StructValue>(Env1.getValue(Loc1)))
+ for (const auto &[Name, Value] : Val1->properties())
+ Props1[Name] = Value;
+ if (StructValue *Val2 = cast_or_null<StructValue>(Env2.getValue(Loc2)))
+ for (const auto &[Name, Value] : Val2->properties())
+ Props2[Name] = Value;
+
+ if (Props1.size() != Props2.size())
+ return false;
+
+ for (const auto &[Name, Value] : Props1) {
+ auto It = Props2.find(Name);
+ if (It == Props2.end())
+ return false;
+ if (Value != It->second)
+ return false;
+ }
+
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
index 0e6c484b67e7..39faeca4b45c 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
@@ -23,6 +23,7 @@
#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/NoopAnalysis.h"
+#include "clang/Analysis/FlowSensitive/RecordOps.h"
#include "clang/Analysis/FlowSensitive/Value.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/OperatorKinds.h"
@@ -36,83 +37,44 @@
namespace clang {
namespace dataflow {
-static BoolValue &evaluateBooleanEquality(const Expr &LHS, const Expr &RHS,
- Environment &Env) {
- if (auto *LHSValue =
- dyn_cast_or_null<BoolValue>(Env.getValue(LHS, SkipPast::Reference)))
- if (auto *RHSValue =
- dyn_cast_or_null<BoolValue>(Env.getValue(RHS, SkipPast::Reference)))
- return Env.makeIff(*LHSValue, *RHSValue);
-
- return Env.makeAtomicBoolValue();
+const Environment *StmtToEnvMap::getEnvironment(const Stmt &S) const {
+ auto BlockIt = CFCtx.getStmtToBlock().find(&ignoreCFGOmittedNodes(S));
+ assert(BlockIt != CFCtx.getStmtToBlock().end());
+ if (!CFCtx.isBlockReachable(*BlockIt->getSecond()))
+ return nullptr;
+ const auto &State = BlockToState[BlockIt->getSecond()->getBlockID()];
+ assert(State);
+ return &State->Env;
}
-// Functionally updates `V` such that any instances of `TopBool` are replaced
-// with fresh atomic bools. Note: This implementation assumes that `B` is a
-// tree; if `B` is a DAG, it will lose any sharing between subvalues that was
-// present in the original .
-static BoolValue &unpackValue(BoolValue &V, Environment &Env);
+static BoolValue &evaluateBooleanEquality(const Expr &LHS, const Expr &RHS,
+ Environment &Env) {
+ Value *LHSValue = Env.getValueStrict(LHS);
+ Value *RHSValue = Env.getValueStrict(RHS);
-template <typename Derived, typename M>
-BoolValue &unpackBinaryBoolValue(Environment &Env, BoolValue &B, M build) {
- auto &V = *cast<Derived>(&B);
- BoolValue &Left = V.getLeftSubValue();
- BoolValue &Right = V.getRightSubValue();
- BoolValue &ULeft = unpackValue(Left, Env);
- BoolValue &URight = unpackValue(Right, Env);
+ if (LHSValue == RHSValue)
+ return Env.getBoolLiteralValue(true);
- if (&ULeft == &Left && &URight == &Right)
- return V;
+ if (auto *LHSBool = dyn_cast_or_null<BoolValue>(LHSValue))
+ if (auto *RHSBool = dyn_cast_or_null<BoolValue>(RHSValue))
+ return Env.makeIff(*LHSBool, *RHSBool);
- return (Env.*build)(ULeft, URight);
+ return Env.makeAtomicBoolValue();
}
static BoolValue &unpackValue(BoolValue &V, Environment &Env) {
- switch (V.getKind()) {
- case Value::Kind::Integer:
- case Value::Kind::Reference:
- case Value::Kind::Pointer:
- case Value::Kind::Struct:
- llvm_unreachable("BoolValue cannot have any of these kinds.");
-
- case Value::Kind::AtomicBool:
- return V;
-
- case Value::Kind::TopBool:
- // Unpack `TopBool` into a fresh atomic bool.
- return Env.makeAtomicBoolValue();
-
- case Value::Kind::Negation: {
- auto &N = *cast<NegationValue>(&V);
- BoolValue &Sub = N.getSubVal();
- BoolValue &USub = unpackValue(Sub, Env);
-
- if (&USub == &Sub)
- return V;
- return Env.makeNot(USub);
- }
- case Value::Kind::Conjunction:
- return unpackBinaryBoolValue<ConjunctionValue>(Env, V,
- &Environment::makeAnd);
- case Value::Kind::Disjunction:
- return unpackBinaryBoolValue<DisjunctionValue>(Env, V,
- &Environment::makeOr);
- case Value::Kind::Implication:
- return unpackBinaryBoolValue<ImplicationValue>(
- Env, V, &Environment::makeImplication);
- case Value::Kind::Biconditional:
- return unpackBinaryBoolValue<BiconditionalValue>(Env, V,
- &Environment::makeIff);
+ if (auto *Top = llvm::dyn_cast<TopBoolValue>(&V)) {
+ auto &A = Env.getDataflowAnalysisContext().arena();
+ return A.makeBoolValue(A.makeAtomRef(Top->getAtom()));
}
- llvm_unreachable("All reachable cases in switch return");
+ return V;
}
// Unpacks the value (if any) associated with `E` and updates `E` to the new
-// value, if any unpacking occured.
+// value, if any unpacking occured. Also, does the lvalue-to-rvalue conversion,
+// by skipping past the reference.
static Value *maybeUnpackLValueExpr(const Expr &E, Environment &Env) {
- // FIXME: this is too flexible: it _allows_ a reference, while it should
- // _require_ one, since lvalues should always be wrapped in `ReferenceValue`.
- auto *Loc = Env.getStorageLocation(E, SkipPast::Reference);
+ auto *Loc = Env.getStorageLocationStrict(E);
if (Loc == nullptr)
return nullptr;
auto *Val = Env.getValue(*Loc);
@@ -128,6 +90,31 @@ static Value *maybeUnpackLValueExpr(const Expr &E, Environment &Env) {
return &UnpackedVal;
}
+static void propagateValue(const Expr &From, const Expr &To, Environment &Env) {
+ if (auto *Val = Env.getValueStrict(From))
+ Env.setValueStrict(To, *Val);
+}
+
+static void propagateStorageLocation(const Expr &From, const Expr &To,
+ Environment &Env) {
+ if (auto *Loc = Env.getStorageLocationStrict(From))
+ Env.setStorageLocationStrict(To, *Loc);
+}
+
+// Propagates the value or storage location of `From` to `To` in cases where
+// `From` may be either a glvalue or a prvalue. `To` must be a glvalue iff
+// `From` is a glvalue.
+static void propagateValueOrStorageLocation(const Expr &From, const Expr &To,
+ Environment &Env) {
+ assert(From.isGLValue() == To.isGLValue());
+ if (From.isGLValue())
+ propagateStorageLocation(From, To, Env);
+ else
+ propagateValue(From, To, Env);
+}
+
+namespace {
+
class TransferVisitor : public ConstStmtVisitor<TransferVisitor> {
public:
TransferVisitor(const StmtToEnvMap &StmtToEnv, Environment &Env)
@@ -142,11 +129,11 @@ public:
switch (S->getOpcode()) {
case BO_Assign: {
- auto *LHSLoc = Env.getStorageLocation(*LHS, SkipPast::Reference);
+ auto *LHSLoc = Env.getStorageLocationStrict(*LHS);
if (LHSLoc == nullptr)
break;
- auto *RHSVal = Env.getValue(*RHS, SkipPast::Reference);
+ auto *RHSVal = Env.getValueStrict(*RHS);
if (RHSVal == nullptr)
break;
@@ -159,11 +146,12 @@ public:
}
case BO_LAnd:
case BO_LOr: {
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+
BoolValue &LHSVal = getLogicOperatorSubExprValue(*LHS);
BoolValue &RHSVal = getLogicOperatorSubExprValue(*RHS);
- auto &Loc = Env.createStorageLocation(*S);
- Env.setStorageLocation(*S, Loc);
if (S->getOpcode() == BO_LAnd)
Env.setValue(Loc, Env.makeAnd(LHSVal, RHSVal));
else
@@ -180,8 +168,7 @@ public:
break;
}
case BO_Comma: {
- if (auto *Loc = Env.getStorageLocation(*RHS, SkipPast::None))
- Env.setStorageLocation(*S, *Loc);
+ propagateValueOrStorageLocation(*RHS, *S, Env);
break;
}
default:
@@ -192,20 +179,20 @@ public:
void VisitDeclRefExpr(const DeclRefExpr *S) {
const ValueDecl *VD = S->getDecl();
assert(VD != nullptr);
- auto *DeclLoc = Env.getStorageLocation(*VD, SkipPast::None);
+
+ // `DeclRefExpr`s to fields and non-static methods aren't glvalues, and
+ // there's also no sensible `Value` we can assign to them, so skip them.
+ if (isa<FieldDecl>(VD))
+ return;
+ if (auto *Method = dyn_cast<CXXMethodDecl>(VD);
+ Method && !Method->isStatic())
+ return;
+
+ auto *DeclLoc = Env.getStorageLocation(*VD);
if (DeclLoc == nullptr)
return;
- if (VD->getType()->isReferenceType()) {
- assert(isa_and_nonnull<ReferenceValue>(Env.getValue((*DeclLoc))) &&
- "reference-typed declarations map to `ReferenceValue`s");
- Env.setStorageLocation(*S, *DeclLoc);
- } else {
- auto &Loc = Env.createStorageLocation(*S);
- auto &Val = Env.takeOwnership(std::make_unique<ReferenceValue>(*DeclLoc));
- Env.setStorageLocation(*S, Loc);
- Env.setValue(Loc, Val);
- }
+ Env.setStorageLocationStrict(*S, *DeclLoc);
}
void VisitDeclStmt(const DeclStmt *S) {
@@ -213,57 +200,27 @@ public:
// is safe.
const auto &D = *cast<VarDecl>(S->getSingleDecl());
+ ProcessVarDecl(D);
+ }
+
+ void ProcessVarDecl(const VarDecl &D) {
// Static local vars are already initialized in `Environment`.
if (D.hasGlobalStorage())
return;
- // The storage location for `D` could have been created earlier, before the
- // variable's declaration statement (for example, in the case of
- // BindingDecls).
- auto *MaybeLoc = Env.getStorageLocation(D, SkipPast::None);
- if (MaybeLoc == nullptr) {
- MaybeLoc = &Env.createStorageLocation(D);
- Env.setStorageLocation(D, *MaybeLoc);
- }
- auto &Loc = *MaybeLoc;
-
- const Expr *InitExpr = D.getInit();
- if (InitExpr == nullptr) {
- // No initializer expression - associate `Loc` with a new value.
- if (Value *Val = Env.createValue(D.getType()))
- Env.setValue(Loc, *Val);
+ // If this is the holding variable for a `BindingDecl`, we may already
+ // have a storage location set up -- so check. (See also explanation below
+ // where we process the `BindingDecl`.)
+ if (D.getType()->isReferenceType() && Env.getStorageLocation(D) != nullptr)
return;
- }
- if (D.getType()->isReferenceType()) {
- // Initializing a reference variable - do not create a reference to
- // reference.
- if (auto *InitExprLoc =
- Env.getStorageLocation(*InitExpr, SkipPast::Reference)) {
- auto &Val =
- Env.takeOwnership(std::make_unique<ReferenceValue>(*InitExprLoc));
- Env.setValue(Loc, Val);
- }
- } else if (auto *InitExprVal = Env.getValue(*InitExpr, SkipPast::None)) {
- Env.setValue(Loc, *InitExprVal);
- }
+ assert(Env.getStorageLocation(D) == nullptr);
- if (Env.getValue(Loc) == nullptr) {
- // We arrive here in (the few) cases where an expression is intentionally
- // "uninterpreted". There are two ways to handle this situation: propagate
- // the status, so that uninterpreted initializers result in uninterpreted
- // variables, or provide a default value. We choose the latter so that
- // later refinements of the variable can be used for reasoning about the
- // surrounding code.
- //
- // FIXME. If and when we interpret all language cases, change this to
- // assert that `InitExpr` is interpreted, rather than supplying a default
- // value (assuming we don't update the environment API to return
- // references).
- if (Value *Val = Env.createValue(D.getType()))
- Env.setValue(Loc, *Val);
- }
+ Env.setStorageLocation(D, Env.createObject(D));
+ // `DecompositionDecl` must be handled after we've interpreted the loc
+ // itself, because the binding expression refers back to the
+ // `DecompositionDecl` (even though it has no written name).
if (const auto *Decomp = dyn_cast<DecompositionDecl>(&D)) {
// If VarDecl is a DecompositionDecl, evaluate each of its bindings. This
// needs to be evaluated after initializing the values in the storage for
@@ -284,14 +241,16 @@ public:
if (auto *Loc = Env.getStorageLocation(*ME, SkipPast::Reference))
Env.setStorageLocation(*B, *Loc);
} else if (auto *VD = B->getHoldingVar()) {
- // Holding vars are used to back the BindingDecls of tuple-like
- // types. The holding var declarations appear *after* this statement,
- // so we have to create a location for them here to share with `B`. We
- // don't visit the binding, because we know it will be a DeclRefExpr
- // to `VD`.
- auto &VDLoc = Env.createStorageLocation(*VD);
- Env.setStorageLocation(*VD, VDLoc);
- Env.setStorageLocation(*B, VDLoc);
+ // Holding vars are used to back the `BindingDecl`s of tuple-like
+ // types. The holding var declarations appear after the
+ // `DecompositionDecl`, so we have to explicitly process them here
+ // to know their storage location. They will be processed a second
+ // time when we visit their `VarDecl`s, so we have code that protects
+ // against this above.
+ ProcessVarDecl(*VD);
+ auto *VDLoc = Env.getStorageLocation(*VD);
+ assert(VDLoc != nullptr);
+ Env.setStorageLocation(*B, *VDLoc);
}
}
}
@@ -306,21 +265,20 @@ public:
// This cast creates a new, boolean value from the integral value. We
// model that with a fresh value in the environment, unless it's already a
// boolean.
- auto &Loc = Env.createStorageLocation(*S);
- Env.setStorageLocation(*S, Loc);
- if (auto *SubExprVal = dyn_cast_or_null<BoolValue>(
- Env.getValue(*SubExpr, SkipPast::Reference)))
- Env.setValue(Loc, *SubExprVal);
+ if (auto *SubExprVal =
+ dyn_cast_or_null<BoolValue>(Env.getValueStrict(*SubExpr)))
+ Env.setValueStrict(*S, *SubExprVal);
else
// FIXME: If integer modeling is added, then update this code to create
// the boolean based on the integer model.
- Env.setValue(Loc, Env.makeAtomicBoolValue());
+ Env.setValueStrict(*S, Env.makeAtomicBoolValue());
break;
}
case CK_LValueToRValue: {
// When an L-value is used as an R-value, it may result in sharing, so we
- // need to unpack any nested `Top`s.
+ // need to unpack any nested `Top`s. We also need to strip off the
+ // `ReferenceValue` associated with the lvalue.
auto *SubExprVal = maybeUnpackLValueExpr(*SubExpr, Env);
if (SubExprVal == nullptr)
break;
@@ -344,17 +302,12 @@ public:
// CK_ConstructorConversion, and CK_UserDefinedConversion.
case CK_NoOp: {
// FIXME: Consider making `Environment::getStorageLocation` skip noop
- // expressions (this and other similar expressions in the file) instead of
- // assigning them storage locations.
- auto *SubExprLoc = Env.getStorageLocation(*SubExpr, SkipPast::None);
- if (SubExprLoc == nullptr)
- break;
-
- Env.setStorageLocation(*S, *SubExprLoc);
+ // expressions (this and other similar expressions in the file) instead
+ // of assigning them storage locations.
+ propagateValueOrStorageLocation(*SubExpr, *S, Env);
break;
}
- case CK_NullToPointer:
- case CK_NullToMemberPointer: {
+ case CK_NullToPointer: {
auto &Loc = Env.createStorageLocation(S->getType());
Env.setStorageLocation(*S, Loc);
@@ -363,6 +316,28 @@ public:
Env.setValue(Loc, NullPointerVal);
break;
}
+ case CK_NullToMemberPointer:
+ // FIXME: Implement pointers to members. For now, don't associate a value
+ // with this expression.
+ break;
+ case CK_FunctionToPointerDecay: {
+ StorageLocation *PointeeLoc =
+ Env.getStorageLocation(*SubExpr, SkipPast::Reference);
+ if (PointeeLoc == nullptr)
+ break;
+
+ auto &PointerLoc = Env.createStorageLocation(*S);
+ auto &PointerVal = Env.create<PointerValue>(*PointeeLoc);
+ Env.setStorageLocation(*S, PointerLoc);
+ Env.setValue(PointerLoc, PointerVal);
+ break;
+ }
+ case CK_BuiltinFnToFnPtr:
+ // Despite its name, the result type of `BuiltinFnToFnPtr` is a function,
+ // not a function pointer. In addition, builtin functions can only be
+ // called directly; it is not legal to take their address. We therefore
+ // don't need to create a value or storage location for them.
+ break;
default:
break;
}
@@ -374,37 +349,26 @@ public:
switch (S->getOpcode()) {
case UO_Deref: {
- // Skip past a reference to handle dereference of a dependent pointer.
- const auto *SubExprVal = cast_or_null<PointerValue>(
- Env.getValue(*SubExpr, SkipPast::Reference));
+ const auto *SubExprVal =
+ cast_or_null<PointerValue>(Env.getValueStrict(*SubExpr));
if (SubExprVal == nullptr)
break;
- auto &Loc = Env.createStorageLocation(*S);
- Env.setStorageLocation(*S, Loc);
- Env.setValue(Loc, Env.takeOwnership(std::make_unique<ReferenceValue>(
- SubExprVal->getPointeeLoc())));
+ Env.setStorageLocationStrict(*S, SubExprVal->getPointeeLoc());
break;
}
case UO_AddrOf: {
- // Do not form a pointer to a reference. If `SubExpr` is assigned a
- // `ReferenceValue` then form a value that points to the location of its
- // pointee.
- StorageLocation *PointeeLoc =
- Env.getStorageLocation(*SubExpr, SkipPast::Reference);
- if (PointeeLoc == nullptr)
+ // FIXME: Model pointers to members.
+ if (S->getType()->isMemberPointerType())
break;
- auto &PointerLoc = Env.createStorageLocation(*S);
- auto &PointerVal =
- Env.takeOwnership(std::make_unique<PointerValue>(*PointeeLoc));
- Env.setStorageLocation(*S, PointerLoc);
- Env.setValue(PointerLoc, PointerVal);
+ if (StorageLocation *PointeeLoc = Env.getStorageLocationStrict(*SubExpr))
+ Env.setValueStrict(*S, Env.create<PointerValue>(*PointeeLoc));
break;
}
case UO_LNot: {
auto *SubExprVal =
- dyn_cast_or_null<BoolValue>(Env.getValue(*SubExpr, SkipPast::None));
+ dyn_cast_or_null<BoolValue>(Env.getValueStrict(*SubExpr));
if (SubExprVal == nullptr)
break;
@@ -427,34 +391,46 @@ public:
auto &Loc = Env.createStorageLocation(*S);
Env.setStorageLocation(*S, Loc);
- Env.setValue(Loc, Env.takeOwnership(
- std::make_unique<PointerValue>(*ThisPointeeLoc)));
+ Env.setValue(Loc, Env.create<PointerValue>(*ThisPointeeLoc));
+ }
+
+ void VisitCXXNewExpr(const CXXNewExpr *S) {
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+ if (Value *Val = Env.createValue(S->getType()))
+ Env.setValue(Loc, *Val);
+ }
+
+ void VisitCXXDeleteExpr(const CXXDeleteExpr *S) {
+ // Empty method.
+ // We consciously don't do anything on deletes. Diagnosing double deletes
+ // (for example) should be done by a specific analysis, not by the
+ // framework.
}
void VisitReturnStmt(const ReturnStmt *S) {
- if (!Env.getAnalysisOptions().ContextSensitiveOpts)
+ if (!Env.getDataflowAnalysisContext().getOptions().ContextSensitiveOpts)
return;
auto *Ret = S->getRetValue();
if (Ret == nullptr)
return;
- auto *Val = Env.getValue(*Ret, SkipPast::None);
- if (Val == nullptr)
- return;
-
- // FIXME: Support reference-type returns.
- if (Val->getKind() == Value::Kind::Reference)
- return;
+ if (Ret->isPRValue()) {
+ auto *Val = Env.getValueStrict(*Ret);
+ if (Val == nullptr)
+ return;
- auto *Loc = Env.getReturnStorageLocation();
- assert(Loc != nullptr);
- // FIXME: Support reference-type returns.
- if (Loc->getType()->isReferenceType())
- return;
+ // FIXME: Model NRVO.
+ Env.setReturnValue(Val);
+ } else {
+ auto *Loc = Env.getStorageLocationStrict(*Ret);
+ if (Loc == nullptr)
+ return;
- // FIXME: Model NRVO.
- Env.setValue(*Loc, *Val);
+ // FIXME: Model NRVO.
+ Env.setReturnStorageLocation(Loc);
+ }
}
void VisitMemberExpr(const MemberExpr *S) {
@@ -471,72 +447,29 @@ public:
if (auto *D = dyn_cast<VarDecl>(Member)) {
if (D->hasGlobalStorage()) {
- auto *VarDeclLoc = Env.getStorageLocation(*D, SkipPast::None);
+ auto *VarDeclLoc = Env.getStorageLocation(*D);
if (VarDeclLoc == nullptr)
return;
- if (VarDeclLoc->getType()->isReferenceType()) {
- assert(isa_and_nonnull<ReferenceValue>(Env.getValue((*VarDeclLoc))) &&
- "reference-typed declarations map to `ReferenceValue`s");
- Env.setStorageLocation(*S, *VarDeclLoc);
- } else {
- auto &Loc = Env.createStorageLocation(*S);
- Env.setStorageLocation(*S, Loc);
- Env.setValue(Loc, Env.takeOwnership(
- std::make_unique<ReferenceValue>(*VarDeclLoc)));
- }
+ Env.setStorageLocation(*S, *VarDeclLoc);
return;
}
}
- // The receiver can be either a value or a pointer to a value. Skip past the
- // indirection to handle both cases.
- auto *BaseLoc = cast_or_null<AggregateStorageLocation>(
- Env.getStorageLocation(*S->getBase(), SkipPast::ReferenceThenPointer));
+ AggregateStorageLocation *BaseLoc = getBaseObjectLocation(*S, Env);
if (BaseLoc == nullptr)
return;
- auto &MemberLoc = BaseLoc->getChild(*Member);
- if (MemberLoc.getType()->isReferenceType()) {
- // Based on its type, `MemberLoc` must be mapped either to nothing or to a
- // `ReferenceValue`. For the former, we won't set a storage location for
- // this expression, so as to maintain an invariant lvalue expressions;
- // namely, that their location maps to a `ReferenceValue`. In this,
- // lvalues are unlike other expressions, where it is valid for their
- // location to map to nothing (because they are not modeled).
- //
- // Note: we need this invariant for lvalues so that, when accessing a
- // value, we can distinguish an rvalue from an lvalue. An alternative
- // design, which takes the expression's value category into account, would
- // avoid the need for this invariant.
- if (auto *V = Env.getValue(MemberLoc)) {
- assert(isa<ReferenceValue>(V) &&
- "reference-typed declarations map to `ReferenceValue`s");
- Env.setStorageLocation(*S, MemberLoc);
- }
- } else {
- auto &Loc = Env.createStorageLocation(*S);
- Env.setStorageLocation(*S, Loc);
- Env.setValue(
- Loc, Env.takeOwnership(std::make_unique<ReferenceValue>(MemberLoc)));
- }
+ auto *MemberLoc = BaseLoc->getChild(*Member);
+ if (MemberLoc == nullptr)
+ return;
+ Env.setStorageLocationStrict(*S, *MemberLoc);
}
void VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *S) {
const Expr *InitExpr = S->getExpr();
assert(InitExpr != nullptr);
-
- Value *InitExprVal = Env.getValue(*InitExpr, SkipPast::None);
- if (InitExprVal == nullptr)
- return;
-
- const FieldDecl *Field = S->getField();
- assert(Field != nullptr);
-
- auto &ThisLoc =
- *cast<AggregateStorageLocation>(Env.getThisPointeeStorageLocation());
- auto &FieldLoc = ThisLoc.getChild(*Field);
- Env.setValue(FieldLoc, *InitExprVal);
+ propagateValueOrStorageLocation(*InitExpr, *S, Env);
}
void VisitCXXConstructExpr(const CXXConstructExpr *S) {
@@ -544,29 +477,32 @@ public:
assert(ConstructorDecl != nullptr);
if (ConstructorDecl->isCopyOrMoveConstructor()) {
- assert(S->getNumArgs() == 1);
+ // It is permissible for a copy/move constructor to have additional
+ // parameters as long as they have default arguments defined for them.
+ assert(S->getNumArgs() != 0);
const Expr *Arg = S->getArg(0);
assert(Arg != nullptr);
- if (S->isElidable()) {
- auto *ArgLoc = Env.getStorageLocation(*Arg, SkipPast::Reference);
- if (ArgLoc == nullptr)
- return;
+ auto *ArgLoc = cast_or_null<AggregateStorageLocation>(
+ Env.getStorageLocation(*Arg, SkipPast::Reference));
+ if (ArgLoc == nullptr)
+ return;
+ if (S->isElidable()) {
Env.setStorageLocation(*S, *ArgLoc);
- } else if (auto *ArgVal = Env.getValue(*Arg, SkipPast::Reference)) {
- auto &Loc = Env.createStorageLocation(*S);
- Env.setStorageLocation(*S, Loc);
- Env.setValue(Loc, *ArgVal);
+ } else if (auto *ArgVal = cast_or_null<StructValue>(
+ Env.getValue(*Arg, SkipPast::Reference))) {
+ auto &Val = *cast<StructValue>(Env.createValue(S->getType()));
+ Env.setValueStrict(*S, Val);
+ copyRecord(ArgVal->getAggregateLoc(), Val.getAggregateLoc(), Env);
}
return;
}
- auto &Loc = Env.createStorageLocation(*S);
- Env.setStorageLocation(*S, Loc);
- if (Value *Val = Env.createValue(S->getType()))
- Env.setValue(Loc, *Val);
+ auto &InitialVal = *cast<StructValue>(Env.createValue(S->getType()));
+ copyRecord(InitialVal.getAggregateLoc(), Env.getResultObjectLocation(*S),
+ Env);
transferInlineCall(S, ConstructorDecl);
}
@@ -582,25 +518,23 @@ public:
assert(Arg1 != nullptr);
// Evaluate only copy and move assignment operators.
- auto *Arg0Type = Arg0->getType()->getUnqualifiedDesugaredType();
- auto *Arg1Type = Arg1->getType()->getUnqualifiedDesugaredType();
- if (Arg0Type != Arg1Type)
+ const auto *Method =
+ dyn_cast_or_null<CXXMethodDecl>(S->getDirectCallee());
+ if (!Method)
return;
-
- auto *ObjectLoc = Env.getStorageLocation(*Arg0, SkipPast::Reference);
- if (ObjectLoc == nullptr)
+ if (!Method->isCopyAssignmentOperator() &&
+ !Method->isMoveAssignmentOperator())
return;
- auto *Val = Env.getValue(*Arg1, SkipPast::Reference);
- if (Val == nullptr)
- return;
-
- // Assign a value to the storage location of the object.
- Env.setValue(*ObjectLoc, *Val);
+ auto *LocSrc = cast_or_null<AggregateStorageLocation>(
+ Env.getStorageLocationStrict(*Arg1));
+ auto *LocDst = cast_or_null<AggregateStorageLocation>(
+ Env.getStorageLocationStrict(*Arg0));
- // FIXME: Add a test for the value of the whole expression.
- // Assign a storage location for the whole expression.
- Env.setStorageLocation(*S, *ObjectLoc);
+ if (LocSrc != nullptr && LocDst != nullptr) {
+ copyRecord(*LocSrc, *LocDst, Env);
+ Env.setStorageLocationStrict(*S, *LocDst);
+ }
}
}
@@ -609,19 +543,13 @@ public:
const Expr *SubExpr = S->getSubExpr();
assert(SubExpr != nullptr);
- auto *SubExprLoc = Env.getStorageLocation(*SubExpr, SkipPast::None);
- if (SubExprLoc == nullptr)
- return;
-
- Env.setStorageLocation(*S, *SubExprLoc);
+ propagateValue(*SubExpr, *S, Env);
}
}
void VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) {
- auto &Loc = Env.createStorageLocation(*S);
- Env.setStorageLocation(*S, Loc);
if (Value *Val = Env.createValue(S->getType()))
- Env.setValue(Loc, *Val);
+ Env.setValueStrict(*S, *Val);
}
void VisitCallExpr(const CallExpr *S) {
@@ -659,22 +587,25 @@ public:
const Expr *SubExpr = S->getSubExpr();
assert(SubExpr != nullptr);
- auto *SubExprLoc = Env.getStorageLocation(*SubExpr, SkipPast::None);
- if (SubExprLoc == nullptr)
+ Value *SubExprVal = Env.getValueStrict(*SubExpr);
+ if (SubExprVal == nullptr)
return;
- Env.setStorageLocation(*S, *SubExprLoc);
+ if (StructValue *StructVal = dyn_cast<StructValue>(SubExprVal)) {
+ Env.setStorageLocation(*S, StructVal->getAggregateLoc());
+ return;
+ }
+
+ StorageLocation &Loc = Env.createStorageLocation(*S);
+ Env.setValue(Loc, *SubExprVal);
+ Env.setStorageLocation(*S, Loc);
}
void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *S) {
const Expr *SubExpr = S->getSubExpr();
assert(SubExpr != nullptr);
- auto *SubExprLoc = Env.getStorageLocation(*SubExpr, SkipPast::None);
- if (SubExprLoc == nullptr)
- return;
-
- Env.setStorageLocation(*S, *SubExprLoc);
+ propagateValue(*SubExpr, *S, Env);
}
void VisitCXXStaticCastExpr(const CXXStaticCastExpr *S) {
@@ -682,11 +613,7 @@ public:
const Expr *SubExpr = S->getSubExpr();
assert(SubExpr != nullptr);
- auto *SubExprLoc = Env.getStorageLocation(*SubExpr, SkipPast::None);
- if (SubExprLoc == nullptr)
- return;
-
- Env.setStorageLocation(*S, *SubExprLoc);
+ propagateValueOrStorageLocation(*SubExpr, *S, Env);
}
}
@@ -694,43 +621,52 @@ public:
// FIXME: Revisit this once flow conditions are added to the framework. For
// `a = b ? c : d` we can add `b => a == c && !b => a == d` to the flow
// condition.
- auto &Loc = Env.createStorageLocation(*S);
- Env.setStorageLocation(*S, Loc);
- if (Value *Val = Env.createValue(S->getType()))
- Env.setValue(Loc, *Val);
+ if (S->isGLValue())
+ Env.setStorageLocationStrict(*S, Env.createObject(S->getType()));
+ else if (Value *Val = Env.createValue(S->getType()))
+ Env.setValueStrict(*S, *Val);
}
void VisitInitListExpr(const InitListExpr *S) {
QualType Type = S->getType();
- auto &Loc = Env.createStorageLocation(*S);
- Env.setStorageLocation(*S, Loc);
+ if (!Type->isStructureOrClassType()) {
+ if (auto *Val = Env.createValue(Type))
+ Env.setValueStrict(*S, *Val);
- auto *Val = Env.createValue(Type);
- if (Val == nullptr)
return;
+ }
- Env.setValue(Loc, *Val);
-
- if (Type->isStructureOrClassType()) {
- for (auto It : llvm::zip(Type->getAsRecordDecl()->fields(), S->inits())) {
- const FieldDecl *Field = std::get<0>(It);
- assert(Field != nullptr);
+ std::vector<FieldDecl *> Fields =
+ getFieldsForInitListExpr(Type->getAsRecordDecl());
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
- const Expr *Init = std::get<1>(It);
- assert(Init != nullptr);
+ for (auto [Field, Init] : llvm::zip(Fields, S->inits())) {
+ assert(Field != nullptr);
+ assert(Init != nullptr);
- if (Value *InitVal = Env.getValue(*Init, SkipPast::None))
- cast<StructValue>(Val)->setChild(*Field, *InitVal);
- }
+ FieldLocs.insert({Field, &Env.createObject(Field->getType(), Init)});
}
+
+ auto &Loc =
+ Env.getDataflowAnalysisContext()
+ .arena()
+ .create<AggregateStorageLocation>(Type, std::move(FieldLocs));
+ StructValue &StructVal = Env.create<StructValue>(Loc);
+
+ Env.setValue(Loc, StructVal);
+
+ Env.setValueStrict(*S, StructVal);
+
// FIXME: Implement array initialization.
}
void VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *S) {
- auto &Loc = Env.createStorageLocation(*S);
- Env.setStorageLocation(*S, Loc);
- Env.setValue(Loc, Env.getBoolLiteralValue(S->getValue()));
+ Env.setValueStrict(*S, Env.getBoolLiteralValue(S->getValue()));
+ }
+
+ void VisitIntegerLiteral(const IntegerLiteral *S) {
+ Env.setValueStrict(*S, Env.getIntLiteralValue(S->getValue()));
}
void VisitParenExpr(const ParenExpr *S) {
@@ -752,27 +688,24 @@ public:
}
private:
+ /// Returns the value for the sub-expression `SubExpr` of a logic operator.
BoolValue &getLogicOperatorSubExprValue(const Expr &SubExpr) {
// `SubExpr` and its parent logic operator might be part of different basic
// blocks. We try to access the value that is assigned to `SubExpr` in the
// corresponding environment.
- if (const Environment *SubExprEnv = StmtToEnv.getEnvironment(SubExpr)) {
- if (auto *Val = dyn_cast_or_null<BoolValue>(
- SubExprEnv->getValue(SubExpr, SkipPast::Reference)))
+ if (const Environment *SubExprEnv = StmtToEnv.getEnvironment(SubExpr))
+ if (auto *Val =
+ dyn_cast_or_null<BoolValue>(SubExprEnv->getValueStrict(SubExpr)))
return *Val;
- }
- if (Env.getStorageLocation(SubExpr, SkipPast::None) == nullptr) {
- // Sub-expressions that are logic operators are not added in basic blocks
- // (e.g. see CFG for `bool d = a && (b || c);`). If `SubExpr` is a logic
- // operator, it may not have been evaluated and assigned a value yet. In
- // that case, we need to first visit `SubExpr` and then try to get the
- // value that gets assigned to it.
+ // The sub-expression may lie within a basic block that isn't reachable,
+ // even if we need it to evaluate the current (reachable) expression
+ // (see https://discourse.llvm.org/t/70775). In this case, visit `SubExpr`
+ // within the current environment and then try to get the value that gets
+ // assigned to it.
+ if (Env.getValueStrict(SubExpr) == nullptr)
Visit(&SubExpr);
- }
-
- if (auto *Val = dyn_cast_or_null<BoolValue>(
- Env.getValue(SubExpr, SkipPast::Reference)))
+ if (auto *Val = dyn_cast_or_null<BoolValue>(Env.getValueStrict(SubExpr)))
return *Val;
// If the value of `SubExpr` is still unknown, we create a fresh symbolic
@@ -784,12 +717,13 @@ private:
// `F` of `S`. The type `E` must be either `CallExpr` or `CXXConstructExpr`.
template <typename E>
void transferInlineCall(const E *S, const FunctionDecl *F) {
- const auto &Options = Env.getAnalysisOptions();
+ const auto &Options = Env.getDataflowAnalysisContext().getOptions();
if (!(Options.ContextSensitiveOpts &&
Env.canDescend(Options.ContextSensitiveOpts->Depth, F)))
return;
- const ControlFlowContext *CFCtx = Env.getControlFlowContext(F);
+ const ControlFlowContext *CFCtx =
+ Env.getDataflowAnalysisContext().getControlFlowContext(F);
if (!CFCtx)
return;
@@ -799,13 +733,6 @@ private:
auto ExitBlock = CFCtx->getCFG().getExit().getBlockID();
- if (const auto *NonConstructExpr = dyn_cast<CallExpr>(S)) {
- // Note that it is important for the storage location of `S` to be set
- // before `pushCall`, because the latter uses it to set the storage
- // location for `return`.
- auto &ReturnLoc = Env.createStorageLocation(*S);
- Env.setStorageLocation(*S, ReturnLoc);
- }
auto CalleeEnv = Env.pushCall(S);
// FIXME: Use the same analysis as the caller for the callee. Note,
@@ -821,16 +748,18 @@ private:
assert(BlockToOutputState);
assert(ExitBlock < BlockToOutputState->size());
- auto ExitState = (*BlockToOutputState)[ExitBlock];
+ auto &ExitState = (*BlockToOutputState)[ExitBlock];
assert(ExitState);
- Env.popCall(ExitState->Env);
+ Env.popCall(S, ExitState->Env);
}
const StmtToEnvMap &StmtToEnv;
Environment &Env;
};
+} // namespace
+
void transfer(const StmtToEnvMap &StmtToEnv, const Stmt &S, Environment &Env) {
TransferVisitor(StmtToEnv, Env).Visit(&S);
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
index b125701212c9..1b68d76ffc8c 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
@@ -18,6 +18,7 @@
#include <utility>
#include <vector>
+#include "clang/AST/ASTDumper.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/StmtVisitor.h"
@@ -26,6 +27,7 @@
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
#include "clang/Analysis/FlowSensitive/DataflowWorklist.h"
+#include "clang/Analysis/FlowSensitive/RecordOps.h"
#include "clang/Analysis/FlowSensitive/Transfer.h"
#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
#include "clang/Analysis/FlowSensitive/Value.h"
@@ -40,27 +42,6 @@
namespace clang {
namespace dataflow {
-class StmtToEnvMapImpl : public StmtToEnvMap {
-public:
- StmtToEnvMapImpl(
- const ControlFlowContext &CFCtx,
- llvm::ArrayRef<std::optional<TypeErasedDataflowAnalysisState>>
- BlockToState)
- : CFCtx(CFCtx), BlockToState(BlockToState) {}
-
- const Environment *getEnvironment(const Stmt &S) const override {
- auto BlockIt = CFCtx.getStmtToBlock().find(&ignoreCFGOmittedNodes(S));
- assert(BlockIt != CFCtx.getStmtToBlock().end());
- const auto &State = BlockToState[BlockIt->getSecond()->getBlockID()];
- assert(State);
- return &State->Env;
- }
-
-private:
- const ControlFlowContext &CFCtx;
- llvm::ArrayRef<std::optional<TypeErasedDataflowAnalysisState>> BlockToState;
-};
-
/// Returns the index of `Block` in the successors of `Pred`.
static int blockIndexInPredecessor(const CFGBlock &Pred,
const CFGBlock &Block) {
@@ -85,6 +66,8 @@ static bool isLoopHead(const CFGBlock &B) {
return false;
}
+namespace {
+
// The return type of the visit functions in TerminatorVisitor. The first
// element represents the terminator expression (that is the conditional
// expression in case of a path split in the CFG). The second element
@@ -142,26 +125,17 @@ public:
private:
TerminatorVisitorRetTy extendFlowCondition(const Expr &Cond) {
// The terminator sub-expression might not be evaluated.
- if (Env.getStorageLocation(Cond, SkipPast::None) == nullptr)
+ if (Env.getValueStrict(Cond) == nullptr)
transfer(StmtToEnv, Cond, Env);
- // FIXME: The flow condition must be an r-value, so `SkipPast::None` should
- // suffice.
- auto *Val =
- cast_or_null<BoolValue>(Env.getValue(Cond, SkipPast::Reference));
+ auto *Val = cast_or_null<BoolValue>(Env.getValueStrict(Cond));
// Value merging depends on flow conditions from different environments
// being mutually exclusive -- that is, they cannot both be true in their
// entirety (even if they may share some clauses). So, we need *some* value
// for the condition expression, even if just an atom.
if (Val == nullptr) {
- // FIXME: Consider introducing a helper for this get-or-create pattern.
- auto *Loc = Env.getStorageLocation(Cond, SkipPast::None);
- if (Loc == nullptr) {
- Loc = &Env.createStorageLocation(Cond);
- Env.setStorageLocation(Cond, *Loc);
- }
Val = &Env.makeAtomicBoolValue();
- Env.setValue(*Loc, *Val);
+ Env.setValueStrict(Cond, *Val);
}
bool ConditionValue = true;
@@ -172,7 +146,7 @@ private:
ConditionValue = false;
}
- Env.addToFlowCondition(*Val);
+ Env.addToFlowCondition(Val->formula());
return {&Cond, ConditionValue};
}
@@ -189,7 +163,11 @@ struct AnalysisContext {
llvm::ArrayRef<std::optional<TypeErasedDataflowAnalysisState>>
BlockStates)
: CFCtx(CFCtx), Analysis(Analysis), InitEnv(InitEnv),
- BlockStates(BlockStates) {}
+ Log(*InitEnv.getDataflowAnalysisContext().getOptions().Log),
+ BlockStates(BlockStates) {
+ Log.beginAnalysis(CFCtx, Analysis);
+ }
+ ~AnalysisContext() { Log.endAnalysis(); }
/// Contains the CFG being analyzed.
const ControlFlowContext &CFCtx;
@@ -197,11 +175,97 @@ struct AnalysisContext {
TypeErasedDataflowAnalysis &Analysis;
/// Initial state to start the analysis.
const Environment &InitEnv;
+ Logger &Log;
/// Stores the state of a CFG block if it has been evaluated by the analysis.
/// The indices correspond to the block IDs.
llvm::ArrayRef<std::optional<TypeErasedDataflowAnalysisState>> BlockStates;
};
+class PrettyStackTraceAnalysis : public llvm::PrettyStackTraceEntry {
+public:
+ PrettyStackTraceAnalysis(const ControlFlowContext &CFCtx, const char *Message)
+ : CFCtx(CFCtx), Message(Message) {}
+
+ void print(raw_ostream &OS) const override {
+ OS << Message << "\n";
+ OS << "Decl:\n";
+ CFCtx.getDecl()->dump(OS);
+ OS << "CFG:\n";
+ CFCtx.getCFG().print(OS, LangOptions(), false);
+ }
+
+private:
+ const ControlFlowContext &CFCtx;
+ const char *Message;
+};
+
+class PrettyStackTraceCFGElement : public llvm::PrettyStackTraceEntry {
+public:
+ PrettyStackTraceCFGElement(const CFGElement &Element, int BlockIdx,
+ int ElementIdx, const char *Message)
+ : Element(Element), BlockIdx(BlockIdx), ElementIdx(ElementIdx),
+ Message(Message) {}
+
+ void print(raw_ostream &OS) const override {
+ OS << Message << ": Element [B" << BlockIdx << "." << ElementIdx << "]\n";
+ if (auto Stmt = Element.getAs<CFGStmt>()) {
+ OS << "Stmt:\n";
+ ASTDumper Dumper(OS, false);
+ Dumper.Visit(Stmt->getStmt());
+ }
+ }
+
+private:
+ const CFGElement &Element;
+ int BlockIdx;
+ int ElementIdx;
+ const char *Message;
+};
+
+// Builds a joined TypeErasedDataflowAnalysisState from 0 or more sources,
+// each of which may be owned (built as part of the join) or external (a
+// reference to an Environment that will outlive the builder).
+// Avoids unneccesary copies of the environment.
+class JoinedStateBuilder {
+ AnalysisContext &AC;
+ std::vector<const TypeErasedDataflowAnalysisState *> All;
+ std::deque<TypeErasedDataflowAnalysisState> Owned;
+
+ TypeErasedDataflowAnalysisState
+ join(const TypeErasedDataflowAnalysisState &L,
+ const TypeErasedDataflowAnalysisState &R) {
+ return {AC.Analysis.joinTypeErased(L.Lattice, R.Lattice),
+ Environment::join(L.Env, R.Env, AC.Analysis)};
+ }
+
+public:
+ JoinedStateBuilder(AnalysisContext &AC) : AC(AC) {}
+
+ void addOwned(TypeErasedDataflowAnalysisState State) {
+ Owned.push_back(std::move(State));
+ All.push_back(&Owned.back());
+ }
+ void addUnowned(const TypeErasedDataflowAnalysisState &State) {
+ All.push_back(&State);
+ }
+ TypeErasedDataflowAnalysisState take() && {
+ if (All.empty())
+ // FIXME: Consider passing `Block` to Analysis.typeErasedInitialElement
+ // to enable building analyses like computation of dominators that
+ // initialize the state of each basic block differently.
+ return {AC.Analysis.typeErasedInitialElement(), AC.InitEnv.fork()};
+ if (All.size() == 1)
+ return Owned.empty() ? All.front()->fork() : std::move(Owned.front());
+
+ auto Result = join(*All[0], *All[1]);
+ for (unsigned I = 2; I < All.size(); ++I)
+ Result = join(Result, *All[I]);
+ return Result;
+ }
+};
+
+} // namespace
+
/// Computes the input state for a given basic block by joining the output
/// states of its predecessors.
///
@@ -212,8 +276,7 @@ struct AnalysisContext {
/// `std::nullopt` represent basic blocks that are not evaluated yet.
static TypeErasedDataflowAnalysisState
computeBlockInputState(const CFGBlock &Block, AnalysisContext &AC) {
- llvm::DenseSet<const CFGBlock *> Preds;
- Preds.insert(Block.pred_begin(), Block.pred_end());
+ std::vector<const CFGBlock *> Preds(Block.pred_begin(), Block.pred_end());
if (Block.getTerminator().isTemporaryDtorsBranch()) {
// This handles a special case where the code that produced the CFG includes
// a conditional operator with a branch that constructs a temporary and
@@ -237,17 +300,16 @@ computeBlockInputState(const CFGBlock &Block, AnalysisContext &AC) {
// operator includes a branch that contains a noreturn destructor call.
//
// See `NoreturnDestructorTest` for concrete examples.
- if (Block.succ_begin()->getReachableBlock()->hasNoReturnElement()) {
+ if (Block.succ_begin()->getReachableBlock() != nullptr &&
+ Block.succ_begin()->getReachableBlock()->hasNoReturnElement()) {
auto &StmtToBlock = AC.CFCtx.getStmtToBlock();
auto StmtBlock = StmtToBlock.find(Block.getTerminatorStmt());
assert(StmtBlock != StmtToBlock.end());
- Preds.erase(StmtBlock->getSecond());
+ llvm::erase_value(Preds, StmtBlock->getSecond());
}
}
- std::optional<TypeErasedDataflowAnalysisState> MaybeState;
-
- auto &Analysis = AC.Analysis;
+ JoinedStateBuilder Builder(AC);
for (const CFGBlock *Pred : Preds) {
// Skip if the `Block` is unreachable or control flow cannot get past it.
if (!Pred || Pred->hasNoReturnElement())
@@ -260,86 +322,109 @@ computeBlockInputState(const CFGBlock &Block, AnalysisContext &AC) {
if (!MaybePredState)
continue;
- TypeErasedDataflowAnalysisState PredState = *MaybePredState;
- if (Analysis.builtinOptions()) {
+ if (AC.Analysis.builtinOptions()) {
if (const Stmt *PredTerminatorStmt = Pred->getTerminatorStmt()) {
- const StmtToEnvMapImpl StmtToEnv(AC.CFCtx, AC.BlockStates);
+ // We have a terminator: we need to mutate an environment to describe
+ // when the terminator is taken. Copy now.
+ TypeErasedDataflowAnalysisState Copy = MaybePredState->fork();
+
+ const StmtToEnvMap StmtToEnv(AC.CFCtx, AC.BlockStates);
auto [Cond, CondValue] =
- TerminatorVisitor(StmtToEnv, PredState.Env,
+ TerminatorVisitor(StmtToEnv, Copy.Env,
blockIndexInPredecessor(*Pred, Block))
.Visit(PredTerminatorStmt);
if (Cond != nullptr)
// FIXME: Call transferBranchTypeErased even if BuiltinTransferOpts
// are not set.
- Analysis.transferBranchTypeErased(CondValue, Cond, PredState.Lattice,
- PredState.Env);
+ AC.Analysis.transferBranchTypeErased(CondValue, Cond, Copy.Lattice,
+ Copy.Env);
+ Builder.addOwned(std::move(Copy));
+ continue;
}
}
-
- if (MaybeState) {
- Analysis.joinTypeErased(MaybeState->Lattice, PredState.Lattice);
- MaybeState->Env.join(PredState.Env, Analysis);
- } else {
- MaybeState = std::move(PredState);
- }
- }
- if (!MaybeState) {
- // FIXME: Consider passing `Block` to `Analysis.typeErasedInitialElement()`
- // to enable building analyses like computation of dominators that
- // initialize the state of each basic block differently.
- MaybeState.emplace(Analysis.typeErasedInitialElement(), AC.InitEnv);
+ Builder.addUnowned(*MaybePredState);
+ continue;
}
- return *MaybeState;
+ return std::move(Builder).take();
}
/// Built-in transfer function for `CFGStmt`.
-void builtinTransferStatement(const CFGStmt &Elt,
- TypeErasedDataflowAnalysisState &InputState,
- AnalysisContext &AC) {
+static void
+builtinTransferStatement(const CFGStmt &Elt,
+ TypeErasedDataflowAnalysisState &InputState,
+ AnalysisContext &AC) {
const Stmt *S = Elt.getStmt();
assert(S != nullptr);
- transfer(StmtToEnvMapImpl(AC.CFCtx, AC.BlockStates), *S, InputState.Env);
+ transfer(StmtToEnvMap(AC.CFCtx, AC.BlockStates), *S, InputState.Env);
}
/// Built-in transfer function for `CFGInitializer`.
-void builtinTransferInitializer(const CFGInitializer &Elt,
- TypeErasedDataflowAnalysisState &InputState) {
+static void
+builtinTransferInitializer(const CFGInitializer &Elt,
+ TypeErasedDataflowAnalysisState &InputState) {
const CXXCtorInitializer *Init = Elt.getInitializer();
assert(Init != nullptr);
auto &Env = InputState.Env;
- const auto &ThisLoc =
- *cast<AggregateStorageLocation>(Env.getThisPointeeStorageLocation());
+ auto &ThisLoc = *Env.getThisPointeeStorageLocation();
- const FieldDecl *Member = Init->getMember();
- if (Member == nullptr)
- // Not a field initializer.
+ if (!Init->isAnyMemberInitializer())
+ // FIXME: Handle base initialization
return;
- auto *InitStmt = Init->getInit();
- assert(InitStmt != nullptr);
+ auto *InitExpr = Init->getInit();
+ assert(InitExpr != nullptr);
- auto *InitStmtLoc = Env.getStorageLocation(*InitStmt, SkipPast::Reference);
- if (InitStmtLoc == nullptr)
- return;
-
- auto *InitStmtVal = Env.getValue(*InitStmtLoc);
- if (InitStmtVal == nullptr)
- return;
-
- if (Member->getType()->isReferenceType()) {
- auto &MemberLoc = ThisLoc.getChild(*Member);
- Env.setValue(MemberLoc, Env.takeOwnership(std::make_unique<ReferenceValue>(
- *InitStmtLoc)));
+ const FieldDecl *Member = nullptr;
+ AggregateStorageLocation *ParentLoc = &ThisLoc;
+ StorageLocation *MemberLoc = nullptr;
+ if (Init->isMemberInitializer()) {
+ Member = Init->getMember();
+ MemberLoc = ThisLoc.getChild(*Member);
} else {
- auto &MemberLoc = ThisLoc.getChild(*Member);
- Env.setValue(MemberLoc, *InitStmtVal);
+ IndirectFieldDecl *IndirectField = Init->getIndirectMember();
+ assert(IndirectField != nullptr);
+ MemberLoc = &ThisLoc;
+ for (const auto *I : IndirectField->chain()) {
+ Member = cast<FieldDecl>(I);
+ ParentLoc = cast<AggregateStorageLocation>(MemberLoc);
+ MemberLoc = ParentLoc->getChild(*Member);
+ }
+ }
+ assert(Member != nullptr);
+ assert(MemberLoc != nullptr);
+
+ // FIXME: Instead of these case distinctions, we would ideally want to be able
+ // to simply use `Environment::createObject()` here, the same way that we do
+ // this in `TransferVisitor::VisitInitListExpr()`. However, this would require
+ // us to be able to build a list of fields that we then use to initialize an
+ // `AggregateStorageLocation` -- and the problem is that, when we get here,
+ // the `AggregateStorageLocation` already exists. We should explore if there's
+ // anything that we can do to change this.
+ if (Member->getType()->isReferenceType()) {
+ auto *InitExprLoc = Env.getStorageLocationStrict(*InitExpr);
+ if (InitExprLoc == nullptr)
+ return;
+
+ ParentLoc->setChild(*Member, InitExprLoc);
+ } else if (auto *InitExprVal = Env.getValueStrict(*InitExpr)) {
+ if (Member->getType()->isRecordType()) {
+ auto *InitValStruct = cast<StructValue>(InitExprVal);
+ // FIXME: Rather than performing a copy here, we should really be
+ // initializing the field in place. This would require us to propagate the
+ // storage location of the field to the AST node that creates the
+ // `StructValue`.
+ copyRecord(InitValStruct->getAggregateLoc(),
+ *cast<AggregateStorageLocation>(MemberLoc), Env);
+ } else {
+ Env.setValue(*MemberLoc, *InitExprVal);
+ }
}
}
-void builtinTransfer(const CFGElement &Elt,
- TypeErasedDataflowAnalysisState &State,
- AnalysisContext &AC) {
+static void builtinTransfer(const CFGElement &Elt,
+ TypeErasedDataflowAnalysisState &State,
+ AnalysisContext &AC) {
switch (Elt.getKind()) {
case CFGElement::Statement:
builtinTransferStatement(Elt.castAs<CFGStmt>(), State, AC);
@@ -348,7 +433,18 @@ void builtinTransfer(const CFGElement &Elt,
builtinTransferInitializer(Elt.castAs<CFGInitializer>(), State);
break;
default:
- // FIXME: Evaluate other kinds of `CFGElement`.
+ // FIXME: Evaluate other kinds of `CFGElement`, including:
+ // - When encountering `CFGLifetimeEnds`, remove the declaration from
+ // `Environment::DeclToLoc`. This would serve two purposes:
+ // a) Eliminate unnecessary clutter from `Environment::DeclToLoc`
+ // b) Allow us to implement an assertion that, when joining two
+ // `Environments`, the two `DeclToLoc` maps never contain entries that
+ // map the same declaration to different storage locations.
+ // Unfortunately, however, we can't currently process `CFGLifetimeEnds`
+ // because the corresponding CFG option `AddLifetime` is incompatible with
+  // the option `AddImplicitDtors`, which we already use. We will first
+ // need to modify the CFG implementation to make these two options
+ // compatible before we can process `CFGLifetimeEnds`.
break;
}
}
@@ -361,25 +457,33 @@ void builtinTransfer(const CFGElement &Elt,
/// user-specified analysis.
/// `PostVisitCFG` (if provided) will be applied to the element after evaluation
/// by the user-specified analysis.
-TypeErasedDataflowAnalysisState
+static TypeErasedDataflowAnalysisState
transferCFGBlock(const CFGBlock &Block, AnalysisContext &AC,
std::function<void(const CFGElement &,
const TypeErasedDataflowAnalysisState &)>
PostVisitCFG = nullptr) {
+ AC.Log.enterBlock(Block);
auto State = computeBlockInputState(Block, AC);
+ AC.Log.recordState(State);
+ int ElementIdx = 1;
for (const auto &Element : Block) {
+ PrettyStackTraceCFGElement CrashInfo(Element, Block.getBlockID(),
+ ElementIdx++, "transferCFGBlock");
+
+ AC.Log.enterElement(Element);
// Built-in analysis
if (AC.Analysis.builtinOptions()) {
builtinTransfer(Element, State, AC);
}
// User-provided analysis
- AC.Analysis.transferTypeErased(&Element, State.Lattice, State.Env);
+ AC.Analysis.transferTypeErased(Element, State.Lattice, State.Env);
// Post processing
if (PostVisitCFG) {
PostVisitCFG(Element, State);
}
+ AC.Log.recordState(State);
}
return State;
}
@@ -403,16 +507,18 @@ runTypeErasedDataflowAnalysis(
std::function<void(const CFGElement &,
const TypeErasedDataflowAnalysisState &)>
PostVisitCFG) {
+ PrettyStackTraceAnalysis CrashInfo(CFCtx, "runTypeErasedDataflowAnalysis");
+
PostOrderCFGView POV(&CFCtx.getCFG());
ForwardDataflowWorklist Worklist(CFCtx.getCFG(), &POV);
std::vector<std::optional<TypeErasedDataflowAnalysisState>> BlockStates(
- CFCtx.getCFG().size(), std::nullopt);
+ CFCtx.getCFG().size());
// The entry basic block doesn't contain statements so it can be skipped.
const CFGBlock &Entry = CFCtx.getCFG().getEntry();
BlockStates[Entry.getBlockID()] = {Analysis.typeErasedInitialElement(),
- InitEnv};
+ InitEnv.fork()};
Worklist.enqueueSuccessors(&Entry);
AnalysisContext AC(CFCtx, Analysis, InitEnv, BlockStates);
@@ -460,15 +566,18 @@ runTypeErasedDataflowAnalysis(
LatticeJoinEffect Effect2 =
NewBlockState.Env.widen(OldBlockState->Env, Analysis);
if (Effect1 == LatticeJoinEffect::Unchanged &&
- Effect2 == LatticeJoinEffect::Unchanged)
+ Effect2 == LatticeJoinEffect::Unchanged) {
// The state of `Block` didn't change from widening so there's no need
// to revisit its successors.
+ AC.Log.blockConverged();
continue;
+ }
} else if (Analysis.isEqualTypeErased(OldBlockState->Lattice,
NewBlockState.Lattice) &&
OldBlockState->Env.equivalentTo(NewBlockState.Env, Analysis)) {
// The state of `Block` didn't change after transfer so there's no need
// to revisit its successors.
+ AC.Log.blockConverged();
continue;
}
}
@@ -493,7 +602,7 @@ runTypeErasedDataflowAnalysis(
}
}
- return BlockStates;
+ return std::move(BlockStates);
}
} // namespace dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
index caa1ed266c5f..037886d09c4f 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
@@ -17,9 +17,10 @@
#include <queue>
#include <vector>
+#include "clang/Analysis/FlowSensitive/Formula.h"
#include "clang/Analysis/FlowSensitive/Solver.h"
-#include "clang/Analysis/FlowSensitive/Value.h"
#include "clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
@@ -78,7 +79,7 @@ using ClauseID = uint32_t;
static constexpr ClauseID NullClause = 0;
/// A boolean formula in conjunctive normal form.
-struct BooleanFormula {
+struct CNFFormula {
/// `LargestVar` is equal to the largest positive integer that represents a
/// variable in the formula.
const Variable LargestVar;
@@ -120,12 +121,12 @@ struct BooleanFormula {
/// clauses in the formula start from the element at index 1.
std::vector<ClauseID> NextWatched;
- /// Stores the variable identifier and value location for atomic booleans in
- /// the formula.
- llvm::DenseMap<Variable, AtomicBoolValue *> Atomics;
+ /// Stores the variable identifier and Atom for atomic booleans in the
+ /// formula.
+ llvm::DenseMap<Variable, Atom> Atomics;
- explicit BooleanFormula(Variable LargestVar,
- llvm::DenseMap<Variable, AtomicBoolValue *> Atomics)
+ explicit CNFFormula(Variable LargestVar,
+ llvm::DenseMap<Variable, Atom> Atomics)
: LargestVar(LargestVar), Atomics(std::move(Atomics)) {
Clauses.push_back(0);
ClauseStarts.push_back(0);
@@ -143,8 +144,8 @@ struct BooleanFormula {
///
/// All literals in the input that are not `NullLit` must be distinct.
void addClause(Literal L1, Literal L2 = NullLit, Literal L3 = NullLit) {
- // The literals are guaranteed to be distinct from properties of BoolValue
- // and the construction in `buildBooleanFormula`.
+ // The literals are guaranteed to be distinct from properties of Formula
+ // and the construction in `buildCNF`.
assert(L1 != NullLit && L1 != L2 && L1 != L3 &&
(L2 != L3 || L2 == NullLit));
@@ -177,98 +178,59 @@ struct BooleanFormula {
/// Converts the conjunction of `Vals` into a formula in conjunctive normal
/// form where each clause has at least one and at most three literals.
-BooleanFormula buildBooleanFormula(const llvm::DenseSet<BoolValue *> &Vals) {
+CNFFormula buildCNF(const llvm::ArrayRef<const Formula *> &Vals) {
// The general strategy of the algorithm implemented below is to map each
// of the sub-values in `Vals` to a unique variable and use these variables in
// the resulting CNF expression to avoid exponential blow up. The number of
// literals in the resulting formula is guaranteed to be linear in the number
- // of sub-values in `Vals`.
+ // of sub-formulas in `Vals`.
- // Map each sub-value in `Vals` to a unique variable.
- llvm::DenseMap<BoolValue *, Variable> SubValsToVar;
- // Store variable identifiers and value location of atomic booleans.
- llvm::DenseMap<Variable, AtomicBoolValue *> Atomics;
+ // Map each sub-formula in `Vals` to a unique variable.
+ llvm::DenseMap<const Formula *, Variable> SubValsToVar;
+ // Store variable identifiers and Atom of atomic booleans.
+ llvm::DenseMap<Variable, Atom> Atomics;
Variable NextVar = 1;
{
- std::queue<BoolValue *> UnprocessedSubVals;
- for (BoolValue *Val : Vals)
+ std::queue<const Formula *> UnprocessedSubVals;
+ for (const Formula *Val : Vals)
UnprocessedSubVals.push(Val);
while (!UnprocessedSubVals.empty()) {
Variable Var = NextVar;
- BoolValue *Val = UnprocessedSubVals.front();
+ const Formula *Val = UnprocessedSubVals.front();
UnprocessedSubVals.pop();
if (!SubValsToVar.try_emplace(Val, Var).second)
continue;
++NextVar;
- // Visit the sub-values of `Val`.
- switch (Val->getKind()) {
- case Value::Kind::Conjunction: {
- auto *C = cast<ConjunctionValue>(Val);
- UnprocessedSubVals.push(&C->getLeftSubValue());
- UnprocessedSubVals.push(&C->getRightSubValue());
- break;
- }
- case Value::Kind::Disjunction: {
- auto *D = cast<DisjunctionValue>(Val);
- UnprocessedSubVals.push(&D->getLeftSubValue());
- UnprocessedSubVals.push(&D->getRightSubValue());
- break;
- }
- case Value::Kind::Negation: {
- auto *N = cast<NegationValue>(Val);
- UnprocessedSubVals.push(&N->getSubVal());
- break;
- }
- case Value::Kind::Implication: {
- auto *I = cast<ImplicationValue>(Val);
- UnprocessedSubVals.push(&I->getLeftSubValue());
- UnprocessedSubVals.push(&I->getRightSubValue());
- break;
- }
- case Value::Kind::Biconditional: {
- auto *B = cast<BiconditionalValue>(Val);
- UnprocessedSubVals.push(&B->getLeftSubValue());
- UnprocessedSubVals.push(&B->getRightSubValue());
- break;
- }
- case Value::Kind::TopBool:
- // Nothing more to do. This `TopBool` instance has already been mapped
- // to a fresh solver variable (`NextVar`, above) and is thereafter
- // anonymous. The solver never sees `Top`.
- break;
- case Value::Kind::AtomicBool: {
- Atomics[Var] = cast<AtomicBoolValue>(Val);
- break;
- }
- default:
- llvm_unreachable("buildBooleanFormula: unhandled value kind");
- }
+ for (const Formula *F : Val->operands())
+ UnprocessedSubVals.push(F);
+ if (Val->kind() == Formula::AtomRef)
+ Atomics[Var] = Val->getAtom();
}
}
- auto GetVar = [&SubValsToVar](const BoolValue *Val) {
+ auto GetVar = [&SubValsToVar](const Formula *Val) {
auto ValIt = SubValsToVar.find(Val);
assert(ValIt != SubValsToVar.end());
return ValIt->second;
};
- BooleanFormula Formula(NextVar - 1, std::move(Atomics));
+ CNFFormula CNF(NextVar - 1, std::move(Atomics));
std::vector<bool> ProcessedSubVals(NextVar, false);
- // Add a conjunct for each variable that represents a top-level conjunction
- // value in `Vals`.
- for (BoolValue *Val : Vals)
- Formula.addClause(posLit(GetVar(Val)));
+ // Add a conjunct for each variable that represents a top-level formula in
+ // `Vals`.
+ for (const Formula *Val : Vals)
+ CNF.addClause(posLit(GetVar(Val)));
// Add conjuncts that represent the mapping between newly-created variables
- // and their corresponding sub-values.
- std::queue<BoolValue *> UnprocessedSubVals;
- for (BoolValue *Val : Vals)
+ // and their corresponding sub-formulas.
+ std::queue<const Formula *> UnprocessedSubVals;
+ for (const Formula *Val : Vals)
UnprocessedSubVals.push(Val);
while (!UnprocessedSubVals.empty()) {
- const BoolValue *Val = UnprocessedSubVals.front();
+ const Formula *Val = UnprocessedSubVals.front();
UnprocessedSubVals.pop();
const Variable Var = GetVar(Val);
@@ -276,117 +238,107 @@ BooleanFormula buildBooleanFormula(const llvm::DenseSet<BoolValue *> &Vals) {
continue;
ProcessedSubVals[Var] = true;
- if (auto *C = dyn_cast<ConjunctionValue>(Val)) {
- const Variable LeftSubVar = GetVar(&C->getLeftSubValue());
- const Variable RightSubVar = GetVar(&C->getRightSubValue());
+ switch (Val->kind()) {
+ case Formula::AtomRef:
+ break;
+ case Formula::And: {
+ const Variable LHS = GetVar(Val->operands()[0]);
+ const Variable RHS = GetVar(Val->operands()[1]);
- if (LeftSubVar == RightSubVar) {
+ if (LHS == RHS) {
// `X <=> (A ^ A)` is equivalent to `(!X v A) ^ (X v !A)` which is
// already in conjunctive normal form. Below we add each of the
// conjuncts of the latter expression to the result.
- Formula.addClause(negLit(Var), posLit(LeftSubVar));
- Formula.addClause(posLit(Var), negLit(LeftSubVar));
-
- // Visit a sub-value of `Val` (pick any, they are identical).
- UnprocessedSubVals.push(&C->getLeftSubValue());
+ CNF.addClause(negLit(Var), posLit(LHS));
+ CNF.addClause(posLit(Var), negLit(LHS));
} else {
// `X <=> (A ^ B)` is equivalent to `(!X v A) ^ (!X v B) ^ (X v !A v !B)`
// which is already in conjunctive normal form. Below we add each of the
// conjuncts of the latter expression to the result.
- Formula.addClause(negLit(Var), posLit(LeftSubVar));
- Formula.addClause(negLit(Var), posLit(RightSubVar));
- Formula.addClause(posLit(Var), negLit(LeftSubVar), negLit(RightSubVar));
-
- // Visit the sub-values of `Val`.
- UnprocessedSubVals.push(&C->getLeftSubValue());
- UnprocessedSubVals.push(&C->getRightSubValue());
+ CNF.addClause(negLit(Var), posLit(LHS));
+ CNF.addClause(negLit(Var), posLit(RHS));
+ CNF.addClause(posLit(Var), negLit(LHS), negLit(RHS));
}
- } else if (auto *D = dyn_cast<DisjunctionValue>(Val)) {
- const Variable LeftSubVar = GetVar(&D->getLeftSubValue());
- const Variable RightSubVar = GetVar(&D->getRightSubValue());
+ break;
+ }
+ case Formula::Or: {
+ const Variable LHS = GetVar(Val->operands()[0]);
+ const Variable RHS = GetVar(Val->operands()[1]);
- if (LeftSubVar == RightSubVar) {
+ if (LHS == RHS) {
// `X <=> (A v A)` is equivalent to `(!X v A) ^ (X v !A)` which is
// already in conjunctive normal form. Below we add each of the
// conjuncts of the latter expression to the result.
- Formula.addClause(negLit(Var), posLit(LeftSubVar));
- Formula.addClause(posLit(Var), negLit(LeftSubVar));
-
- // Visit a sub-value of `Val` (pick any, they are identical).
- UnprocessedSubVals.push(&D->getLeftSubValue());
+ CNF.addClause(negLit(Var), posLit(LHS));
+ CNF.addClause(posLit(Var), negLit(LHS));
} else {
- // `X <=> (A v B)` is equivalent to `(!X v A v B) ^ (X v !A) ^ (X v !B)`
- // which is already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- Formula.addClause(negLit(Var), posLit(LeftSubVar), posLit(RightSubVar));
- Formula.addClause(posLit(Var), negLit(LeftSubVar));
- Formula.addClause(posLit(Var), negLit(RightSubVar));
-
- // Visit the sub-values of `Val`.
- UnprocessedSubVals.push(&D->getLeftSubValue());
- UnprocessedSubVals.push(&D->getRightSubValue());
+ // `X <=> (A v B)` is equivalent to `(!X v A v B) ^ (X v !A) ^ (X v
+ // !B)` which is already in conjunctive normal form. Below we add each
+ // of the conjuncts of the latter expression to the result.
+ CNF.addClause(negLit(Var), posLit(LHS), posLit(RHS));
+ CNF.addClause(posLit(Var), negLit(LHS));
+ CNF.addClause(posLit(Var), negLit(RHS));
}
- } else if (auto *N = dyn_cast<NegationValue>(Val)) {
- const Variable SubVar = GetVar(&N->getSubVal());
-
- // `X <=> !Y` is equivalent to `(!X v !Y) ^ (X v Y)` which is already in
- // conjunctive normal form. Below we add each of the conjuncts of the
- // latter expression to the result.
- Formula.addClause(negLit(Var), negLit(SubVar));
- Formula.addClause(posLit(Var), posLit(SubVar));
-
- // Visit the sub-values of `Val`.
- UnprocessedSubVals.push(&N->getSubVal());
- } else if (auto *I = dyn_cast<ImplicationValue>(Val)) {
- const Variable LeftSubVar = GetVar(&I->getLeftSubValue());
- const Variable RightSubVar = GetVar(&I->getRightSubValue());
+ break;
+ }
+ case Formula::Not: {
+ const Variable Operand = GetVar(Val->operands()[0]);
+
+ // `X <=> !Y` is equivalent to `(!X v !Y) ^ (X v Y)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ CNF.addClause(negLit(Var), negLit(Operand));
+ CNF.addClause(posLit(Var), posLit(Operand));
+ break;
+ }
+ case Formula::Implies: {
+ const Variable LHS = GetVar(Val->operands()[0]);
+ const Variable RHS = GetVar(Val->operands()[1]);
// `X <=> (A => B)` is equivalent to
// `(X v A) ^ (X v !B) ^ (!X v !A v B)` which is already in
- // conjunctive normal form. Below we add each of the conjuncts of the
- // latter expression to the result.
- Formula.addClause(posLit(Var), posLit(LeftSubVar));
- Formula.addClause(posLit(Var), negLit(RightSubVar));
- Formula.addClause(negLit(Var), negLit(LeftSubVar), posLit(RightSubVar));
-
- // Visit the sub-values of `Val`.
- UnprocessedSubVals.push(&I->getLeftSubValue());
- UnprocessedSubVals.push(&I->getRightSubValue());
- } else if (auto *B = dyn_cast<BiconditionalValue>(Val)) {
- const Variable LeftSubVar = GetVar(&B->getLeftSubValue());
- const Variable RightSubVar = GetVar(&B->getRightSubValue());
-
- if (LeftSubVar == RightSubVar) {
+ // conjunctive normal form. Below we add each of the conjuncts of
+ // the latter expression to the result.
+ CNF.addClause(posLit(Var), posLit(LHS));
+ CNF.addClause(posLit(Var), negLit(RHS));
+ CNF.addClause(negLit(Var), negLit(LHS), posLit(RHS));
+ break;
+ }
+ case Formula::Equal: {
+ const Variable LHS = GetVar(Val->operands()[0]);
+ const Variable RHS = GetVar(Val->operands()[1]);
+
+ if (LHS == RHS) {
// `X <=> (A <=> A)` is equvalent to `X` which is already in
// conjunctive normal form. Below we add each of the conjuncts of the
// latter expression to the result.
- Formula.addClause(posLit(Var));
+ CNF.addClause(posLit(Var));
// No need to visit the sub-values of `Val`.
- } else {
- // `X <=> (A <=> B)` is equivalent to
- // `(X v A v B) ^ (X v !A v !B) ^ (!X v A v !B) ^ (!X v !A v B)` which is
- // already in conjunctive normal form. Below we add each of the conjuncts
- // of the latter expression to the result.
- Formula.addClause(posLit(Var), posLit(LeftSubVar), posLit(RightSubVar));
- Formula.addClause(posLit(Var), negLit(LeftSubVar), negLit(RightSubVar));
- Formula.addClause(negLit(Var), posLit(LeftSubVar), negLit(RightSubVar));
- Formula.addClause(negLit(Var), negLit(LeftSubVar), posLit(RightSubVar));
-
- // Visit the sub-values of `Val`.
- UnprocessedSubVals.push(&B->getLeftSubValue());
- UnprocessedSubVals.push(&B->getRightSubValue());
+ continue;
}
+ // `X <=> (A <=> B)` is equivalent to
+ // `(X v A v B) ^ (X v !A v !B) ^ (!X v A v !B) ^ (!X v !A v B)` which
+ // is already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ CNF.addClause(posLit(Var), posLit(LHS), posLit(RHS));
+ CNF.addClause(posLit(Var), negLit(LHS), negLit(RHS));
+ CNF.addClause(negLit(Var), posLit(LHS), negLit(RHS));
+ CNF.addClause(negLit(Var), negLit(LHS), posLit(RHS));
+ break;
}
+ }
+ for (const Formula *Child : Val->operands())
+ UnprocessedSubVals.push(Child);
}
- return Formula;
+ return CNF;
}
class WatchedLiteralsSolverImpl {
/// A boolean formula in conjunctive normal form that the solver will attempt
/// to prove satisfiable. The formula will be modified in the process.
- BooleanFormula Formula;
+ CNFFormula CNF;
/// The search for a satisfying assignment of the variables in `Formula` will
/// proceed in levels, starting from 1 and going up to `Formula.LargestVar`
@@ -438,9 +390,10 @@ class WatchedLiteralsSolverImpl {
std::vector<Variable> ActiveVars;
public:
- explicit WatchedLiteralsSolverImpl(const llvm::DenseSet<BoolValue *> &Vals)
- : Formula(buildBooleanFormula(Vals)), LevelVars(Formula.LargestVar + 1),
- LevelStates(Formula.LargestVar + 1) {
+ explicit WatchedLiteralsSolverImpl(
+ const llvm::ArrayRef<const Formula *> &Vals)
+ : CNF(buildCNF(Vals)), LevelVars(CNF.LargestVar + 1),
+ LevelStates(CNF.LargestVar + 1) {
assert(!Vals.empty());
// Initialize the state at the root level to a decision so that in
@@ -449,25 +402,31 @@ public:
LevelStates[0] = State::Decision;
// Initialize all variables as unassigned.
- VarAssignments.resize(Formula.LargestVar + 1, Assignment::Unassigned);
+ VarAssignments.resize(CNF.LargestVar + 1, Assignment::Unassigned);
// Initialize the active variables.
- for (Variable Var = Formula.LargestVar; Var != NullVar; --Var) {
+ for (Variable Var = CNF.LargestVar; Var != NullVar; --Var) {
if (isWatched(posLit(Var)) || isWatched(negLit(Var)))
ActiveVars.push_back(Var);
}
}
- Solver::Result solve() && {
+ // Returns the `Result` and the number of iterations "remaining" from
+ // `MaxIterations` (that is, `MaxIterations` - iterations in this call).
+ std::pair<Solver::Result, std::int64_t> solve(std::int64_t MaxIterations) && {
size_t I = 0;
while (I < ActiveVars.size()) {
+ if (MaxIterations == 0)
+ return std::make_pair(Solver::Result::TimedOut(), 0);
+ --MaxIterations;
+
// Assert that the following invariants hold:
// 1. All active variables are unassigned.
// 2. All active variables form watched literals.
// 3. Unassigned variables that form watched literals are active.
// FIXME: Consider replacing these with test cases that fail if the any
// of the invariants is broken. That might not be easy due to the
- // transformations performed by `buildBooleanFormula`.
+ // transformations performed by `buildCNF`.
assert(activeVarsAreUnassigned());
assert(activeVarsFormWatchedLiterals());
assert(unassignedVarsFormingWatchedLiteralsAreActive());
@@ -487,7 +446,7 @@ public:
// If the root level is reached, then all possible assignments lead to
// a conflict.
if (Level == 0)
- return Solver::Result::Unsatisfiable();
+ return std::make_pair(Solver::Result::Unsatisfiable(), MaxIterations);
// Otherwise, take the other branch at the most recent level where a
// decision was made.
@@ -544,16 +503,14 @@ public:
++I;
}
}
- return Solver::Result::Satisfiable(buildSolution());
+ return std::make_pair(Solver::Result::Satisfiable(buildSolution()), MaxIterations);
}
private:
- /// Returns a satisfying truth assignment to the atomic values in the boolean
- /// formula.
- llvm::DenseMap<AtomicBoolValue *, Solver::Result::Assignment>
- buildSolution() {
- llvm::DenseMap<AtomicBoolValue *, Solver::Result::Assignment> Solution;
- for (auto &Atomic : Formula.Atomics) {
+ /// Returns a satisfying truth assignment to the atoms in the boolean formula.
+ llvm::DenseMap<Atom, Solver::Result::Assignment> buildSolution() {
+ llvm::DenseMap<Atom, Solver::Result::Assignment> Solution;
+ for (auto &Atomic : CNF.Atomics) {
// A variable may have a definite true/false assignment, or it may be
// unassigned indicating its truth value does not affect the result of
// the formula. Unassigned variables are assigned to true as a default.
@@ -589,24 +546,24 @@ private:
const Literal FalseLit = VarAssignments[Var] == Assignment::AssignedTrue
? negLit(Var)
: posLit(Var);
- ClauseID FalseLitWatcher = Formula.WatchedHead[FalseLit];
- Formula.WatchedHead[FalseLit] = NullClause;
+ ClauseID FalseLitWatcher = CNF.WatchedHead[FalseLit];
+ CNF.WatchedHead[FalseLit] = NullClause;
while (FalseLitWatcher != NullClause) {
- const ClauseID NextFalseLitWatcher = Formula.NextWatched[FalseLitWatcher];
+ const ClauseID NextFalseLitWatcher = CNF.NextWatched[FalseLitWatcher];
// Pick the first non-false literal as the new watched literal.
- const size_t FalseLitWatcherStart = Formula.ClauseStarts[FalseLitWatcher];
+ const size_t FalseLitWatcherStart = CNF.ClauseStarts[FalseLitWatcher];
size_t NewWatchedLitIdx = FalseLitWatcherStart + 1;
- while (isCurrentlyFalse(Formula.Clauses[NewWatchedLitIdx]))
+ while (isCurrentlyFalse(CNF.Clauses[NewWatchedLitIdx]))
++NewWatchedLitIdx;
- const Literal NewWatchedLit = Formula.Clauses[NewWatchedLitIdx];
+ const Literal NewWatchedLit = CNF.Clauses[NewWatchedLitIdx];
const Variable NewWatchedLitVar = var(NewWatchedLit);
// Swap the old watched literal for the new one in `FalseLitWatcher` to
// maintain the invariant that the watched literal is at the beginning of
// the clause.
- Formula.Clauses[NewWatchedLitIdx] = FalseLit;
- Formula.Clauses[FalseLitWatcherStart] = NewWatchedLit;
+ CNF.Clauses[NewWatchedLitIdx] = FalseLit;
+ CNF.Clauses[FalseLitWatcherStart] = NewWatchedLit;
// If the new watched literal isn't watched by any other clause and its
// variable isn't assigned we need to add it to the active variables.
@@ -614,8 +571,8 @@ private:
VarAssignments[NewWatchedLitVar] == Assignment::Unassigned)
ActiveVars.push_back(NewWatchedLitVar);
- Formula.NextWatched[FalseLitWatcher] = Formula.WatchedHead[NewWatchedLit];
- Formula.WatchedHead[NewWatchedLit] = FalseLitWatcher;
+ CNF.NextWatched[FalseLitWatcher] = CNF.WatchedHead[NewWatchedLit];
+ CNF.WatchedHead[NewWatchedLit] = FalseLitWatcher;
// Go to the next clause that watches `FalseLit`.
FalseLitWatcher = NextFalseLitWatcher;
@@ -625,16 +582,15 @@ private:
/// Returns true if and only if one of the clauses that watch `Lit` is a unit
/// clause.
bool watchedByUnitClause(Literal Lit) const {
- for (ClauseID LitWatcher = Formula.WatchedHead[Lit];
- LitWatcher != NullClause;
- LitWatcher = Formula.NextWatched[LitWatcher]) {
- llvm::ArrayRef<Literal> Clause = Formula.clauseLiterals(LitWatcher);
+ for (ClauseID LitWatcher = CNF.WatchedHead[Lit]; LitWatcher != NullClause;
+ LitWatcher = CNF.NextWatched[LitWatcher]) {
+ llvm::ArrayRef<Literal> Clause = CNF.clauseLiterals(LitWatcher);
// Assert the invariant that the watched literal is always the first one
// in the clause.
// FIXME: Consider replacing this with a test case that fails if the
// invariant is broken by `updateWatchedLiterals`. That might not be easy
- // due to the transformations performed by `buildBooleanFormula`.
+ // due to the transformations performed by `buildCNF`.
assert(Clause.front() == Lit);
if (isUnit(Clause))
@@ -658,7 +614,7 @@ private:
/// Returns true if and only if `Lit` is watched by a clause in `Formula`.
bool isWatched(Literal Lit) const {
- return Formula.WatchedHead[Lit] != NullClause;
+ return CNF.WatchedHead[Lit] != NullClause;
}
/// Returns an assignment for an unassigned variable.
@@ -671,8 +627,8 @@ private:
/// Returns a set of all watched literals.
llvm::DenseSet<Literal> watchedLiterals() const {
llvm::DenseSet<Literal> WatchedLiterals;
- for (Literal Lit = 2; Lit < Formula.WatchedHead.size(); Lit++) {
- if (Formula.WatchedHead[Lit] == NullClause)
+ for (Literal Lit = 2; Lit < CNF.WatchedHead.size(); Lit++) {
+ if (CNF.WatchedHead[Lit] == NullClause)
continue;
WatchedLiterals.insert(Lit);
}
@@ -712,9 +668,14 @@ private:
}
};
-Solver::Result WatchedLiteralsSolver::solve(llvm::DenseSet<BoolValue *> Vals) {
- return Vals.empty() ? Solver::Result::Satisfiable({{}})
- : WatchedLiteralsSolverImpl(Vals).solve();
+Solver::Result
+WatchedLiteralsSolver::solve(llvm::ArrayRef<const Formula *> Vals) {
+ if (Vals.empty())
+ return Solver::Result::Satisfiable({{}});
+ auto [Res, Iterations] =
+ WatchedLiteralsSolverImpl(Vals).solve(MaxIterations);
+ MaxIterations = Iterations;
+ return Res;
}
} // namespace dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/IntervalPartition.cpp b/contrib/llvm-project/clang/lib/Analysis/IntervalPartition.cpp
new file mode 100644
index 000000000000..9d093d8986f7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/IntervalPartition.cpp
@@ -0,0 +1,116 @@
+//===- IntervalPartition.cpp - CFG Partitioning into Intervals --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functionality for partitioning a CFG into intervals.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/IntervalPartition.h"
+#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/BitVector.h"
+#include <queue>
+#include <set>
+#include <vector>
+
+namespace clang {
+
+static CFGInterval buildInterval(llvm::BitVector &Partitioned,
+ const CFGBlock &Header) {
+ CFGInterval Interval(&Header);
+ Partitioned.set(Header.getBlockID());
+
+ // Elements must not be null. Duplicates are prevented using `Workset`, below.
+ std::queue<const CFGBlock *> Worklist;
+ llvm::BitVector Workset(Header.getParent()->getNumBlockIDs(), false);
+ for (const CFGBlock *S : Header.succs())
+ if (S != nullptr)
+ if (auto SID = S->getBlockID(); !Partitioned.test(SID)) {
+ // Successors are unique, so we don't test against `Workset` before
+ // adding to `Worklist`.
+ Worklist.push(S);
+ Workset.set(SID);
+ }
+
+ // Contains successors of blocks in the interval that couldn't be added to the
+ // interval on their first encounter. This occurs when they have a predecessor
+ // that is either definitively outside the interval or hasn't been considered
+ // yet. In the latter case, we'll revisit the block through some other path
+ // from the interval. At the end of processing the worklist, we filter out any
+ // that ended up in the interval to produce the output set of interval
+ // successors. It may contain duplicates -- ultimately, all relevant elements
+ // are added to `Interval.Successors`, which is a set.
+ std::vector<const CFGBlock *> MaybeSuccessors;
+
+ while (!Worklist.empty()) {
+ const auto *B = Worklist.front();
+ auto ID = B->getBlockID();
+ Worklist.pop();
+ Workset.reset(ID);
+
+ // Check whether all predecessors are in the interval, in which case `B`
+ // is included as well.
+ bool AllInInterval = true;
+ for (const CFGBlock *P : B->preds())
+ if (Interval.Blocks.find(P) == Interval.Blocks.end()) {
+ MaybeSuccessors.push_back(B);
+ AllInInterval = false;
+ break;
+ }
+ if (AllInInterval) {
+ Interval.Blocks.insert(B);
+ Partitioned.set(ID);
+ for (const CFGBlock *S : B->succs())
+ if (S != nullptr)
+ if (auto SID = S->getBlockID();
+ !Partitioned.test(SID) && !Workset.test(SID)) {
+ Worklist.push(S);
+ Workset.set(SID);
+ }
+ }
+ }
+
+ // Any block successors not in the current interval are interval successors.
+ for (const CFGBlock *B : MaybeSuccessors)
+ if (Interval.Blocks.find(B) == Interval.Blocks.end())
+ Interval.Successors.insert(B);
+
+ return Interval;
+}
+
+CFGInterval buildInterval(const CFG &Cfg, const CFGBlock &Header) {
+ llvm::BitVector Partitioned(Cfg.getNumBlockIDs(), false);
+ return buildInterval(Partitioned, Header);
+}
+
+std::vector<CFGInterval> partitionIntoIntervals(const CFG &Cfg) {
+ std::vector<CFGInterval> Intervals;
+ llvm::BitVector Partitioned(Cfg.getNumBlockIDs(), false);
+ auto &EntryBlock = Cfg.getEntry();
+ Intervals.push_back(buildInterval(Partitioned, EntryBlock));
+
+ std::queue<const CFGBlock *> Successors;
+ for (const auto *S : Intervals[0].Successors)
+ Successors.push(S);
+
+ while (!Successors.empty()) {
+ const auto *B = Successors.front();
+ Successors.pop();
+ if (Partitioned.test(B->getBlockID()))
+ continue;
+
+ // B has not been partitioned, but it has a predecessor that has.
+ CFGInterval I = buildInterval(Partitioned, *B);
+ for (const auto *S : I.Successors)
+ Successors.push(S);
+ Intervals.push_back(std::move(I));
+ }
+
+ return Intervals;
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp b/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
index 5cc63bb17b09..1bf0d9aec862 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/ReachableCode.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -629,6 +630,10 @@ void DeadCodeScan::reportDeadCode(const CFGBlock *B,
UK = reachable_code::UK_Return;
}
+ const auto *AS = dyn_cast<AttributedStmt>(S);
+ bool HasFallThroughAttr =
+ AS && hasSpecificAttr<FallThroughAttr>(AS->getAttrs());
+
SourceRange SilenceableCondVal;
if (UK == reachable_code::UK_Other) {
@@ -645,8 +650,9 @@ void DeadCodeScan::reportDeadCode(const CFGBlock *B,
R2 = Inc->getSourceRange();
}
- CB.HandleUnreachable(reachable_code::UK_Loop_Increment,
- Loc, SourceRange(), SourceRange(Loc, Loc), R2);
+ CB.HandleUnreachable(reachable_code::UK_Loop_Increment, Loc,
+ SourceRange(), SourceRange(Loc, Loc), R2,
+ HasFallThroughAttr);
return;
}
@@ -665,7 +671,7 @@ void DeadCodeScan::reportDeadCode(const CFGBlock *B,
SourceRange R1, R2;
SourceLocation Loc = GetUnreachableLoc(S, R1, R2);
- CB.HandleUnreachable(UK, Loc, SilenceableCondVal, R1, R2);
+ CB.HandleUnreachable(UK, Loc, SilenceableCondVal, R1, R2, HasFallThroughAttr);
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp b/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp
index 468e94b23c3a..8c997b645f15 100644
--- a/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp
@@ -190,18 +190,18 @@ static bool hasRCAnnotation(const Decl *D, StringRef rcAnnotation) {
}
static bool isRetain(const FunctionDecl *FD, StringRef FName) {
- return FName.startswith_insensitive("retain") ||
- FName.endswith_insensitive("retain");
+ return FName.starts_with_insensitive("retain") ||
+ FName.ends_with_insensitive("retain");
}
static bool isRelease(const FunctionDecl *FD, StringRef FName) {
- return FName.startswith_insensitive("release") ||
- FName.endswith_insensitive("release");
+ return FName.starts_with_insensitive("release") ||
+ FName.ends_with_insensitive("release");
}
static bool isAutorelease(const FunctionDecl *FD, StringRef FName) {
- return FName.startswith_insensitive("autorelease") ||
- FName.endswith_insensitive("autorelease");
+ return FName.starts_with_insensitive("autorelease") ||
+ FName.ends_with_insensitive("autorelease");
}
static bool isMakeCollectable(StringRef FName) {
diff --git a/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp b/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
index 899c6018895e..087994e6ebd7 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
@@ -402,7 +402,7 @@ public:
// The map with which Exp should be interpreted.
Context Ctx;
- bool isReference() { return !Exp; }
+ bool isReference() const { return !Exp; }
private:
// Create ordinary variable definition
@@ -502,9 +502,8 @@ public:
for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
const NamedDecl *D = I.getKey();
D->printName(llvm::errs());
- const unsigned *i = C.lookup(D);
llvm::errs() << " -> ";
- dumpVarDefinitionName(*i);
+ dumpVarDefinitionName(I.getData());
llvm::errs() << "\n";
}
}
@@ -1163,7 +1162,7 @@ void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
}
// Transitively search other before sets, and warn on cycles.
if (traverse(Vdb)) {
- if (CycMap.find(Vd) == CycMap.end()) {
+ if (!CycMap.contains(Vd)) {
CycMap.insert(std::make_pair(Vd, true));
StringRef L1 = Vd->getName();
Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
diff --git a/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp b/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp
index a771149f1591..b8286cef396c 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp
@@ -69,12 +69,7 @@ static bool isIncompletePhi(const til::SExpr *E) {
using CallingContext = SExprBuilder::CallingContext;
-til::SExpr *SExprBuilder::lookupStmt(const Stmt *S) {
- auto It = SMap.find(S);
- if (It != SMap.end())
- return It->second;
- return nullptr;
-}
+til::SExpr *SExprBuilder::lookupStmt(const Stmt *S) { return SMap.lookup(S); }
til::SCFG *SExprBuilder::buildCFG(CFGWalker &Walker) {
Walker.walk(*this);
diff --git a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
index 2437095a22cf..b796f7674cc1 100644
--- a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
@@ -40,13 +40,31 @@ using namespace clang;
#define DEBUG_LOGGING 0
+static bool recordIsNotEmpty(const RecordDecl *RD) {
+ // We consider a record decl to be empty if it contains only unnamed bit-
+ // fields, zero-width fields, and fields of empty record type.
+ for (const auto *FD : RD->fields()) {
+ if (FD->isUnnamedBitfield())
+ continue;
+ if (FD->isZeroSize(FD->getASTContext()))
+ continue;
+ // The only case remaining to check is for a field declaration of record
+ // type and whether that record itself is empty.
+ if (const auto *FieldRD = FD->getType()->getAsRecordDecl();
+ !FieldRD || recordIsNotEmpty(FieldRD))
+ return true;
+ }
+ return false;
+}
+
static bool isTrackedVar(const VarDecl *vd, const DeclContext *dc) {
if (vd->isLocalVarDecl() && !vd->hasGlobalStorage() &&
- !vd->isExceptionVariable() && !vd->isInitCapture() &&
- !vd->isImplicit() && vd->getDeclContext() == dc) {
+ !vd->isExceptionVariable() && !vd->isInitCapture() && !vd->isImplicit() &&
+ vd->getDeclContext() == dc) {
QualType ty = vd->getType();
- return ty->isScalarType() || ty->isVectorType() || ty->isRecordType() ||
- ty->isRVVType();
+ if (const auto *RD = ty->getAsRecordDecl())
+ return recordIsNotEmpty(RD);
+ return ty->isScalarType() || ty->isVectorType() || ty->isRVVType();
}
return false;
}
@@ -586,28 +604,6 @@ public:
continue;
}
- if (AtPredExit == MayUninitialized) {
- // If the predecessor's terminator is an "asm goto" that initializes
- // the variable, then don't count it as "initialized" on the indirect
- // paths.
- CFGTerminator term = Pred->getTerminator();
- if (const auto *as = dyn_cast_or_null<GCCAsmStmt>(term.getStmt())) {
- const CFGBlock *fallthrough = *Pred->succ_begin();
- if (as->isAsmGoto() &&
- llvm::any_of(as->outputs(), [&](const Expr *output) {
- return vd == findVar(output).getDecl() &&
- llvm::any_of(as->labels(),
- [&](const AddrLabelExpr *label) {
- return label->getLabel()->getStmt() == B->Label &&
- B != fallthrough;
- });
- })) {
- Use.setUninitAfterDecl();
- continue;
- }
- }
- }
-
unsigned &SV = SuccsVisited[Pred->getBlockID()];
if (!SV) {
// When visiting the first successor of a block, mark all NULL
@@ -820,7 +816,8 @@ void TransferFunctions::VisitGCCAsmStmt(GCCAsmStmt *as) {
// it's used on an indirect path, where it's not guaranteed to be
// defined.
if (const VarDecl *VD = findVar(Ex).getDecl())
- vals[VD] = MayUninitialized;
+ if (vals[VD] != Initialized)
+ vals[VD] = MayUninitialized;
}
}
@@ -899,7 +896,7 @@ struct PruneBlocksHandler : public UninitVariablesHandler {
hadUse[currentBlock] = true;
hadAnyUse = true;
}
-
+
/// Called when the uninitialized variable analysis detects the
/// idiom 'int x = x'. All other uses of 'x' within the initializer
/// are handled by handleUseOfUninitVariable.
diff --git a/contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp b/contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp
index 2f1417487967..5cde60cefdf0 100644
--- a/contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp
@@ -7,11 +7,16 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/UnsafeBufferUsage.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/SmallVector.h"
#include <memory>
#include <optional>
+#include <sstream>
+#include <queue>
using namespace llvm;
using namespace clang;
@@ -31,9 +36,10 @@ public:
MatchDescendantVisitor(const internal::DynTypedMatcher *Matcher,
internal::ASTMatchFinder *Finder,
internal::BoundNodesTreeBuilder *Builder,
- internal::ASTMatchFinder::BindKind Bind)
+ internal::ASTMatchFinder::BindKind Bind,
+ const bool ignoreUnevaluatedContext)
: Matcher(Matcher), Finder(Finder), Builder(Builder), Bind(Bind),
- Matches(false) {}
+ Matches(false), ignoreUnevaluatedContext(ignoreUnevaluatedContext) {}
// Returns true if a match is found in a subtree of `DynNode`, which belongs
// to the same callable of `DynNode`.
@@ -66,14 +72,53 @@ public:
return VisitorBase::TraverseDecl(Node);
}
+ bool TraverseGenericSelectionExpr(GenericSelectionExpr *Node) {
+ // These are unevaluated, except the result expression.
+ if(ignoreUnevaluatedContext)
+ return TraverseStmt(Node->getResultExpr());
+ return VisitorBase::TraverseGenericSelectionExpr(Node);
+ }
+
+ bool TraverseUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node) {
+ // Unevaluated context.
+ if(ignoreUnevaluatedContext)
+ return true;
+ return VisitorBase::TraverseUnaryExprOrTypeTraitExpr(Node);
+ }
+
+ bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc Node) {
+ // Unevaluated context.
+ if(ignoreUnevaluatedContext)
+ return true;
+ return VisitorBase::TraverseTypeOfExprTypeLoc(Node);
+ }
+
+ bool TraverseDecltypeTypeLoc(DecltypeTypeLoc Node) {
+ // Unevaluated context.
+ if(ignoreUnevaluatedContext)
+ return true;
+ return VisitorBase::TraverseDecltypeTypeLoc(Node);
+ }
+
+ bool TraverseCXXNoexceptExpr(CXXNoexceptExpr *Node) {
+ // Unevaluated context.
+ if(ignoreUnevaluatedContext)
+ return true;
+ return VisitorBase::TraverseCXXNoexceptExpr(Node);
+ }
+
+ bool TraverseCXXTypeidExpr(CXXTypeidExpr *Node) {
+ // Unevaluated context.
+ if(ignoreUnevaluatedContext)
+ return true;
+ return VisitorBase::TraverseCXXTypeidExpr(Node);
+ }
+
bool TraverseStmt(Stmt *Node, DataRecursionQueue *Queue = nullptr) {
if (!Node)
return true;
if (!match(*Node))
return false;
- // To skip callables:
- if (isa<LambdaExpr>(Node))
- return true;
return VisitorBase::TraverseStmt(Node);
}
@@ -107,14 +152,131 @@ private:
internal::BoundNodesTreeBuilder ResultBindings;
const internal::ASTMatchFinder::BindKind Bind;
bool Matches;
+ bool ignoreUnevaluatedContext;
};
-AST_MATCHER_P(Stmt, forEveryDescendant, internal::Matcher<Stmt>, innerMatcher) {
+// Because we're dealing with raw pointers, let's define what we mean by that.
+static auto hasPointerType() {
+ return hasType(hasCanonicalType(pointerType()));
+}
+
+static auto hasArrayType() {
+ return hasType(hasCanonicalType(arrayType()));
+}
+
+AST_MATCHER_P(Stmt, forEachDescendantEvaluatedStmt, internal::Matcher<Stmt>, innerMatcher) {
const DynTypedMatcher &DTM = static_cast<DynTypedMatcher>(innerMatcher);
-
- MatchDescendantVisitor Visitor(&DTM, Finder, Builder, ASTMatchFinder::BK_All);
+
+ MatchDescendantVisitor Visitor(&DTM, Finder, Builder, ASTMatchFinder::BK_All, true);
+ return Visitor.findMatch(DynTypedNode::create(Node));
+}
+
+AST_MATCHER_P(Stmt, forEachDescendantStmt, internal::Matcher<Stmt>, innerMatcher) {
+ const DynTypedMatcher &DTM = static_cast<DynTypedMatcher>(innerMatcher);
+
+ MatchDescendantVisitor Visitor(&DTM, Finder, Builder, ASTMatchFinder::BK_All, false);
return Visitor.findMatch(DynTypedNode::create(Node));
}
+
+// Matches a `Stmt` node iff the node is in a safe-buffer opt-out region
+AST_MATCHER_P(Stmt, notInSafeBufferOptOut, const UnsafeBufferUsageHandler *,
+ Handler) {
+ return !Handler->isSafeBufferOptOut(Node.getBeginLoc());
+}
+
+AST_MATCHER_P(CastExpr, castSubExpr, internal::Matcher<Expr>, innerMatcher) {
+ return innerMatcher.matches(*Node.getSubExpr(), Finder, Builder);
+}
+
+// Matches a `UnaryOperator` whose operator is pre-increment:
+AST_MATCHER(UnaryOperator, isPreInc) {
+ return Node.getOpcode() == UnaryOperator::Opcode::UO_PreInc;
+}
+
+// Returns a matcher that matches any expression 'e' such that `innerMatcher`
+// matches 'e' and 'e' is in an Unspecified Lvalue Context.
+static auto isInUnspecifiedLvalueContext(internal::Matcher<Expr> innerMatcher) {
+ // clang-format off
+ return
+ expr(anyOf(
+ implicitCastExpr(
+ hasCastKind(CastKind::CK_LValueToRValue),
+ castSubExpr(innerMatcher)),
+ binaryOperator(
+ hasAnyOperatorName("="),
+ hasLHS(innerMatcher)
+ )
+ ));
+// clang-format on
+}
+
+
+// Returns a matcher that matches any expression `e` such that `InnerMatcher`
+// matches `e` and `e` is in an Unspecified Pointer Context (UPC).
+static internal::Matcher<Stmt>
+isInUnspecifiedPointerContext(internal::Matcher<Stmt> InnerMatcher) {
+ // A UPC can be
+ // 1. an argument of a function call (except the callee has [[unsafe_...]]
+ // attribute), or
+ // 2. the operand of a pointer-to-(integer or bool) cast operation; or
+ // 3. the operand of a comparator operation; or
+ // 4. the operand of a pointer subtraction operation
+ // (i.e., computing the distance between two pointers); or ...
+
+ auto CallArgMatcher =
+ callExpr(forEachArgumentWithParam(InnerMatcher,
+ hasPointerType() /* array also decays to pointer type*/),
+ unless(callee(functionDecl(hasAttr(attr::UnsafeBufferUsage)))));
+
+ auto CastOperandMatcher =
+ castExpr(anyOf(hasCastKind(CastKind::CK_PointerToIntegral),
+ hasCastKind(CastKind::CK_PointerToBoolean)),
+ castSubExpr(allOf(hasPointerType(), InnerMatcher)));
+
+ auto CompOperandMatcher =
+ binaryOperator(hasAnyOperatorName("!=", "==", "<", "<=", ">", ">="),
+ eachOf(hasLHS(allOf(hasPointerType(), InnerMatcher)),
+ hasRHS(allOf(hasPointerType(), InnerMatcher))));
+
+ // A matcher that matches pointer subtractions:
+ auto PtrSubtractionMatcher =
+ binaryOperator(hasOperatorName("-"),
+ // Note that here we need both LHS and RHS to be
+ // pointer. Then the inner matcher can match any of
+ // them:
+ allOf(hasLHS(hasPointerType()),
+ hasRHS(hasPointerType())),
+ eachOf(hasLHS(InnerMatcher),
+ hasRHS(InnerMatcher)));
+
+ return stmt(anyOf(CallArgMatcher, CastOperandMatcher, CompOperandMatcher,
+ PtrSubtractionMatcher));
+ // FIXME: any more cases? (UPC excludes the RHS of an assignment. For now we
+ // don't have to check that.)
+}
+
+// Returns a matcher that matches any expression 'e' such that `innerMatcher`
+// matches 'e' and 'e' is in an unspecified untyped context (i.e., the expression
+// 'e' isn't evaluated to an RValue). For example, consider the following code:
+// int *p = new int[4];
+// int *q = new int[4];
+// if ((p = q)) {}
+// p = q;
+// The expression `p = q` in the conditional of the `if` statement
+// `if ((p = q))` is evaluated as an RValue, whereas the expression `p = q;`
+// in the assignment statement is in an untyped context.
+static internal::Matcher<Stmt>
+isInUnspecifiedUntypedContext(internal::Matcher<Stmt> InnerMatcher) {
+ // An unspecified context can be
+ // 1. A compound statement,
+ // 2. The body of an if statement
+ // 3. Body of a loop
+ auto CompStmt = compoundStmt(forEach(InnerMatcher));
+ auto IfStmtThen = ifStmt(hasThen(InnerMatcher));
+ auto IfStmtElse = ifStmt(hasElse(InnerMatcher));
+ // FIXME: Handle loop bodies.
+ return stmt(anyOf(CompStmt, IfStmtThen, IfStmtElse));
+}
} // namespace clang::ast_matchers
namespace {
@@ -129,15 +291,6 @@ using FixItList = SmallVector<FixItHint, 4>;
class Strategy;
} // namespace
-// Because we're dealing with raw pointers, let's define what we mean by that.
-static auto hasPointerType() {
- return hasType(hasCanonicalType(pointerType()));
-}
-
-static auto hasArrayType() {
- return hasType(hasCanonicalType(arrayType()));
-}
-
namespace {
/// Gadget is an individual operation in the code that may be of interest to
/// this analysis. Each (non-abstract) subclass corresponds to a specific
@@ -200,11 +353,21 @@ public:
bool isWarningGadget() const final { return false; }
/// Returns a fixit that would fix the current gadget according to
- /// the current strategy. Returns None if the fix cannot be produced;
+ /// the current strategy. Returns std::nullopt if the fix cannot be produced;
/// returns an empty list if no fixes are necessary.
virtual std::optional<FixItList> getFixits(const Strategy &) const {
return std::nullopt;
}
+
+ /// Returns a list of two elements where the first element is the LHS of a pointer assignment
+ /// statement and the second element is the RHS. This two-element list represents the fact that
+ /// the LHS buffer gets its bounds information from the RHS buffer. This information will be used
+ /// later to group all those variables whose types must be modified together to prevent type
+ /// mismatches.
+ virtual std::optional<std::pair<const VarDecl *, const VarDecl *>>
+ getStrategyImplications() const {
+ return std::nullopt;
+ }
};
using FixableGadgetList = std::vector<std::unique_ptr<FixableGadget>>;
@@ -282,7 +445,7 @@ public:
/// Array subscript expressions on raw pointers as if they're arrays. Unsafe as
/// it doesn't have any bounds checks for the array.
class ArraySubscriptGadget : public WarningGadget {
- static constexpr const char *const ArraySubscrTag = "arraySubscr";
+ static constexpr const char *const ArraySubscrTag = "ArraySubscript";
const ArraySubscriptExpr *ASE;
public:
@@ -301,9 +464,11 @@ public:
return stmt(arraySubscriptExpr(
hasBase(ignoringParenImpCasts(
anyOf(hasPointerType(), hasArrayType()))),
- unless(hasIndex(integerLiteral(equals(0)))))
+ unless(hasIndex(
+ anyOf(integerLiteral(equals(0)), arrayInitIndexExpr())
+ )))
.bind(ArraySubscrTag));
- // clang-format on
+ // clang-format on
}
const ArraySubscriptExpr *getBaseStmt() const override { return ASE; }
@@ -326,10 +491,10 @@ class PointerArithmeticGadget : public WarningGadget {
static constexpr const char *const PointerArithmeticTag = "ptrAdd";
static constexpr const char *const PointerArithmeticPointerTag = "ptrAddPtr";
const BinaryOperator *PA; // pointer arithmetic expression
- const Expr * Ptr; // the pointer expression in `PA`
+ const Expr *Ptr; // the pointer expression in `PA`
public:
- PointerArithmeticGadget(const MatchFinder::MatchResult &Result)
+ PointerArithmeticGadget(const MatchFinder::MatchResult &Result)
: WarningGadget(Kind::PointerArithmetic),
PA(Result.Nodes.getNodeAs<BinaryOperator>(PointerArithmeticTag)),
Ptr(Result.Nodes.getNodeAs<Expr>(PointerArithmeticPointerTag)) {}
@@ -339,39 +504,314 @@ public:
}
static Matcher matcher() {
- auto HasIntegerType = anyOf(
- hasType(isInteger()), hasType(enumType()));
- auto PtrAtRight = allOf(hasOperatorName("+"),
- hasRHS(expr(hasPointerType()).bind(PointerArithmeticPointerTag)),
- hasLHS(HasIntegerType));
- auto PtrAtLeft = allOf(
- anyOf(hasOperatorName("+"), hasOperatorName("-"),
- hasOperatorName("+="), hasOperatorName("-=")),
- hasLHS(expr(hasPointerType()).bind(PointerArithmeticPointerTag)),
- hasRHS(HasIntegerType));
-
- return stmt(binaryOperator(anyOf(PtrAtLeft, PtrAtRight)).bind(PointerArithmeticTag));
+ auto HasIntegerType = anyOf(hasType(isInteger()), hasType(enumType()));
+ auto PtrAtRight =
+ allOf(hasOperatorName("+"),
+ hasRHS(expr(hasPointerType()).bind(PointerArithmeticPointerTag)),
+ hasLHS(HasIntegerType));
+ auto PtrAtLeft =
+ allOf(anyOf(hasOperatorName("+"), hasOperatorName("-"),
+ hasOperatorName("+="), hasOperatorName("-=")),
+ hasLHS(expr(hasPointerType()).bind(PointerArithmeticPointerTag)),
+ hasRHS(HasIntegerType));
+
+ return stmt(binaryOperator(anyOf(PtrAtLeft, PtrAtRight))
+ .bind(PointerArithmeticTag));
}
const Stmt *getBaseStmt() const override { return PA; }
DeclUseList getClaimedVarUseSites() const override {
- if (const auto *DRE =
- dyn_cast<DeclRefExpr>(Ptr->IgnoreParenImpCasts())) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Ptr->IgnoreParenImpCasts())) {
return {DRE};
}
return {};
}
// FIXME: pointer adding zero should be fine
- //FIXME: this gadge will need a fix-it
+ // FIXME: this gadget will need a fix-it
+};
+
+/// A pointer initialization expression of the form:
+/// \code
+/// int *p = q;
+/// \endcode
+class PointerInitGadget : public FixableGadget {
+private:
+ static constexpr const char *const PointerInitLHSTag = "ptrInitLHS";
+ static constexpr const char *const PointerInitRHSTag = "ptrInitRHS";
+ const VarDecl * PtrInitLHS; // the LHS pointer expression in `PI`
+ const DeclRefExpr * PtrInitRHS; // the RHS pointer expression in `PI`
+
+public:
+ PointerInitGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::PointerInit),
+ PtrInitLHS(Result.Nodes.getNodeAs<VarDecl>(PointerInitLHSTag)),
+ PtrInitRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerInitRHSTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::PointerInit;
+ }
+
+ static Matcher matcher() {
+ auto PtrInitStmt = declStmt(hasSingleDecl(varDecl(
+ hasInitializer(ignoringImpCasts(declRefExpr(
+ hasPointerType()).
+ bind(PointerInitRHSTag)))).
+ bind(PointerInitLHSTag)));
+
+ return stmt(PtrInitStmt);
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+
+ virtual const Stmt *getBaseStmt() const override { return nullptr; }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ return DeclUseList{PtrInitRHS};
+ }
+
+ virtual std::optional<std::pair<const VarDecl *, const VarDecl *>>
+ getStrategyImplications() const override {
+ return std::make_pair(PtrInitLHS,
+ cast<VarDecl>(PtrInitRHS->getDecl()));
+ }
+};
+
+/// A pointer assignment expression of the form:
+/// \code
+/// p = q;
+/// \endcode
+class PointerAssignmentGadget : public FixableGadget {
+private:
+ static constexpr const char *const PointerAssignLHSTag = "ptrLHS";
+ static constexpr const char *const PointerAssignRHSTag = "ptrRHS";
+ const DeclRefExpr * PtrLHS; // the LHS pointer expression in `PA`
+ const DeclRefExpr * PtrRHS; // the RHS pointer expression in `PA`
+
+public:
+ PointerAssignmentGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::PointerAssignment),
+ PtrLHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignLHSTag)),
+ PtrRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignRHSTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::PointerAssignment;
+ }
+
+ static Matcher matcher() {
+ auto PtrAssignExpr = binaryOperator(allOf(hasOperatorName("="),
+ hasRHS(ignoringParenImpCasts(declRefExpr(hasPointerType(),
+ to(varDecl())).
+ bind(PointerAssignRHSTag))),
+ hasLHS(declRefExpr(hasPointerType(),
+ to(varDecl())).
+ bind(PointerAssignLHSTag))));
+
+ return stmt(isInUnspecifiedUntypedContext(PtrAssignExpr));
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+
+ virtual const Stmt *getBaseStmt() const override { return nullptr; }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ return DeclUseList{PtrLHS, PtrRHS};
+ }
+
+ virtual std::optional<std::pair<const VarDecl *, const VarDecl *>>
+ getStrategyImplications() const override {
+ return std::make_pair(cast<VarDecl>(PtrLHS->getDecl()),
+ cast<VarDecl>(PtrRHS->getDecl()));
+ }
+};
+
+/// A call of a function or method that performs unchecked buffer operations
+/// over one of its pointer parameters.
+class UnsafeBufferUsageAttrGadget : public WarningGadget {
+ constexpr static const char *const OpTag = "call_expr";
+ const CallExpr *Op;
+
+public:
+ UnsafeBufferUsageAttrGadget(const MatchFinder::MatchResult &Result)
+ : WarningGadget(Kind::UnsafeBufferUsageAttr),
+ Op(Result.Nodes.getNodeAs<CallExpr>(OpTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::UnsafeBufferUsageAttr;
+ }
+
+ static Matcher matcher() {
+ return stmt(callExpr(callee(functionDecl(hasAttr(attr::UnsafeBufferUsage))))
+ .bind(OpTag));
+ }
+ const Stmt *getBaseStmt() const override { return Op; }
+
+ DeclUseList getClaimedVarUseSites() const override { return {}; }
+};
+
+// Represents expressions of the form `DRE[*]` in the Unspecified Lvalue
+// Context (see `isInUnspecifiedLvalueContext`).
+// Note here `[]` is the built-in subscript operator.
+class ULCArraySubscriptGadget : public FixableGadget {
+private:
+ static constexpr const char *const ULCArraySubscriptTag =
+ "ArraySubscriptUnderULC";
+ const ArraySubscriptExpr *Node;
+
+public:
+ ULCArraySubscriptGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::ULCArraySubscript),
+ Node(Result.Nodes.getNodeAs<ArraySubscriptExpr>(ULCArraySubscriptTag)) {
+ assert(Node != nullptr && "Expecting a non-null matching result");
+ }
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::ULCArraySubscript;
+ }
+
+ static Matcher matcher() {
+ auto ArrayOrPtr = anyOf(hasPointerType(), hasArrayType());
+ auto BaseIsArrayOrPtrDRE =
+ hasBase(ignoringParenImpCasts(declRefExpr(ArrayOrPtr)));
+ auto Target =
+ arraySubscriptExpr(BaseIsArrayOrPtrDRE).bind(ULCArraySubscriptTag);
+
+ return expr(isInUnspecifiedLvalueContext(Target));
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+
+ virtual const Stmt *getBaseStmt() const override { return Node; }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ if (const auto *DRE =
+ dyn_cast<DeclRefExpr>(Node->getBase()->IgnoreImpCasts())) {
+ return {DRE};
+ }
+ return {};
+ }
+};
+
+// Fixable gadget to handle standalone pointers of the form `UPC(DRE)` in the
+// unspecified pointer context (isInUnspecifiedPointerContext). The gadget emits
+// fixit of the form `UPC(DRE.data())`.
+class UPCStandalonePointerGadget : public FixableGadget {
+private:
+ static constexpr const char *const DeclRefExprTag = "StandalonePointer";
+ const DeclRefExpr *Node;
+
+public:
+ UPCStandalonePointerGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::UPCStandalonePointer),
+ Node(Result.Nodes.getNodeAs<DeclRefExpr>(DeclRefExprTag)) {
+ assert(Node != nullptr && "Expecting a non-null matching result");
+ }
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::UPCStandalonePointer;
+ }
+
+ static Matcher matcher() {
+ auto ArrayOrPtr = anyOf(hasPointerType(), hasArrayType());
+ auto target = expr(
+ ignoringParenImpCasts(declRefExpr(allOf(ArrayOrPtr, to(varDecl()))).bind(DeclRefExprTag)));
+ return stmt(isInUnspecifiedPointerContext(target));
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+
+ virtual const Stmt *getBaseStmt() const override { return Node; }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ return {Node};
+ }
+};
+
+class PointerDereferenceGadget : public FixableGadget {
+ static constexpr const char *const BaseDeclRefExprTag = "BaseDRE";
+ static constexpr const char *const OperatorTag = "op";
+
+ const DeclRefExpr *BaseDeclRefExpr = nullptr;
+ const UnaryOperator *Op = nullptr;
+
+public:
+ PointerDereferenceGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::PointerDereference),
+ BaseDeclRefExpr(
+ Result.Nodes.getNodeAs<DeclRefExpr>(BaseDeclRefExprTag)),
+ Op(Result.Nodes.getNodeAs<UnaryOperator>(OperatorTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::PointerDereference;
+ }
+
+ static Matcher matcher() {
+ auto Target =
+ unaryOperator(
+ hasOperatorName("*"),
+ has(expr(ignoringParenImpCasts(
+ declRefExpr(to(varDecl())).bind(BaseDeclRefExprTag)))))
+ .bind(OperatorTag);
+
+ return expr(isInUnspecifiedLvalueContext(Target));
+ }
+
+ DeclUseList getClaimedVarUseSites() const override {
+ return {BaseDeclRefExpr};
+ }
+
+ virtual const Stmt *getBaseStmt() const final { return Op; }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+};
+
+// Represents expressions of the form `&DRE[any]` in the Unspecified Pointer
+// Context (see `isInUnspecifiedPointerContext`).
+// Note here `[]` is the built-in subscript operator.
+class UPCAddressofArraySubscriptGadget : public FixableGadget {
+private:
+ static constexpr const char *const UPCAddressofArraySubscriptTag =
+ "AddressofArraySubscriptUnderUPC";
+ const UnaryOperator *Node; // the `&DRE[any]` node
+
+public:
+ UPCAddressofArraySubscriptGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::ULCArraySubscript),
+ Node(Result.Nodes.getNodeAs<UnaryOperator>(
+ UPCAddressofArraySubscriptTag)) {
+ assert(Node != nullptr && "Expecting a non-null matching result");
+ }
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::UPCAddressofArraySubscript;
+ }
+
+ static Matcher matcher() {
+ return expr(isInUnspecifiedPointerContext(expr(ignoringImpCasts(
+ unaryOperator(hasOperatorName("&"),
+ hasUnaryOperand(arraySubscriptExpr(
+ hasBase(ignoringParenImpCasts(declRefExpr())))))
+ .bind(UPCAddressofArraySubscriptTag)))));
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &) const override;
+
+ virtual const Stmt *getBaseStmt() const override { return Node; }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ const auto *ArraySubst = cast<ArraySubscriptExpr>(Node->getSubExpr());
+ const auto *DRE =
+ cast<DeclRefExpr>(ArraySubst->getBase()->IgnoreImpCasts());
+ return {DRE};
+ }
};
} // namespace
namespace {
// An auxiliary tracking facility for the fixit analysis. It helps connect
-// declarations to its and make sure we've covered all uses with our analysis
-// before we try to fix the declaration.
+// declarations to its uses and make sure we've covered all uses with our
+// analysis before we try to fix the declaration.
class DeclUseTracker {
using UseSetTy = SmallSet<const DeclRefExpr *, 16>;
using DefMapTy = DenseMap<const VarDecl *, const DeclStmt *>;
@@ -383,6 +823,7 @@ class DeclUseTracker {
public:
DeclUseTracker() = default;
DeclUseTracker(const DeclUseTracker &) = delete; // Let's avoid copies.
+ DeclUseTracker &operator=(const DeclUseTracker &) = delete;
DeclUseTracker(DeclUseTracker &&) = default;
DeclUseTracker &operator=(DeclUseTracker &&) = default;
@@ -433,11 +874,11 @@ namespace {
class Strategy {
public:
enum class Kind {
- Wontfix, // We don't plan to emit a fixit for this variable.
- Span, // We recommend replacing the variable with std::span.
- Iterator, // We recommend replacing the variable with std::span::iterator.
- Array, // We recommend replacing the variable with std::array.
- Vector // We recommend replacing the variable with std::vector.
+ Wontfix, // We don't plan to emit a fixit for this variable.
+ Span, // We recommend replacing the variable with std::span.
+ Iterator, // We recommend replacing the variable with std::span::iterator.
+ Array, // We recommend replacing the variable with std::array.
+ Vector // We recommend replacing the variable with std::vector.
};
private:
@@ -448,11 +889,11 @@ private:
public:
Strategy() = default;
Strategy(const Strategy &) = delete; // Let's avoid copies.
+ Strategy &operator=(const Strategy &) = delete;
Strategy(Strategy &&) = default;
+ Strategy &operator=(Strategy &&) = default;
- void set(const VarDecl *VD, Kind K) {
- Map[VD] = K;
- }
+ void set(const VarDecl *VD, Kind K) { Map[VD] = K; }
Kind lookup(const VarDecl *VD) const {
auto I = Map.find(VD);
@@ -464,8 +905,100 @@ public:
};
} // namespace
+
+// Representing a pointer type expression of the form `++Ptr` in an Unspecified
+// Pointer Context (UPC):
+class UPCPreIncrementGadget : public FixableGadget {
+private:
+ static constexpr const char *const UPCPreIncrementTag =
+ "PointerPreIncrementUnderUPC";
+ const UnaryOperator *Node; // the `++Ptr` node
+
+public:
+ UPCPreIncrementGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::UPCPreIncrement),
+ Node(Result.Nodes.getNodeAs<UnaryOperator>(UPCPreIncrementTag)) {
+ assert(Node != nullptr && "Expecting a non-null matching result");
+ }
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::UPCPreIncrement;
+ }
+
+ static Matcher matcher() {
+ // Note here we match `++Ptr` for any expression `Ptr` of pointer type.
+ // Although currently we can only provide fix-its when `Ptr` is a DRE, we
+ // can have the matcher be general, so long as `getClaimedVarUseSites` does
+ // things right.
+ return stmt(isInUnspecifiedPointerContext(expr(ignoringImpCasts(
+ unaryOperator(isPreInc(),
+ hasUnaryOperand(declRefExpr())
+ ).bind(UPCPreIncrementTag)))));
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+
+ virtual const Stmt *getBaseStmt() const override { return Node; }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ return {dyn_cast<DeclRefExpr>(Node->getSubExpr())};
+ }
+};
+
+// Representing a fixable expression of the form `*(ptr + 123)` or `*(123 +
+// ptr)`:
+class DerefSimplePtrArithFixableGadget : public FixableGadget {
+ static constexpr const char *const BaseDeclRefExprTag = "BaseDRE";
+ static constexpr const char *const DerefOpTag = "DerefOp";
+ static constexpr const char *const AddOpTag = "AddOp";
+ static constexpr const char *const OffsetTag = "Offset";
+
+ const DeclRefExpr *BaseDeclRefExpr = nullptr;
+ const UnaryOperator *DerefOp = nullptr;
+ const BinaryOperator *AddOp = nullptr;
+ const IntegerLiteral *Offset = nullptr;
+
+public:
+ DerefSimplePtrArithFixableGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::DerefSimplePtrArithFixable),
+ BaseDeclRefExpr(
+ Result.Nodes.getNodeAs<DeclRefExpr>(BaseDeclRefExprTag)),
+ DerefOp(Result.Nodes.getNodeAs<UnaryOperator>(DerefOpTag)),
+ AddOp(Result.Nodes.getNodeAs<BinaryOperator>(AddOpTag)),
+ Offset(Result.Nodes.getNodeAs<IntegerLiteral>(OffsetTag)) {}
+
+ static Matcher matcher() {
+ // clang-format off
+ auto ThePtr = expr(hasPointerType(),
+ ignoringImpCasts(declRefExpr(to(varDecl())).bind(BaseDeclRefExprTag)));
+ auto PlusOverPtrAndInteger = expr(anyOf(
+ binaryOperator(hasOperatorName("+"), hasLHS(ThePtr),
+ hasRHS(integerLiteral().bind(OffsetTag)))
+ .bind(AddOpTag),
+ binaryOperator(hasOperatorName("+"), hasRHS(ThePtr),
+ hasLHS(integerLiteral().bind(OffsetTag)))
+ .bind(AddOpTag)));
+ return isInUnspecifiedLvalueContext(unaryOperator(
+ hasOperatorName("*"),
+ hasUnaryOperand(ignoringParens(PlusOverPtrAndInteger)))
+ .bind(DerefOpTag));
+ // clang-format on
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &s) const final;
+
+ // TODO remove this method from FixableGadget interface
+ virtual const Stmt *getBaseStmt() const final { return nullptr; }
+
+ virtual DeclUseList getClaimedVarUseSites() const final {
+ return {BaseDeclRefExpr};
+ }
+};
+
/// Scan the function and return a list of gadgets found with provided kits.
-static std::tuple<FixableGadgetList, WarningGadgetList, DeclUseTracker> findGadgets(const Decl *D) {
+static std::tuple<FixableGadgetList, WarningGadgetList, DeclUseTracker>
+findGadgets(const Decl *D, const UnsafeBufferUsageHandler &Handler,
+ bool EmitSuggestions) {
struct GadgetFinderCallback : MatchFinder::MatchCallback {
FixableGadgetList FixableGadgets;
@@ -495,17 +1028,17 @@ static std::tuple<FixableGadgetList, WarningGadgetList, DeclUseTracker> findGadg
// Figure out which matcher we've found, and call the appropriate
// subclass constructor.
// FIXME: Can we do this more logarithmically?
-#define FIXABLE_GADGET(name) \
- if (Result.Nodes.getNodeAs<Stmt>(#name)) { \
- FixableGadgets.push_back(std::make_unique<name ## Gadget>(Result)); \
- NEXT; \
- }
+#define FIXABLE_GADGET(name) \
+ if (Result.Nodes.getNodeAs<Stmt>(#name)) { \
+ FixableGadgets.push_back(std::make_unique<name##Gadget>(Result)); \
+ NEXT; \
+ }
#include "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def"
-#define WARNING_GADGET(name) \
- if (Result.Nodes.getNodeAs<Stmt>(#name)) { \
- WarningGadgets.push_back(std::make_unique<name ## Gadget>(Result)); \
- NEXT; \
- }
+#define WARNING_GADGET(name) \
+ if (Result.Nodes.getNodeAs<Stmt>(#name)) { \
+ WarningGadgets.push_back(std::make_unique<name##Gadget>(Result)); \
+ NEXT; \
+ }
#include "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def"
assert(numFound >= 1 && "Gadgets not found in match result!");
@@ -518,32 +1051,44 @@ static std::tuple<FixableGadgetList, WarningGadgetList, DeclUseTracker> findGadg
// clang-format off
M.addMatcher(
- stmt(forEveryDescendant(
- stmt(anyOf(
- // Add Gadget::matcher() for every gadget in the registry.
-#define GADGET(x) \
- x ## Gadget::matcher().bind(#x),
+ stmt(
+ forEachDescendantEvaluatedStmt(stmt(anyOf(
+ // Add Gadget::matcher() for every gadget in the registry.
+#define WARNING_GADGET(x) \
+ allOf(x ## Gadget::matcher().bind(#x), \
+ notInSafeBufferOptOut(&Handler)),
#include "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def"
- // In parallel, match all DeclRefExprs so that to find out
- // whether there are any uncovered by gadgets.
- declRefExpr(anyOf(hasPointerType(), hasArrayType()),
- to(varDecl())).bind("any_dre"),
- // Also match DeclStmts because we'll need them when fixing
- // their underlying VarDecls that otherwise don't have
- // any backreferences to DeclStmts.
- declStmt().bind("any_ds")
- ))
- // FIXME: Idiomatically there should be a forCallable(equalsNode(D))
- // here, to make sure that the statement actually belongs to the
- // function and not to a nested function. However, forCallable uses
- // ParentMap which can't be used before the AST is fully constructed.
- // The original problem doesn't sound like it needs ParentMap though,
- // maybe there's a more direct solution?
- )),
+ // Avoid a hanging comma.
+ unless(stmt())
+ )))
+ ),
&CB
);
// clang-format on
+ if (EmitSuggestions) {
+ // clang-format off
+ M.addMatcher(
+ stmt(
+ forEachDescendantStmt(stmt(eachOf(
+#define FIXABLE_GADGET(x) \
+ x ## Gadget::matcher().bind(#x),
+#include "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def"
+ // In parallel, match all DeclRefExprs so that to find out
+ // whether there are any uncovered by gadgets.
+ declRefExpr(anyOf(hasPointerType(), hasArrayType()),
+ to(varDecl())).bind("any_dre"),
+ // Also match DeclStmts because we'll need them when fixing
+ // their underlying VarDecls that otherwise don't have
+ // any backreferences to DeclStmts.
+ declStmt().bind("any_ds")
+ )))
+ ),
+ &CB
+ );
+ // clang-format on
+ }
+
M.match(*D->getBody(), D->getASTContext());
// Gadgets "claim" variables they're responsible for. Once this loop finishes,
@@ -555,17 +1100,30 @@ static std::tuple<FixableGadgetList, WarningGadgetList, DeclUseTracker> findGadg
}
}
- return {std::move(CB.FixableGadgets), std::move(CB.WarningGadgets), std::move(CB.Tracker)};
+ return {std::move(CB.FixableGadgets), std::move(CB.WarningGadgets),
+ std::move(CB.Tracker)};
}
+// Compares AST nodes by source locations.
+template <typename NodeTy> struct CompareNode {
+ bool operator()(const NodeTy *N1, const NodeTy *N2) const {
+ return N1->getBeginLoc().getRawEncoding() <
+ N2->getBeginLoc().getRawEncoding();
+ }
+};
+
struct WarningGadgetSets {
- std::map<const VarDecl *, std::set<std::unique_ptr<WarningGadget>>> byVar;
+ std::map<const VarDecl *, std::set<const WarningGadget *>,
+ // To keep keys sorted by their locations in the map so that the
+ // order is deterministic:
+ CompareNode<VarDecl>>
+ byVar;
// These Gadgets are not related to pointer variables (e. g. temporaries).
- llvm::SmallVector<std::unique_ptr<WarningGadget>, 16> noVar;
+ llvm::SmallVector<const WarningGadget *, 16> noVar;
};
static WarningGadgetSets
-groupWarningGadgetsByVar(WarningGadgetList &&AllUnsafeOperations) {
+groupWarningGadgetsByVar(const WarningGadgetList &AllUnsafeOperations) {
WarningGadgetSets result;
// If some gadgets cover more than one
// variable, they'll appear more than once in the map.
@@ -575,13 +1133,13 @@ groupWarningGadgetsByVar(WarningGadgetList &&AllUnsafeOperations) {
bool AssociatedWithVarDecl = false;
for (const DeclRefExpr *DRE : ClaimedVarUseSites) {
if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
- result.byVar[VD].emplace(std::move(G));
+ result.byVar[VD].insert(G.get());
AssociatedWithVarDecl = true;
}
}
if (!AssociatedWithVarDecl) {
- result.noVar.emplace_back(std::move(G));
+ result.noVar.push_back(G.get());
continue;
}
}
@@ -589,7 +1147,7 @@ groupWarningGadgetsByVar(WarningGadgetList &&AllUnsafeOperations) {
}
struct FixableGadgetSets {
- std::map<const VarDecl *, std::set<std::unique_ptr<FixableGadget>>> byVar;
+ std::map<const VarDecl *, std::set<const FixableGadget *>> byVar;
};
static FixableGadgetSets
@@ -600,22 +1158,969 @@ groupFixablesByVar(FixableGadgetList &&AllFixableOperations) {
for (const DeclRefExpr *DRE : DREs) {
if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
- FixablesForUnsafeVars.byVar[VD].emplace(std::move(F));
+ FixablesForUnsafeVars.byVar[VD].insert(F.get());
}
}
}
return FixablesForUnsafeVars;
}
+bool clang::internal::anyConflict(const SmallVectorImpl<FixItHint> &FixIts,
+ const SourceManager &SM) {
+ // A simple interval overlap detection algorithm. Sorts all ranges by their
+ // begin location then finds the first overlap in one pass.
+ std::vector<const FixItHint *> All; // a copy of `FixIts`
+
+ for (const FixItHint &H : FixIts)
+ All.push_back(&H);
+ std::sort(All.begin(), All.end(),
+ [&SM](const FixItHint *H1, const FixItHint *H2) {
+ return SM.isBeforeInTranslationUnit(H1->RemoveRange.getBegin(),
+ H2->RemoveRange.getBegin());
+ });
+
+ const FixItHint *CurrHint = nullptr;
+
+ for (const FixItHint *Hint : All) {
+ if (!CurrHint ||
+ SM.isBeforeInTranslationUnit(CurrHint->RemoveRange.getEnd(),
+ Hint->RemoveRange.getBegin())) {
+ // Either to initialize `CurrHint` or `CurrHint` does not
+ // overlap with `Hint`:
+ CurrHint = Hint;
+ } else
+ // In case `Hint` overlaps the `CurrHint`, we found at least one
+ // conflict:
+ return true;
+ }
+ return false;
+}
+
+std::optional<FixItList>
+PointerAssignmentGadget::getFixits(const Strategy &S) const {
+ const auto *LeftVD = cast<VarDecl>(PtrLHS->getDecl());
+ const auto *RightVD = cast<VarDecl>(PtrRHS->getDecl());
+ switch (S.lookup(LeftVD)) {
+ case Strategy::Kind::Span:
+ if (S.lookup(RightVD) == Strategy::Kind::Span)
+ return FixItList{};
+ return std::nullopt;
+ case Strategy::Kind::Wontfix:
+ return std::nullopt;
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("unsupported strategies for FixableGadgets");
+ }
+ return std::nullopt;
+}
+
+std::optional<FixItList>
+PointerInitGadget::getFixits(const Strategy &S) const {
+ const auto *LeftVD = PtrInitLHS;
+ const auto *RightVD = cast<VarDecl>(PtrInitRHS->getDecl());
+ switch (S.lookup(LeftVD)) {
+ case Strategy::Kind::Span:
+ if (S.lookup(RightVD) == Strategy::Kind::Span)
+ return FixItList{};
+ return std::nullopt;
+ case Strategy::Kind::Wontfix:
+ return std::nullopt;
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("unsupported strategies for FixableGadgets");
+ }
+ return std::nullopt;
+}
+
+std::optional<FixItList>
+ULCArraySubscriptGadget::getFixits(const Strategy &S) const {
+ if (const auto *DRE =
+ dyn_cast<DeclRefExpr>(Node->getBase()->IgnoreImpCasts()))
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ switch (S.lookup(VD)) {
+ case Strategy::Kind::Span: {
+ // If the index has a negative constant value, we give up as no valid
+ // fix-it can be generated:
+ const ASTContext &Ctx = // FIXME: we need ASTContext to be passed in!
+ VD->getASTContext();
+ if (auto ConstVal = Node->getIdx()->getIntegerConstantExpr(Ctx)) {
+ if (ConstVal->isNegative())
+ return std::nullopt;
+ } else if (!Node->getIdx()->getType()->isUnsignedIntegerType())
+ return std::nullopt;
+ // no-op is a good fix-it, otherwise
+ return FixItList{};
+ }
+ case Strategy::Kind::Wontfix:
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("unsupported strategies for FixableGadgets");
+ }
+ }
+ return std::nullopt;
+}
+
+static std::optional<FixItList> // forward declaration
+fixUPCAddressofArraySubscriptWithSpan(const UnaryOperator *Node);
+
+std::optional<FixItList>
+UPCAddressofArraySubscriptGadget::getFixits(const Strategy &S) const {
+ auto DREs = getClaimedVarUseSites();
+ const auto *VD = cast<VarDecl>(DREs.front()->getDecl());
+
+ switch (S.lookup(VD)) {
+ case Strategy::Kind::Span:
+ return fixUPCAddressofArraySubscriptWithSpan(Node);
+ case Strategy::Kind::Wontfix:
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("unsupported strategies for FixableGadgets");
+ }
+ return std::nullopt; // something went wrong, no fix-it
+}
+
+// FIXME: this function should be customizable through format
+static StringRef getEndOfLine() {
+ static const char *const EOL = "\n";
+ return EOL;
+}
+
+// Returns the text indicating that the user needs to provide input there:
+std::string getUserFillPlaceHolder(StringRef HintTextToUser = "placeholder") {
+ std::string s = std::string("<# ");
+ s += HintTextToUser;
+ s += " #>";
+ return s;
+}
+
+// Return the text representation of the given `APInt Val`:
+static std::string getAPIntText(APInt Val) {
+ SmallVector<char> Txt;
+ Val.toString(Txt, 10, true);
+ // APInt::toString does not add '\0' to the end of the string for us:
+ Txt.push_back('\0');
+ return Txt.data();
+}
+
+// Return the source location of the last character of the AST `Node`.
+template <typename NodeTy>
+static std::optional<SourceLocation>
+getEndCharLoc(const NodeTy *Node, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ unsigned TkLen = Lexer::MeasureTokenLength(Node->getEndLoc(), SM, LangOpts);
+ SourceLocation Loc = Node->getEndLoc().getLocWithOffset(TkLen - 1);
+
+ if (Loc.isValid())
+ return Loc;
+
+ return std::nullopt;
+}
+
+// Return the source location just past the last character of the AST `Node`.
+template <typename NodeTy>
+static std::optional<SourceLocation> getPastLoc(const NodeTy *Node,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation Loc =
+ Lexer::getLocForEndOfToken(Node->getEndLoc(), 0, SM, LangOpts);
+
+ if (Loc.isValid())
+ return Loc;
+
+ return std::nullopt;
+}
+
+// Return text representation of an `Expr`.
+static std::optional<StringRef> getExprText(const Expr *E,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ std::optional<SourceLocation> LastCharLoc = getPastLoc(E, SM, LangOpts);
+
+ if (LastCharLoc)
+ return Lexer::getSourceText(
+ CharSourceRange::getCharRange(E->getBeginLoc(), *LastCharLoc), SM,
+ LangOpts);
+
+ return std::nullopt;
+}
+
+// Returns the literal text in `SourceRange SR`, if `SR` is a valid range.
+static std::optional<StringRef> getRangeText(SourceRange SR,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ bool Invalid = false;
+ CharSourceRange CSR = CharSourceRange::getCharRange(SR.getBegin(), SR.getEnd());
+ StringRef Text = Lexer::getSourceText(CSR, SM, LangOpts, &Invalid);
+
+ if (!Invalid)
+ return Text;
+ return std::nullopt;
+}
+
+// Returns the text of the pointee type of `T` from a `VarDecl` of a pointer
+// type. The text is obtained through from `TypeLoc`s. Since `TypeLoc` does not
+// have source ranges of qualifiers ( The `QualifiedTypeLoc` looks hacky too me
+// :( ), `Qualifiers` of the pointee type is returned separately through the
+// output parameter `QualifiersToAppend`.
+static std::optional<std::string>
+getPointeeTypeText(const ParmVarDecl *VD, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ std::optional<Qualifiers> *QualifiersToAppend) {
+ QualType Ty = VD->getType();
+ QualType PteTy;
+
+ assert(Ty->isPointerType() && !Ty->isFunctionPointerType() &&
+ "Expecting a VarDecl of type of pointer to object type");
+ PteTy = Ty->getPointeeType();
+ if (PteTy->hasUnnamedOrLocalType())
+ // If the pointee type is unnamed, we can't refer to it
+ return std::nullopt;
+
+ TypeLoc TyLoc = VD->getTypeSourceInfo()->getTypeLoc();
+ TypeLoc PteTyLoc = TyLoc.getUnqualifiedLoc().getNextTypeLoc();
+ SourceLocation VDNameStartLoc = VD->getLocation();
+
+ if (!(VDNameStartLoc.isValid() && PteTyLoc.getSourceRange().isValid())) {
+ // We are expecting these locations to be valid. But in some cases, they are
+ // not all valid. It is a Clang bug to me and we are not responsible for
+ // fixing it. So we will just give up for now when it happens.
+ return std::nullopt;
+ }
+
+ // Note that TypeLoc.getEndLoc() returns the begin location of the last token:
+ SourceLocation PteEndOfTokenLoc =
+ Lexer::getLocForEndOfToken(PteTyLoc.getEndLoc(), 0, SM, LangOpts);
+
+ if (!SM.isBeforeInTranslationUnit(PteEndOfTokenLoc, VDNameStartLoc)) {
+ // We only deal with the cases where the source text of the pointee type
+ // appears on the left-hand side of the variable identifier completely,
+ // including the following forms:
+ // `T ident`,
+ // `T ident[]`, where `T` is any type.
+ // Examples of excluded cases are `T (*ident)[]` or `T ident[][n]`.
+ return std::nullopt;
+ }
+ if (PteTy.hasQualifiers()) {
+ // TypeLoc does not provide source ranges for qualifiers (it says it's
+ // intentional but seems fishy to me), so we cannot get the full text
+ // `PteTy` via source ranges.
+ *QualifiersToAppend = PteTy.getQualifiers();
+ }
+ return getRangeText({PteTyLoc.getBeginLoc(), PteEndOfTokenLoc}, SM, LangOpts)
+ ->str();
+}
+
+// Returns the text of the name (with qualifiers) of a `FunctionDecl`.
+static std::optional<StringRef> getFunNameText(const FunctionDecl *FD,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation BeginLoc = FD->getQualifier()
+ ? FD->getQualifierLoc().getBeginLoc()
+ : FD->getNameInfo().getBeginLoc();
+ // Note that `FD->getNameInfo().getEndLoc()` returns the begin location of the
+ // last token:
+ SourceLocation EndLoc = Lexer::getLocForEndOfToken(
+ FD->getNameInfo().getEndLoc(), 0, SM, LangOpts);
+ SourceRange NameRange{BeginLoc, EndLoc};
+
+ return getRangeText(NameRange, SM, LangOpts);
+}
+
+std::optional<FixItList>
+DerefSimplePtrArithFixableGadget::getFixits(const Strategy &s) const {
+ const VarDecl *VD = dyn_cast<VarDecl>(BaseDeclRefExpr->getDecl());
+
+ if (VD && s.lookup(VD) == Strategy::Kind::Span) {
+ ASTContext &Ctx = VD->getASTContext();
+ // std::span can't represent elements before its begin()
+ if (auto ConstVal = Offset->getIntegerConstantExpr(Ctx))
+ if (ConstVal->isNegative())
+ return std::nullopt;
+
+ // note that the expr may (oddly) has multiple layers of parens
+ // example:
+ // *((..(pointer + 123)..))
+ // goal:
+ // pointer[123]
+ // Fix-It:
+ // remove '*('
+ // replace ' + ' with '['
+ // replace ')' with ']'
+
+ // example:
+ // *((..(123 + pointer)..))
+ // goal:
+ // 123[pointer]
+ // Fix-It:
+ // remove '*('
+ // replace ' + ' with '['
+ // replace ')' with ']'
+
+ const Expr *LHS = AddOp->getLHS(), *RHS = AddOp->getRHS();
+ const SourceManager &SM = Ctx.getSourceManager();
+ const LangOptions &LangOpts = Ctx.getLangOpts();
+ CharSourceRange StarWithTrailWhitespace =
+ clang::CharSourceRange::getCharRange(DerefOp->getOperatorLoc(),
+ LHS->getBeginLoc());
+
+ std::optional<SourceLocation> LHSLocation = getPastLoc(LHS, SM, LangOpts);
+ if (!LHSLocation)
+ return std::nullopt;
+
+ CharSourceRange PlusWithSurroundingWhitespace =
+ clang::CharSourceRange::getCharRange(*LHSLocation, RHS->getBeginLoc());
+
+ std::optional<SourceLocation> AddOpLocation =
+ getPastLoc(AddOp, SM, LangOpts);
+ std::optional<SourceLocation> DerefOpLocation =
+ getPastLoc(DerefOp, SM, LangOpts);
+
+ if (!AddOpLocation || !DerefOpLocation)
+ return std::nullopt;
+
+ CharSourceRange ClosingParenWithPrecWhitespace =
+ clang::CharSourceRange::getCharRange(*AddOpLocation, *DerefOpLocation);
+
+ return FixItList{
+ {FixItHint::CreateRemoval(StarWithTrailWhitespace),
+ FixItHint::CreateReplacement(PlusWithSurroundingWhitespace, "["),
+ FixItHint::CreateReplacement(ClosingParenWithPrecWhitespace, "]")}};
+ }
+ return std::nullopt; // something wrong or unsupported, give up
+}
+
+std::optional<FixItList>
+PointerDereferenceGadget::getFixits(const Strategy &S) const {
+ const VarDecl *VD = cast<VarDecl>(BaseDeclRefExpr->getDecl());
+ switch (S.lookup(VD)) {
+ case Strategy::Kind::Span: {
+ ASTContext &Ctx = VD->getASTContext();
+ SourceManager &SM = Ctx.getSourceManager();
+ // Required changes: *(ptr); => (ptr[0]); and *ptr; => ptr[0]
+ // Deletes the *operand
+ CharSourceRange derefRange = clang::CharSourceRange::getCharRange(
+ Op->getBeginLoc(), Op->getBeginLoc().getLocWithOffset(1));
+ // Inserts the [0]
+ std::optional<SourceLocation> EndOfOperand =
+ getEndCharLoc(BaseDeclRefExpr, SM, Ctx.getLangOpts());
+ if (EndOfOperand) {
+ return FixItList{{FixItHint::CreateRemoval(derefRange),
+ FixItHint::CreateInsertion(
+ (*EndOfOperand).getLocWithOffset(1), "[0]")}};
+ }
+ }
+ [[fallthrough]];
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("Strategy not implemented yet!");
+ case Strategy::Kind::Wontfix:
+ llvm_unreachable("Invalid strategy!");
+ }
+
+ return std::nullopt;
+}
+
+// Generates fix-its replacing an expression of the form UPC(DRE) with
+// `DRE.data()`
+std::optional<FixItList> UPCStandalonePointerGadget::getFixits(const Strategy &S)
+ const {
+ const auto VD = cast<VarDecl>(Node->getDecl());
+ switch (S.lookup(VD)) {
+ case Strategy::Kind::Span: {
+ ASTContext &Ctx = VD->getASTContext();
+ SourceManager &SM = Ctx.getSourceManager();
+ // Inserts the .data() after the DRE
+ std::optional<SourceLocation> EndOfOperand =
+ getPastLoc(Node, SM, Ctx.getLangOpts());
+
+ if (EndOfOperand)
+ return FixItList{{FixItHint::CreateInsertion(
+ *EndOfOperand, ".data()")}};
+ }
+ [[fallthrough]];
+ case Strategy::Kind::Wontfix:
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("unsupported strategies for FixableGadgets");
+ }
+
+ return std::nullopt;
+}
+
+// Generates fix-its replacing an expression of the form `&DRE[e]` with
+// `&DRE.data()[e]`:
+static std::optional<FixItList>
+fixUPCAddressofArraySubscriptWithSpan(const UnaryOperator *Node) {
+ const auto *ArraySub = cast<ArraySubscriptExpr>(Node->getSubExpr());
+ const auto *DRE = cast<DeclRefExpr>(ArraySub->getBase()->IgnoreImpCasts());
+ // FIXME: this `getASTContext` call is costly, we should pass the
+ // ASTContext in:
+ const ASTContext &Ctx = DRE->getDecl()->getASTContext();
+ const Expr *Idx = ArraySub->getIdx();
+ const SourceManager &SM = Ctx.getSourceManager();
+ const LangOptions &LangOpts = Ctx.getLangOpts();
+ std::stringstream SS;
+ bool IdxIsLitZero = false;
+
+ if (auto ICE = Idx->getIntegerConstantExpr(Ctx))
+ if ((*ICE).isZero())
+ IdxIsLitZero = true;
+ std::optional<StringRef> DreString = getExprText(DRE, SM, LangOpts);
+ if (!DreString)
+ return std::nullopt;
+
+ if (IdxIsLitZero) {
+ // If the index is literal zero, we produce the most concise fix-it:
+ SS << (*DreString).str() << ".data()";
+ } else {
+ std::optional<StringRef> IndexString = getExprText(Idx, SM, LangOpts);
+ if (!IndexString)
+ return std::nullopt;
+
+ SS << "&" << (*DreString).str() << ".data()"
+ << "[" << (*IndexString).str() << "]";
+ }
+ return FixItList{
+ FixItHint::CreateReplacement(Node->getSourceRange(), SS.str())};
+}
+
+
+std::optional<FixItList> UPCPreIncrementGadget::getFixits(const Strategy &S) const {
+ DeclUseList DREs = getClaimedVarUseSites();
+
+ if (DREs.size() != 1)
+ return std::nullopt; // In cases of `++Ptr` where `Ptr` is not a DRE, we
+ // give up
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DREs.front()->getDecl())) {
+ if (S.lookup(VD) == Strategy::Kind::Span) {
+ FixItList Fixes;
+ std::stringstream SS;
+ const Stmt *PreIncNode = getBaseStmt();
+ StringRef varName = VD->getName();
+ const ASTContext &Ctx = VD->getASTContext();
+
+ // To transform UPC(++p) to UPC((p = p.subspan(1)).data()):
+ SS << "(" << varName.data() << " = " << varName.data()
+ << ".subspan(1)).data()";
+ std::optional<SourceLocation> PreIncLocation =
+ getEndCharLoc(PreIncNode, Ctx.getSourceManager(), Ctx.getLangOpts());
+ if (!PreIncLocation)
+ return std::nullopt;
+
+ Fixes.push_back(FixItHint::CreateReplacement(
+ SourceRange(PreIncNode->getBeginLoc(), *PreIncLocation), SS.str()));
+ return Fixes;
+ }
+ }
+ return std::nullopt; // Not in the cases that we can handle for now, give up.
+}
+
+
+// For a non-null initializer `Init` of `T *` type, this function returns
+// `FixItHint`s producing a list initializer `{Init, S}` as a part of a fix-it
+// to output stream.
+// In many cases, this function cannot figure out the actual extent `S`. It
+// then will use a place holder to replace `S` to ask users to fill `S` in. The
+// initializer shall be used to initialize a variable of type `std::span<T>`.
+//
+// FIXME: Support multi-level pointers
+//
+// Parameters:
+// `Init` a pointer to the initializer expression
+// `Ctx` a reference to the ASTContext
+static FixItList
+populateInitializerFixItWithSpan(const Expr *Init, ASTContext &Ctx,
+ const StringRef UserFillPlaceHolder) {
+ const SourceManager &SM = Ctx.getSourceManager();
+ const LangOptions &LangOpts = Ctx.getLangOpts();
+
+ // If `Init` has a constant value that is (or equivalent to) a
+ // NULL pointer, we use the default constructor to initialize the span
+ // object, i.e., a `std:span` variable declaration with no initializer.
+ // So the fix-it is just to remove the initializer.
+ if (Init->isNullPointerConstant(Ctx,
+ // FIXME: Why does this function not ask for `const ASTContext
+ // &`? It should. Maybe worth an NFC patch later.
+ Expr::NullPointerConstantValueDependence::
+ NPC_ValueDependentIsNotNull)) {
+ std::optional<SourceLocation> InitLocation =
+ getEndCharLoc(Init, SM, LangOpts);
+ if (!InitLocation)
+ return {};
+
+ SourceRange SR(Init->getBeginLoc(), *InitLocation);
+
+ return {FixItHint::CreateRemoval(SR)};
+ }
+
+ FixItList FixIts{};
+ std::string ExtentText = UserFillPlaceHolder.data();
+ StringRef One = "1";
+
+ // Insert `{` before `Init`:
+ FixIts.push_back(FixItHint::CreateInsertion(Init->getBeginLoc(), "{"));
+ // Try to get the data extent. Break into different cases:
+ if (auto CxxNew = dyn_cast<CXXNewExpr>(Init->IgnoreImpCasts())) {
+ // In cases `Init` is `new T[n]` and there is no explicit cast over
+ // `Init`, we know that `Init` must evaluates to a pointer to `n` objects
+ // of `T`. So the extent is `n` unless `n` has side effects. Similar but
+ // simpler for the case where `Init` is `new T`.
+ if (const Expr *Ext = CxxNew->getArraySize().value_or(nullptr)) {
+ if (!Ext->HasSideEffects(Ctx)) {
+ std::optional<StringRef> ExtentString = getExprText(Ext, SM, LangOpts);
+ if (!ExtentString)
+ return {};
+ ExtentText = *ExtentString;
+ }
+ } else if (!CxxNew->isArray())
+ // Although the initializer is not allocating a buffer, the pointer
+ // variable could still be used in buffer access operations.
+ ExtentText = One;
+ } else if (const auto *CArrTy = Ctx.getAsConstantArrayType(
+ Init->IgnoreImpCasts()->getType())) {
+ // In cases `Init` is of an array type after stripping off implicit casts,
+ // the extent is the array size. Note that if the array size is not a
+ // constant, we cannot use it as the extent.
+ ExtentText = getAPIntText(CArrTy->getSize());
+ } else {
+ // In cases `Init` is of the form `&Var` after stripping of implicit
+ // casts, where `&` is the built-in operator, the extent is 1.
+ if (auto AddrOfExpr = dyn_cast<UnaryOperator>(Init->IgnoreImpCasts()))
+ if (AddrOfExpr->getOpcode() == UnaryOperatorKind::UO_AddrOf &&
+ isa_and_present<DeclRefExpr>(AddrOfExpr->getSubExpr()))
+ ExtentText = One;
+ // TODO: we can handle more cases, e.g., `&a[0]`, `&a`, `std::addressof`,
+ // and explicit casting, etc. etc.
+ }
+
+ SmallString<32> StrBuffer{};
+ std::optional<SourceLocation> LocPassInit = getPastLoc(Init, SM, LangOpts);
+
+ if (!LocPassInit)
+ return {};
+
+ StrBuffer.append(", ");
+ StrBuffer.append(ExtentText);
+ StrBuffer.append("}");
+ FixIts.push_back(FixItHint::CreateInsertion(*LocPassInit, StrBuffer.str()));
+ return FixIts;
+}
+
+// For a `VarDecl` of the form `T * var (= Init)?`, this
+// function generates a fix-it for the declaration, which re-declares `var` to
+// be of `span<T>` type and transforms the initializer, if present, to a span
+// constructor---`span<T> var {Init, Extent}`, where `Extent` may need the user
+// to fill in.
+//
+// FIXME: support Multi-level pointers
+//
+// Parameters:
+// `D` a pointer the variable declaration node
+// `Ctx` a reference to the ASTContext
+// Returns:
+// the generated fix-it
+static FixItList fixVarDeclWithSpan(const VarDecl *D, ASTContext &Ctx,
+ const StringRef UserFillPlaceHolder) {
+ const QualType &SpanEltT = D->getType()->getPointeeType();
+ assert(!SpanEltT.isNull() && "Trying to fix a non-pointer type variable!");
+
+ FixItList FixIts{};
+ std::optional<SourceLocation>
+ ReplacementLastLoc; // the inclusive end location of the replacement
+ const SourceManager &SM = Ctx.getSourceManager();
+
+ if (const Expr *Init = D->getInit()) {
+ FixItList InitFixIts =
+ populateInitializerFixItWithSpan(Init, Ctx, UserFillPlaceHolder);
+
+ if (InitFixIts.empty())
+ return {};
+
+ // The loc right before the initializer:
+ ReplacementLastLoc = Init->getBeginLoc().getLocWithOffset(-1);
+ FixIts.insert(FixIts.end(), std::make_move_iterator(InitFixIts.begin()),
+ std::make_move_iterator(InitFixIts.end()));
+ } else
+ ReplacementLastLoc = getEndCharLoc(D, SM, Ctx.getLangOpts());
+
+ // Producing fix-it for variable declaration---make `D` to be of span type:
+ SmallString<32> Replacement;
+ raw_svector_ostream OS(Replacement);
+
+ OS << "std::span<" << SpanEltT.getAsString() << "> " << D->getName();
+
+ if (!ReplacementLastLoc)
+ return {};
+
+ FixIts.push_back(FixItHint::CreateReplacement(
+ SourceRange{D->getBeginLoc(), *ReplacementLastLoc}, OS.str()));
+ return FixIts;
+}
+
+static bool hasConflictingOverload(const FunctionDecl *FD) {
+ return !FD->getDeclContext()->lookup(FD->getDeclName()).isSingleResult();
+}
+
+// For a `FunDecl`, one of whose `ParmVarDecl`s is being changed to have a new
+// type, this function produces fix-its to make the change self-contained. Let
+// 'F' be the entity defined by the original `FunDecl` and "NewF" be the entity
+// defined by the `FunDecl` after the change to the parameter. Fix-its produced
+// by this function are
+// 1. Add the `[[clang::unsafe_buffer_usage]]` attribute to each declaration
+// of 'F';
+// 2. Create a declaration of "NewF" next to each declaration of `F`;
+//   3. Create a definition of "F" (as its original definition now belongs
+//   to "NewF") next to its original definition. The body of the created
+//   definition calls "NewF".
+//
+// Example:
+//
+// void f(int *p); // original declaration
+// void f(int *p) { // original definition
+// p[5];
+// }
+//
+// To change the parameter `p` to be of `std::span<int>` type, we
+// also add overloads:
+//
+// [[clang::unsafe_buffer_usage]] void f(int *p); // original decl
+// void f(std::span<int> p); // added overload decl
+// void f(std::span<int> p) { // original def where param is changed
+// p[5];
+// }
+// [[clang::unsafe_buffer_usage]] void f(int *p) { // added def
+// return f(std::span(p, <# size #>));
+// }
+//
+// The actual fix-its may contain more details, e.g., the attribute may be
+// guarded by a macro
+// #if __has_cpp_attribute(clang::unsafe_buffer_usage)
+// [[clang::unsafe_buffer_usage]]
+// #endif
+//
+// `NewTyText` is the string representation of the new type, to which the
+// parameter indexed by `ParmIdx` is being changed.
+static std::optional<FixItList>
+createOverloadsForFixedParams(unsigned ParmIdx, StringRef NewTyText,
+ const FunctionDecl *FD, const ASTContext &Ctx,
+ UnsafeBufferUsageHandler &Handler) {
+ // FIXME: need to make this conflict checking better:
+ if (hasConflictingOverload(FD))
+ return std::nullopt;
+
+ const SourceManager &SM = Ctx.getSourceManager();
+ const LangOptions &LangOpts = Ctx.getLangOpts();
+ // FIXME Respect indentation of the original code.
+
+ // A lambda that creates the text representation of a function declaration
+ // with the new type signature:
+ const auto NewOverloadSignatureCreator =
+ [&SM, &LangOpts](const FunctionDecl *FD, unsigned ParmIdx,
+ StringRef NewTypeText) -> std::optional<std::string> {
+ std::stringstream SS;
+
+ SS << ";";
+ SS << getEndOfLine().str();
+ // Append: ret-type func-name "("
+ if (auto Prefix = getRangeText(
+ SourceRange(FD->getBeginLoc(), (*FD->param_begin())->getBeginLoc()),
+ SM, LangOpts))
+ SS << Prefix->str();
+ else
+ return std::nullopt; // give up
+ // Append: parameter-type-list
+ const unsigned NumParms = FD->getNumParams();
+
+ for (unsigned i = 0; i < NumParms; i++) {
+ const ParmVarDecl *Parm = FD->getParamDecl(i);
+
+ if (Parm->isImplicit())
+ continue;
+ if (i == ParmIdx) {
+ SS << NewTypeText.str();
+ // print parameter name if provided:
+ if (IdentifierInfo * II = Parm->getIdentifier())
+ SS << " " << II->getName().str();
+ } else if (auto ParmTypeText =
+ getRangeText(Parm->getSourceRange(), SM, LangOpts)) {
+ // print the whole `Parm` without modification:
+ SS << ParmTypeText->str();
+ } else
+ return std::nullopt; // something wrong, give up
+ if (i != NumParms - 1)
+ SS << ", ";
+ }
+ SS << ")";
+ return SS.str();
+ };
+
+ // A lambda that creates the text representation of a function definition with
+ // the original signature:
+ const auto OldOverloadDefCreator =
+ [&SM, &Handler,
+ &LangOpts](const FunctionDecl *FD, unsigned ParmIdx,
+ StringRef NewTypeText) -> std::optional<std::string> {
+ std::stringstream SS;
+
+ SS << getEndOfLine().str();
+ // Append: attr-name ret-type func-name "(" param-list ")" "{"
+ if (auto FDPrefix = getRangeText(
+ SourceRange(FD->getBeginLoc(), FD->getBody()->getBeginLoc()), SM,
+ LangOpts))
+ SS << Handler.getUnsafeBufferUsageAttributeTextAt(FD->getBeginLoc(), " ")
+ << FDPrefix->str() << "{";
+ else
+ return std::nullopt;
+ // Append: "return" func-name "("
+ if (auto FunQualName = getFunNameText(FD, SM, LangOpts))
+ SS << "return " << FunQualName->str() << "(";
+ else
+ return std::nullopt;
+
+ // Append: arg-list
+ const unsigned NumParms = FD->getNumParams();
+ for (unsigned i = 0; i < NumParms; i++) {
+ const ParmVarDecl *Parm = FD->getParamDecl(i);
+
+ if (Parm->isImplicit())
+ continue;
+ // FIXME: If a parameter has no name, it is unused in the
+ // definition. So we could just leave it as it is.
+ if (!Parm->getIdentifier())
+ // If a parameter of a function definition has no name:
+ return std::nullopt;
+ if (i == ParmIdx)
+        // This is our spanified parameter!
+ SS << NewTypeText.str() << "(" << Parm->getIdentifier()->getName().str() << ", "
+ << getUserFillPlaceHolder("size") << ")";
+ else
+ SS << Parm->getIdentifier()->getName().str();
+ if (i != NumParms - 1)
+ SS << ", ";
+ }
+ // finish call and the body
+ SS << ");}" << getEndOfLine().str();
+ // FIXME: 80-char line formatting?
+ return SS.str();
+ };
+
+ FixItList FixIts{};
+ for (FunctionDecl *FReDecl : FD->redecls()) {
+ std::optional<SourceLocation> Loc = getPastLoc(FReDecl, SM, LangOpts);
+
+ if (!Loc)
+ return {};
+ if (FReDecl->isThisDeclarationADefinition()) {
+ assert(FReDecl == FD && "inconsistent function definition");
+ // Inserts a definition with the old signature to the end of
+ // `FReDecl`:
+ if (auto OldOverloadDef =
+ OldOverloadDefCreator(FReDecl, ParmIdx, NewTyText))
+ FixIts.emplace_back(FixItHint::CreateInsertion(*Loc, *OldOverloadDef));
+ else
+ return {}; // give up
+ } else {
+ // Adds the unsafe-buffer attribute (if not already there) to `FReDecl`:
+ if (!FReDecl->hasAttr<UnsafeBufferUsageAttr>()) {
+ FixIts.emplace_back(FixItHint::CreateInsertion(
+ FReDecl->getBeginLoc(), Handler.getUnsafeBufferUsageAttributeTextAt(
+ FReDecl->getBeginLoc(), " ")));
+ }
+ // Inserts a declaration with the new signature to the end of `FReDecl`:
+ if (auto NewOverloadDecl =
+ NewOverloadSignatureCreator(FReDecl, ParmIdx, NewTyText))
+ FixIts.emplace_back(FixItHint::CreateInsertion(*Loc, *NewOverloadDecl));
+ else
+ return {};
+ }
+ }
+ return FixIts;
+}
+
+// To fix a `ParmVarDecl` to be of `std::span` type. In addition, creating a
+// new overload of the function so that the change is self-contained (see
+// `createOverloadsForFixedParams`).
+static FixItList fixParamWithSpan(const ParmVarDecl *PVD, const ASTContext &Ctx,
+ UnsafeBufferUsageHandler &Handler) {
+ if (PVD->hasDefaultArg())
+ // FIXME: generate fix-its for default values:
+ return {};
+ assert(PVD->getType()->isPointerType());
+ auto *FD = dyn_cast<FunctionDecl>(PVD->getDeclContext());
+
+ if (!FD)
+ return {};
+
+ std::optional<Qualifiers> PteTyQualifiers = std::nullopt;
+ std::optional<std::string> PteTyText = getPointeeTypeText(
+ PVD, Ctx.getSourceManager(), Ctx.getLangOpts(), &PteTyQualifiers);
+
+ if (!PteTyText)
+ return {};
+
+ std::optional<StringRef> PVDNameText = PVD->getIdentifier()->getName();
+
+ if (!PVDNameText)
+ return {};
+
+ std::string SpanOpen = "std::span<";
+ std::string SpanClose = ">";
+ std::string SpanTyText;
+ std::stringstream SS;
+
+ SS << SpanOpen << *PteTyText;
+ // Append qualifiers to span element type:
+ if (PteTyQualifiers)
+ SS << " " << PteTyQualifiers->getAsString();
+ SS << SpanClose;
+ // Save the Span type text:
+ SpanTyText = SS.str();
+ // Append qualifiers to the type of the parameter:
+ if (PVD->getType().hasQualifiers())
+ SS << " " << PVD->getType().getQualifiers().getAsString();
+ // Append parameter's name:
+ SS << " " << PVDNameText->str();
+
+ FixItList Fixes;
+ unsigned ParmIdx = 0;
+
+ Fixes.push_back(
+ FixItHint::CreateReplacement(PVD->getSourceRange(), SS.str()));
+ for (auto *ParmIter : FD->parameters()) {
+ if (PVD == ParmIter)
+ break;
+ ParmIdx++;
+ }
+ if (ParmIdx < FD->getNumParams())
+ if (auto OverloadFix = createOverloadsForFixedParams(ParmIdx, SpanTyText,
+ FD, Ctx, Handler)) {
+ Fixes.append(*OverloadFix);
+ return Fixes;
+ }
+ return {};
+}
+
+static FixItList fixVariableWithSpan(const VarDecl *VD,
+ const DeclUseTracker &Tracker,
+ ASTContext &Ctx,
+ UnsafeBufferUsageHandler &Handler) {
+ const DeclStmt *DS = Tracker.lookupDecl(VD);
+ assert(DS && "Fixing non-local variables not implemented yet!");
+ if (!DS->isSingleDecl()) {
+ // FIXME: to support handling multiple `VarDecl`s in a single `DeclStmt`
+ return {};
+ }
+ // Currently DS is an unused variable but we'll need it when
+ // non-single decls are implemented, where the pointee type name
+ // and the '*' are spread around the place.
+ (void)DS;
+
+ // FIXME: handle cases where DS has multiple declarations
+ return fixVarDeclWithSpan(VD, Ctx, getUserFillPlaceHolder());
+}
+
+// TODO: we should be consistent to use `std::nullopt` to represent no-fix due
+// to any unexpected problem.
+static FixItList
+fixVariable(const VarDecl *VD, Strategy::Kind K,
+ /* The function decl under analysis */ const Decl *D,
+ const DeclUseTracker &Tracker, ASTContext &Ctx,
+ UnsafeBufferUsageHandler &Handler) {
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(VD)) {
+ auto *FD = dyn_cast<clang::FunctionDecl>(PVD->getDeclContext());
+ if (!FD || FD != D)
+ // `FD != D` means that `PVD` belongs to a function that is not being
+ // analyzed currently. Thus `FD` may not be complete.
+ return {};
+
+ // TODO If function has a try block we can't change params unless we check
+ // also its catch block for their use.
+ // FIXME We might support static class methods, some select methods,
+    // operators and possibly lambdas.
+ if (FD->isMain() || FD->isConstexpr() ||
+ FD->getTemplatedKind() != FunctionDecl::TemplatedKind::TK_NonTemplate ||
+ FD->isVariadic() ||
+        // also covers call-operator of lambdas
+ isa<CXXMethodDecl>(FD) ||
+ // skip when the function body is a try-block
+ (FD->hasBody() && isa<CXXTryStmt>(FD->getBody())) ||
+ FD->isOverloadedOperator())
+ return {}; // TODO test all these cases
+ }
+
+ switch (K) {
+ case Strategy::Kind::Span: {
+ if (VD->getType()->isPointerType()) {
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(VD))
+ return fixParamWithSpan(PVD, Ctx, Handler);
+
+ if (VD->isLocalVarDecl())
+ return fixVariableWithSpan(VD, Tracker, Ctx, Handler);
+ }
+ return {};
+ }
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("Strategy not implemented yet!");
+ case Strategy::Kind::Wontfix:
+ llvm_unreachable("Invalid strategy!");
+ }
+ llvm_unreachable("Unknown strategy!");
+}
+
+// Returns true iff there exists a `FixItHint` 'h' in `FixIts` such that the
+// `RemoveRange` of 'h' overlaps with a macro use.
+static bool overlapWithMacro(const FixItList &FixIts) {
+ // FIXME: For now we only check if the range (or the first token) is (part of)
+ // a macro expansion. Ideally, we want to check for all tokens in the range.
+ return llvm::any_of(FixIts, [](const FixItHint &Hint) {
+ auto Range = Hint.RemoveRange;
+ if (Range.getBegin().isMacroID() || Range.getEnd().isMacroID())
+ // If the range (or the first token) is (part of) a macro expansion:
+ return true;
+ return false;
+ });
+}
+
+static bool impossibleToFixForVar(const FixableGadgetSets &FixablesForAllVars,
+ const Strategy &S,
+ const VarDecl * Var) {
+ for (const auto &F : FixablesForAllVars.byVar.find(Var)->second) {
+ std::optional<FixItList> Fixits = F->getFixits(S);
+ if (!Fixits) {
+ return true;
+ }
+ }
+ return false;
+}
+
static std::map<const VarDecl *, FixItList>
-getFixIts(FixableGadgetSets &FixablesForUnsafeVars, const Strategy &S) {
+getFixIts(FixableGadgetSets &FixablesForAllVars, const Strategy &S,
+ ASTContext &Ctx,
+ /* The function decl under analysis */ const Decl *D,
+ const DeclUseTracker &Tracker, UnsafeBufferUsageHandler &Handler,
+ const DefMapTy &VarGrpMap) {
std::map<const VarDecl *, FixItList> FixItsForVariable;
- for (const auto &[VD, Fixables] : FixablesForUnsafeVars.byVar) {
- // TODO fixVariable - fixit for the variable itself
+ for (const auto &[VD, Fixables] : FixablesForAllVars.byVar) {
+ FixItsForVariable[VD] =
+ fixVariable(VD, S.lookup(VD), D, Tracker, Ctx, Handler);
+ // If we fail to produce Fix-It for the declaration we have to skip the
+ // variable entirely.
+ if (FixItsForVariable[VD].empty()) {
+ FixItsForVariable.erase(VD);
+ continue;
+ }
bool ImpossibleToFix = false;
llvm::SmallVector<FixItHint, 16> FixItsForVD;
for (const auto &F : Fixables) {
- llvm::Optional<FixItList> Fixits = F->getFixits(S);
+ std::optional<FixItList> Fixits = F->getFixits(S);
if (!Fixits) {
ImpossibleToFix = true;
break;
@@ -625,15 +2130,71 @@ getFixIts(FixableGadgetSets &FixablesForUnsafeVars, const Strategy &S) {
CorrectFixes.end());
}
}
- if (ImpossibleToFix)
+
+ if (ImpossibleToFix) {
FixItsForVariable.erase(VD);
- else
- FixItsForVariable[VD].insert(FixItsForVariable[VD].end(),
- FixItsForVD.begin(), FixItsForVD.end());
+ continue;
+ }
+
+ const auto VarGroupForVD = VarGrpMap.find(VD);
+ if (VarGroupForVD != VarGrpMap.end()) {
+ for (const VarDecl * V : VarGroupForVD->second) {
+ if (V == VD) {
+ continue;
+ }
+ if (impossibleToFixForVar(FixablesForAllVars, S, V)) {
+ ImpossibleToFix = true;
+ break;
+ }
+ }
+
+ if (ImpossibleToFix) {
+ FixItsForVariable.erase(VD);
+ for (const VarDecl * V : VarGroupForVD->second) {
+ FixItsForVariable.erase(V);
+ }
+ continue;
+ }
+ }
+ FixItsForVariable[VD].insert(FixItsForVariable[VD].end(),
+ FixItsForVD.begin(), FixItsForVD.end());
+
+ // Fix-it shall not overlap with macros or/and templates:
+ if (overlapWithMacro(FixItsForVariable[VD]) ||
+ clang::internal::anyConflict(FixItsForVariable[VD],
+ Ctx.getSourceManager())) {
+ FixItsForVariable.erase(VD);
+ continue;
+ }
+ }
+
+ for (auto VD : FixItsForVariable) {
+ const auto VarGroupForVD = VarGrpMap.find(VD.first);
+ const Strategy::Kind ReplacementTypeForVD = S.lookup(VD.first);
+ if (VarGroupForVD != VarGrpMap.end()) {
+ for (const VarDecl * Var : VarGroupForVD->second) {
+ if (Var == VD.first) {
+ continue;
+ }
+
+ FixItList GroupFix;
+ if (FixItsForVariable.find(Var) == FixItsForVariable.end()) {
+ GroupFix = fixVariable(Var, ReplacementTypeForVD, D, Tracker,
+ Var->getASTContext(), Handler);
+ } else {
+ GroupFix = FixItsForVariable[Var];
+ }
+
+ for (auto Fix : GroupFix) {
+ FixItsForVariable[VD.first].push_back(Fix);
+ }
+ }
+ }
}
return FixItsForVariable;
}
+
static Strategy
getNaiveStrategy(const llvm::SmallVectorImpl<const VarDecl *> &UnsafeVars) {
Strategy S;
@@ -644,38 +2205,161 @@ getNaiveStrategy(const llvm::SmallVectorImpl<const VarDecl *> &UnsafeVars) {
}
void clang::checkUnsafeBufferUsage(const Decl *D,
- UnsafeBufferUsageHandler &Handler) {
+ UnsafeBufferUsageHandler &Handler,
+ bool EmitSuggestions) {
assert(D && D->getBody());
+ // We do not want to visit a Lambda expression defined inside a method independently.
+ // Instead, it should be visited along with the outer method.
+ if (const auto *fd = dyn_cast<CXXMethodDecl>(D)) {
+ if (fd->getParent()->isLambda() && fd->getParent()->isLocalClass())
+ return;
+ }
+
+ // Do not emit fixit suggestions for functions declared in an
+ // extern "C" block.
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ for (FunctionDecl *FReDecl : FD->redecls()) {
+ if (FReDecl->isExternC()) {
+ EmitSuggestions = false;
+ break;
+ }
+ }
+ }
+
WarningGadgetSets UnsafeOps;
- FixableGadgetSets FixablesForUnsafeVars;
- DeclUseTracker Tracker;
+ FixableGadgetSets FixablesForAllVars;
+
+ auto [FixableGadgets, WarningGadgets, Tracker] =
+ findGadgets(D, Handler, EmitSuggestions);
- {
- auto [FixableGadgets, WarningGadgets, TrackerRes] = findGadgets(D);
- UnsafeOps = groupWarningGadgetsByVar(std::move(WarningGadgets));
- FixablesForUnsafeVars = groupFixablesByVar(std::move(FixableGadgets));
- Tracker = std::move(TrackerRes);
+ if (!EmitSuggestions) {
+ // Our job is very easy without suggestions. Just warn about
+ // every problematic operation and consider it done. No need to deal
+ // with fixable gadgets, no need to group operations by variable.
+ for (const auto &G : WarningGadgets) {
+ Handler.handleUnsafeOperation(G->getBaseStmt(),
+ /*IsRelatedToDecl=*/false);
+ }
+
+ // This return guarantees that most of the machine doesn't run when
+ // suggestions aren't requested.
+ assert(FixableGadgets.size() == 0 &&
+ "Fixable gadgets found but suggestions not requested!");
+ return;
}
+ UnsafeOps = groupWarningGadgetsByVar(std::move(WarningGadgets));
+ FixablesForAllVars = groupFixablesByVar(std::move(FixableGadgets));
+
+ std::map<const VarDecl *, FixItList> FixItsForVariableGroup;
+ DefMapTy VariableGroupsMap{};
+
// Filter out non-local vars and vars with unclaimed DeclRefExpr-s.
- for (auto it = FixablesForUnsafeVars.byVar.cbegin();
- it != FixablesForUnsafeVars.byVar.cend();) {
- // FIXME: Support ParmVarDecl as well.
- if (!it->first->isLocalVarDecl() || Tracker.hasUnclaimedUses(it->first)) {
- it = FixablesForUnsafeVars.byVar.erase(it);
+ for (auto it = FixablesForAllVars.byVar.cbegin();
+ it != FixablesForAllVars.byVar.cend();) {
+ // FIXME: need to deal with global variables later
+ if ((!it->first->isLocalVarDecl() && !isa<ParmVarDecl>(it->first)) ||
+ Tracker.hasUnclaimedUses(it->first) || it->first->isInitCapture()) {
+ it = FixablesForAllVars.byVar.erase(it);
} else {
++it;
}
}
llvm::SmallVector<const VarDecl *, 16> UnsafeVars;
- for (const auto &[VD, ignore] : FixablesForUnsafeVars.byVar)
+ for (const auto &[VD, ignore] : FixablesForAllVars.byVar)
UnsafeVars.push_back(VD);
+ // Fixpoint iteration for pointer assignments
+ using DepMapTy = DenseMap<const VarDecl *, std::set<const VarDecl *>>;
+ DepMapTy DependenciesMap{};
+ DepMapTy PtrAssignmentGraph{};
+
+ for (auto it : FixablesForAllVars.byVar) {
+ for (const FixableGadget *fixable : it.second) {
+ std::optional<std::pair<const VarDecl *, const VarDecl *>> ImplPair =
+ fixable->getStrategyImplications();
+ if (ImplPair) {
+ std::pair<const VarDecl *, const VarDecl *> Impl = ImplPair.value();
+ PtrAssignmentGraph[Impl.first].insert(Impl.second);
+ }
+ }
+ }
+
+ /*
+ The following code does a BFS traversal of the `PtrAssignmentGraph`
+ considering all unsafe vars as starting nodes and constructs an undirected
+ graph `DependenciesMap`. Constructing the `DependenciesMap` in this manner
+  eliminates all variables that are unreachable from any unsafe var. In other
+ words, this removes all dependencies that don't include any unsafe variable
+ and consequently don't need any fixit generation.
+ Note: A careful reader would observe that the code traverses
+ `PtrAssignmentGraph` using `CurrentVar` but adds edges between `Var` and
+ `Adj` and not between `CurrentVar` and `Adj`. Both approaches would
+ achieve the same result but the one used here dramatically cuts the
+ amount of hoops the second part of the algorithm needs to jump, given that
+ a lot of these connections become "direct". The reader is advised not to
+ imagine how the graph is transformed because of using `Var` instead of
+ `CurrentVar`. The reader can continue reading as if `CurrentVar` was used,
+ and think about why it's equivalent later.
+ */
+ std::set<const VarDecl *> VisitedVarsDirected{};
+ for (const auto &[Var, ignore] : UnsafeOps.byVar) {
+ if (VisitedVarsDirected.find(Var) == VisitedVarsDirected.end()) {
+
+ std::queue<const VarDecl*> QueueDirected{};
+ QueueDirected.push(Var);
+ while(!QueueDirected.empty()) {
+ const VarDecl* CurrentVar = QueueDirected.front();
+ QueueDirected.pop();
+ VisitedVarsDirected.insert(CurrentVar);
+ auto AdjacentNodes = PtrAssignmentGraph[CurrentVar];
+ for (const VarDecl *Adj : AdjacentNodes) {
+ if (VisitedVarsDirected.find(Adj) == VisitedVarsDirected.end()) {
+ QueueDirected.push(Adj);
+ }
+ DependenciesMap[Var].insert(Adj);
+ DependenciesMap[Adj].insert(Var);
+ }
+ }
+ }
+ }
+
+ // Group Connected Components for Unsafe Vars
+ // (Dependencies based on pointer assignments)
+ std::set<const VarDecl *> VisitedVars{};
+ for (const auto &[Var, ignore] : UnsafeOps.byVar) {
+ if (VisitedVars.find(Var) == VisitedVars.end()) {
+ std::vector<const VarDecl *> VarGroup{};
+
+ std::queue<const VarDecl*> Queue{};
+ Queue.push(Var);
+ while(!Queue.empty()) {
+ const VarDecl* CurrentVar = Queue.front();
+ Queue.pop();
+ VisitedVars.insert(CurrentVar);
+ VarGroup.push_back(CurrentVar);
+ auto AdjacentNodes = DependenciesMap[CurrentVar];
+ for (const VarDecl *Adj : AdjacentNodes) {
+ if (VisitedVars.find(Adj) == VisitedVars.end()) {
+ Queue.push(Adj);
+ }
+ }
+ }
+ for (const VarDecl * V : VarGroup) {
+ if (UnsafeOps.byVar.find(V) != UnsafeOps.byVar.end()) {
+ VariableGroupsMap[V] = VarGroup;
+ }
+ }
+ }
+ }
+
Strategy NaiveStrategy = getNaiveStrategy(UnsafeVars);
- std::map<const VarDecl *, FixItList> FixItsForVariable =
- getFixIts(FixablesForUnsafeVars, NaiveStrategy);
+
+ FixItsForVariableGroup =
+ getFixIts(FixablesForAllVars, NaiveStrategy, D->getASTContext(), D,
+ Tracker, Handler, VariableGroupsMap);
// FIXME Detect overlapping FixIts.
@@ -684,10 +2368,11 @@ void clang::checkUnsafeBufferUsage(const Decl *D,
}
for (const auto &[VD, WarningGadgets] : UnsafeOps.byVar) {
- auto FixItsIt = FixItsForVariable.find(VD);
- Handler.handleFixableVariable(VD, FixItsIt != FixItsForVariable.end()
- ? std::move(FixItsIt->second)
- : FixItList{});
+ auto FixItsIt = FixItsForVariableGroup.find(VD);
+ Handler.handleUnsafeVariableGroup(VD, VariableGroupsMap,
+ FixItsIt != FixItsForVariableGroup.end()
+ ? std::move(FixItsIt->second)
+ : FixItList{});
for (const auto &G : WarningGadgets) {
Handler.handleUnsafeOperation(G->getBaseStmt(), /*IsRelatedToDecl=*/true);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Attributes.cpp b/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
index a961e68f4ac1..6c0cc87430ee 100644
--- a/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
@@ -2,8 +2,18 @@
#include "clang/Basic/AttrSubjectMatchRules.h"
#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/ParsedAttrInfo.h"
using namespace clang;
+static int hasAttributeImpl(AttributeCommonInfo::Syntax Syntax, StringRef Name,
+ StringRef ScopeName, const TargetInfo &Target,
+ const LangOptions &LangOpts) {
+
+#include "clang/Basic/AttrHasAttributeImpl.inc"
+
+ return 0;
+}
+
int clang::hasAttribute(AttributeCommonInfo::Syntax Syntax,
const IdentifierInfo *Scope, const IdentifierInfo *Attr,
const TargetInfo &Target, const LangOptions &LangOpts) {
@@ -23,11 +33,17 @@ int clang::hasAttribute(AttributeCommonInfo::Syntax Syntax,
// attributes. We support those, but not through the typical attribute
// machinery that goes through TableGen. We support this in all OpenMP modes
// so long as double square brackets are enabled.
- if (LangOpts.OpenMP && LangOpts.DoubleSquareBracketAttributes &&
- ScopeName == "omp")
+ if (LangOpts.OpenMP && ScopeName == "omp")
return (Name == "directive" || Name == "sequence") ? 1 : 0;
-#include "clang/Basic/AttrHasAttributeImpl.inc"
+ int res = hasAttributeImpl(Syntax, Name, ScopeName, Target, LangOpts);
+ if (res)
+ return res;
+
+ // Check if any plugin provides this attribute.
+ for (auto &Ptr : getAttributePluginInstances())
+ if (Ptr->hasSpelling(Syntax, Name))
+ return 1;
return 0;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Builtins.cpp b/contrib/llvm-project/clang/lib/Basic/Builtins.cpp
index 74081a7c2ec6..d366989bafc5 100644
--- a/contrib/llvm-project/clang/lib/Basic/Builtins.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Builtins.cpp
@@ -151,7 +151,7 @@ void Builtin::Context::initializeBuiltins(IdentifierTable &Table,
unsigned ID = NameIt->second->getBuiltinID();
if (ID != Builtin::NotBuiltin && isPredefinedLibFunction(ID) &&
isInStdNamespace(ID) == InStdNamespace) {
- Table.get(Name).setBuiltinID(Builtin::NotBuiltin);
+ NameIt->second->clearBuiltinID();
}
}
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Cuda.cpp b/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
index b4cf6cbe95f8..2307352bd3be 100644
--- a/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
@@ -37,6 +37,8 @@ static const CudaVersionMapEntry CudaNameVersionMap[] = {
CUDA_ENTRY(11, 6),
CUDA_ENTRY(11, 7),
CUDA_ENTRY(11, 8),
+ CUDA_ENTRY(12, 0),
+ CUDA_ENTRY(12, 1),
{"", CudaVersion::NEW, llvm::VersionTuple(std::numeric_limits<int>::max())},
{"unknown", CudaVersion::UNKNOWN, {}} // End of list tombstone.
};
@@ -114,6 +116,8 @@ static const CudaArchToStringMap arch_names[] = {
GFX(90a), // gfx90a
GFX(90c), // gfx90c
GFX(940), // gfx940
+ GFX(941), // gfx941
+ GFX(942), // gfx942
GFX(1010), // gfx1010
GFX(1011), // gfx1011
GFX(1012), // gfx1012
@@ -129,6 +133,8 @@ static const CudaArchToStringMap arch_names[] = {
GFX(1101), // gfx1101
GFX(1102), // gfx1102
GFX(1103), // gfx1103
+ GFX(1150), // gfx1150
+ GFX(1151), // gfx1151
{CudaArch::Generic, "generic", ""},
// clang-format on
};
@@ -218,7 +224,11 @@ CudaVersion MaxVersionForCudaArch(CudaArch A) {
case CudaArch::SM_21:
return CudaVersion::CUDA_80;
case CudaArch::SM_30:
- return CudaVersion::CUDA_110;
+ case CudaArch::SM_32:
+ return CudaVersion::CUDA_102;
+ case CudaArch::SM_35:
+ case CudaArch::SM_37:
+ return CudaVersion::CUDA_118;
default:
return CudaVersion::NEW;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
index dbe62ecb50d3..7a54d27ef9d8 100644
--- a/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
@@ -43,28 +43,12 @@ using namespace clang;
const StreamingDiagnostic &clang::operator<<(const StreamingDiagnostic &DB,
DiagNullabilityKind nullability) {
- StringRef string;
- switch (nullability.first) {
- case NullabilityKind::NonNull:
- string = nullability.second ? "'nonnull'" : "'_Nonnull'";
- break;
-
- case NullabilityKind::Nullable:
- string = nullability.second ? "'nullable'" : "'_Nullable'";
- break;
-
- case NullabilityKind::Unspecified:
- string = nullability.second ? "'null_unspecified'" : "'_Null_unspecified'";
- break;
-
- case NullabilityKind::NullableResult:
- assert(!nullability.second &&
- "_Nullable_result isn't supported as context-sensitive keyword");
- string = "_Nullable_result";
- break;
- }
-
- DB.AddString(string);
+ DB.AddString(
+ ("'" +
+ getNullabilitySpelling(nullability.first,
+ /*isContextSensitive=*/nullability.second) +
+ "'")
+ .str());
return DB;
}
@@ -176,6 +160,18 @@ void DiagnosticsEngine::ReportDelayed() {
Report(ID) << DelayedDiagArg1 << DelayedDiagArg2 << DelayedDiagArg3;
}
+DiagnosticMapping &
+DiagnosticsEngine::DiagState::getOrAddMapping(diag::kind Diag) {
+ std::pair<iterator, bool> Result =
+ DiagMap.insert(std::make_pair(Diag, DiagnosticMapping()));
+
+ // Initialize the entry if we added it.
+ if (Result.second)
+ Result.first->second = DiagnosticIDs::getDefaultMapping(Diag);
+
+ return Result.first->second;
+}
+
void DiagnosticsEngine::DiagStateMap::appendFirst(DiagState *State) {
assert(Files.empty() && "not first");
FirstDiagState = CurDiagState = State;
@@ -793,8 +789,8 @@ static const char *getTokenDescForDiagnostic(tok::TokenKind Kind) {
/// array.
void Diagnostic::
FormatDiagnostic(SmallVectorImpl<char> &OutStr) const {
- if (!StoredDiagMessage.empty()) {
- OutStr.append(StoredDiagMessage.begin(), StoredDiagMessage.end());
+ if (StoredDiagMessage.has_value()) {
+ OutStr.append(StoredDiagMessage->begin(), StoredDiagMessage->end());
return;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
index ac08e98a278d..e5667d57f8cf 100644
--- a/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
@@ -256,7 +256,7 @@ CATEGORY(REFACTORING, ANALYSIS)
return Found;
}
-static DiagnosticMapping GetDefaultDiagMapping(unsigned DiagID) {
+DiagnosticMapping DiagnosticIDs::getDefaultMapping(unsigned DiagID) {
DiagnosticMapping Info = DiagnosticMapping::Make(
diag::Severity::Fatal, /*IsUser=*/false, /*IsPragma=*/false);
@@ -293,21 +293,6 @@ namespace {
};
}
-// Unfortunately, the split between DiagnosticIDs and Diagnostic is not
-// particularly clean, but for now we just implement this method here so we can
-// access GetDefaultDiagMapping.
-DiagnosticMapping &
-DiagnosticsEngine::DiagState::getOrAddMapping(diag::kind Diag) {
- std::pair<iterator, bool> Result =
- DiagMap.insert(std::make_pair(Diag, DiagnosticMapping()));
-
- // Initialize the entry if we added it.
- if (Result.second)
- Result.first->second = GetDefaultDiagMapping(Diag);
-
- return Result.first->second;
-}
-
static const StaticDiagCategoryRec CategoryNameTable[] = {
#define GET_CATEGORY_TABLE
#define CATEGORY(X, ENUM) { X, STR_SIZE(X, uint8_t) },
@@ -449,7 +434,7 @@ bool DiagnosticIDs::isBuiltinExtensionDiag(unsigned DiagID,
return false;
EnabledByDefault =
- GetDefaultDiagMapping(DiagID).getSeverity() != diag::Severity::Ignored;
+ getDefaultMapping(DiagID).getSeverity() != diag::Severity::Ignored;
return true;
}
@@ -457,7 +442,7 @@ bool DiagnosticIDs::isDefaultMappingAsError(unsigned DiagID) {
if (DiagID >= diag::DIAG_UPPER_LIMIT)
return false;
- return GetDefaultDiagMapping(DiagID).getSeverity() >= diag::Severity::Error;
+ return getDefaultMapping(DiagID).getSeverity() >= diag::Severity::Error;
}
/// getDescription - Given a diagnostic ID, return a description of the
diff --git a/contrib/llvm-project/clang/lib/Basic/FileManager.cpp b/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
index e8d0f20019eb..f92c1aeb2112 100644
--- a/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
@@ -319,7 +319,7 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
// Cache the redirection in the previously-inserted entry, still available
// in the tentative return value.
- NamedFileEnt->second = FileEntryRef::MapValue(Redirection);
+ NamedFileEnt->second = FileEntryRef::MapValue(Redirection, DirInfo);
}
FileEntryRef ReturnedRef(*NamedFileEnt);
@@ -403,8 +403,7 @@ FileEntryRef FileManager::getVirtualFileRef(StringRef Filename, off_t Size,
FileEntryRef::MapValue Value = *NamedFileEnt.second;
if (LLVM_LIKELY(Value.V.is<FileEntry *>()))
return FileEntryRef(NamedFileEnt);
- return FileEntryRef(*reinterpret_cast<const FileEntryRef::MapEntry *>(
- Value.V.get<const void *>()));
+ return FileEntryRef(*Value.V.get<const FileEntryRef::MapEntry *>());
}
// We've not seen this before, or the file is cached as non-existent.
@@ -632,16 +631,15 @@ void FileManager::GetUniqueIDMapping(
UIDToFiles[VFE->getUID()] = VFE;
}
-StringRef FileManager::getCanonicalName(const DirectoryEntry *Dir) {
- llvm::DenseMap<const void *, llvm::StringRef>::iterator Known
- = CanonicalNames.find(Dir);
+StringRef FileManager::getCanonicalName(DirectoryEntryRef Dir) {
+ auto Known = CanonicalNames.find(Dir);
if (Known != CanonicalNames.end())
return Known->second;
- StringRef CanonicalName(Dir->getName());
+ StringRef CanonicalName(Dir.getName());
SmallString<4096> CanonicalNameBuf;
- if (!FS->getRealPath(Dir->getName(), CanonicalNameBuf))
+ if (!FS->getRealPath(Dir.getName(), CanonicalNameBuf))
CanonicalName = CanonicalNameBuf.str().copy(CanonicalNameStorage);
CanonicalNames.insert({Dir, CanonicalName});
diff --git a/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp b/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
index 63b08d8d0459..0065a6173c20 100644
--- a/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
@@ -191,7 +191,7 @@ static KeywordStatus getKeywordStatusHelper(const LangOptions &LangOpts,
case KEYCOROUTINES:
return LangOpts.Coroutines ? KS_Enabled : KS_Unknown;
case KEYMODULES:
- return LangOpts.ModulesTS ? KS_Enabled : KS_Unknown;
+ return KS_Unknown;
case KEYOPENCLCXX:
return LangOpts.OpenCLCPlusPlus ? KS_Enabled : KS_Unknown;
case KEYMSCOMPAT:
@@ -279,6 +279,16 @@ static void AddObjCKeyword(StringRef Name,
Table.get(Name).setObjCKeywordID(ObjCID);
}
+static void AddInterestingIdentifier(StringRef Name,
+ tok::InterestingIdentifierKind BTID,
+ IdentifierTable &Table) {
+ // Don't add 'not_interesting' identifier.
+ if (BTID != tok::not_interesting) {
+ IdentifierInfo &Info = Table.get(Name, tok::identifier);
+ Info.setInterestingIdentifierID(BTID);
+ }
+}
+
/// AddKeywords - Add all keywords to the symbol table.
///
void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
@@ -295,6 +305,9 @@ void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
#define OBJC_AT_KEYWORD(NAME) \
if (LangOpts.ObjC) \
AddObjCKeyword(StringRef(#NAME), tok::objc_##NAME, *this);
+#define INTERESTING_IDENTIFIER(NAME) \
+ AddInterestingIdentifier(StringRef(#NAME), tok::NAME, *this);
+
#define TESTING_KEYWORD(NAME, FLAGS)
#include "clang/Basic/TokenKinds.def"
@@ -384,6 +397,19 @@ IdentifierInfo::isReserved(const LangOptions &LangOpts) const {
return ReservedIdentifierStatus::NotReserved;
}
+ReservedLiteralSuffixIdStatus
+IdentifierInfo::isReservedLiteralSuffixId() const {
+ StringRef Name = getName();
+
+ if (Name[0] != '_')
+ return ReservedLiteralSuffixIdStatus::NotStartsWithUnderscore;
+
+ if (Name.contains("__"))
+ return ReservedLiteralSuffixIdStatus::ContainsDoubleUnderscore;
+
+ return ReservedLiteralSuffixIdStatus::NotReserved;
+}
+
StringRef IdentifierInfo::deuglifiedName() const {
StringRef Name = getName();
if (Name.size() >= 2 && Name.front() == '_' &&
@@ -849,6 +875,21 @@ StringRef clang::getNullabilitySpelling(NullabilityKind kind,
llvm_unreachable("Unknown nullability kind.");
}
+llvm::raw_ostream &clang::operator<<(llvm::raw_ostream &OS,
+ NullabilityKind NK) {
+ switch (NK) {
+ case NullabilityKind::NonNull:
+ return OS << "NonNull";
+ case NullabilityKind::Nullable:
+ return OS << "Nullable";
+ case NullabilityKind::NullableResult:
+ return OS << "NullableResult";
+ case NullabilityKind::Unspecified:
+ return OS << "Unspecified";
+ }
+ llvm_unreachable("Unknown nullability kind.");
+}
+
diag::kind
IdentifierTable::getFutureCompatDiagKind(const IdentifierInfo &II,
const LangOptions &LangOpts) {
diff --git a/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp b/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
index 753b6bfe18a3..b44c71f572be 100644
--- a/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
@@ -29,6 +29,14 @@ void LangOptions::resetNonModularOptions() {
Name = static_cast<unsigned>(Default);
#include "clang/Basic/LangOptions.def"
+ // Reset "benign" options with implied values (Options.td ImpliedBy relations)
+ // rather than their defaults. This avoids unexpected combinations and
+ // invocations that cannot be round-tripped to arguments.
+ // FIXME: we should derive this automatically from ImpliedBy in tablegen.
+ AllowFPReassoc = UnsafeFPMath;
+ NoHonorNaNs = FiniteMathOnly;
+ NoHonorInfs = FiniteMathOnly;
+
// These options do not affect AST generation.
NoSanitizeFiles.clear();
XRayAlwaysInstrumentFiles.clear();
@@ -109,7 +117,8 @@ void LangOptions::setLangDefaults(LangOptions &Opts, Language Lang,
Opts.CPlusPlus14 = Std.isCPlusPlus14();
Opts.CPlusPlus17 = Std.isCPlusPlus17();
Opts.CPlusPlus20 = Std.isCPlusPlus20();
- Opts.CPlusPlus2b = Std.isCPlusPlus2b();
+ Opts.CPlusPlus23 = Std.isCPlusPlus23();
+ Opts.CPlusPlus26 = Std.isCPlusPlus26();
Opts.GNUMode = Std.isGNUMode();
Opts.GNUCVersion = 0;
Opts.HexFloats = Std.hasHexFloats();
diff --git a/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp b/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp
index 4b36f7bf4786..af9cf4f27392 100644
--- a/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp
@@ -9,8 +9,8 @@
#include "clang/Basic/LangStandard.h"
#include "clang/Config/config.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TargetParser/Triple.h"
using namespace clang;
#define LANGSTANDARD(id, name, lang, desc, features) \
@@ -54,8 +54,6 @@ LangStandard::Kind clang::getDefaultLanguageStandard(clang::Language Lang,
return LangStandard::lang_opencl12;
case Language::OpenCLCXX:
return LangStandard::lang_openclcpp10;
- case Language::CUDA:
- return LangStandard::lang_cuda;
case Language::Asm:
case Language::C:
// The PS4 uses C99 as the default C standard.
@@ -66,13 +64,11 @@ LangStandard::Kind clang::getDefaultLanguageStandard(clang::Language Lang,
return LangStandard::lang_gnu11;
case Language::CXX:
case Language::ObjCXX:
- if (T.isPS())
- return LangStandard::lang_gnucxx14;
+ case Language::CUDA:
+ case Language::HIP:
return LangStandard::lang_gnucxx17;
case Language::RenderScript:
return LangStandard::lang_c99;
- case Language::HIP:
- return LangStandard::lang_hip;
case Language::HLSL:
return LangStandard::lang_hlsl2021;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Module.cpp b/contrib/llvm-project/clang/lib/Basic/Module.cpp
index 9c4c83486c2d..8ec68237a0fc 100644
--- a/contrib/llvm-project/clang/lib/Basic/Module.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Module.cpp
@@ -59,9 +59,8 @@ Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
}
Module::~Module() {
- for (submodule_iterator I = submodule_begin(), IEnd = submodule_end();
- I != IEnd; ++I) {
- delete *I;
+ for (auto *Submodule : SubModules) {
+ delete Submodule;
}
}
@@ -108,6 +107,9 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
.Case("cplusplus11", LangOpts.CPlusPlus11)
.Case("cplusplus14", LangOpts.CPlusPlus14)
.Case("cplusplus17", LangOpts.CPlusPlus17)
+ .Case("cplusplus20", LangOpts.CPlusPlus20)
+ .Case("cplusplus23", LangOpts.CPlusPlus23)
+ .Case("cplusplus26", LangOpts.CPlusPlus26)
.Case("c99", LangOpts.C99)
.Case("c11", LangOpts.C11)
.Case("c17", LangOpts.C17)
@@ -261,26 +263,24 @@ bool Module::fullModuleNameIs(ArrayRef<StringRef> nameParts) const {
return nameParts.empty();
}
-Module::DirectoryName Module::getUmbrellaDir() const {
- if (Header U = getUmbrellaHeader())
- return {"", "", U.Entry->getDir()};
-
- return {UmbrellaAsWritten, UmbrellaRelativeToRootModuleDirectory,
- Umbrella.dyn_cast<const DirectoryEntry *>()};
+OptionalDirectoryEntryRef Module::getEffectiveUmbrellaDir() const {
+ if (Umbrella && Umbrella.is<FileEntryRef>())
+ return Umbrella.get<FileEntryRef>().getDir();
+ if (Umbrella && Umbrella.is<DirectoryEntryRef>())
+ return Umbrella.get<DirectoryEntryRef>();
+ return std::nullopt;
}
-void Module::addTopHeader(const FileEntry *File) {
+void Module::addTopHeader(FileEntryRef File) {
assert(File);
TopHeaders.insert(File);
}
-ArrayRef<const FileEntry *> Module::getTopHeaders(FileManager &FileMgr) {
+ArrayRef<FileEntryRef> Module::getTopHeaders(FileManager &FileMgr) {
if (!TopHeaderNames.empty()) {
- for (std::vector<std::string>::iterator
- I = TopHeaderNames.begin(), E = TopHeaderNames.end(); I != E; ++I) {
- if (auto FE = FileMgr.getFile(*I))
+ for (StringRef TopHeaderName : TopHeaderNames)
+ if (auto FE = FileMgr.getOptionalFileRef(TopHeaderName))
TopHeaders.insert(*FE);
- }
TopHeaderNames.clear();
}
@@ -339,11 +339,9 @@ void Module::markUnavailable(bool Unimportable) {
Current->IsAvailable = false;
Current->IsUnimportable |= Unimportable;
- for (submodule_iterator Sub = Current->submodule_begin(),
- SubEnd = Current->submodule_end();
- Sub != SubEnd; ++Sub) {
- if (needUpdate(*Sub))
- Stack.push_back(*Sub);
+ for (auto *Submodule : Current->submodules()) {
+ if (needUpdate(Submodule))
+ Stack.push_back(Submodule);
}
}
}
@@ -483,15 +481,15 @@ void Module::print(raw_ostream &OS, unsigned Indent, bool Dump) const {
OS << "\n";
}
- if (Header H = getUmbrellaHeader()) {
+ if (std::optional<Header> H = getUmbrellaHeaderAsWritten()) {
OS.indent(Indent + 2);
OS << "umbrella header \"";
- OS.write_escaped(H.NameAsWritten);
+ OS.write_escaped(H->NameAsWritten);
OS << "\"\n";
- } else if (DirectoryName D = getUmbrellaDir()) {
+ } else if (std::optional<DirectoryName> D = getUmbrellaDirAsWritten()) {
OS.indent(Indent + 2);
OS << "umbrella \"";
- OS.write_escaped(D.NameAsWritten);
+ OS.write_escaped(D->NameAsWritten);
OS << "\"\n";
}
@@ -523,8 +521,8 @@ void Module::print(raw_ostream &OS, unsigned Indent, bool Dump) const {
OS.indent(Indent + 2);
OS << K.Prefix << "header \"";
OS.write_escaped(H.NameAsWritten);
- OS << "\" { size " << H.Entry->getSize()
- << " mtime " << H.Entry->getModificationTime() << " }\n";
+ OS << "\" { size " << H.Entry.getSize()
+ << " mtime " << H.Entry.getModificationTime() << " }\n";
}
}
for (auto *Unresolved : {&UnresolvedHeaders, &MissingHeaders}) {
@@ -550,14 +548,13 @@ void Module::print(raw_ostream &OS, unsigned Indent, bool Dump) const {
OS << "export_as" << ExportAsModule << "\n";
}
- for (submodule_const_iterator MI = submodule_begin(), MIEnd = submodule_end();
- MI != MIEnd; ++MI)
+ for (auto *Submodule : submodules())
// Print inferred subframework modules so that we don't need to re-infer
// them (requires expensive directory iteration + stat calls) when we build
// the module. Regular inferred submodules are OK, as we need to look at all
// those header files anyway.
- if (!(*MI)->IsInferred || (*MI)->IsFramework)
- (*MI)->print(OS, Indent + 2, Dump);
+ if (!Submodule->IsInferred || Submodule->IsFramework)
+ Submodule->print(OS, Indent + 2, Dump);
for (unsigned I = 0, N = Exports.size(); I != N; ++I) {
OS.indent(Indent + 2);
@@ -698,6 +695,14 @@ void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc,
VisitModule({M, nullptr});
}
+void VisibleModuleSet::makeTransitiveImportsVisible(Module *M,
+ SourceLocation Loc,
+ VisibleCallback Vis,
+ ConflictCallback Cb) {
+ for (auto *I : M->Imports)
+ setVisible(I, Loc, Vis, Cb);
+}
+
ASTSourceDescriptor::ASTSourceDescriptor(Module &M)
: Signature(M.Signature), ClangModule(&M) {
if (M.Directory)
diff --git a/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp b/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp
index 44edf5402540..d39686ea688e 100644
--- a/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp
@@ -26,7 +26,7 @@ static const std::pair<StringRef, StringRef> FeatureExtensionMap[] = {
{"cl_khr_3d_image_writes", "__opencl_c_3d_image_writes"}};
bool OpenCLOptions::isKnown(llvm::StringRef Ext) const {
- return OptMap.find(Ext) != OptMap.end();
+ return OptMap.contains(Ext);
}
bool OpenCLOptions::isAvailableOption(llvm::StringRef Ext,
diff --git a/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp b/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
index 36bce7e44afb..1c59a9091b4a 100644
--- a/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
@@ -50,6 +50,11 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
return OMPC_DEPEND_unknown;
return Type;
}
+ case OMPC_doacross:
+ return llvm::StringSwitch<OpenMPDoacrossClauseModifier>(Str)
+#define OPENMP_DOACROSS_MODIFIER(Name) .Case(#Name, OMPC_DOACROSS_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_DOACROSS_unknown);
case OMPC_linear:
return llvm::StringSwitch<OpenMPLinearClauseKind>(Str)
#define OPENMP_LINEAR_KIND(Name) .Case(#Name, OMPC_LINEAR_##Name)
@@ -282,6 +287,16 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
#include "clang/Basic/OpenMPKinds.def"
}
llvm_unreachable("Invalid OpenMP 'depend' clause type");
+ case OMPC_doacross:
+ switch (Type) {
+ case OMPC_DOACROSS_unknown:
+ return "unknown";
+#define OPENMP_DOACROSS_MODIFIER(Name) \
+ case OMPC_DOACROSS_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'doacross' clause type");
case OMPC_linear:
switch (Type) {
case OMPC_LINEAR_unknown:
@@ -588,7 +603,9 @@ bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_teams_distribute_parallel_for_simd ||
DKind == OMPD_teams_distribute_parallel_for ||
DKind == OMPD_target_teams_distribute_parallel_for ||
- DKind == OMPD_target_teams_distribute_parallel_for_simd;
+ DKind == OMPD_target_teams_distribute_parallel_for_simd ||
+ DKind == OMPD_parallel_loop || DKind == OMPD_teams_loop ||
+ DKind == OMPD_target_parallel_loop || DKind == OMPD_target_teams_loop;
}
bool clang::isOpenMPTaskLoopDirective(OpenMPDirectiveKind DKind) {
@@ -617,7 +634,8 @@ bool clang::isOpenMPParallelDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_parallel_master_taskloop_simd ||
DKind == OMPD_parallel_masked_taskloop ||
DKind == OMPD_parallel_masked_taskloop_simd ||
- DKind == OMPD_parallel_loop || DKind == OMPD_target_parallel_loop;
+ DKind == OMPD_parallel_loop || DKind == OMPD_target_parallel_loop ||
+ DKind == OMPD_teams_loop;
}
bool clang::isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind) {
@@ -714,7 +732,8 @@ bool clang::isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind) {
Kind == OMPD_teams_distribute_parallel_for_simd ||
Kind == OMPD_teams_distribute_parallel_for ||
Kind == OMPD_target_teams_distribute_parallel_for ||
- Kind == OMPD_target_teams_distribute_parallel_for_simd;
+ Kind == OMPD_target_teams_distribute_parallel_for_simd ||
+ Kind == OMPD_teams_loop || Kind == OMPD_target_teams_loop;
}
bool clang::isOpenMPLoopTransformationDirective(OpenMPDirectiveKind DKind) {
@@ -751,7 +770,6 @@ void clang::getOpenMPCaptureRegions(
case OMPD_target_teams:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
- case OMPD_target_teams_loop:
CaptureRegions.push_back(OMPD_task);
CaptureRegions.push_back(OMPD_target);
CaptureRegions.push_back(OMPD_teams);
@@ -766,6 +784,7 @@ void clang::getOpenMPCaptureRegions(
CaptureRegions.push_back(OMPD_task);
CaptureRegions.push_back(OMPD_target);
break;
+ case OMPD_teams_loop:
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd:
CaptureRegions.push_back(OMPD_teams);
@@ -800,6 +819,7 @@ void clang::getOpenMPCaptureRegions(
CaptureRegions.push_back(OMPD_parallel);
CaptureRegions.push_back(OMPD_taskloop);
break;
+ case OMPD_target_teams_loop:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
CaptureRegions.push_back(OMPD_task);
@@ -807,9 +827,6 @@ void clang::getOpenMPCaptureRegions(
CaptureRegions.push_back(OMPD_teams);
CaptureRegions.push_back(OMPD_parallel);
break;
- case OMPD_teams_loop:
- CaptureRegions.push_back(OMPD_teams);
- break;
case OMPD_nothing:
CaptureRegions.push_back(OMPD_nothing);
break;
diff --git a/contrib/llvm-project/clang/lib/Basic/ParsedAttrInfo.cpp b/contrib/llvm-project/clang/lib/Basic/ParsedAttrInfo.cpp
new file mode 100644
index 000000000000..16fa314b642b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/ParsedAttrInfo.cpp
@@ -0,0 +1,32 @@
+//===- ParsedAttrInfo.cpp - Registry for attribute plugins ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Registry of attributes added by plugins which
+// derive the ParsedAttrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/ParsedAttrInfo.h"
+#include "llvm/Support/ManagedStatic.h"
+#include <list>
+#include <memory>
+
+using namespace clang;
+
+LLVM_INSTANTIATE_REGISTRY(ParsedAttrInfoRegistry)
+
+const std::list<std::unique_ptr<ParsedAttrInfo>> &
+clang::getAttributePluginInstances() {
+ static llvm::ManagedStatic<std::list<std::unique_ptr<ParsedAttrInfo>>>
+ PluginAttrInstances;
+ if (PluginAttrInstances->empty())
+ for (const auto &It : ParsedAttrInfoRegistry::entries())
+ PluginAttrInstances->emplace_back(It.instantiate());
+
+ return *PluginAttrInstances;
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Sarif.cpp b/contrib/llvm-project/clang/lib/Basic/Sarif.cpp
index c4a1ea40d125..e2af25c8143b 100644
--- a/contrib/llvm-project/clang/lib/Basic/Sarif.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Sarif.cpp
@@ -19,6 +19,7 @@
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ConvertUTF.h"
@@ -293,8 +294,11 @@ void SarifDocumentWriter::endRun() {
// Flush all the artifacts.
json::Object &Run = getCurrentRun();
json::Array *Artifacts = Run.getArray("artifacts");
- for (const auto &Pair : CurrentArtifacts) {
- const SarifArtifact &A = Pair.getValue();
+ SmallVector<std::pair<StringRef, SarifArtifact>, 0> Vec;
+ for (const auto &[K, V] : CurrentArtifacts)
+ Vec.emplace_back(K, V);
+ llvm::sort(Vec, llvm::less_first());
+ for (const auto &[_, A] : Vec) {
json::Object Loc{{"uri", A.Location.URI}};
if (A.Location.Index.has_value()) {
Loc["index"] = static_cast<int64_t>(*A.Location.Index);
diff --git a/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp b/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
index 3d7a53879584..6fa802a33a50 100644
--- a/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
@@ -1302,8 +1302,7 @@ LineOffsetMapping LineOffsetMapping::get(llvm::MemoryBufferRef Buffer,
// in [\n, \r + 1 [
// Scan for the next newline - it's very likely there's one.
- unsigned N =
- llvm::countTrailingZeros(Mask) - 7; // -7 because 0x80 is the marker
+ unsigned N = llvm::countr_zero(Mask) - 7; // -7 because 0x80 is the marker
Word >>= N;
Buf += N / 8 + 1;
unsigned char Byte = Word;
@@ -1313,7 +1312,7 @@ LineOffsetMapping LineOffsetMapping::get(llvm::MemoryBufferRef Buffer,
if (*Buf == '\n') {
++Buf;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case '\n':
LineOffsets.push_back(Buf - Start);
};
diff --git a/contrib/llvm-project/clang/lib/Basic/TargetID.cpp b/contrib/llvm-project/clang/lib/Basic/TargetID.cpp
index 7cc4d67e3a52..3c06d9bad1dc 100644
--- a/contrib/llvm-project/clang/lib/Basic/TargetID.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TargetID.cpp
@@ -8,9 +8,9 @@
#include "clang/Basic/TargetID.h"
#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/Triple.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/TargetParser.h"
+#include "llvm/TargetParser/Triple.h"
#include <map>
#include <optional>
@@ -133,7 +133,7 @@ std::string getCanonicalTargetID(llvm::StringRef Processor,
std::map<const llvm::StringRef, bool> OrderedMap;
for (const auto &F : Features)
OrderedMap[F.first()] = F.second;
- for (auto F : OrderedMap)
+ for (const auto &F : OrderedMap)
TargetID = TargetID + ':' + F.first.str() + (F.second ? "+" : "-");
return TargetID;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
index 8ee43261fc1d..6cd5d618a4ac 100644
--- a/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the TargetInfo and TargetInfoImpl interfaces.
+// This file implements the TargetInfo interface.
//
//===----------------------------------------------------------------------===//
@@ -19,7 +19,7 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/TargetParser/TargetParser.h"
#include <cstdlib>
using namespace clang;
@@ -47,6 +47,7 @@ static const LangASMap FakeAddrSpaceMap = {
11, // ptr32_uptr
12, // ptr64
13, // hlsl_groupshared
+ 20, // wasm_funcref
};
// TargetInfo Constructor.
@@ -63,6 +64,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
HasIbm128 = false;
HasFloat16 = false;
HasBFloat16 = false;
+ HasFullBFloat16 = false;
HasLongDouble = true;
HasFPReturn = true;
HasStrictFP = false;
@@ -98,7 +100,8 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
// https://www.gnu.org/software/libc/manual/html_node/Malloc-Examples.html.
// This alignment guarantee also applies to Windows and Android. On Darwin
// and OpenBSD, the alignment is 16 bytes on both 64-bit and 32-bit systems.
- if (T.isGNUEnvironment() || T.isWindowsMSVCEnvironment() || T.isAndroid())
+ if (T.isGNUEnvironment() || T.isWindowsMSVCEnvironment() || T.isAndroid() ||
+ T.isOHOSFamily())
NewAlign = Triple.isArch64Bit() ? 128 : Triple.isArch32Bit() ? 64 : 0;
else if (T.isOSDarwin() || T.isOSOpenBSD())
NewAlign = 128;
@@ -119,7 +122,6 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 0;
MaxVectorAlign = 0;
MaxTLSAlign = 0;
- SimdDefaultAlign = 0;
SizeType = UnsignedLong;
PtrDiffType = SignedLong;
IntMaxType = SignedLongLong;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets.cpp b/contrib/llvm-project/clang/lib/Basic/Targets.cpp
index 8400774db93d..636b59fd1272 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets.cpp
@@ -42,8 +42,9 @@
#include "Targets/X86.h"
#include "Targets/XCore.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticFrontend.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/TargetParser/Triple.h"
using namespace clang;
@@ -108,8 +109,8 @@ void addCygMingDefines(const LangOptions &Opts, MacroBuilder &Builder) {
// Driver code
//===----------------------------------------------------------------------===//
-TargetInfo *AllocateTarget(const llvm::Triple &Triple,
- const TargetOptions &Opts) {
+std::unique_ptr<TargetInfo> AllocateTarget(const llvm::Triple &Triple,
+ const TargetOptions &Opts) {
llvm::Triple::OSType os = Triple.getOS();
switch (Triple.getArch()) {
@@ -117,512 +118,576 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return nullptr;
case llvm::Triple::arc:
- return new ARCTargetInfo(Triple, Opts);
+ return std::make_unique<ARCTargetInfo>(Triple, Opts);
case llvm::Triple::xcore:
- return new XCoreTargetInfo(Triple, Opts);
+ return std::make_unique<XCoreTargetInfo>(Triple, Opts);
case llvm::Triple::hexagon:
if (os == llvm::Triple::Linux &&
Triple.getEnvironment() == llvm::Triple::Musl)
- return new LinuxTargetInfo<HexagonTargetInfo>(Triple, Opts);
- return new HexagonTargetInfo(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<HexagonTargetInfo>>(Triple, Opts);
+ return std::make_unique<HexagonTargetInfo>(Triple, Opts);
case llvm::Triple::lanai:
- return new LanaiTargetInfo(Triple, Opts);
+ return std::make_unique<LanaiTargetInfo>(Triple, Opts);
case llvm::Triple::aarch64_32:
if (Triple.isOSDarwin())
- return new DarwinAArch64TargetInfo(Triple, Opts);
+ return std::make_unique<DarwinAArch64TargetInfo>(Triple, Opts);
return nullptr;
case llvm::Triple::aarch64:
if (Triple.isOSDarwin())
- return new DarwinAArch64TargetInfo(Triple, Opts);
+ return std::make_unique<DarwinAArch64TargetInfo>(Triple, Opts);
switch (os) {
case llvm::Triple::CloudABI:
- return new CloudABITargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ return std::make_unique<CloudABITargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ return std::make_unique<FuchsiaTargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Linux:
- return new LinuxTargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ switch (Triple.getEnvironment()) {
+ default:
+ return std::make_unique<LinuxTargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
+ case llvm::Triple::OpenHOS:
+ return std::make_unique<OHOSTargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
+ }
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Win32:
switch (Triple.getEnvironment()) {
case llvm::Triple::GNU:
- return new MinGWARM64TargetInfo(Triple, Opts);
+ return std::make_unique<MinGWARM64TargetInfo>(Triple, Opts);
case llvm::Triple::MSVC:
default: // Assume MSVC for unknown environments
- return new MicrosoftARM64TargetInfo(Triple, Opts);
+ return std::make_unique<MicrosoftARM64TargetInfo>(Triple, Opts);
}
default:
- return new AArch64leTargetInfo(Triple, Opts);
+ return std::make_unique<AArch64leTargetInfo>(Triple, Opts);
}
case llvm::Triple::aarch64_be:
switch (os) {
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<AArch64beTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<AArch64beTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<AArch64beTargetInfo>(Triple, Opts);
+ return std::make_unique<FuchsiaTargetInfo<AArch64beTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Linux:
- return new LinuxTargetInfo<AArch64beTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<AArch64beTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<AArch64beTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<AArch64beTargetInfo>>(Triple,
+ Opts);
default:
- return new AArch64beTargetInfo(Triple, Opts);
+ return std::make_unique<AArch64beTargetInfo>(Triple, Opts);
}
case llvm::Triple::arm:
case llvm::Triple::thumb:
if (Triple.isOSBinFormatMachO())
- return new DarwinARMTargetInfo(Triple, Opts);
+ return std::make_unique<DarwinARMTargetInfo>(Triple, Opts);
switch (os) {
case llvm::Triple::CloudABI:
- return new CloudABITargetInfo<ARMleTargetInfo>(Triple, Opts);
+ return std::make_unique<CloudABITargetInfo<ARMleTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Linux:
- return new LinuxTargetInfo<ARMleTargetInfo>(Triple, Opts);
+ switch (Triple.getEnvironment()) {
+ default:
+ return std::make_unique<LinuxTargetInfo<ARMleTargetInfo>>(Triple, Opts);
+ case llvm::Triple::OpenHOS:
+ return std::make_unique<OHOSTargetInfo<ARMleTargetInfo>>(Triple, Opts);
+ }
+ case llvm::Triple::LiteOS:
+ return std::make_unique<OHOSTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<ARMleTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<ARMleTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<ARMleTargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<ARMleTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::NaCl:
- return new NaClTargetInfo<ARMleTargetInfo>(Triple, Opts);
+ return std::make_unique<NaClTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::Win32:
switch (Triple.getEnvironment()) {
case llvm::Triple::Cygnus:
- return new CygwinARMTargetInfo(Triple, Opts);
+ return std::make_unique<CygwinARMTargetInfo>(Triple, Opts);
case llvm::Triple::GNU:
- return new MinGWARMTargetInfo(Triple, Opts);
+ return std::make_unique<MinGWARMTargetInfo>(Triple, Opts);
case llvm::Triple::Itanium:
- return new ItaniumWindowsARMleTargetInfo(Triple, Opts);
+ return std::make_unique<ItaniumWindowsARMleTargetInfo>(Triple, Opts);
case llvm::Triple::MSVC:
default: // Assume MSVC for unknown environments
- return new MicrosoftARMleTargetInfo(Triple, Opts);
+ return std::make_unique<MicrosoftARMleTargetInfo>(Triple, Opts);
}
default:
- return new ARMleTargetInfo(Triple, Opts);
+ return std::make_unique<ARMleTargetInfo>(Triple, Opts);
}
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
if (Triple.isOSDarwin())
- return new DarwinARMTargetInfo(Triple, Opts);
+ return std::make_unique<DarwinARMTargetInfo>(Triple, Opts);
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<ARMbeTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<ARMbeTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<ARMbeTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<ARMbeTargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<ARMbeTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
case llvm::Triple::NaCl:
- return new NaClTargetInfo<ARMbeTargetInfo>(Triple, Opts);
+ return std::make_unique<NaClTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
default:
- return new ARMbeTargetInfo(Triple, Opts);
+ return std::make_unique<ARMbeTargetInfo>(Triple, Opts);
}
case llvm::Triple::avr:
- return new AVRTargetInfo(Triple, Opts);
+ return std::make_unique<AVRTargetInfo>(Triple, Opts);
case llvm::Triple::bpfeb:
case llvm::Triple::bpfel:
- return new BPFTargetInfo(Triple, Opts);
+ return std::make_unique<BPFTargetInfo>(Triple, Opts);
case llvm::Triple::msp430:
- return new MSP430TargetInfo(Triple, Opts);
+ return std::make_unique<MSP430TargetInfo>(Triple, Opts);
case llvm::Triple::mips:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
default:
- return new MipsTargetInfo(Triple, Opts);
+ return std::make_unique<MipsTargetInfo>(Triple, Opts);
}
case llvm::Triple::mipsel:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<MipsTargetInfo>(Triple, Opts);
+ switch (Triple.getEnvironment()) {
+ default:
+ return std::make_unique<LinuxTargetInfo<MipsTargetInfo>>(Triple, Opts);
+ case llvm::Triple::OpenHOS:
+ return std::make_unique<OHOSTargetInfo<MipsTargetInfo>>(Triple, Opts);
+ }
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::NaCl:
- return new NaClTargetInfo<NaClMips32TargetInfo>(Triple, Opts);
+ return std::make_unique<NaClTargetInfo<NaClMips32TargetInfo>>(Triple,
+ Opts);
default:
- return new MipsTargetInfo(Triple, Opts);
+ return std::make_unique<MipsTargetInfo>(Triple, Opts);
}
case llvm::Triple::mips64:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
default:
- return new MipsTargetInfo(Triple, Opts);
+ return std::make_unique<MipsTargetInfo>(Triple, Opts);
}
case llvm::Triple::mips64el:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
default:
- return new MipsTargetInfo(Triple, Opts);
+ return std::make_unique<MipsTargetInfo>(Triple, Opts);
}
case llvm::Triple::m68k:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<M68kTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<M68kTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<M68kTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<M68kTargetInfo>>(Triple, Opts);
default:
- return new M68kTargetInfo(Triple, Opts);
+ return std::make_unique<M68kTargetInfo>(Triple, Opts);
}
case llvm::Triple::le32:
switch (os) {
case llvm::Triple::NaCl:
- return new NaClTargetInfo<PNaClTargetInfo>(Triple, Opts);
+ return std::make_unique<NaClTargetInfo<PNaClTargetInfo>>(Triple, Opts);
default:
return nullptr;
}
case llvm::Triple::le64:
- return new Le64TargetInfo(Triple, Opts);
+ return std::make_unique<Le64TargetInfo>(Triple, Opts);
case llvm::Triple::ppc:
- if (Triple.isOSDarwin())
- return new DarwinPPC32TargetInfo(Triple, Opts);
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<PPC32TargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<PPC32TargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<PPC32TargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<PPC32TargetInfo>>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<PPC32TargetInfo>>(Triple, Opts);
case llvm::Triple::AIX:
- return new AIXPPC32TargetInfo(Triple, Opts);
+ return std::make_unique<AIXPPC32TargetInfo>(Triple, Opts);
default:
- return new PPC32TargetInfo(Triple, Opts);
+ return std::make_unique<PPC32TargetInfo>(Triple, Opts);
}
case llvm::Triple::ppcle:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<PPC32TargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<PPC32TargetInfo>>(Triple, Opts);
default:
- return new PPC32TargetInfo(Triple, Opts);
+ return std::make_unique<PPC32TargetInfo>(Triple, Opts);
}
case llvm::Triple::ppc64:
- if (Triple.isOSDarwin())
- return new DarwinPPC64TargetInfo(Triple, Opts);
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::Lv2:
- return new PS3PPUTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<PS3PPUTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::AIX:
- return new AIXPPC64TargetInfo(Triple, Opts);
+ return std::make_unique<AIXPPC64TargetInfo>(Triple, Opts);
default:
- return new PPC64TargetInfo(Triple, Opts);
+ return std::make_unique<PPC64TargetInfo>(Triple, Opts);
}
case llvm::Triple::ppc64le:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<PPC64TargetInfo>>(Triple, Opts);
default:
- return new PPC64TargetInfo(Triple, Opts);
+ return std::make_unique<PPC64TargetInfo>(Triple, Opts);
}
case llvm::Triple::nvptx:
- return new NVPTXTargetInfo(Triple, Opts, /*TargetPointerWidth=*/32);
+ return std::make_unique<NVPTXTargetInfo>(Triple, Opts,
+ /*TargetPointerWidth=*/32);
case llvm::Triple::nvptx64:
- return new NVPTXTargetInfo(Triple, Opts, /*TargetPointerWidth=*/64);
+ return std::make_unique<NVPTXTargetInfo>(Triple, Opts,
+ /*TargetPointerWidth=*/64);
case llvm::Triple::amdgcn:
case llvm::Triple::r600:
- return new AMDGPUTargetInfo(Triple, Opts);
+ return std::make_unique<AMDGPUTargetInfo>(Triple, Opts);
case llvm::Triple::riscv32:
// TODO: add cases for NetBSD, RTEMS once tested.
switch (os) {
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<RISCV32TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<RISCV32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Linux:
- return new LinuxTargetInfo<RISCV32TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<RISCV32TargetInfo>>(Triple, Opts);
default:
- return new RISCV32TargetInfo(Triple, Opts);
+ return std::make_unique<RISCV32TargetInfo>(Triple, Opts);
}
case llvm::Triple::riscv64:
// TODO: add cases for NetBSD, RTEMS once tested.
switch (os) {
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<RISCV64TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<RISCV64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<RISCV64TargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<RISCV64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<RISCV64TargetInfo>(Triple, Opts);
+ return std::make_unique<FuchsiaTargetInfo<RISCV64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Linux:
- return new LinuxTargetInfo<RISCV64TargetInfo>(Triple, Opts);
+ switch (Triple.getEnvironment()) {
+ default:
+ return std::make_unique<LinuxTargetInfo<RISCV64TargetInfo>>(Triple,
+ Opts);
+ case llvm::Triple::OpenHOS:
+ return std::make_unique<OHOSTargetInfo<RISCV64TargetInfo>>(Triple,
+ Opts);
+ }
default:
- return new RISCV64TargetInfo(Triple, Opts);
+ return std::make_unique<RISCV64TargetInfo>(Triple, Opts);
}
case llvm::Triple::sparc:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<SparcV8TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<SparcV8TargetInfo>>(Triple, Opts);
case llvm::Triple::Solaris:
- return new SolarisTargetInfo<SparcV8TargetInfo>(Triple, Opts);
+ return std::make_unique<SolarisTargetInfo<SparcV8TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<SparcV8TargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<SparcV8TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<SparcV8TargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<SparcV8TargetInfo>>(Triple, Opts);
default:
- return new SparcV8TargetInfo(Triple, Opts);
+ return std::make_unique<SparcV8TargetInfo>(Triple, Opts);
}
// The 'sparcel' architecture copies all the above cases except for Solaris.
case llvm::Triple::sparcel:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<SparcV8elTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<SparcV8elTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<SparcV8elTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<SparcV8elTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<SparcV8elTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<SparcV8elTargetInfo>>(Triple,
+ Opts);
default:
- return new SparcV8elTargetInfo(Triple, Opts);
+ return std::make_unique<SparcV8elTargetInfo>(Triple, Opts);
}
case llvm::Triple::sparcv9:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<SparcV9TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<SparcV9TargetInfo>>(Triple, Opts);
case llvm::Triple::Solaris:
- return new SolarisTargetInfo<SparcV9TargetInfo>(Triple, Opts);
+ return std::make_unique<SolarisTargetInfo<SparcV9TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<SparcV9TargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<SparcV9TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<SparcV9TargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<SparcV9TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<SparcV9TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<SparcV9TargetInfo>>(Triple,
+ Opts);
default:
- return new SparcV9TargetInfo(Triple, Opts);
+ return std::make_unique<SparcV9TargetInfo>(Triple, Opts);
}
case llvm::Triple::systemz:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<SystemZTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<SystemZTargetInfo>>(Triple, Opts);
case llvm::Triple::ZOS:
- return new ZOSTargetInfo<SystemZTargetInfo>(Triple, Opts);
+ return std::make_unique<ZOSTargetInfo<SystemZTargetInfo>>(Triple, Opts);
default:
- return new SystemZTargetInfo(Triple, Opts);
+ return std::make_unique<SystemZTargetInfo>(Triple, Opts);
}
case llvm::Triple::tce:
- return new TCETargetInfo(Triple, Opts);
+ return std::make_unique<TCETargetInfo>(Triple, Opts);
case llvm::Triple::tcele:
- return new TCELETargetInfo(Triple, Opts);
+ return std::make_unique<TCELETargetInfo>(Triple, Opts);
case llvm::Triple::x86:
if (Triple.isOSDarwin())
- return new DarwinI386TargetInfo(Triple, Opts);
+ return std::make_unique<DarwinI386TargetInfo>(Triple, Opts);
switch (os) {
case llvm::Triple::Ananas:
- return new AnanasTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<AnanasTargetInfo<X86_32TargetInfo>>(Triple, Opts);
case llvm::Triple::CloudABI:
- return new CloudABITargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<CloudABITargetInfo<X86_32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Linux: {
switch (Triple.getEnvironment()) {
default:
- return new LinuxTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<X86_32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Android:
- return new AndroidX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<AndroidX86_32TargetInfo>(Triple, Opts);
}
}
case llvm::Triple::DragonFly:
- return new DragonFlyBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<DragonFlyBSDTargetInfo<X86_32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::NetBSD:
- return new NetBSDI386TargetInfo(Triple, Opts);
+ return std::make_unique<NetBSDI386TargetInfo>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDI386TargetInfo(Triple, Opts);
+ return std::make_unique<OpenBSDI386TargetInfo>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<X86_32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<FuchsiaTargetInfo<X86_32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::KFreeBSD:
- return new KFreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<KFreeBSDTargetInfo<X86_32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Minix:
- return new MinixTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<MinixTargetInfo<X86_32TargetInfo>>(Triple, Opts);
case llvm::Triple::Solaris:
- return new SolarisTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<SolarisTargetInfo<X86_32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Win32: {
switch (Triple.getEnvironment()) {
case llvm::Triple::Cygnus:
- return new CygwinX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<CygwinX86_32TargetInfo>(Triple, Opts);
case llvm::Triple::GNU:
- return new MinGWX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<MinGWX86_32TargetInfo>(Triple, Opts);
case llvm::Triple::Itanium:
case llvm::Triple::MSVC:
default: // Assume MSVC for unknown environments
- return new MicrosoftX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<MicrosoftX86_32TargetInfo>(Triple, Opts);
}
}
case llvm::Triple::Haiku:
- return new HaikuX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<HaikuX86_32TargetInfo>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<RTEMSX86_32TargetInfo>(Triple, Opts);
case llvm::Triple::NaCl:
- return new NaClTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<NaClTargetInfo<X86_32TargetInfo>>(Triple, Opts);
case llvm::Triple::ELFIAMCU:
- return new MCUX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<MCUX86_32TargetInfo>(Triple, Opts);
case llvm::Triple::Hurd:
- return new HurdTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<HurdTargetInfo<X86_32TargetInfo>>(Triple, Opts);
default:
- return new X86_32TargetInfo(Triple, Opts);
+ return std::make_unique<X86_32TargetInfo>(Triple, Opts);
}
case llvm::Triple::x86_64:
if (Triple.isOSDarwin() || Triple.isOSBinFormatMachO())
- return new DarwinX86_64TargetInfo(Triple, Opts);
+ return std::make_unique<DarwinX86_64TargetInfo>(Triple, Opts);
switch (os) {
case llvm::Triple::Ananas:
- return new AnanasTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<AnanasTargetInfo<X86_64TargetInfo>>(Triple, Opts);
case llvm::Triple::CloudABI:
- return new CloudABITargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<CloudABITargetInfo<X86_64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Linux: {
switch (Triple.getEnvironment()) {
default:
- return new LinuxTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<X86_64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Android:
- return new AndroidX86_64TargetInfo(Triple, Opts);
+ return std::make_unique<AndroidX86_64TargetInfo>(Triple, Opts);
+ case llvm::Triple::OpenHOS:
+ return std::make_unique<OHOSX86_64TargetInfo>(Triple, Opts);
}
}
case llvm::Triple::DragonFly:
- return new DragonFlyBSDTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<DragonFlyBSDTargetInfo<X86_64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<X86_64TargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDX86_64TargetInfo(Triple, Opts);
+ return std::make_unique<OpenBSDX86_64TargetInfo>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<X86_64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<FuchsiaTargetInfo<X86_64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::KFreeBSD:
- return new KFreeBSDTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<KFreeBSDTargetInfo<X86_64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Solaris:
- return new SolarisTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<SolarisTargetInfo<X86_64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Win32: {
switch (Triple.getEnvironment()) {
case llvm::Triple::Cygnus:
- return new CygwinX86_64TargetInfo(Triple, Opts);
+ return std::make_unique<CygwinX86_64TargetInfo>(Triple, Opts);
case llvm::Triple::GNU:
- return new MinGWX86_64TargetInfo(Triple, Opts);
+ return std::make_unique<MinGWX86_64TargetInfo>(Triple, Opts);
case llvm::Triple::MSVC:
default: // Assume MSVC for unknown environments
- return new MicrosoftX86_64TargetInfo(Triple, Opts);
+ return std::make_unique<MicrosoftX86_64TargetInfo>(Triple, Opts);
}
}
case llvm::Triple::Haiku:
- return new HaikuTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<HaikuTargetInfo<X86_64TargetInfo>>(Triple, Opts);
case llvm::Triple::NaCl:
- return new NaClTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<NaClTargetInfo<X86_64TargetInfo>>(Triple, Opts);
case llvm::Triple::PS4:
- return new PS4OSTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<PS4OSTargetInfo<X86_64TargetInfo>>(Triple, Opts);
case llvm::Triple::PS5:
- return new PS5OSTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<PS5OSTargetInfo<X86_64TargetInfo>>(Triple, Opts);
default:
- return new X86_64TargetInfo(Triple, Opts);
+ return std::make_unique<X86_64TargetInfo>(Triple, Opts);
}
case llvm::Triple::spir: {
if (os != llvm::Triple::UnknownOS ||
Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
return nullptr;
- return new SPIR32TargetInfo(Triple, Opts);
+ return std::make_unique<SPIR32TargetInfo>(Triple, Opts);
}
case llvm::Triple::spir64: {
if (os != llvm::Triple::UnknownOS ||
Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
return nullptr;
- return new SPIR64TargetInfo(Triple, Opts);
+ return std::make_unique<SPIR64TargetInfo>(Triple, Opts);
}
case llvm::Triple::spirv32: {
if (os != llvm::Triple::UnknownOS ||
Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
return nullptr;
- return new SPIRV32TargetInfo(Triple, Opts);
+ return std::make_unique<SPIRV32TargetInfo>(Triple, Opts);
}
case llvm::Triple::spirv64: {
if (os != llvm::Triple::UnknownOS ||
Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
return nullptr;
- return new SPIRV64TargetInfo(Triple, Opts);
+ return std::make_unique<SPIRV64TargetInfo>(Triple, Opts);
}
case llvm::Triple::wasm32:
if (Triple.getSubArch() != llvm::Triple::NoSubArch ||
@@ -631,11 +696,14 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return nullptr;
switch (os) {
case llvm::Triple::WASI:
- return new WASITargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
+ return std::make_unique<WASITargetInfo<WebAssembly32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Emscripten:
- return new EmscriptenTargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
+ return std::make_unique<EmscriptenTargetInfo<WebAssembly32TargetInfo>>(
+ Triple, Opts);
case llvm::Triple::UnknownOS:
- return new WebAssemblyOSTargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
+ return std::make_unique<WebAssemblyOSTargetInfo<WebAssembly32TargetInfo>>(
+ Triple, Opts);
default:
return nullptr;
}
@@ -646,45 +714,52 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return nullptr;
switch (os) {
case llvm::Triple::WASI:
- return new WASITargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
+ return std::make_unique<WASITargetInfo<WebAssembly64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Emscripten:
- return new EmscriptenTargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
+ return std::make_unique<EmscriptenTargetInfo<WebAssembly64TargetInfo>>(
+ Triple, Opts);
case llvm::Triple::UnknownOS:
- return new WebAssemblyOSTargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
+ return std::make_unique<WebAssemblyOSTargetInfo<WebAssembly64TargetInfo>>(
+ Triple, Opts);
default:
return nullptr;
}
case llvm::Triple::dxil:
- return new DirectXTargetInfo(Triple,Opts);
+ return std::make_unique<DirectXTargetInfo>(Triple, Opts);
case llvm::Triple::renderscript32:
- return new LinuxTargetInfo<RenderScript32TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<RenderScript32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::renderscript64:
- return new LinuxTargetInfo<RenderScript64TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<RenderScript64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::ve:
- return new LinuxTargetInfo<VETargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<VETargetInfo>>(Triple, Opts);
case llvm::Triple::csky:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<CSKYTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<CSKYTargetInfo>>(Triple, Opts);
default:
- return new CSKYTargetInfo(Triple, Opts);
+ return std::make_unique<CSKYTargetInfo>(Triple, Opts);
}
case llvm::Triple::loongarch32:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<LoongArch32TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<LoongArch32TargetInfo>>(Triple,
+ Opts);
default:
- return new LoongArch32TargetInfo(Triple, Opts);
+ return std::make_unique<LoongArch32TargetInfo>(Triple, Opts);
}
case llvm::Triple::loongarch64:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<LoongArch64TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<LoongArch64TargetInfo>>(Triple,
+ Opts);
default:
- return new LoongArch64TargetInfo(Triple, Opts);
+ return std::make_unique<LoongArch64TargetInfo>(Triple, Opts);
}
}
}
@@ -700,7 +775,7 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
llvm::Triple Triple(Opts->Triple);
// Construct the target
- std::unique_ptr<TargetInfo> Target(AllocateTarget(Triple, *Opts));
+ std::unique_ptr<TargetInfo> Target = AllocateTarget(Triple, *Opts);
if (!Target) {
Diags.Report(diag::err_target_unknown_triple) << Triple.str();
return nullptr;
@@ -742,6 +817,13 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
// Compute the default target features, we need the target to handle this
// because features may have dependencies on one another.
+ llvm::erase_if(Opts->FeaturesAsWritten, [&](StringRef Name) {
+ if (Target->isReadOnlyFeature(Name.substr(1))) {
+ Diags.Report(diag::warn_fe_backend_readonly_feature_flag) << Name;
+ return true;
+ }
+ return false;
+ });
if (!Target->initFeatureMap(Opts->FeatureMap, Diags, Opts->CPU,
Opts->FeaturesAsWritten))
return nullptr;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets.h b/contrib/llvm-project/clang/lib/Basic/Targets.h
index a063204e69e6..b4d2486b5d2b 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets.h
@@ -24,8 +24,8 @@ namespace clang {
namespace targets {
LLVM_LIBRARY_VISIBILITY
-clang::TargetInfo *AllocateTarget(const llvm::Triple &Triple,
- const clang::TargetOptions &Opts);
+std::unique_ptr<clang::TargetInfo>
+AllocateTarget(const llvm::Triple &Triple, const clang::TargetOptions &Opts);
/// DefineStd - Define a macro name and standard variants. For example if
/// MacroName is "unix", then this will define "__unix", "__unix__", and "unix"
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
index 997398da7972..7c4cc5fb33f8 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
@@ -17,8 +17,8 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/AArch64TargetParser.h"
-#include "llvm/Support/ARMTargetParserCommon.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
+#include "llvm/TargetParser/ARMTargetParserCommon.h"
#include <optional>
using namespace clang;
@@ -39,6 +39,12 @@ static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
{#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#include "clang/Basic/BuiltinsSME.def"
+
+#define BUILTIN(ID, TYPE, ATTRS) \
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
{#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
@@ -223,8 +229,7 @@ bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
}
bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
- return Name == "generic" ||
- llvm::AArch64::parseCpu(Name).Arch != llvm::AArch64::INVALID;
+ return Name == "generic" || llvm::AArch64::parseCpu(Name);
}
bool AArch64TargetInfo::setCPU(const std::string &Name) {
@@ -239,8 +244,6 @@ void AArch64TargetInfo::fillValidCPUList(
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
- Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
- Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
}
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
@@ -334,16 +337,8 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
Builder.defineMacro("__aarch64__");
- // For bare-metal.
- if (getTriple().getOS() == llvm::Triple::UnknownOS &&
- getTriple().isOSBinFormatELF())
- Builder.defineMacro("__ELF__");
-
- // Target properties.
- if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
- Builder.defineMacro("_LP64");
- Builder.defineMacro("__LP64__");
- }
+ // Inline assembly supports AArch64 flag outputs.
+ Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
std::string CodeModel = getTargetOpts().CodeModel;
if (CodeModel == "default")
@@ -374,7 +369,8 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
// 0xe implies support for half, single and double precision operations.
- Builder.defineMacro("__ARM_FP", "0xE");
+ if (FPU & FPUMode)
+ Builder.defineMacro("__ARM_FP", "0xE");
// PCS specifies this for SysV variants, which is all we support. Other ABIs
// may choose __ARM_FP16_FORMAT_ALTERNATIVE.
@@ -419,7 +415,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasCRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
- if (HasRCPC)
+ if (HasRCPC3)
+ Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
+ else if (HasRCPC)
Builder.defineMacro("__ARM_FEATURE_RCPC", "1");
if (HasFMV)
@@ -607,16 +605,18 @@ unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
}
-bool AArch64TargetInfo::getFeatureDepOptions(StringRef Name,
- std::string &FeatureVec) const {
- FeatureVec = "";
- for (const auto &E : llvm::AArch64::Extensions) {
- if (Name == E.Name) {
- FeatureVec = E.DependentFeatures;
- break;
- }
- }
- return FeatureVec != "";
+bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
+ auto F = llvm::find_if(llvm::AArch64::Extensions, [&](const auto &E) {
+ return Name == E.Name && !E.DependentFeatures.empty();
+ });
+ return F != std::end(llvm::AArch64::Extensions);
+}
+
+StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
+ auto F = llvm::find_if(llvm::AArch64::Extensions,
+ [&](const auto &E) { return Name == E.Name; });
+ return F != std::end(llvm::AArch64::Extensions) ? F->DependentFeatures
+ : StringRef();
}
bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
@@ -664,8 +664,8 @@ bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
.Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
.Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
.Case("sme", HasSME)
- .Case("sme-f64f64", HasSMEF64)
- .Case("sme-i16i64", HasSMEI64)
+ .Case("sme-f64f64", HasSMEF64F64)
+ .Case("sme-i16i64", HasSMEI16I64)
.Cases("memtag", "memtag2", HasMTE)
.Case("sb", HasSB)
.Case("predres", HasPredRes)
@@ -673,6 +673,7 @@ bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
.Case("bti", HasBTI)
.Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
.Case("wfxt", HasWFxT)
+ .Case("rcpc3", HasRCPC3)
.Default(false);
}
@@ -681,25 +682,23 @@ void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
Features[Name] = Enabled;
// If the feature is an architecture feature (like v8.2a), add all previous
// architecture versions and any dependant target features.
- const llvm::AArch64::ArchInfo &ArchInfo =
+ const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
llvm::AArch64::ArchInfo::findBySubArch(Name);
- if (ArchInfo == llvm::AArch64::INVALID)
- return; // Not an architecure, nothing more to do.
+ if (!ArchInfo)
+ return; // Not an architecture, nothing more to do.
// Disabling an architecture feature does not affect dependent features
if (!Enabled)
return;
for (const auto *OtherArch : llvm::AArch64::ArchInfos)
- if (ArchInfo.implies(*OtherArch))
+ if (ArchInfo->implies(*OtherArch))
Features[OtherArch->getSubArch()] = true;
// Set any features implied by the architecture
- uint64_t Extensions =
- llvm::AArch64::getDefaultExtensions("generic", ArchInfo);
std::vector<StringRef> CPUFeats;
- if (llvm::AArch64::getExtensionFeatures(Extensions, CPUFeats)) {
+ if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
for (auto F : CPUFeats) {
assert(F[0] == '+' && "Expected + in target feature!");
Features[F.drop_front(1)] = true;
@@ -710,6 +709,8 @@ void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
for (const auto &Feature : Features) {
+ if (Feature == "-fp-armv8")
+ HasNoFP = true;
if (Feature == "-neon")
HasNoNeon = true;
if (Feature == "-sve")
@@ -780,16 +781,19 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
if (Feature == "+sme") {
HasSME = true;
HasBFloat16 = true;
+ HasFullFP16 = true;
}
if (Feature == "+sme-f64f64") {
HasSME = true;
- HasSMEF64 = true;
+ HasSMEF64F64 = true;
HasBFloat16 = true;
+ HasFullFP16 = true;
}
if (Feature == "+sme-i16i64") {
HasSME = true;
- HasSMEI64 = true;
+ HasSMEI16I64 = true;
HasBFloat16 = true;
+ HasFullFP16 = true;
}
if (Feature == "+sb")
HasSB = true;
@@ -925,6 +929,10 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasMOPS = true;
if (Feature == "+d128")
HasD128 = true;
+ if (Feature == "+gcs")
+ HasGCS = true;
+ if (Feature == "+rcpc3")
+ HasRCPC3 = true;
}
// Check features that are manually disabled by command line options.
@@ -938,6 +946,11 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
setDataLayout();
setArchFeatures();
+ if (HasNoFP) {
+ FPU &= ~FPUMode;
+ FPU &= ~NeonMode;
+ FPU &= ~SveMode;
+ }
if (HasNoNeon) {
FPU &= ~NeonMode;
FPU &= ~SveMode;
@@ -953,9 +966,9 @@ bool AArch64TargetInfo::initFeatureMap(
const std::vector<std::string> &FeaturesVec) const {
std::vector<std::string> UpdatedFeaturesVec;
// Parse the CPU and add any implied features.
- const llvm::AArch64::ArchInfo &Arch = llvm::AArch64::parseCpu(CPU).Arch;
- if (Arch != llvm::AArch64::INVALID) {
- uint64_t Exts = llvm::AArch64::getDefaultExtensions(CPU, Arch);
+ std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(CPU);
+ if (CpuInfo) {
+ uint64_t Exts = CpuInfo->getImpliedExtensions();
std::vector<StringRef> CPUFeats;
llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
for (auto F : CPUFeats) {
@@ -965,26 +978,30 @@ bool AArch64TargetInfo::initFeatureMap(
}
// Process target and dependent features. This is done in two loops collecting
- // them into UpdatedFeaturesVec: first to add dependent '+'features,
- // second to add target '+/-'features that can later disable some of
- // features added on the first loop.
+ // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
+ // add target '+/-'features that can later disable some of features added on
+ // the first loop. Function Multi Versioning features begin with '?'.
for (const auto &Feature : FeaturesVec)
- if ((Feature[0] == '?' || Feature[0] == '+')) {
- std::string Options;
- if (AArch64TargetInfo::getFeatureDepOptions(Feature.substr(1), Options)) {
- SmallVector<StringRef, 1> AttrFeatures;
- StringRef(Options).split(AttrFeatures, ",");
- for (auto F : AttrFeatures)
- UpdatedFeaturesVec.push_back(F.str());
- }
+ if (((Feature[0] == '?' || Feature[0] == '+')) &&
+ AArch64TargetInfo::doesFeatureAffectCodeGen(Feature.substr(1))) {
+ StringRef DepFeatures =
+ AArch64TargetInfo::getFeatureDependencies(Feature.substr(1));
+ SmallVector<StringRef, 1> AttrFeatures;
+ DepFeatures.split(AttrFeatures, ",");
+ for (auto F : AttrFeatures)
+ UpdatedFeaturesVec.push_back(F.str());
}
for (const auto &Feature : FeaturesVec)
- if (Feature[0] == '+') {
- std::string F;
- llvm::AArch64::getFeatureOption(Feature, F);
- UpdatedFeaturesVec.push_back(F);
- } else if (Feature[0] != '?')
- UpdatedFeaturesVec.push_back(Feature);
+ if (Feature[0] != '?') {
+ std::string UpdatedFeature = Feature;
+ if (Feature[0] == '+') {
+ std::optional<llvm::AArch64::ExtensionInfo> Extension =
+ llvm::AArch64::parseArchExtension(Feature.substr(1));
+ if (Extension)
+ UpdatedFeature = Extension->Feature.str();
+ }
+ UpdatedFeaturesVec.push_back(UpdatedFeature);
+ }
return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
}
@@ -1037,13 +1054,14 @@ ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
FoundArch = true;
std::pair<StringRef, StringRef> Split =
Feature.split("=").second.trim().split("+");
- const llvm::AArch64::ArchInfo &AI = llvm::AArch64::parseArch(Split.first);
+ const std::optional<llvm::AArch64::ArchInfo> AI =
+ llvm::AArch64::parseArch(Split.first);
// Parse the architecture version, adding the required features to
// Ret.Features.
- if (AI == llvm::AArch64::INVALID)
+ if (!AI)
continue;
- Ret.Features.push_back(AI.ArchFeature.str());
+ Ret.Features.push_back(AI->ArchFeature.str());
// Add any extra features, after the +
SplitAndAddFeatures(Split.second, Ret.Features);
} else if (Feature.startswith("cpu=")) {
@@ -1146,7 +1164,11 @@ const char *const AArch64TargetInfo::GCCRegNames[] = {
// SVE predicate registers
"p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
- "p11", "p12", "p13", "p14", "p15"
+ "p11", "p12", "p13", "p14", "p15",
+
+ // SVE predicate-as-counter registers
+ "pn0", "pn1", "pn2", "pn3", "pn4", "pn5", "pn6", "pn7", "pn8",
+ "pn9", "pn10", "pn11", "pn12", "pn13", "pn14", "pn15"
};
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
@@ -1196,6 +1218,52 @@ ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
return llvm::ArrayRef(GCCRegAliases);
}
+// Returns the length of cc constraint.
+static unsigned matchAsmCCConstraint(const char *Name) {
+ constexpr unsigned len = 5;
+ auto RV = llvm::StringSwitch<unsigned>(Name)
+ .Case("@cceq", len)
+ .Case("@ccne", len)
+ .Case("@cchs", len)
+ .Case("@cccs", len)
+ .Case("@cccc", len)
+ .Case("@cclo", len)
+ .Case("@ccmi", len)
+ .Case("@ccpl", len)
+ .Case("@ccvs", len)
+ .Case("@ccvc", len)
+ .Case("@cchi", len)
+ .Case("@ccls", len)
+ .Case("@ccge", len)
+ .Case("@cclt", len)
+ .Case("@ccgt", len)
+ .Case("@ccle", len)
+ .Default(0);
+ return RV;
+}
+
+std::string
+AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
+ std::string R;
+ switch (*Constraint) {
+ case 'U': // Three-character constraint; add "@3" hint for later parsing.
+ R = std::string("@3") + std::string(Constraint, 3);
+ Constraint += 2;
+ break;
+ case '@':
+ if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
+ std::string Converted = "{" + std::string(Constraint, Len) + "}";
+ Constraint += Len - 1;
+ return Converted;
+ }
+ return std::string(1, *Constraint);
+ default:
+ R = TargetInfo::convertConstraint(Constraint);
+ break;
+ }
+ return R;
+}
+
bool AArch64TargetInfo::validateAsmConstraint(
const char *&Name, TargetInfo::ConstraintInfo &Info) const {
switch (*Name) {
@@ -1243,6 +1311,13 @@ bool AArch64TargetInfo::validateAsmConstraint(
case 'y': // SVE registers (V0-V7)
Info.setAllowsRegister();
return true;
+ case '@':
+ // CC condition
+ if (const unsigned Len = matchAsmCCConstraint(Name)) {
+ Name += Len - 1;
+ Info.setAllowsRegister();
+ return true;
+ }
}
return false;
}
@@ -1281,7 +1356,7 @@ bool AArch64TargetInfo::validateConstraintModifier(
}
}
-const char *AArch64TargetInfo::getClobbers() const { return ""; }
+std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
if (RegNo == 0)
@@ -1454,7 +1529,6 @@ void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
else
Builder.defineMacro("__ARM64_ARCH_8__");
Builder.defineMacro("__ARM_NEON__");
- Builder.defineMacro("__LITTLE_ENDIAN__");
Builder.defineMacro("__REGISTER_PREFIX__", "");
Builder.defineMacro("__arm64", "1");
Builder.defineMacro("__arm64__", "1");
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
index 34df886377ea..4304693e473d 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
@@ -15,7 +15,7 @@
#include "OSTargets.h"
#include "clang/Basic/TargetBuiltins.h"
-#include "llvm/Support/AArch64TargetParser.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
#include <optional>
namespace clang {
@@ -26,7 +26,11 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
static const TargetInfo::GCCRegAlias GCCRegAliases[];
static const char *const GCCRegNames[];
- enum FPUModeEnum { FPUMode, NeonMode = (1 << 0), SveMode = (1 << 1) };
+ enum FPUModeEnum {
+ FPUMode = (1 << 0),
+ NeonMode = (1 << 1),
+ SveMode = (1 << 2),
+ };
unsigned FPU = FPUMode;
bool HasCRC = false;
@@ -64,8 +68,8 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasCCDP = false;
bool HasFRInt3264 = false;
bool HasSME = false;
- bool HasSMEF64 = false;
- bool HasSMEI64 = false;
+ bool HasSMEF64F64 = false;
+ bool HasSMEI16I64 = false;
bool HasSB = false;
bool HasPredRes = false;
bool HasSSBS = false;
@@ -73,9 +77,12 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasWFxT = false;
bool HasJSCVT = false;
bool HasFCMA = false;
+ bool HasNoFP = false;
bool HasNoNeon = false;
bool HasNoSVE = false;
bool HasFMV = true;
+ bool HasGCS = false;
+ bool HasRCPC3 = false;
const llvm::AArch64::ArchInfo *ArchInfo = &llvm::AArch64::ARMV8A;
@@ -143,9 +150,8 @@ public:
std::optional<std::pair<unsigned, unsigned>>
getVScaleRange(const LangOptions &LangOpts) const override;
-
- bool getFeatureDepOptions(StringRef Feature,
- std::string &Options) const override;
+ bool doesFeatureAffectCodeGen(StringRef Name) const override;
+ StringRef getFeatureDependencies(StringRef Name) const override;
bool validateCpuSupports(StringRef FeatureStr) const override;
bool hasFeature(StringRef Feature) const override;
void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name,
@@ -155,6 +161,8 @@ public:
ParsedTargetAttr parseTargetAttr(StringRef Str) const override;
bool supportsTargetAttributeTune() const override { return true; }
+ bool checkArithmeticFenceSupported() const override { return true; }
+
bool hasBFloat16Type() const override;
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override;
@@ -166,26 +174,14 @@ public:
ArrayRef<const char *> getGCCRegNames() const override;
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
- std::string convertConstraint(const char *&Constraint) const override {
- std::string R;
- switch (*Constraint) {
- case 'U': // Three-character constraint; add "@3" hint for later parsing.
- R = std::string("@3") + std::string(Constraint, 3);
- Constraint += 2;
- break;
- default:
- R = TargetInfo::convertConstraint(Constraint);
- break;
- }
- return R;
- }
+ std::string convertConstraint(const char *&Constraint) const override;
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override;
bool
validateConstraintModifier(StringRef Constraint, char Modifier, unsigned Size,
std::string &SuggestedModifier) const override;
- const char *getClobbers() const override;
+ std::string_view getClobbers() const override;
StringRef getConstraintRegister(StringRef Constraint,
StringRef Expression) const override {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
index fe50fbcf3b88..f15216ac15a1 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -17,7 +17,6 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
-
using namespace clang;
using namespace clang::targets;
@@ -33,9 +32,9 @@ static const char *const DataLayoutStringR600 =
static const char *const DataLayoutStringAMDGCN =
"e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
- "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+ "-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
"-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1"
- "-ni:7";
+ "-ni:7:8";
const LangASMap AMDGPUTargetInfo::AMDGPUDefIsGenMap = {
Generic, // Default
@@ -179,187 +178,19 @@ ArrayRef<const char *> AMDGPUTargetInfo::getGCCRegNames() const {
bool AMDGPUTargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
const std::vector<std::string> &FeatureVec) const {
- const bool IsNullCPU = CPU.empty();
- bool IsWave32Capable = false;
using namespace llvm::AMDGPU;
-
- // XXX - What does the member GPU mean if device name string passed here?
- if (isAMDGCN(getTriple())) {
- switch (llvm::AMDGPU::parseArchAMDGCN(CPU)) {
- case GK_GFX1103:
- case GK_GFX1102:
- case GK_GFX1101:
- case GK_GFX1100:
- IsWave32Capable = true;
- Features["ci-insts"] = true;
- Features["dot5-insts"] = true;
- Features["dot7-insts"] = true;
- Features["dot8-insts"] = true;
- Features["dot9-insts"] = true;
- Features["dl-insts"] = true;
- Features["16-bit-insts"] = true;
- Features["dpp"] = true;
- Features["gfx8-insts"] = true;
- Features["gfx9-insts"] = true;
- Features["gfx10-insts"] = true;
- Features["gfx10-3-insts"] = true;
- Features["gfx11-insts"] = true;
- break;
- case GK_GFX1036:
- case GK_GFX1035:
- case GK_GFX1034:
- case GK_GFX1033:
- case GK_GFX1032:
- case GK_GFX1031:
- case GK_GFX1030:
- IsWave32Capable = true;
- Features["ci-insts"] = true;
- Features["dot1-insts"] = true;
- Features["dot2-insts"] = true;
- Features["dot5-insts"] = true;
- Features["dot6-insts"] = true;
- Features["dot7-insts"] = true;
- Features["dl-insts"] = true;
- Features["16-bit-insts"] = true;
- Features["dpp"] = true;
- Features["gfx8-insts"] = true;
- Features["gfx9-insts"] = true;
- Features["gfx10-insts"] = true;
- Features["gfx10-3-insts"] = true;
- Features["s-memrealtime"] = true;
- Features["s-memtime-inst"] = true;
- break;
- case GK_GFX1012:
- case GK_GFX1011:
- Features["dot1-insts"] = true;
- Features["dot2-insts"] = true;
- Features["dot5-insts"] = true;
- Features["dot6-insts"] = true;
- Features["dot7-insts"] = true;
- [[fallthrough]];
- case GK_GFX1013:
- case GK_GFX1010:
- IsWave32Capable = true;
- Features["dl-insts"] = true;
- Features["ci-insts"] = true;
- Features["16-bit-insts"] = true;
- Features["dpp"] = true;
- Features["gfx8-insts"] = true;
- Features["gfx9-insts"] = true;
- Features["gfx10-insts"] = true;
- Features["s-memrealtime"] = true;
- Features["s-memtime-inst"] = true;
- break;
- case GK_GFX940:
- Features["gfx940-insts"] = true;
- Features["fp8-insts"] = true;
- [[fallthrough]];
- case GK_GFX90A:
- Features["gfx90a-insts"] = true;
- [[fallthrough]];
- case GK_GFX908:
- Features["dot3-insts"] = true;
- Features["dot4-insts"] = true;
- Features["dot5-insts"] = true;
- Features["dot6-insts"] = true;
- Features["mai-insts"] = true;
- [[fallthrough]];
- case GK_GFX906:
- Features["dl-insts"] = true;
- Features["dot1-insts"] = true;
- Features["dot2-insts"] = true;
- Features["dot7-insts"] = true;
- [[fallthrough]];
- case GK_GFX90C:
- case GK_GFX909:
- case GK_GFX904:
- case GK_GFX902:
- case GK_GFX900:
- Features["gfx9-insts"] = true;
- [[fallthrough]];
- case GK_GFX810:
- case GK_GFX805:
- case GK_GFX803:
- case GK_GFX802:
- case GK_GFX801:
- Features["gfx8-insts"] = true;
- Features["16-bit-insts"] = true;
- Features["dpp"] = true;
- Features["s-memrealtime"] = true;
- [[fallthrough]];
- case GK_GFX705:
- case GK_GFX704:
- case GK_GFX703:
- case GK_GFX702:
- case GK_GFX701:
- case GK_GFX700:
- Features["ci-insts"] = true;
- [[fallthrough]];
- case GK_GFX602:
- case GK_GFX601:
- case GK_GFX600:
- Features["s-memtime-inst"] = true;
- break;
- case GK_NONE:
- break;
- default:
- llvm_unreachable("Unhandled GPU!");
- }
- } else {
- if (CPU.empty())
- CPU = "r600";
-
- switch (llvm::AMDGPU::parseArchR600(CPU)) {
- case GK_CAYMAN:
- case GK_CYPRESS:
- case GK_RV770:
- case GK_RV670:
- // TODO: Add fp64 when implemented.
- break;
- case GK_TURKS:
- case GK_CAICOS:
- case GK_BARTS:
- case GK_SUMO:
- case GK_REDWOOD:
- case GK_JUNIPER:
- case GK_CEDAR:
- case GK_RV730:
- case GK_RV710:
- case GK_RS880:
- case GK_R630:
- case GK_R600:
- break;
- default:
- llvm_unreachable("Unhandled GPU!");
- }
- }
-
+ fillAMDGPUFeatureMap(CPU, getTriple(), Features);
if (!TargetInfo::initFeatureMap(Features, Diags, CPU, FeatureVec))
return false;
- // FIXME: Not diagnosing wavefrontsize32 on wave64 only targets.
- const bool HaveWave32 =
- (IsWave32Capable || IsNullCPU) && Features.count("wavefrontsize32");
- const bool HaveWave64 = Features.count("wavefrontsize64");
-
// TODO: Should move this logic into TargetParser
- if (HaveWave32 && HaveWave64) {
- Diags.Report(diag::err_invalid_feature_combination)
- << "'wavefrontsize32' and 'wavefrontsize64' are mutually exclusive";
+ std::string ErrorMsg;
+ if (!insertWaveSizeFeature(CPU, getTriple(), Features, ErrorMsg)) {
+ Diags.Report(diag::err_invalid_feature_combination) << ErrorMsg;
return false;
}
- // Don't assume any wavesize with an unknown subtarget.
- if (!IsNullCPU) {
- // Default to wave32 if available, or wave64 if not
- if (!HaveWave32 && !HaveWave64) {
- StringRef DefaultWaveSizeFeature =
- IsWave32Capable ? "wavefrontsize32" : "wavefrontsize64";
- Features.insert(std::make_pair(DefaultWaveSizeFeature, true));
- }
- }
-
return true;
}
@@ -412,6 +243,8 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
}
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ CUMode = !(GPUFeatures & llvm::AMDGPU::FEATURE_WGP);
+ ReadOnlyFeatures.insert("image-insts");
}
void AMDGPUTargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
@@ -482,7 +315,10 @@ void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
if (hasFastFMA())
Builder.defineMacro("FP_FAST_FMA");
+ Builder.defineMacro("__AMDGCN_WAVEFRONT_SIZE__", Twine(WavefrontSize));
+ // ToDo: deprecate this macro for naming consistency.
Builder.defineMacro("__AMDGCN_WAVEFRONT_SIZE", Twine(WavefrontSize));
+ Builder.defineMacro("__AMDGCN_CUMODE__", Twine(CUMode));
}
void AMDGPUTargetInfo::setAuxTarget(const TargetInfo *Aux) {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
index 576bcf9d5401..300d9691d8a0 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
@@ -17,9 +17,9 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/StringSet.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/TargetParser/TargetParser.h"
+#include "llvm/TargetParser/Triple.h"
#include <optional>
namespace clang {
@@ -43,6 +43,12 @@ class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {
unsigned GPUFeatures;
unsigned WavefrontSize;
+ /// Whether to use cumode or WGP mode. True for cumode. False for WGP mode.
+ bool CUMode;
+
+ /// Whether having image instructions.
+ bool HasImage = false;
+
/// Target ID is device name followed by optional feature name postfixed
/// by plus or minus sign delimitted by colon, e.g. gfx908:xnack+:sramecc-.
/// If the target ID contains feature+, map it to true.
@@ -115,9 +121,8 @@ public:
}
bool hasBFloat16Type() const override { return isAMDGCN(getTriple()); }
- const char *getBFloat16Mangling() const override { return "u6__bf16"; };
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override;
@@ -443,11 +448,17 @@ public:
assert(F.front() == '+' || F.front() == '-');
if (F == "+wavefrontsize64")
WavefrontSize = 64;
+ else if (F == "+cumode")
+ CUMode = true;
+ else if (F == "-cumode")
+ CUMode = false;
+ else if (F == "+image-insts")
+ HasImage = true;
bool IsOn = F.front() == '+';
StringRef Name = StringRef(F).drop_front();
if (!llvm::is_contained(TargetIDFeatures, Name))
continue;
- assert(OffloadArchFeatures.find(Name) == OffloadArchFeatures.end());
+ assert(!OffloadArchFeatures.contains(Name));
OffloadArchFeatures[Name] = IsOn;
}
return true;
@@ -463,6 +474,8 @@ public:
return getCanonicalTargetID(getArchNameAMDGCN(GPUKind),
OffloadArchFeatures);
}
+
+ bool hasHIPImageSupport() const override { return HasImage; }
};
} // namespace targets
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h b/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h
index 3cb338a821e1..fcbfdd6eec58 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -48,7 +48,7 @@ public:
return TargetInfo::VoidPtrBuiltinVaList;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override {
static const char *const GCCRegNames[] = {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
index b85d5dc2d347..06e99e67c875 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
@@ -310,6 +310,7 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
case llvm::Triple::GNUEABIHF:
case llvm::Triple::MuslEABI:
case llvm::Triple::MuslEABIHF:
+ case llvm::Triple::OpenHOS:
setABI("aapcs-linux");
break;
case llvm::Triple::EABIHF:
@@ -452,7 +453,7 @@ bool ARMTargetInfo::initFeatureMap(
}
// get default FPU features
- unsigned FPUKind = llvm::ARM::getDefaultFPU(CPU, Arch);
+ llvm::ARM::FPUKind FPUKind = llvm::ARM::getDefaultFPU(CPU, Arch);
llvm::ARM::getFPUFeatures(FPUKind, TargetFeatures);
// get default Extension features
@@ -513,6 +514,7 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasFloat16 = true;
ARMCDECoprocMask = 0;
HasBFloat16 = false;
+ HasFullBFloat16 = false;
FPRegsDisabled = false;
// This does not diagnose illegal cases like having both
@@ -595,6 +597,8 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
} else if (Feature == "+pacbti") {
HasPAC = 1;
HasBTI = 1;
+ } else if (Feature == "+fullbf16") {
+ HasFullBFloat16 = true;
}
}
@@ -710,10 +714,9 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
// For bare-metal none-eabi.
if (getTriple().getOS() == llvm::Triple::UnknownOS &&
(getTriple().getEnvironment() == llvm::Triple::EABI ||
- getTriple().getEnvironment() == llvm::Triple::EABIHF)) {
- Builder.defineMacro("__ELF__");
- if (Opts.CPlusPlus)
- Builder.defineMacro("_GNU_SOURCE");
+ getTriple().getEnvironment() == llvm::Triple::EABIHF) &&
+ Opts.CPlusPlus) {
+ Builder.defineMacro("_GNU_SOURCE");
}
// Target properties.
@@ -1243,7 +1246,7 @@ bool ARMTargetInfo::validateConstraintModifier(
return true;
}
-const char *ARMTargetInfo::getClobbers() const {
+std::string_view ARMTargetInfo::getClobbers() const {
// FIXME: Is this really right?
return "";
}
@@ -1405,11 +1408,6 @@ DarwinARMTargetInfo::DarwinARMTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: DarwinTargetInfo<ARMleTargetInfo>(Triple, Opts) {
HasAlignMac68kSupport = true;
- // iOS always has 64-bit atomic instructions.
- // FIXME: This should be based off of the target features in
- // ARMleTargetInfo.
- MaxAtomicInlineWidth = 64;
-
if (Triple.isWatchABI()) {
// Darwin on iOS uses a variant of the ARM C++ ABI.
TheCXXABI.set(TargetCXXABI::WatchOS);
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
index e662a609017b..b1aa2794c7e4 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
@@ -16,10 +16,10 @@
#include "OSTargets.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
-#include "llvm/Support/ARMTargetParser.h"
-#include "llvm/Support/ARMTargetParserCommon.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/ARMTargetParser.h"
+#include "llvm/TargetParser/ARMTargetParserCommon.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -181,7 +181,7 @@ public:
bool
validateConstraintModifier(StringRef Constraint, char Modifier, unsigned Size,
std::string &SuggestedModifier) const override;
- const char *getClobbers() const override;
+ std::string_view getClobbers() const override;
StringRef getConstraintRegister(StringRef Constraint,
StringRef Expression) const override {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
index 3d662cc3ba74..85ca4bc30c46 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
@@ -428,12 +428,28 @@ bool AVRTargetInfo::setCPU(const std::string &Name) {
return false;
}
+std::optional<std::string>
+AVRTargetInfo::handleAsmEscapedChar(char EscChar) const {
+ switch (EscChar) {
+ // "%~" represents for 'r' depends on the device has long jump/call.
+ case '~':
+ return ArchHasJMPCALL(Arch) ? std::string("") : std::string(1, 'r');
+
+ // "%!" represents for 'e' depends on the PC register size.
+ case '!':
+ return ArchHas3BytePC(Arch) ? std::string(1, 'e') : std::string("");
+
+ // This is an invalid escape character for AVR.
+ default:
+ return std::nullopt;
+ }
+}
+
void AVRTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("AVR");
Builder.defineMacro("__AVR");
Builder.defineMacro("__AVR__");
- Builder.defineMacro("__ELF__");
if (ABI == "avrtiny")
Builder.defineMacro("__AVR_TINY__", "1");
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h
index 7730bb8e5132..854a51d78c39 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -69,7 +69,7 @@ public:
return TargetInfo::VoidPtrBuiltinVaList;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override {
static const char *const GCCRegNames[] = {
@@ -170,6 +170,7 @@ public:
bool isValidCPUName(StringRef Name) const override;
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool setCPU(const std::string &Name) override;
+ std::optional<std::string> handleAsmEscapedChar(char EscChar) const override;
StringRef getABI() const override { return ABI; }
protected:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h b/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h
index 4750e3e9bf9b..8a9227bca34c 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -60,7 +60,7 @@ public:
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::VoidPtrBuiltinVaList;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp
index f272bedc170b..851f27dbb1e5 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp
@@ -33,7 +33,6 @@ bool CSKYTargetInfo::setCPU(const std::string &Name) {
void CSKYTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- Builder.defineMacro("__ELF__");
Builder.defineMacro("__csky__", "2");
Builder.defineMacro("__CSKY__", "2");
Builder.defineMacro("__ckcore__", "2");
@@ -52,8 +51,10 @@ void CSKYTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__" + ArchName.upper() + "__");
Builder.defineMacro("__" + ArchName.lower() + "__");
- Builder.defineMacro("__" + CPUName.upper() + "__");
- Builder.defineMacro("__" + CPUName.lower() + "__");
+ if (ArchName != CPUName) {
+ Builder.defineMacro("__" + CPUName.upper() + "__");
+ Builder.defineMacro("__" + CPUName.lower() + "__");
+ }
// TODO: Add support for BE if BE was supported later
StringRef endian = "__cskyLE__";
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h
index 6edd035d9eb8..11404e37db36 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h
@@ -15,7 +15,7 @@
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetInfo.h"
-#include "llvm/Support/CSKYTargetParser.h"
+#include "llvm/TargetParser/CSKYTargetParser.h"
namespace clang {
namespace targets {
@@ -82,7 +82,7 @@ public:
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h b/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h
index 06cfa79756f9..acfcc8c47ba9 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h
@@ -14,8 +14,8 @@
#define LLVM_CLANG_LIB_BASIC_TARGETS_DIRECTX_H
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -42,6 +42,9 @@ static const unsigned DirectXAddrSpaceMap[] = {
0, // ptr32_uptr
0, // ptr64
3, // hlsl_groupshared
+ // Wasm address space values for this target are dummy values,
+ // as it is only enabled for Wasm targets.
+ 20, // wasm_funcref
};
class LLVM_LIBRARY_VISIBILITY DirectXTargetInfo : public TargetInfo {
@@ -74,7 +77,7 @@ public:
return std::nullopt;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override {
return std::nullopt;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
index 0b98c48fea55..ac747e371fb4 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
@@ -24,8 +24,6 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__qdsp6__", "1");
Builder.defineMacro("__hexagon__", "1");
- Builder.defineMacro("__ELF__");
-
// The macro __HVXDBL__ is deprecated.
bool DefineHvxDbl = false;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
index 191d99b11d53..cdb47dbae799 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -112,7 +112,7 @@ public:
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
static const char *getHexagonCPUSuffix(StringRef Name);
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h b/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h
index 6ab4aea6ac4a..144cbc7de989 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -87,7 +87,7 @@ public:
return false;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
bool hasBitIntType() const override { return true; }
};
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Le64.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/Le64.cpp
index 5c961ff81e05..f7afa0e747d6 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Le64.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Le64.cpp
@@ -27,5 +27,4 @@ void Le64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
DefineStd(Builder, "unix", Opts);
defineCPUMacros(Builder, "le64", /*Tuning=*/false);
- Builder.defineMacro("__ELF__");
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Le64.h b/contrib/llvm-project/clang/lib/Basic/Targets/Le64.h
index 6652bcea78df..45f6a4e9dd75 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Le64.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Le64.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -41,7 +41,7 @@ public:
return TargetInfo::PNaClABIBuiltinVaList;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override {
return std::nullopt;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp
index 2079c89e3e15..6958479cd7c4 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp
@@ -14,8 +14,8 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/TargetParser.h"
using namespace clang;
using namespace clang::targets;
@@ -31,34 +31,89 @@ ArrayRef<const char *> LoongArchTargetInfo::getGCCRegNames() const {
"$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9",
"$f10", "$f11", "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18",
"$f19", "$f20", "$f21", "$f22", "$f23", "$f24", "$f25", "$f26", "$f27",
- "$f28", "$f29", "$f30", "$f31"};
+ "$f28", "$f29", "$f30", "$f31",
+ // Condition flag registers.
+ "$fcc0", "$fcc1", "$fcc2", "$fcc3", "$fcc4", "$fcc5", "$fcc6", "$fcc7",
+ // 128-bit vector registers.
+ "$vr0", "$vr1", "$vr2", "$vr3", "$vr4", "$vr5", "$vr6", "$vr7", "$vr8",
+ "$vr9", "$vr10", "$vr11", "$vr12", "$vr13", "$vr14", "$vr15", "$vr16",
+ "$vr17", "$vr18", "$vr19", "$vr20", "$vr21", "$vr22", "$vr23", "$vr24",
+ "$vr25", "$vr26", "$vr27", "$vr28", "$vr29", "$vr30", "$vr31",
+ // 256-bit vector registers.
+ "$xr0", "$xr1", "$xr2", "$xr3", "$xr4", "$xr5", "$xr6", "$xr7", "$xr8",
+ "$xr9", "$xr10", "$xr11", "$xr12", "$xr13", "$xr14", "$xr15", "$xr16",
+ "$xr17", "$xr18", "$xr19", "$xr20", "$xr21", "$xr22", "$xr23", "$xr24",
+ "$xr25", "$xr26", "$xr27", "$xr28", "$xr29", "$xr30", "$xr31"};
return llvm::ArrayRef(GCCRegNames);
}
ArrayRef<TargetInfo::GCCRegAlias>
LoongArchTargetInfo::getGCCRegAliases() const {
static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
- {{"$zero"}, "$r0"}, {{"$ra"}, "$r1"}, {{"$tp"}, "$r2"},
- {{"$sp"}, "$r3"}, {{"$a0"}, "$r4"}, {{"$a1"}, "$r5"},
- {{"$a2"}, "$r6"}, {{"$a3"}, "$r7"}, {{"$a4"}, "$r8"},
- {{"$a5"}, "$r9"}, {{"$a6"}, "$r10"}, {{"$a7"}, "$r11"},
- {{"$t0"}, "$r12"}, {{"$t1"}, "$r13"}, {{"$t2"}, "$r14"},
- {{"$t3"}, "$r15"}, {{"$t4"}, "$r16"}, {{"$t5"}, "$r17"},
- {{"$t6"}, "$r18"}, {{"$t7"}, "$r19"}, {{"$t8"}, "$r20"},
- {{"$fp", "$s9"}, "$r22"}, {{"$s0"}, "$r23"}, {{"$s1"}, "$r24"},
- {{"$s2"}, "$r25"}, {{"$s3"}, "$r26"}, {{"$s4"}, "$r27"},
- {{"$s5"}, "$r28"}, {{"$s6"}, "$r29"}, {{"$s7"}, "$r30"},
- {{"$s8"}, "$r31"}, {{"$fa0"}, "$f0"}, {{"$fa1"}, "$f1"},
- {{"$fa2"}, "$f2"}, {{"$fa3"}, "$f3"}, {{"$fa4"}, "$f4"},
- {{"$fa5"}, "$f5"}, {{"$fa6"}, "$f6"}, {{"$fa7"}, "$f7"},
- {{"$ft0"}, "$f8"}, {{"$ft1"}, "$f9"}, {{"$ft2"}, "$f10"},
- {{"$ft3"}, "$f11"}, {{"$ft4"}, "$f12"}, {{"$ft5"}, "$f13"},
- {{"$ft6"}, "$f14"}, {{"$ft7"}, "$f15"}, {{"$ft8"}, "$f16"},
- {{"$ft9"}, "$f17"}, {{"$ft10"}, "$f18"}, {{"$ft11"}, "$f19"},
- {{"$ft12"}, "$f20"}, {{"$ft13"}, "$f21"}, {{"$ft14"}, "$f22"},
- {{"$ft15"}, "$f23"}, {{"$fs0"}, "$f24"}, {{"$fs1"}, "$f25"},
- {{"$fs2"}, "$f26"}, {{"$fs3"}, "$f27"}, {{"$fs4"}, "$f28"},
- {{"$fs5"}, "$f29"}, {{"$fs6"}, "$f30"}, {{"$fs7"}, "$f31"},
+ {{"zero", "$zero", "r0"}, "$r0"},
+ {{"ra", "$ra", "r1"}, "$r1"},
+ {{"tp", "$tp", "r2"}, "$r2"},
+ {{"sp", "$sp", "r3"}, "$r3"},
+ {{"a0", "$a0", "r4"}, "$r4"},
+ {{"a1", "$a1", "r5"}, "$r5"},
+ {{"a2", "$a2", "r6"}, "$r6"},
+ {{"a3", "$a3", "r7"}, "$r7"},
+ {{"a4", "$a4", "r8"}, "$r8"},
+ {{"a5", "$a5", "r9"}, "$r9"},
+ {{"a6", "$a6", "r10"}, "$r10"},
+ {{"a7", "$a7", "r11"}, "$r11"},
+ {{"t0", "$t0", "r12"}, "$r12"},
+ {{"t1", "$t1", "r13"}, "$r13"},
+ {{"t2", "$t2", "r14"}, "$r14"},
+ {{"t3", "$t3", "r15"}, "$r15"},
+ {{"t4", "$t4", "r16"}, "$r16"},
+ {{"t5", "$t5", "r17"}, "$r17"},
+ {{"t6", "$t6", "r18"}, "$r18"},
+ {{"t7", "$t7", "r19"}, "$r19"},
+ {{"t8", "$t8", "r20"}, "$r20"},
+ {{"r21"}, "$r21"},
+ {{"s9", "$s9", "r22", "fp", "$fp"}, "$r22"},
+ {{"s0", "$s0", "r23"}, "$r23"},
+ {{"s1", "$s1", "r24"}, "$r24"},
+ {{"s2", "$s2", "r25"}, "$r25"},
+ {{"s3", "$s3", "r26"}, "$r26"},
+ {{"s4", "$s4", "r27"}, "$r27"},
+ {{"s5", "$s5", "r28"}, "$r28"},
+ {{"s6", "$s6", "r29"}, "$r29"},
+ {{"s7", "$s7", "r30"}, "$r30"},
+ {{"s8", "$s8", "r31"}, "$r31"},
+ {{"$fa0"}, "$f0"},
+ {{"$fa1"}, "$f1"},
+ {{"$fa2"}, "$f2"},
+ {{"$fa3"}, "$f3"},
+ {{"$fa4"}, "$f4"},
+ {{"$fa5"}, "$f5"},
+ {{"$fa6"}, "$f6"},
+ {{"$fa7"}, "$f7"},
+ {{"$ft0"}, "$f8"},
+ {{"$ft1"}, "$f9"},
+ {{"$ft2"}, "$f10"},
+ {{"$ft3"}, "$f11"},
+ {{"$ft4"}, "$f12"},
+ {{"$ft5"}, "$f13"},
+ {{"$ft6"}, "$f14"},
+ {{"$ft7"}, "$f15"},
+ {{"$ft8"}, "$f16"},
+ {{"$ft9"}, "$f17"},
+ {{"$ft10"}, "$f18"},
+ {{"$ft11"}, "$f19"},
+ {{"$ft12"}, "$f20"},
+ {{"$ft13"}, "$f21"},
+ {{"$ft14"}, "$f22"},
+ {{"$ft15"}, "$f23"},
+ {{"$fs0"}, "$f24"},
+ {{"$fs1"}, "$f25"},
+ {{"$fs2"}, "$f26"},
+ {{"$fs3"}, "$f27"},
+ {{"$fs4"}, "$f28"},
+ {{"$fs5"}, "$f29"},
+ {{"$fs6"}, "$f30"},
+ {{"$fs7"}, "$f31"},
};
return llvm::ArrayRef(GCCRegAliases);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h b/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h
index 646c8d071f1d..52c4ce425368 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -51,7 +51,7 @@ public:
return TargetInfo::VoidPtrBuiltinVaList;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp
index 6c2d77444f13..1b0cc4d0b13f 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp
@@ -17,7 +17,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/TargetParser/TargetParser.h"
#include <cstdint>
#include <cstring>
#include <limits>
@@ -27,8 +27,8 @@ namespace clang {
namespace targets {
M68kTargetInfo::M68kTargetInfo(const llvm::Triple &Triple,
- const TargetOptions &)
- : TargetInfo(Triple) {
+ const TargetOptions &Opts)
+ : TargetInfo(Triple), TargetOpts(Opts) {
std::string Layout;
@@ -120,6 +120,11 @@ void M68kTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
}
+
+ // Floating point
+ if (TargetOpts.FeatureMap.lookup("isa-68881") ||
+ TargetOpts.FeatureMap.lookup("isa-68882"))
+ Builder.defineMacro("__HAVE_68881__");
}
ArrayRef<Builtin::Info> M68kTargetInfo::getTargetBuiltins() const {
@@ -192,6 +197,12 @@ bool M68kTargetInfo::validateAsmConstraint(
break;
}
break;
+ case 'Q': // address register indirect addressing
+ case 'U': // address register indirect w/ constant offset addressing
+ // TODO: Handle 'S' (basically 'm' when pc-rel is enforced) when
+ // '-mpcrel' flag is properly handled by the driver.
+ info.setAllowsMemory();
+ return true;
default:
break;
}
@@ -230,7 +241,7 @@ std::string M68kTargetInfo::convertConstraint(const char *&Constraint) const {
return std::string(1, *Constraint);
}
-const char *M68kTargetInfo::getClobbers() const {
+std::string_view M68kTargetInfo::getClobbers() const {
// FIXME: Is this really right?
return "";
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.h b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.h
index 16b32248418c..1af00115a505 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.h
@@ -16,8 +16,8 @@
#include "OSTargets.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
#include <optional>
namespace clang {
@@ -36,6 +36,8 @@ class LLVM_LIBRARY_VISIBILITY M68kTargetInfo : public TargetInfo {
CK_68060
} CPU = CK_Unknown;
+ const TargetOptions &TargetOpts;
+
public:
M68kTargetInfo(const llvm::Triple &Triple, const TargetOptions &);
@@ -49,7 +51,7 @@ public:
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
std::optional<std::string> handleAsmEscapedChar(char EscChar) const override;
- const char *getClobbers() const override;
+ std::string_view getClobbers() const override;
BuiltinVaListKind getBuiltinVaListKind() const override;
bool setCPU(const std::string &Name) override;
};
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.cpp
index de8704fe97e7..844f5d3af703 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.cpp
@@ -29,6 +29,5 @@ void MSP430TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("MSP430");
Builder.defineMacro("__MSP430__");
- Builder.defineMacro("__ELF__");
// FIXME: defines for different 'flavours' of MCU
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h b/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h
index 3761fc7667b8..25639b8c1e0a 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -87,7 +87,7 @@ public:
return false;
}
- const char *getClobbers() const override {
+ std::string_view getClobbers() const override {
// FIXME: Is this really right?
return "";
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp
index 078a8fe62ac2..cdf652c49f60 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp
@@ -238,12 +238,6 @@ bool MipsTargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
Diags.Report(diag::err_target_unsupported_cpu_for_micromips) << CPU;
return false;
}
- // FIXME: It's valid to use O32 on a 64-bit CPU but the backend can't handle
- // this yet. It's better to fail here than on the backend assertion.
- if (processorSupportsGPR64() && ABI == "o32") {
- Diags.Report(diag::err_target_unsupported_abi) << ABI << CPU;
- return false;
- }
// 64-bit ABI's require 64-bit CPU's.
if (!processorSupportsGPR64() && (ABI == "n32" || ABI == "n64")) {
@@ -251,24 +245,6 @@ bool MipsTargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
return false;
}
- // FIXME: It's valid to use O32 on a mips64/mips64el triple but the backend
- // can't handle this yet. It's better to fail here than on the
- // backend assertion.
- if (getTriple().isMIPS64() && ABI == "o32") {
- Diags.Report(diag::err_target_unsupported_abi_for_triple)
- << ABI << getTriple().str();
- return false;
- }
-
- // FIXME: It's valid to use N32/N64 on a mips/mipsel triple but the backend
- // can't handle this yet. It's better to fail here than on the
- // backend assertion.
- if (getTriple().isMIPS32() && (ABI == "n32" || ABI == "n64")) {
- Diags.Report(diag::err_target_unsupported_abi_for_triple)
- << ABI << getTriple().str();
- return false;
- }
-
// -fpxx is valid only for the o32 ABI
if (FPMode == FPXX && (ABI == "n32" || ABI == "n64")) {
Diags.Report(diag::err_unsupported_abi_for_opt) << "-mfpxx" << "o32";
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
index 9e84dbc386ed..7ecbd8633cb3 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -278,7 +278,7 @@ public:
return TargetInfo::convertConstraint(Constraint);
}
- const char *getClobbers() const override {
+ std::string_view getClobbers() const override {
// In GCC, $1 is not widely used in generated code (it's used only in a few
// specific situations), so there is no real need for users to add it to
// the clobbers list if they want to use it in their inline assembly code.
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
index bacd93ee1c37..a9fc88295700 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
@@ -73,7 +73,7 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
// types.
llvm::Triple HostTriple(Opts.HostTriple);
if (!HostTriple.isNVPTX())
- HostTarget.reset(AllocateTarget(llvm::Triple(Opts.HostTriple), Opts));
+ HostTarget = AllocateTarget(llvm::Triple(Opts.HostTriple), Opts);
// If no host target, make some guesses about the data layout and return.
if (!HostTarget) {
@@ -93,6 +93,8 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
default:
llvm_unreachable("TargetPointerWidth must be 32 or 64");
}
+
+ MaxAtomicInlineWidth = TargetPointerWidth;
return;
}
@@ -166,7 +168,7 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__PTX__");
Builder.defineMacro("__NVPTX__");
- if (Opts.CUDAIsDevice || Opts.OpenMPIsDevice) {
+ if (Opts.CUDAIsDevice || Opts.OpenMPIsTargetDevice || !HostTarget) {
// Set __CUDA_ARCH__ for the GPU specified.
std::string CUDAArchCode = [this] {
switch (GPU) {
@@ -193,6 +195,8 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX90a:
case CudaArch::GFX90c:
case CudaArch::GFX940:
+ case CudaArch::GFX941:
+ case CudaArch::GFX942:
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
@@ -208,6 +212,8 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX1101:
case CudaArch::GFX1102:
case CudaArch::GFX1103:
+ case CudaArch::GFX1150:
+ case CudaArch::GFX1151:
case CudaArch::Generic:
case CudaArch::LAST:
break;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
index aab3abfa6294..6fa0b8df97d7 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
@@ -16,8 +16,8 @@
#include "clang/Basic/Cuda.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
#include <optional>
namespace clang {
@@ -45,6 +45,9 @@ static const unsigned NVPTXAddrSpaceMap[] = {
0, // ptr32_uptr
0, // ptr64
0, // hlsl_groupshared
+ // Wasm address space values for this target are dummy values,
+ // as it is only enabled for Wasm targets.
+ 20, // wasm_funcref
};
/// The DWARF address class. Taken from
@@ -106,7 +109,7 @@ public:
}
}
- const char *getClobbers() const override {
+ std::string_view getClobbers() const override {
// FIXME: Is this really right?
return "";
}
@@ -178,7 +181,6 @@ public:
bool hasBitIntType() const override { return true; }
bool hasBFloat16Type() const override { return true; }
- const char *getBFloat16Mangling() const override { return "u6__bf16"; };
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
index 33a5b500e2d1..627bc912fa23 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
@@ -108,9 +108,16 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
Builder.defineMacro("__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__", Str);
}
- // Tell users about the kernel if there is one.
- if (Triple.isOSDarwin())
+ if (Triple.isOSDarwin()) {
+ // Any darwin OS defines a general darwin OS version macro in addition
+ // to the other OS specific macros.
+ assert(OsVersion.getMinor().value_or(0) < 100 &&
+ OsVersion.getSubminor().value_or(0) < 100 && "Invalid version!");
+ Builder.defineMacro("__ENVIRONMENT_OS_VERSION_MIN_REQUIRED__", Str);
+
+ // Tell users about the kernel if there is one.
Builder.defineMacro("__MACH__");
+ }
PlatformMinVersion = OsVersion;
}
@@ -207,7 +214,8 @@ static void addVisualCDefines(const LangOptions &Opts, MacroBuilder &Builder) {
Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", Twine(1));
if (Opts.isCompatibleWithMSVC(LangOptions::MSVC2015)) {
- if (Opts.CPlusPlus2b)
+ if (Opts.CPlusPlus23)
+ // TODO update to the proper value.
Builder.defineMacro("_MSVC_LANG", "202004L");
else if (Opts.CPlusPlus20)
Builder.defineMacro("_MSVC_LANG", "202002L");
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
index fd372cb12df2..8f4331b02f3b 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
@@ -41,12 +41,9 @@ protected:
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
MacroBuilder &Builder) const override {
Builder.defineMacro("__CloudABI__");
- Builder.defineMacro("__ELF__");
// CloudABI uses ISO/IEC 10646:2012 for wchar_t, char16_t and char32_t.
Builder.defineMacro("__STDC_ISO_10646__", "201206L");
- Builder.defineMacro("__STDC_UTF_16__");
- Builder.defineMacro("__STDC_UTF_32__");
}
public:
@@ -61,7 +58,6 @@ protected:
MacroBuilder &Builder) const override {
// Ananas defines
Builder.defineMacro("__Ananas__");
- Builder.defineMacro("__ELF__");
}
public:
@@ -177,7 +173,6 @@ protected:
// DragonFly defines; list based off of gcc output
Builder.defineMacro("__DragonFly__");
Builder.defineMacro("__DragonFly_cc_version", "100001");
- Builder.defineMacro("__ELF__");
Builder.defineMacro("__KPRINTF_ATTRIBUTE__");
Builder.defineMacro("__tune_i386__");
DefineStd(Builder, "unix", Opts);
@@ -222,7 +217,6 @@ protected:
Builder.defineMacro("__FreeBSD_cc_version", Twine(CCVersion));
Builder.defineMacro("__KPRINTF_ATTRIBUTE__");
DefineStd(Builder, "unix", Opts);
- Builder.defineMacro("__ELF__");
// On FreeBSD, wchar_t contains the number of the code point as
// used by the character set of the locale. These character sets are
@@ -274,7 +268,6 @@ protected:
DefineStd(Builder, "unix", Opts);
Builder.defineMacro("__FreeBSD_kernel__");
Builder.defineMacro("__GLIBC__");
- Builder.defineMacro("__ELF__");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
if (Opts.CPlusPlus)
@@ -293,7 +286,6 @@ protected:
MacroBuilder &Builder) const override {
// Haiku defines; list based off of gcc output
Builder.defineMacro("__HAIKU__");
- Builder.defineMacro("__ELF__");
DefineStd(Builder, "unix", Opts);
if (this->HasFloat128)
Builder.defineMacro("__FLOAT128__");
@@ -330,7 +322,6 @@ protected:
Builder.defineMacro("__gnu_hurd__");
Builder.defineMacro("__MACH__");
Builder.defineMacro("__GLIBC__");
- Builder.defineMacro("__ELF__");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
if (Opts.CPlusPlus)
@@ -355,7 +346,6 @@ protected:
Builder.defineMacro("_EM_LSIZE", "4");
Builder.defineMacro("_EM_FSIZE", "4");
Builder.defineMacro("_EM_DSIZE", "8");
- Builder.defineMacro("__ELF__");
DefineStd(Builder, "unix", Opts);
}
@@ -372,7 +362,6 @@ protected:
// Linux defines; list based off of gcc output
DefineStd(Builder, "unix", Opts);
DefineStd(Builder, "linux", Opts);
- Builder.defineMacro("__ELF__");
if (Triple.isAndroid()) {
Builder.defineMacro("__ANDROID__", "1");
this->PlatformName = "android";
@@ -434,7 +423,6 @@ protected:
// NetBSD defines; list based off of gcc output
Builder.defineMacro("__NetBSD__");
Builder.defineMacro("__unix__");
- Builder.defineMacro("__ELF__");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
}
@@ -456,7 +444,6 @@ protected:
Builder.defineMacro("__OpenBSD__");
DefineStd(Builder, "unix", Opts);
- Builder.defineMacro("__ELF__");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
if (this->HasFloat128)
@@ -502,10 +489,8 @@ protected:
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
MacroBuilder &Builder) const override {
// PS3 PPU defines.
- Builder.defineMacro("__PPC__");
Builder.defineMacro("__PPU__");
Builder.defineMacro("__CELLOS_LV2__");
- Builder.defineMacro("__ELF__");
Builder.defineMacro("__LP32__");
Builder.defineMacro("_ARCH_PPC64");
Builder.defineMacro("__powerpc64__");
@@ -519,7 +504,7 @@ public:
this->IntMaxType = TargetInfo::SignedLongLong;
this->Int64Type = TargetInfo::SignedLongLong;
this->SizeType = TargetInfo::UnsignedInt;
- this->resetDataLayout("E-m:e-p:32:32-i64:64-n32:64");
+ this->resetDataLayout("E-m:e-p:32:32-Fi64-i64:64-n32:64");
}
};
@@ -533,8 +518,9 @@ protected:
Builder.defineMacro("__FreeBSD_cc_version", "900001");
Builder.defineMacro("__KPRINTF_ATTRIBUTE__");
DefineStd(Builder, "unix", Opts);
- Builder.defineMacro("__ELF__");
Builder.defineMacro("__SCE__");
+ Builder.defineMacro("__STDC_NO_COMPLEX__");
+ Builder.defineMacro("__STDC_NO_THREADS__");
}
public:
@@ -606,7 +592,6 @@ protected:
// RTEMS defines; list based off of gcc output
Builder.defineMacro("__rtems__");
- Builder.defineMacro("__ELF__");
if (Opts.CPlusPlus)
Builder.defineMacro("_GNU_SOURCE");
}
@@ -641,7 +626,6 @@ protected:
MacroBuilder &Builder) const override {
DefineStd(Builder, "sun", Opts);
DefineStd(Builder, "unix", Opts);
- Builder.defineMacro("__ELF__");
Builder.defineMacro("__svr4__");
Builder.defineMacro("__SVR4");
// Solaris headers require _XOPEN_SOURCE to be set to 600 for C99 and
@@ -787,13 +771,11 @@ protected:
MacroBuilder &Builder) const override {
// FIXME: _LONG_LONG should not be defined under -std=c89.
Builder.defineMacro("_LONG_LONG");
- Builder.defineMacro("_OPEN_DEFAULT");
- // _UNIX03_WITHDRAWN is required to build libcxx.
- Builder.defineMacro("_UNIX03_WITHDRAWN");
Builder.defineMacro("__370__");
Builder.defineMacro("__BFP__");
// FIXME: __BOOL__ should not be defined under -std=c89.
Builder.defineMacro("__BOOL__");
+ Builder.defineMacro("__COMPILER_VER__", "0x50000000");
Builder.defineMacro("__LONGNAME__");
Builder.defineMacro("__MVS__");
Builder.defineMacro("__THW_370__");
@@ -805,17 +787,6 @@ protected:
if (this->PointerWidth == 64)
Builder.defineMacro("__64BIT__");
- if (Opts.CPlusPlus) {
- Builder.defineMacro("__DLL__");
- // _XOPEN_SOURCE=600 is required to build libcxx.
- Builder.defineMacro("_XOPEN_SOURCE", "600");
- }
-
- if (Opts.GNUMode) {
- Builder.defineMacro("_MI_BUILTIN");
- Builder.defineMacro("_EXT");
- }
-
if (Opts.CPlusPlus && Opts.WChar) {
// Macro __wchar_t is defined so that the wchar_t data
// type is not declared as a typedef in system headers.
@@ -834,6 +805,7 @@ public:
this->UseZeroLengthBitfieldAlignment = true;
this->UseLeadingZeroLengthBitfield = false;
this->ZeroLengthBitfieldBoundary = 32;
+ this->TheCXXABI.set(TargetCXXABI::XL);
}
bool areDefaultedSMFStillPOD(const LangOptions &) const override {
@@ -872,7 +844,6 @@ protected:
Builder.defineMacro("_GNU_SOURCE");
DefineStd(Builder, "unix", Opts);
- Builder.defineMacro("__ELF__");
Builder.defineMacro("__native_client__");
}
@@ -919,7 +890,6 @@ protected:
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
MacroBuilder &Builder) const override {
Builder.defineMacro("__Fuchsia__");
- Builder.defineMacro("__ELF__");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
// Required by the libc++ locale support.
@@ -1005,6 +975,66 @@ public:
}
};
+// OHOS target
+template <typename Target>
+class LLVM_LIBRARY_VISIBILITY OHOSTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // Linux defines; list based off of gcc output
+ DefineStd(Builder, "unix", Opts);
+
+ // Generic OHOS target defines
+ if (Triple.isOHOSFamily()) {
+ Builder.defineMacro("__OHOS_FAMILY__", "1");
+
+ auto Version = Triple.getEnvironmentVersion();
+ this->PlatformName = "ohos";
+ this->PlatformMinVersion = Version;
+ Builder.defineMacro("__OHOS_Major__", Twine(Version.getMajor()));
+ if (auto Minor = Version.getMinor())
+ Builder.defineMacro("__OHOS_Minor__", Twine(*Minor));
+ if (auto Subminor = Version.getSubminor())
+ Builder.defineMacro("__OHOS_Micro__", Twine(*Subminor));
+ }
+
+ if (Triple.isOpenHOS())
+ Builder.defineMacro("__OHOS__");
+
+ if (Triple.isOSLinux()) {
+ DefineStd(Builder, "linux", Opts);
+ } else if (Triple.isOSLiteOS()) {
+ Builder.defineMacro("__LITEOS__");
+ }
+
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+ if (this->HasFloat128)
+ Builder.defineMacro("__FLOAT128__");
+ }
+
+public:
+ OHOSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : OSTargetInfo<Target>(Triple, Opts) {
+ this->WIntType = TargetInfo::UnsignedInt;
+
+ switch (Triple.getArch()) {
+ default:
+ break;
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ this->HasFloat128 = true;
+ break;
+ }
+ }
+
+ const char *getStaticInitSectionSpecifier() const override {
+ return ".text.startup";
+ }
+};
+
} // namespace targets
} // namespace clang
#endif // LLVM_CLANG_LIB_BASIC_TARGETS_OSTARGETS_H
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h b/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h
index 72c586359cea..595c4d83b1d1 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h
@@ -16,8 +16,8 @@
#include "Mips.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -69,7 +69,7 @@ public:
return false;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
bool hasBitIntType() const override { return true; }
};
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
index d46b1c55cf81..89aa9bd58511 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
@@ -21,6 +21,8 @@ using namespace clang::targets;
static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
{#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
{#ID, TYPE, ATTRS, nullptr, HeaderDesc::HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsPPC.def"
@@ -336,9 +338,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__LONGDOUBLE64");
}
- // Define this for elfv2 (64-bit only) or 64-bit darwin.
- if (ABI == "elfv2" ||
- (getTriple().getOS() == llvm::Triple::Darwin && PointerWidth == 64))
+ // Define this for elfv2 (64-bit only).
+ if (ABI == "elfv2")
Builder.defineMacro("__STRUCT_PARM_ALIGN__", "16");
if (ArchDefs & ArchDefineName)
@@ -790,10 +791,10 @@ ArrayRef<TargetInfo::GCCRegAlias> PPCTargetInfo::getGCCRegAliases() const {
// PPC ELFABIv2 DWARF Definitoin "Table 2.26. Mappings of Common Registers".
// vs0 ~ vs31 is mapping to 32 - 63,
-// vs32 ~ vs63 is mapping to 77 - 108.
+// vs32 ~ vs63 is mapping to 77 - 108.
const TargetInfo::AddlRegName GCCAddlRegNames[] = {
// Table of additional register names to use in user input.
- {{"vs0"}, 32}, {{"vs1"}, 33}, {{"vs2"}, 34}, {{"vs3"}, 35},
+ {{"vs0"}, 32}, {{"vs1"}, 33}, {{"vs2"}, 34}, {{"vs3"}, 35},
{{"vs4"}, 36}, {{"vs5"}, 37}, {{"vs6"}, 38}, {{"vs7"}, 39},
{{"vs8"}, 40}, {{"vs9"}, 41}, {{"vs10"}, 42}, {{"vs11"}, 43},
{{"vs12"}, 44}, {{"vs13"}, 45}, {{"vs14"}, 46}, {{"vs15"}, 47},
@@ -814,8 +815,8 @@ const TargetInfo::AddlRegName GCCAddlRegNames[] = {
ArrayRef<TargetInfo::AddlRegName> PPCTargetInfo::getGCCAddlRegNames() const {
if (ABI == "elfv2")
return llvm::ArrayRef(GCCAddlRegNames);
- else
- return TargetInfo::getGCCAddlRegNames();
+ else
+ return TargetInfo::getGCCAddlRegNames();
}
static constexpr llvm::StringLiteral ValidCPUNames[] = {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
index c8197154fff7..bc06e7978ac3 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
@@ -16,9 +16,9 @@
#include "OSTargets.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -87,7 +87,6 @@ public:
PPCTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple) {
SuitableAlign = 128;
- SimdDefaultAlign = 128;
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble();
HasStrictFP = true;
@@ -331,7 +330,7 @@ public:
return R;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
int getEHDataRegisterNumber(unsigned RegNo) const override {
if (RegNo == 0)
return 3;
@@ -364,11 +363,11 @@ public:
PPC32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: PPCTargetInfo(Triple, Opts) {
if (Triple.isOSAIX())
- resetDataLayout("E-m:a-p:32:32-i64:64-n32");
+ resetDataLayout("E-m:a-p:32:32-Fi32-i64:64-n32");
else if (Triple.getArch() == llvm::Triple::ppcle)
- resetDataLayout("e-m:e-p:32:32-i64:64-n32");
+ resetDataLayout("e-m:e-p:32:32-Fn32-i64:64-n32");
else
- resetDataLayout("E-m:e-p:32:32-i64:64-n32");
+ resetDataLayout("E-m:e-p:32:32-Fn32-i64:64-n32");
switch (getTriple().getOS()) {
case llvm::Triple::Linux:
@@ -401,7 +400,7 @@ public:
}
BuiltinVaListKind getBuiltinVaListKind() const override {
- // This is the ELF definition, and is overridden by the Darwin sub-target
+ // This is the ELF definition
return TargetInfo::PowerABIBuiltinVaList;
}
};
@@ -419,19 +418,23 @@ public:
if (Triple.isOSAIX()) {
// TODO: Set appropriate ABI for AIX platform.
- DataLayout = "E-m:a-i64:64-n32:64";
+ DataLayout = "E-m:a-Fi64-i64:64-n32:64";
LongDoubleWidth = 64;
LongDoubleAlign = DoubleAlign = 32;
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
} else if ((Triple.getArch() == llvm::Triple::ppc64le)) {
- DataLayout = "e-m:e-i64:64-n32:64";
+ DataLayout = "e-m:e-Fn32-i64:64-n32:64";
ABI = "elfv2";
} else {
- DataLayout = "E-m:e-i64:64-n32:64";
- if (Triple.isPPC64ELFv2ABI())
+ DataLayout = "E-m:e";
+ if (Triple.isPPC64ELFv2ABI()) {
ABI = "elfv2";
- else
+ DataLayout += "-Fn32";
+ } else {
ABI = "elfv1";
+ DataLayout += "-Fi64";
+ }
+ DataLayout += "-i64:64-n32:64";
}
if (Triple.isOSFreeBSD() || Triple.isOSOpenBSD() || Triple.isMusl()) {
@@ -482,33 +485,6 @@ public:
}
};
-class LLVM_LIBRARY_VISIBILITY DarwinPPC32TargetInfo
- : public DarwinTargetInfo<PPC32TargetInfo> {
-public:
- DarwinPPC32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : DarwinTargetInfo<PPC32TargetInfo>(Triple, Opts) {
- HasAlignMac68kSupport = true;
- BoolWidth = BoolAlign = 32; // XXX support -mone-byte-bool?
- PtrDiffType = SignedInt; // for http://llvm.org/bugs/show_bug.cgi?id=15726
- LongLongAlign = 32;
- resetDataLayout("E-m:o-p:32:32-f64:32:64-n32", "_");
- }
-
- BuiltinVaListKind getBuiltinVaListKind() const override {
- return TargetInfo::CharPtrBuiltinVaList;
- }
-};
-
-class LLVM_LIBRARY_VISIBILITY DarwinPPC64TargetInfo
- : public DarwinTargetInfo<PPC64TargetInfo> {
-public:
- DarwinPPC64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : DarwinTargetInfo<PPC64TargetInfo>(Triple, Opts) {
- HasAlignMac68kSupport = true;
- resetDataLayout("E-m:o-i64:64-n32:64", "_");
- }
-};
-
class LLVM_LIBRARY_VISIBILITY AIXPPC32TargetInfo :
public AIXTargetInfo<PPC32TargetInfo> {
public:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
index 7c801657b6ac..94c894dfec0b 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
@@ -1,4 +1,4 @@
-//===--- RISCV.cpp - Implement RISCV target feature support ---------------===//
+//===--- RISCV.cpp - Implement RISC-V target feature support --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements RISCV TargetInfo objects.
+// This file implements RISC-V TargetInfo objects.
//
//===----------------------------------------------------------------------===//
@@ -124,7 +124,6 @@ static unsigned getVersionValue(unsigned MajorVersion, unsigned MinorVersion) {
void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- Builder.defineMacro("__ELF__");
Builder.defineMacro("__riscv");
bool Is64Bit = getTriple().getArch() == llvm::Triple::riscv64;
Builder.defineMacro("__riscv_xlen", Is64Bit ? "64" : "32");
@@ -200,6 +199,11 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
// Currently we support the v0.11 RISC-V V intrinsics.
Builder.defineMacro("__riscv_v_intrinsic", Twine(getVersionValue(0, 11)));
}
+
+ auto VScale = getVScaleRange(Opts);
+ if (VScale && VScale->first && VScale->first == VScale->second)
+ Builder.defineMacro("__riscv_v_fixed_vlen",
+ Twine(VScale->first * llvm::RISCV::RVVBitsPerBlock));
}
static constexpr Builtin::Info BuiltinInfo[] = {
@@ -315,12 +319,15 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
if (ABI.empty())
ABI = ISAInfo->computeDefaultABI().str();
+ if (ISAInfo->hasExtension("zfh") || ISAInfo->hasExtension("zhinx"))
+ HasLegalHalfType = true;
+
return true;
}
bool RISCVTargetInfo::isValidCPUName(StringRef Name) const {
bool Is64Bit = getTriple().isArch64Bit();
- return llvm::RISCV::checkCPUKind(llvm::RISCV::parseCPUKind(Name), Is64Bit);
+ return llvm::RISCV::parseCPU(Name, Is64Bit);
}
void RISCVTargetInfo::fillValidCPUList(
@@ -331,8 +338,7 @@ void RISCVTargetInfo::fillValidCPUList(
bool RISCVTargetInfo::isValidTuneCPUName(StringRef Name) const {
bool Is64Bit = getTriple().isArch64Bit();
- return llvm::RISCV::checkTuneCPUKind(
- llvm::RISCV::parseTuneCPUKind(Name, Is64Bit), Is64Bit);
+ return llvm::RISCV::parseTuneCPU(Name, Is64Bit);
}
void RISCVTargetInfo::fillValidTuneCPUList(
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
index adff1da4ad5e..e4e39506bccf 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
@@ -1,4 +1,4 @@
-//===--- RISCV.h - Declare RISCV target feature support ---------*- C++ -*-===//
+//===--- RISCV.h - Declare RISC-V target feature support --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file declares RISCV TargetInfo objects.
+// This file declares RISC-V TargetInfo objects.
//
//===----------------------------------------------------------------------===//
@@ -15,9 +15,9 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/RISCVISAInfo.h"
+#include "llvm/TargetParser/Triple.h"
#include <optional>
namespace clang {
@@ -41,6 +41,7 @@ public:
HasRISCVVTypes = true;
MCountName = "_mcount";
HasFloat16 = true;
+ HasStrictFP = true;
}
bool setCPU(const std::string &Name) override {
@@ -60,7 +61,7 @@ public:
return TargetInfo::VoidPtrBuiltinVaList;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
StringRef getConstraintRegister(StringRef Constraint,
StringRef Expression) const override {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
index 69596c6eb6fe..a7ea03e7a5dd 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
@@ -13,10 +13,11 @@
#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_SPIR_H
#define LLVM_CLANG_LIB_BASIC_TARGETS_SPIR_H
+#include "Targets.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
#include <optional>
namespace clang {
@@ -45,6 +46,9 @@ static const unsigned SPIRDefIsPrivMap[] = {
0, // ptr32_uptr
0, // ptr64
0, // hlsl_groupshared
+ // Wasm address space values for this target are dummy values,
+ // as it is only enabled for Wasm targets.
+ 20, // wasm_funcref
};
// Used by both the SPIR and SPIR-V targets.
@@ -75,12 +79,17 @@ static const unsigned SPIRDefIsGenMap[] = {
0, // ptr32_uptr
0, // ptr64
0, // hlsl_groupshared
+ // Wasm address space values for this target are dummy values,
+ // as it is only enabled for Wasm targets.
+ 20, // wasm_funcref
};
// Base class for SPIR and SPIR-V target info.
class LLVM_LIBRARY_VISIBILITY BaseSPIRTargetInfo : public TargetInfo {
+ std::unique_ptr<TargetInfo> HostTarget;
+
protected:
- BaseSPIRTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
+ BaseSPIRTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: TargetInfo(Triple) {
assert((Triple.isSPIR() || Triple.isSPIRV()) &&
"Invalid architecture for SPIR or SPIR-V.");
@@ -98,6 +107,52 @@ protected:
// Define available target features
// These must be defined in sorted order!
NoAsmVariants = true;
+
+ llvm::Triple HostTriple(Opts.HostTriple);
+ if (!HostTriple.isSPIR() && !HostTriple.isSPIRV() &&
+ HostTriple.getArch() != llvm::Triple::UnknownArch) {
+ HostTarget = AllocateTarget(llvm::Triple(Opts.HostTriple), Opts);
+
+ // Copy properties from host target.
+ BoolWidth = HostTarget->getBoolWidth();
+ BoolAlign = HostTarget->getBoolAlign();
+ IntWidth = HostTarget->getIntWidth();
+ IntAlign = HostTarget->getIntAlign();
+ HalfWidth = HostTarget->getHalfWidth();
+ HalfAlign = HostTarget->getHalfAlign();
+ FloatWidth = HostTarget->getFloatWidth();
+ FloatAlign = HostTarget->getFloatAlign();
+ DoubleWidth = HostTarget->getDoubleWidth();
+ DoubleAlign = HostTarget->getDoubleAlign();
+ LongWidth = HostTarget->getLongWidth();
+ LongAlign = HostTarget->getLongAlign();
+ LongLongWidth = HostTarget->getLongLongWidth();
+ LongLongAlign = HostTarget->getLongLongAlign();
+ MinGlobalAlign = HostTarget->getMinGlobalAlign(/* TypeSize = */ 0);
+ NewAlign = HostTarget->getNewAlign();
+ DefaultAlignForAttributeAligned =
+ HostTarget->getDefaultAlignForAttributeAligned();
+ IntMaxType = HostTarget->getIntMaxType();
+ WCharType = HostTarget->getWCharType();
+ WIntType = HostTarget->getWIntType();
+ Char16Type = HostTarget->getChar16Type();
+ Char32Type = HostTarget->getChar32Type();
+ Int64Type = HostTarget->getInt64Type();
+ SigAtomicType = HostTarget->getSigAtomicType();
+ ProcessIDType = HostTarget->getProcessIDType();
+
+ UseBitFieldTypeAlignment = HostTarget->useBitFieldTypeAlignment();
+ UseZeroLengthBitfieldAlignment =
+ HostTarget->useZeroLengthBitfieldAlignment();
+ UseExplicitBitFieldAlignment = HostTarget->useExplicitBitFieldAlignment();
+ ZeroLengthBitfieldBoundary = HostTarget->getZeroLengthBitfieldBoundary();
+
+ // This is a bit of a lie, but it controls __GCC_ATOMIC_XXX_LOCK_FREE, and
+ // we need those macros to be identical on host and device, because (among
+ // other things) they affect which standard library classes are defined,
+ // and we need all classes to be defined on both the host and device.
+ MaxAtomicInlineWidth = HostTarget->getMaxAtomicInlineWidth();
+ }
}
public:
@@ -109,7 +164,7 @@ public:
return std::nullopt;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override {
return std::nullopt;
@@ -191,6 +246,8 @@ public:
bool hasFeature(StringRef Feature) const override {
return Feature == "spir";
}
+
+ bool checkArithmeticFenceSupported() const override { return true; }
};
class LLVM_LIBRARY_VISIBILITY SPIR32TargetInfo : public SPIRTargetInfo {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
index 4860b023d6e6..214fef88e1dc 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
@@ -14,8 +14,8 @@
#define LLVM_CLANG_LIB_BASIC_TARGETS_SPARC_H
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
// Shared base class for SPARC v8 (32-bit) and SPARC v9 (64-bit).
@@ -77,7 +77,7 @@ public:
}
return false;
}
- const char *getClobbers() const override {
+ std::string_view getClobbers() const override {
// FIXME: Implement!
return "";
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
index 030f5ec7d69b..9ba255745cf2 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -36,7 +36,6 @@ public:
HasTransactionalExecution(false), HasVector(false), SoftFloat(false) {
IntMaxType = SignedLong;
Int64Type = SignedLong;
- TLSSupported = true;
IntWidth = IntAlign = 32;
LongWidth = LongLongWidth = LongAlign = LongLongAlign = 64;
Int128Align = 64;
@@ -47,16 +46,20 @@ public:
DefaultAlignForAttributeAligned = 64;
MinGlobalAlign = 16;
if (Triple.isOSzOS()) {
+ TLSSupported = false;
// All vector types are default aligned on an 8-byte boundary, even if the
// vector facility is not available. That is different from Linux.
MaxVectorAlign = 64;
- // Compared to Linux/ELF, the data layout differs only in that name
- // mangling is GOFF.
- resetDataLayout(
- "E-m:l-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64");
- } else
+ // Compared to Linux/ELF, the data layout differs only in some details:
+ // - name mangling is GOFF.
+ // - 32 bit pointers, either as default or special address space
+ resetDataLayout("E-m:l-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-"
+ "a:8:16-n32:64");
+ } else {
+ TLSSupported = true;
resetDataLayout("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64"
"-v128:64-a:8:16-n32:64");
+ }
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
HasStrictFP = true;
}
@@ -106,7 +109,7 @@ public:
return TargetInfo::convertConstraint(Constraint);
}
- const char *getClobbers() const override {
+ std::string_view getClobbers() const override {
// FIXME: Is this really right?
return "";
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/TCE.h b/contrib/llvm-project/clang/lib/Basic/Targets/TCE.h
index 430ace3ab237..dcf684fe6dbc 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/TCE.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/TCE.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -51,6 +51,9 @@ static const unsigned TCEOpenCLAddrSpaceMap[] = {
0, // ptr32_uptr
0, // ptr64
0, // hlsl_groupshared
+ // Wasm address space values for this target are dummy values,
+ // as it is only enabled for Wasm targets.
+ 20, // wasm_funcref
};
class LLVM_LIBRARY_VISIBILITY TCETargetInfo : public TargetInfo {
@@ -96,7 +99,7 @@ public:
return std::nullopt;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::VoidPtrBuiltinVaList;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp
index a223b65a8fa7..67cae8faf605 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp
@@ -26,14 +26,8 @@ static constexpr Builtin::Info BuiltinInfo[] = {
void VETargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- Builder.defineMacro("_LP64", "1");
- Builder.defineMacro("unix", "1");
- Builder.defineMacro("__unix__", "1");
- Builder.defineMacro("__linux__", "1");
Builder.defineMacro("__ve", "1");
Builder.defineMacro("__ve__", "1");
- Builder.defineMacro("__STDC_HOSTED__", "1");
- Builder.defineMacro("__STDC__", "1");
Builder.defineMacro("__NEC__", "1");
// FIXME: define __FAST_MATH__ 1 if -ffast-math is enabled
// FIXME: define __OPTIMIZE__ n if -On is enabled
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/VE.h b/contrib/llvm-project/clang/lib/Basic/Targets/VE.h
index d42d3e9199b1..ea9a092cad80 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/VE.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/VE.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -69,7 +69,7 @@ public:
}
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override {
static const char *const GCCRegNames[] = {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
index 3a5e0350192e..e9b77e03c031 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
@@ -151,6 +151,7 @@ bool WebAssemblyTargetInfo::initFeatureMap(
Features["atomics"] = true;
Features["mutable-globals"] = true;
Features["tail-call"] = true;
+ Features["reference-types"] = true;
setSIMDLevel(Features, SIMD128, true);
} else if (CPU == "generic") {
Features["sign-ext"] = true;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
index 1e73450fdd0c..9484898fe1c5 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
@@ -15,12 +15,36 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
+static const unsigned WebAssemblyAddrSpaceMap[] = {
+ 0, // Default
+ 0, // opencl_global
+ 0, // opencl_local
+ 0, // opencl_constant
+ 0, // opencl_private
+ 0, // opencl_generic
+ 0, // opencl_global_device
+ 0, // opencl_global_host
+ 0, // cuda_device
+ 0, // cuda_constant
+ 0, // cuda_shared
+ 0, // sycl_global
+ 0, // sycl_global_device
+ 0, // sycl_global_host
+ 0, // sycl_local
+ 0, // sycl_private
+ 0, // ptr32_sptr
+ 0, // ptr32_uptr
+ 0, // ptr64
+ 0, // hlsl_groupshared
+ 20, // wasm_funcref
+};
+
class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
enum SIMDEnum {
@@ -45,11 +69,11 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
public:
explicit WebAssemblyTargetInfo(const llvm::Triple &T, const TargetOptions &)
: TargetInfo(T) {
+ AddrSpaceMap = &WebAssemblyAddrSpaceMap;
NoAsmVariants = true;
SuitableAlign = 128;
LargeArrayMinWidth = 128;
LargeArrayAlign = 128;
- SimdDefaultAlign = 128;
SigAtomicType = SignedLong;
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
@@ -106,7 +130,7 @@ private:
return false;
}
- const char *getClobbers() const final { return ""; }
+ std::string_view getClobbers() const final { return ""; }
bool isCLZForZeroUndef() const final { return false; }
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
index 490e20ce4514..11cb2dda15cc 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
@@ -17,7 +17,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/X86TargetParser.h"
+#include "llvm/TargetParser/X86TargetParser.h"
#include <optional>
namespace clang {
@@ -261,8 +261,14 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAVX512VP2INTERSECT = true;
} else if (Feature == "+sha") {
HasSHA = true;
+ } else if (Feature == "+sha512") {
+ HasSHA512 = true;
} else if (Feature == "+shstk") {
HasSHSTK = true;
+ } else if (Feature == "+sm3") {
+ HasSM3 = true;
+ } else if (Feature == "+sm4") {
+ HasSM4 = true;
} else if (Feature == "+movbe") {
HasMOVBE = true;
} else if (Feature == "+sgx") {
@@ -335,6 +341,8 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAMXINT8 = true;
} else if (Feature == "+amx-tile") {
HasAMXTILE = true;
+ } else if (Feature == "+amx-complex") {
+ HasAMXCOMPLEX = true;
} else if (Feature == "+cmpccxadd") {
HasCMPCCXADD = true;
} else if (Feature == "+raoint") {
@@ -345,6 +353,8 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAVXNECONVERT= true;
} else if (Feature == "+avxvnni") {
HasAVXVNNI = true;
+ } else if (Feature == "+avxvnniint16") {
+ HasAVXVNNIINT16 = true;
} else if (Feature == "+avxvnniint8") {
HasAVXVNNIINT8 = true;
} else if (Feature == "+serialize") {
@@ -357,6 +367,8 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasCRC32 = true;
} else if (Feature == "+x87") {
HasX87 = true;
+ } else if (Feature == "+fullbf16") {
+ HasFullBFloat16 = true;
}
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
@@ -374,6 +386,15 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasFloat16 = SSELevel >= SSE2;
+ // X86 target has bfloat16 emulation support in the backend, where
+ // bfloat16 is treated as a 32-bit float, arithmetic operations are
+ // performed in 32-bit, and the result is converted back to bfloat16.
+ // Truncation and extension between bfloat16 and 32-bit float are supported
+ // by the compiler-rt library. However, native bfloat16 support is currently
+ // not available in the X86 target. Hence, HasFullBFloat16 will be false
+ // until native bfloat16 support is available. HasFullBFloat16 is used to
+ // determine whether to automatically use excess floating point precision
+ // for bfloat16 arithmetic operations in the front-end.
HasBFloat16 = SSELevel >= SSE2;
MMX3DNowEnum ThreeDNowLevel = llvm::StringSwitch<MMX3DNowEnum>(Feature)
@@ -400,9 +421,6 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
return false;
}
- SimdDefaultAlign =
- hasFeature("avx512f") ? 512 : hasFeature("avx") ? 256 : 128;
-
// FIXME: We should allow long double type on 32-bits to match with GCC.
// This requires backend to be able to lower f80 without x87 first.
if (!HasX87 && LongDoubleFormat == &llvm::APFloat::x87DoubleExtended())
@@ -530,6 +548,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_Sierraforest:
case CK_Grandridge:
case CK_Graniterapids:
+ case CK_GraniterapidsD:
case CK_Emeraldrapids:
// FIXME: Historically, we defined this legacy name, it would be nice to
// remove it at some point. We've never exposed fine-grained names for
@@ -739,6 +758,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVX512VP2INTERSECT__");
if (HasSHA)
Builder.defineMacro("__SHA__");
+ if (HasSHA512)
+ Builder.defineMacro("__SHA512__");
if (HasFXSR)
Builder.defineMacro("__FXSR__");
@@ -762,6 +783,10 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__SHSTK__");
if (HasSGX)
Builder.defineMacro("__SGX__");
+ if (HasSM3)
+ Builder.defineMacro("__SM3__");
+ if (HasSM4)
+ Builder.defineMacro("__SM4__");
if (HasPREFETCHI)
Builder.defineMacro("__PREFETCHI__");
if (HasPREFETCHWT1)
@@ -802,6 +827,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AMX_BF16__");
if (HasAMXFP16)
Builder.defineMacro("__AMX_FP16__");
+ if (HasAMXCOMPLEX)
+ Builder.defineMacro("__AMX_COMPLEX__");
if (HasCMPCCXADD)
Builder.defineMacro("__CMPCCXADD__");
if (HasRAOINT)
@@ -812,6 +839,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVXNECONVERT__");
if (HasAVXVNNI)
Builder.defineMacro("__AVXVNNI__");
+ if (HasAVXVNNIINT16)
+ Builder.defineMacro("__AVXVNNIINT16__");
if (HasAVXVNNIINT8)
Builder.defineMacro("__AVXVNNIINT8__");
if (HasSERIALIZE)
@@ -915,6 +944,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("adx", true)
.Case("aes", true)
.Case("amx-bf16", true)
+ .Case("amx-complex", true)
.Case("amx-fp16", true)
.Case("amx-int8", true)
.Case("amx-tile", true)
@@ -939,6 +969,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("avxifma", true)
.Case("avxneconvert", true)
.Case("avxvnni", true)
+ .Case("avxvnniint16", true)
.Case("avxvnniint8", true)
.Case("bmi", true)
.Case("bmi2", true)
@@ -986,7 +1017,10 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("serialize", true)
.Case("sgx", true)
.Case("sha", true)
+ .Case("sha512", true)
.Case("shstk", true)
+ .Case("sm3", true)
+ .Case("sm4", true)
.Case("sse", true)
.Case("sse2", true)
.Case("sse3", true)
@@ -1016,6 +1050,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("adx", HasADX)
.Case("aes", HasAES)
.Case("amx-bf16", HasAMXBF16)
+ .Case("amx-complex", HasAMXCOMPLEX)
.Case("amx-fp16", HasAMXFP16)
.Case("amx-int8", HasAMXINT8)
.Case("amx-tile", HasAMXTILE)
@@ -1040,6 +1075,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("avxifma", HasAVXIFMA)
.Case("avxneconvert", HasAVXNECONVERT)
.Case("avxvnni", HasAVXVNNI)
+ .Case("avxvnniint16", HasAVXVNNIINT16)
.Case("avxvnniint8", HasAVXVNNIINT8)
.Case("bmi", HasBMI)
.Case("bmi2", HasBMI2)
@@ -1090,7 +1126,10 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("serialize", HasSERIALIZE)
.Case("sgx", HasSGX)
.Case("sha", HasSHA)
+ .Case("sha512", HasSHA512)
.Case("shstk", HasSHSTK)
+ .Case("sm3", HasSM3)
+ .Case("sm4", HasSM4)
.Case("sse", SSELevel >= SSE1)
.Case("sse2", SSELevel >= SSE2)
.Case("sse3", SSELevel >= SSE3)
@@ -1114,6 +1153,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("xsavec", HasXSAVEC)
.Case("xsaves", HasXSAVES)
.Case("xsaveopt", HasXSAVEOPT)
+ .Case("fullbf16", HasFullBFloat16)
.Default(false);
}
@@ -1156,43 +1196,19 @@ unsigned X86TargetInfo::multiVersionSortPriority(StringRef Name) const {
}
bool X86TargetInfo::validateCPUSpecificCPUDispatch(StringRef Name) const {
- return llvm::StringSwitch<bool>(Name)
-#define CPU_SPECIFIC(NAME, TUNE_NAME, MANGLING, FEATURES) .Case(NAME, true)
-#define CPU_SPECIFIC_ALIAS(NEW_NAME, TUNE_NAME, NAME) .Case(NEW_NAME, true)
-#include "llvm/TargetParser/X86TargetParser.def"
- .Default(false);
-}
-
-static StringRef CPUSpecificCPUDispatchNameDealias(StringRef Name) {
- return llvm::StringSwitch<StringRef>(Name)
-#define CPU_SPECIFIC_ALIAS(NEW_NAME, TUNE_NAME, NAME) .Case(NEW_NAME, NAME)
-#include "llvm/TargetParser/X86TargetParser.def"
- .Default(Name);
+ return llvm::X86::validateCPUSpecificCPUDispatch(Name);
}
char X86TargetInfo::CPUSpecificManglingCharacter(StringRef Name) const {
- return llvm::StringSwitch<char>(CPUSpecificCPUDispatchNameDealias(Name))
-#define CPU_SPECIFIC(NAME, TUNE_NAME, MANGLING, FEATURES) .Case(NAME, MANGLING)
-#include "llvm/TargetParser/X86TargetParser.def"
- .Default(0);
+ return llvm::X86::getCPUDispatchMangling(Name);
}
void X86TargetInfo::getCPUSpecificCPUDispatchFeatures(
StringRef Name, llvm::SmallVectorImpl<StringRef> &Features) const {
- StringRef WholeList =
- llvm::StringSwitch<StringRef>(CPUSpecificCPUDispatchNameDealias(Name))
-#define CPU_SPECIFIC(NAME, TUNE_NAME, MANGLING, FEATURES) .Case(NAME, FEATURES)
-#include "llvm/TargetParser/X86TargetParser.def"
- .Default("");
- WholeList.split(Features, ',', /*MaxSplit=*/-1, /*KeepEmpty=*/false);
-}
-
-StringRef X86TargetInfo::getCPUSpecificTuneName(StringRef Name) const {
- return llvm::StringSwitch<StringRef>(Name)
-#define CPU_SPECIFIC(NAME, TUNE_NAME, MANGLING, FEATURES) .Case(NAME, TUNE_NAME)
-#define CPU_SPECIFIC_ALIAS(NEW_NAME, TUNE_NAME, NAME) .Case(NEW_NAME, TUNE_NAME)
-#include "llvm/TargetParser/X86TargetParser.def"
- .Default("");
+ SmallVector<StringRef, 32> TargetCPUFeatures;
+ llvm::X86::getFeaturesForCPU(Name, TargetCPUFeatures, true);
+ for (auto &F : TargetCPUFeatures)
+ Features.push_back(F);
}
// We can't use a generic validation scheme for the cpus accepted here
@@ -1210,7 +1226,7 @@ bool X86TargetInfo::validateCpuIs(StringRef FeatureStr) const {
.Default(false);
}
-static unsigned matchAsmCCConstraint(const char *&Name) {
+static unsigned matchAsmCCConstraint(const char *Name) {
auto RV = llvm::StringSwitch<unsigned>(Name)
.Case("@cca", 4)
.Case("@ccae", 5)
@@ -1350,7 +1366,7 @@ bool X86TargetInfo::validateAsmConstraint(
// | Sandy Bridge | 64 | https://en.wikipedia.org/wiki/Sandy_Bridge and https://www.7-cpu.com/cpu/SandyBridge.html |
// | Ivy Bridge | 64 | https://blog.stuffedcow.net/2013/01/ivb-cache-replacement/ and https://www.7-cpu.com/cpu/IvyBridge.html |
// | Haswell | 64 | https://www.7-cpu.com/cpu/Haswell.html |
-// | Boadwell | 64 | https://www.7-cpu.com/cpu/Broadwell.html |
+// | Broadwell | 64 | https://www.7-cpu.com/cpu/Broadwell.html |
// | Skylake (including skylake-avx512) | 64 | https://www.nas.nasa.gov/hecc/support/kb/skylake-processors_550.html "Cache Hierarchy" |
// | Cascade Lake | 64 | https://www.nas.nasa.gov/hecc/support/kb/cascade-lake-processors_579.html "Cache Hierarchy" |
// | Skylake | 64 | https://en.wikichip.org/wiki/intel/microarchitectures/kaby_lake "Memory Hierarchy" |
@@ -1424,6 +1440,7 @@ std::optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
case CK_Sierraforest:
case CK_Grandridge:
case CK_Graniterapids:
+ case CK_GraniterapidsD:
case CK_Emeraldrapids:
case CK_KNL:
case CK_KNM:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
index 5fcc97e95c2b..039c05893d26 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
@@ -17,9 +17,9 @@
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/X86TargetParser.h"
+#include "llvm/TargetParser/Triple.h"
+#include "llvm/TargetParser/X86TargetParser.h"
#include <optional>
namespace clang {
@@ -46,6 +46,9 @@ static const unsigned X86AddrSpaceMap[] = {
271, // ptr32_uptr
272, // ptr64
0, // hlsl_groupshared
+ // Wasm address space values for this target are dummy values,
+ // as it is only enabled for Wasm targets.
+ 20, // wasm_funcref
};
// X86 target abstract base class; x86-32 and x86-64 are very close, so
@@ -109,8 +112,11 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasAVX512IFMA = false;
bool HasAVX512VP2INTERSECT = false;
bool HasSHA = false;
+ bool HasSHA512 = false;
bool HasSHSTK = false;
+ bool HasSM3 = false;
bool HasSGX = false;
+ bool HasSM4 = false;
bool HasCX8 = false;
bool HasCX16 = false;
bool HasFXSR = false;
@@ -139,6 +145,7 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasPTWRITE = false;
bool HasINVPCID = false;
bool HasENQCMD = false;
+ bool HasAVXVNNIINT16 = false;
bool HasAMXFP16 = false;
bool HasCMPCCXADD = false;
bool HasRAOINT = false;
@@ -151,6 +158,7 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasAMXTILE = false;
bool HasAMXINT8 = false;
bool HasAMXBF16 = false;
+ bool HasAMXCOMPLEX = false;
bool HasSERIALIZE = false;
bool HasTSXLDTRK = false;
bool HasUINTR = false;
@@ -202,9 +210,9 @@ public:
return RegName.equals("esp") || RegName.equals("rsp");
}
- bool validateCpuSupports(StringRef Name) const override;
+ bool validateCpuSupports(StringRef FeatureStr) const override;
- bool validateCpuIs(StringRef Name) const override;
+ bool validateCpuIs(StringRef FeatureStr) const override;
bool validateCPUSpecificCPUDispatch(StringRef Name) const override;
@@ -214,8 +222,6 @@ public:
StringRef Name,
llvm::SmallVectorImpl<StringRef> &Features) const override;
- StringRef getCPUSpecificTuneName(StringRef Name) const override;
-
std::optional<unsigned> getCPUCacheLineSize() const override;
bool validateAsmConstraint(const char *&Name,
@@ -258,7 +264,7 @@ public:
StringRef Constraint, unsigned Size) const;
std::string convertConstraint(const char *&Constraint) const override;
- const char *getClobbers() const override {
+ std::string_view getClobbers() const override {
return "~{dirflag},~{fpsr},~{flags}";
}
@@ -413,7 +419,6 @@ public:
return getPointerWidthV(AddrSpace);
}
- const char *getBFloat16Mangling() const override { return "u6__bf16"; };
};
// X86-32 generic target
@@ -963,6 +968,28 @@ public:
LongDoubleFormat = &llvm::APFloat::IEEEquad();
}
};
+
+// x86_32 OHOS target
+class LLVM_LIBRARY_VISIBILITY OHOSX86_32TargetInfo
+ : public OHOSTargetInfo<X86_32TargetInfo> {
+public:
+ OHOSX86_32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : OHOSTargetInfo<X86_32TargetInfo>(Triple, Opts) {
+ SuitableAlign = 32;
+ LongDoubleWidth = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ }
+};
+
+// x86_64 OHOS target
+class LLVM_LIBRARY_VISIBILITY OHOSX86_64TargetInfo
+ : public OHOSTargetInfo<X86_64TargetInfo> {
+public:
+ OHOSX86_64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : OHOSTargetInfo<X86_64TargetInfo>(Triple, Opts) {
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ }
+};
} // namespace targets
} // namespace clang
#endif // LLVM_CLANG_LIB_BASIC_TARGETS_X86_H
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h b/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h
index 8eef9de0356e..a58d3e8acf47 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -49,7 +49,7 @@ public:
return TargetInfo::VoidPtrBuiltinVaList;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override {
static const char *const GCCRegNames[] = {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.cpp
new file mode 100644
index 000000000000..1b56cf7c596d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.cpp
@@ -0,0 +1,231 @@
+//===- ABIInfo.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfo.h"
+#include "ABIInfoImpl.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+// Pin the vtable to this file.
+ABIInfo::~ABIInfo() = default;
+
+CGCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); }
+
+ASTContext &ABIInfo::getContext() const { return CGT.getContext(); }
+
+llvm::LLVMContext &ABIInfo::getVMContext() const {
+ return CGT.getLLVMContext();
+}
+
+const llvm::DataLayout &ABIInfo::getDataLayout() const {
+ return CGT.getDataLayout();
+}
+
+const TargetInfo &ABIInfo::getTarget() const { return CGT.getTarget(); }
+
+const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
+ return CGT.getCodeGenOpts();
+}
+
+bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }
+
+bool ABIInfo::isOHOSFamily() const {
+ return getTarget().getTriple().isOHOSFamily();
+}
+
+Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return Address::invalid();
+}
+
+bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ return false;
+}
+
+bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const {
+ return false;
+}
+
+bool ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
+ // For compatibility with GCC, ignore empty bitfields in C++ mode.
+ return getContext().getLangOpts().CPlusPlus;
+}
+
+bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ uint64_t &Members) const {
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ uint64_t NElements = AT->getSize().getZExtValue();
+ if (NElements == 0)
+ return false;
+ if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
+ return false;
+ Members *= NElements;
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ Members = 0;
+
+ // If this is a C++ record, check the properties of the record such as
+ // bases and ABI specific restrictions
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD))
+ return false;
+
+ for (const auto &I : CXXRD->bases()) {
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), I.getType(), true))
+ continue;
+
+ uint64_t FldMembers;
+ if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
+ return false;
+
+ Members += FldMembers;
+ }
+ }
+
+ for (const auto *FD : RD->fields()) {
+ // Ignore (non-zero arrays of) empty records.
+ QualType FT = FD->getType();
+ while (const ConstantArrayType *AT =
+ getContext().getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() == 0)
+ return false;
+ FT = AT->getElementType();
+ }
+ if (isEmptyRecord(getContext(), FT, true))
+ continue;
+
+ if (isZeroLengthBitfieldPermittedInHomogeneousAggregate() &&
+ FD->isZeroLengthBitField(getContext()))
+ continue;
+
+ uint64_t FldMembers;
+ if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
+ return false;
+
+ Members = (RD->isUnion() ?
+ std::max(Members, FldMembers) : Members + FldMembers);
+ }
+
+ if (!Base)
+ return false;
+
+ // Ensure there is no padding.
+ if (getContext().getTypeSize(Base) * Members !=
+ getContext().getTypeSize(Ty))
+ return false;
+ } else {
+ Members = 1;
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ Members = 2;
+ Ty = CT->getElementType();
+ }
+
+ // Most ABIs only support float, double, and some vector type widths.
+ if (!isHomogeneousAggregateBaseType(Ty))
+ return false;
+
+ // The base type must be the same for all members. Types that
+ // agree in both total size and mode (float vs. vector) are
+ // treated as being equivalent here.
+ const Type *TyPtr = Ty.getTypePtr();
+ if (!Base) {
+ Base = TyPtr;
+ // If it's a non-power-of-2 vector, its size is already a power-of-2,
+ // so make sure to widen it explicitly.
+ if (const VectorType *VT = Base->getAs<VectorType>()) {
+ QualType EltTy = VT->getElementType();
+ unsigned NumElements =
+ getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
+ Base = getContext()
+ .getVectorType(EltTy, NumElements, VT->getVectorKind())
+ .getTypePtr();
+ }
+ }
+
+ if (Base->isVectorType() != TyPtr->isVectorType() ||
+ getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
+ return false;
+ }
+ return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
+}
+
+bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
+ if (getContext().isPromotableIntegerType(Ty))
+ return true;
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
+ return true;
+
+ return false;
+}
+
+ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
+ bool Realign,
+ llvm::Type *Padding) const {
+ return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
+ Realign, Padding);
+}
+
+ABIArgInfo ABIInfo::getNaturalAlignIndirectInReg(QualType Ty,
+ bool Realign) const {
+ return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
+ /*ByVal*/ false, Realign);
+}
+
+// Pin the vtable to this file.
+SwiftABIInfo::~SwiftABIInfo() = default;
+
+/// Does the given lowering require more than the given number of
+/// registers when expanded?
+///
+/// This is intended to be the basis of a reasonable basic implementation
+/// of should{Pass,Return}Indirectly.
+///
+/// For most targets, a limit of four total registers is reasonable; this
+/// limits the amount of code required in order to move around the value
+/// in case it wasn't produced immediately prior to the call by the caller
+/// (or wasn't produced in exactly the right registers) or isn't used
+/// immediately within the callee. But some targets may need to further
+/// limit the register count due to an inability to support that many
+/// return registers.
+bool SwiftABIInfo::occupiesMoreThan(ArrayRef<llvm::Type *> scalarTypes,
+ unsigned maxAllRegisters) const {
+ unsigned intCount = 0, fpCount = 0;
+ for (llvm::Type *type : scalarTypes) {
+ if (type->isPointerTy()) {
+ intCount++;
+ } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
+ auto ptrWidth = CGT.getTarget().getPointerWidth(LangAS::Default);
+ intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
+ } else {
+ assert(type->isVectorTy() || type->isFloatingPointTy());
+ fpCount++;
+ }
+ }
+
+ return (intCount + fpCount > maxAllRegisters);
+}
+
+bool SwiftABIInfo::shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
+ bool AsReturnValue) const {
+ return occupiesMoreThan(ComponentTys, /*total=*/4);
+}
+
+bool SwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
+ unsigned NumElts) const {
+ // The default implementation of this assumes that the target guarantees
+ // 128-bit SIMD support but nothing more.
+ return (VectorSize.getQuantity() > 8 && VectorSize.getQuantity() <= 16);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
index 755d2aaa7beb..b9a5ef6e4366 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
@@ -15,130 +15,134 @@
#include "llvm/IR/Type.h"
namespace llvm {
- class Value;
- class LLVMContext;
- class DataLayout;
- class Type;
-}
+class Value;
+class LLVMContext;
+class DataLayout;
+class Type;
+} // namespace llvm
namespace clang {
- class ASTContext;
- class CodeGenOptions;
- class TargetInfo;
+class ASTContext;
+class CodeGenOptions;
+class TargetInfo;
namespace CodeGen {
- class ABIArgInfo;
- class Address;
- class CGCXXABI;
- class CGFunctionInfo;
- class CodeGenFunction;
- class CodeGenTypes;
-
- // FIXME: All of this stuff should be part of the target interface
- // somehow. It is currently here because it is not clear how to factor
- // the targets to support this, since the Targets currently live in a
- // layer below types n'stuff.
-
-
- /// ABIInfo - Target specific hooks for defining how a type should be
- /// passed or returned from functions.
- class ABIInfo {
- protected:
- CodeGen::CodeGenTypes &CGT;
- llvm::CallingConv::ID RuntimeCC;
- public:
- ABIInfo(CodeGen::CodeGenTypes &cgt)
- : CGT(cgt), RuntimeCC(llvm::CallingConv::C) {}
-
- virtual ~ABIInfo();
-
- virtual bool allowBFloatArgsAndRet() const { return false; }
-
- CodeGen::CGCXXABI &getCXXABI() const;
- ASTContext &getContext() const;
- llvm::LLVMContext &getVMContext() const;
- const llvm::DataLayout &getDataLayout() const;
- const TargetInfo &getTarget() const;
- const CodeGenOptions &getCodeGenOpts() const;
-
- /// Return the calling convention to use for system runtime
- /// functions.
- llvm::CallingConv::ID getRuntimeCC() const {
- return RuntimeCC;
- }
-
- virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
-
- /// EmitVAArg - Emit the target dependent code to load a value of
- /// \arg Ty from the va_list pointed to by \arg VAListAddr.
-
- // FIXME: This is a gaping layering violation if we wanted to drop
- // the ABI information any lower than CodeGen. Of course, for
- // VAArg handling it has to be at this level; there is no way to
- // abstract this out.
- virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF,
+class ABIArgInfo;
+class Address;
+class CGCXXABI;
+class CGFunctionInfo;
+class CodeGenFunction;
+class CodeGenTypes;
+
+// FIXME: All of this stuff should be part of the target interface
+// somehow. It is currently here because it is not clear how to factor
+// the targets to support this, since the Targets currently live in a
+// layer below types n'stuff.
+
+/// ABIInfo - Target specific hooks for defining how a type should be
+/// passed or returned from functions.
+class ABIInfo {
+protected:
+ CodeGen::CodeGenTypes &CGT;
+ llvm::CallingConv::ID RuntimeCC;
+
+public:
+ ABIInfo(CodeGen::CodeGenTypes &cgt)
+ : CGT(cgt), RuntimeCC(llvm::CallingConv::C) {}
+
+ virtual ~ABIInfo();
+
+ virtual bool allowBFloatArgsAndRet() const { return false; }
+
+ CodeGen::CGCXXABI &getCXXABI() const;
+ ASTContext &getContext() const;
+ llvm::LLVMContext &getVMContext() const;
+ const llvm::DataLayout &getDataLayout() const;
+ const TargetInfo &getTarget() const;
+ const CodeGenOptions &getCodeGenOpts() const;
+
+ /// Return the calling convention to use for system runtime
+ /// functions.
+ llvm::CallingConv::ID getRuntimeCC() const { return RuntimeCC; }
+
+ virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
+
+ /// EmitVAArg - Emit the target dependent code to load a value of
+ /// \arg Ty from the va_list pointed to by \arg VAListAddr.
+
+ // FIXME: This is a gaping layering violation if we wanted to drop
+ // the ABI information any lower than CodeGen. Of course, for
+ // VAArg handling it has to be at this level; there is no way to
+ // abstract this out.
+ virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF,
+ CodeGen::Address VAListAddr,
+ QualType Ty) const = 0;
+
+ bool isAndroid() const;
+ bool isOHOSFamily() const;
+
+ /// Emit the target dependent code to load a value of
+ /// \arg Ty from the \c __builtin_ms_va_list pointed to by \arg VAListAddr.
+ virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF,
CodeGen::Address VAListAddr,
- QualType Ty) const = 0;
-
- bool isAndroid() const;
-
- /// Emit the target dependent code to load a value of
- /// \arg Ty from the \c __builtin_ms_va_list pointed to by \arg VAListAddr.
- virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF,
- CodeGen::Address VAListAddr,
- QualType Ty) const;
-
- virtual bool isHomogeneousAggregateBaseType(QualType Ty) const;
-
- virtual bool isHomogeneousAggregateSmallEnough(const Type *Base,
- uint64_t Members) const;
- virtual bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const;
-
- bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
- uint64_t &Members) const;
-
- // Implement the Type::IsPromotableIntegerType for ABI specific needs. The
- // only difference is that this considers bit-precise integer types as well.
- bool isPromotableIntegerTypeForABI(QualType Ty) const;
-
- /// A convenience method to return an indirect ABIArgInfo with an
- /// expected alignment equal to the ABI alignment of the given type.
- CodeGen::ABIArgInfo
- getNaturalAlignIndirect(QualType Ty, bool ByVal = true,
- bool Realign = false,
- llvm::Type *Padding = nullptr) const;
-
- CodeGen::ABIArgInfo
- getNaturalAlignIndirectInReg(QualType Ty, bool Realign = false) const;
- };
-
- /// Target specific hooks for defining how a type should be passed or returned
- /// from functions with one of the Swift calling conventions.
- class SwiftABIInfo {
- protected:
- CodeGenTypes &CGT;
- bool SwiftErrorInRegister;
-
- public:
- SwiftABIInfo(CodeGen::CodeGenTypes &CGT, bool SwiftErrorInRegister)
- : CGT(CGT), SwiftErrorInRegister(SwiftErrorInRegister) {}
-
- virtual ~SwiftABIInfo();
-
- /// Returns true if an aggregate which expands to the given type sequence
- /// should be passed / returned indirectly.
- virtual bool shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
- bool AsReturnValue) const;
-
- /// Returns true if the given vector type is legal from Swift's calling
- /// convention perspective.
- virtual bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
- unsigned NumElts) const;
-
- /// Returns true if swifterror is lowered to a register by the target ABI.
- bool isSwiftErrorInRegister() const { return SwiftErrorInRegister; };
- };
-} // end namespace CodeGen
-} // end namespace clang
+ QualType Ty) const;
+
+ virtual bool isHomogeneousAggregateBaseType(QualType Ty) const;
+
+ virtual bool isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const;
+ virtual bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const;
+
+ /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
+ /// aggregate. Base is set to the base element type, and Members is set
+ /// to the number of base elements.
+ bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ uint64_t &Members) const;
+
+ // Implement the Type::IsPromotableIntegerType for ABI specific needs. The
+ // only difference is that this considers bit-precise integer types as well.
+ bool isPromotableIntegerTypeForABI(QualType Ty) const;
+
+ /// A convenience method to return an indirect ABIArgInfo with an
+ /// expected alignment equal to the ABI alignment of the given type.
+ CodeGen::ABIArgInfo
+ getNaturalAlignIndirect(QualType Ty, bool ByVal = true, bool Realign = false,
+ llvm::Type *Padding = nullptr) const;
+
+ CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty,
+ bool Realign = false) const;
+};
+
+/// Target specific hooks for defining how a type should be passed or returned
+/// from functions with one of the Swift calling conventions.
+class SwiftABIInfo {
+protected:
+ CodeGenTypes &CGT;
+ bool SwiftErrorInRegister;
+
+ bool occupiesMoreThan(ArrayRef<llvm::Type *> scalarTypes,
+ unsigned maxAllRegisters) const;
+
+public:
+ SwiftABIInfo(CodeGen::CodeGenTypes &CGT, bool SwiftErrorInRegister)
+ : CGT(CGT), SwiftErrorInRegister(SwiftErrorInRegister) {}
+
+ virtual ~SwiftABIInfo();
+
+ /// Returns true if an aggregate which expands to the given type sequence
+ /// should be passed / returned indirectly.
+ virtual bool shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
+ bool AsReturnValue) const;
+
+ /// Returns true if the given vector type is legal from Swift's calling
+ /// convention perspective.
+ virtual bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
+ unsigned NumElts) const;
+
+ /// Returns true if swifterror is lowered to a register by the target ABI.
+ bool isSwiftErrorInRegister() const { return SwiftErrorInRegister; };
+};
+} // end namespace CodeGen
+} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.cpp b/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.cpp
new file mode 100644
index 000000000000..7c30cecfdb9b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.cpp
@@ -0,0 +1,452 @@
+//===- ABIInfoImpl.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+// Pin the vtable to this file.
+DefaultABIInfo::~DefaultABIInfo() = default;
+
+ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // passed by value.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ return getNaturalAlignIndirect(Ty);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ ASTContext &Context = getContext();
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() >
+ Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
+ ? Context.Int128Ty
+ : Context.LongLongTy))
+ return getNaturalAlignIndirect(Ty);
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
+ if (EIT->getNumBits() >
+ getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
+ ? getContext().Int128Ty
+ : getContext().LongLongTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+void DefaultABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+}
+
+Address DefaultABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
+}
+
+ABIArgInfo CodeGen::coerceToIntArray(QualType Ty, ASTContext &Context,
+ llvm::LLVMContext &LLVMContext) {
+ // Alignment and Size are measured in bits.
+ const uint64_t Size = Context.getTypeSize(Ty);
+ const uint64_t Alignment = Context.getTypeAlign(Ty);
+ llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
+ const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
+}
+
+void CodeGen::AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
+ llvm::Value *Array, llvm::Value *Value,
+ unsigned FirstIndex, unsigned LastIndex) {
+ // Alternatively, we could emit this as a loop in the source.
+ for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
+ llvm::Value *Cell =
+ Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
+ Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
+ }
+}
+
+bool CodeGen::isAggregateTypeForABI(QualType T) {
+ return !CodeGenFunction::hasScalarEvaluationKind(T) ||
+ T->isMemberFunctionPointerType();
+}
+
+llvm::Type *CodeGen::getVAListElementType(CodeGenFunction &CGF) {
+ return CGF.ConvertTypeForMem(
+ CGF.getContext().getBuiltinVaListType()->getPointeeType());
+}
+
+CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(const RecordType *RT,
+ CGCXXABI &CXXABI) {
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD) {
+ if (!RT->getDecl()->canPassInRegisters())
+ return CGCXXABI::RAA_Indirect;
+ return CGCXXABI::RAA_Default;
+ }
+ return CXXABI.getRecordArgABI(RD);
+}
+
+CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(QualType T, CGCXXABI &CXXABI) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return CGCXXABI::RAA_Default;
+ return getRecordArgABI(RT, CXXABI);
+}
+
+bool CodeGen::classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
+ const ABIInfo &Info) {
+ QualType Ty = FI.getReturnType();
+
+ if (const auto *RT = Ty->getAs<RecordType>())
+ if (!isa<CXXRecordDecl>(RT->getDecl()) &&
+ !RT->getDecl()->canPassInRegisters()) {
+ FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
+ return true;
+ }
+
+ return CXXABI.classifyReturnType(FI);
+}
+
+QualType CodeGen::useFirstFieldIfTransparentUnion(QualType Ty) {
+ if (const RecordType *UT = Ty->getAsUnionType()) {
+ const RecordDecl *UD = UT->getDecl();
+ if (UD->hasAttr<TransparentUnionAttr>()) {
+ assert(!UD->field_empty() && "sema created an empty transparent union");
+ return UD->field_begin()->getType();
+ }
+ }
+ return Ty;
+}
+
+llvm::Value *CodeGen::emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ CharUnits Align) {
+ // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
+ llvm::Value *RoundUp = CGF.Builder.CreateConstInBoundsGEP1_32(
+ CGF.Builder.getInt8Ty(), Ptr, Align.getQuantity() - 1);
+ return CGF.Builder.CreateIntrinsic(
+ llvm::Intrinsic::ptrmask, {CGF.AllocaInt8PtrTy, CGF.IntPtrTy},
+ {RoundUp, llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity())},
+ nullptr, Ptr->getName() + ".aligned");
+}
+
+Address
+CodeGen::emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ llvm::Type *DirectTy, CharUnits DirectSize,
+ CharUnits DirectAlign, CharUnits SlotSize,
+ bool AllowHigherAlign, bool ForceRightAdjust) {
+ // Cast the element type to i8* if necessary. Some platforms define
+ // va_list as a struct containing an i8* instead of just an i8*.
+ if (VAListAddr.getElementType() != CGF.Int8PtrTy)
+ VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
+
+ llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");
+
+ // If the CC aligns values higher than the slot size, do so if needed.
+ Address Addr = Address::invalid();
+ if (AllowHigherAlign && DirectAlign > SlotSize) {
+ Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
+ CGF.Int8Ty, DirectAlign);
+ } else {
+ Addr = Address(Ptr, CGF.Int8Ty, SlotSize);
+ }
+
+ // Advance the pointer past the argument, then store that back.
+ CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
+ Address NextPtr =
+ CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
+ CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
+
+ // If the argument is smaller than a slot, and this is a big-endian
+ // target, the argument will be right-adjusted in its slot.
+ if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
+ (!DirectTy->isStructTy() || ForceRightAdjust)) {
+ Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
+ }
+
+ return Addr.withElementType(DirectTy);
+}
+
+Address CodeGen::emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType ValueTy, bool IsIndirect,
+ TypeInfoChars ValueInfo,
+ CharUnits SlotSizeAndAlign,
+ bool AllowHigherAlign,
+ bool ForceRightAdjust) {
+ // The size and alignment of the value that was passed directly.
+ CharUnits DirectSize, DirectAlign;
+ if (IsIndirect) {
+ DirectSize = CGF.getPointerSize();
+ DirectAlign = CGF.getPointerAlign();
+ } else {
+ DirectSize = ValueInfo.Width;
+ DirectAlign = ValueInfo.Align;
+ }
+
+ // Cast the address we've calculated to the right type.
+ llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy), *ElementTy = DirectTy;
+ if (IsIndirect) {
+ unsigned AllocaAS = CGF.CGM.getDataLayout().getAllocaAddrSpace();
+ DirectTy = llvm::PointerType::get(CGF.getLLVMContext(), AllocaAS);
+ }
+
+ Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, DirectSize,
+ DirectAlign, SlotSizeAndAlign,
+ AllowHigherAlign, ForceRightAdjust);
+
+ if (IsIndirect) {
+ Addr = Address(CGF.Builder.CreateLoad(Addr), ElementTy, ValueInfo.Align);
+ }
+
+ return Addr;
+}
+
+Address CodeGen::emitMergePHI(CodeGenFunction &CGF, Address Addr1,
+ llvm::BasicBlock *Block1, Address Addr2,
+ llvm::BasicBlock *Block2,
+ const llvm::Twine &Name) {
+ assert(Addr1.getType() == Addr2.getType());
+ llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
+ PHI->addIncoming(Addr1.getPointer(), Block1);
+ PHI->addIncoming(Addr2.getPointer(), Block2);
+ CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
+ return Address(PHI, Addr1.getElementType(), Align);
+}
+
+bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
+ bool AllowArrays) {
+ if (FD->isUnnamedBitfield())
+ return true;
+
+ QualType FT = FD->getType();
+
+ // Constant arrays of empty records count as empty, strip them off.
+ // Constant arrays of zero length always count as empty.
+ bool WasArray = false;
+ if (AllowArrays)
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize() == 0)
+ return true;
+ FT = AT->getElementType();
+ // The [[no_unique_address]] special case below does not apply to
+ // arrays of C++ empty records, so we need to remember this fact.
+ WasArray = true;
+ }
+
+ const RecordType *RT = FT->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // C++ record fields are never empty, at least in the Itanium ABI.
+ //
+ // FIXME: We should use a predicate for whether this behavior is true in the
+ // current ABI.
+ //
+ // The exception to the above rule are fields marked with the
+ // [[no_unique_address]] attribute (since C++20). Those do count as empty
+ // according to the Itanium ABI. The exception applies only to records,
+ // not arrays of records, so we must also check whether we stripped off an
+ // array type above.
+ if (isa<CXXRecordDecl>(RT->getDecl()) &&
+ (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
+ return false;
+
+ return isEmptyRecord(Context, FT, AllowArrays);
+}
+
+bool CodeGen::isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (const auto &I : CXXRD->bases())
+ if (!isEmptyRecord(Context, I.getType(), true))
+ return false;
+
+ for (const auto *I : RD->fields())
+ if (!isEmptyField(Context, I, AllowArrays))
+ return false;
+ return true;
+}
+
+const Type *CodeGen::isSingleElementStruct(QualType T, ASTContext &Context) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return nullptr;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return nullptr;
+
+ const Type *Found = nullptr;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &I : CXXRD->bases()) {
+ // Ignore empty records.
+ if (isEmptyRecord(Context, I.getType(), true))
+ continue;
+
+ // If we already found an element then this isn't a single-element struct.
+ if (Found)
+ return nullptr;
+
+ // If this is non-empty and not a single element struct, the composite
+ // cannot be a single element struct.
+ Found = isSingleElementStruct(I.getType(), Context);
+ if (!Found)
+ return nullptr;
+ }
+ }
+
+ // Check for single element.
+ for (const auto *FD : RD->fields()) {
+ QualType FT = FD->getType();
+
+ // Ignore empty fields.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // If we already found an element then this isn't a single-element
+ // struct.
+ if (Found)
+ return nullptr;
+
+ // Treat single element arrays as the element.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() != 1)
+ break;
+ FT = AT->getElementType();
+ }
+
+ if (!isAggregateTypeForABI(FT)) {
+ Found = FT.getTypePtr();
+ } else {
+ Found = isSingleElementStruct(FT, Context);
+ if (!Found)
+ return nullptr;
+ }
+ }
+
+ // We don't consider a struct a single-element struct if it has
+ // padding beyond the element type.
+ if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
+ return nullptr;
+
+ return Found;
+}
+
+Address CodeGen::EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, const ABIArgInfo &AI) {
+ // This default implementation defers to the llvm backend's va_arg
+ // instruction. It can handle only passing arguments directly
+ // (typically only handled in the backend for primitive types), or
+ // aggregates passed indirectly by pointer (NOTE: if the "byval"
+ // flag has ABI impact in the callee, this implementation cannot
+ // work.)
+
+ // Only a few cases are covered here at the moment -- those needed
+ // by the default abi.
+ llvm::Value *Val;
+
+ if (AI.isIndirect()) {
+ assert(!AI.getPaddingType() &&
+ "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
+ assert(
+ !AI.getIndirectRealign() &&
+ "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
+
+ auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
+ CharUnits TyAlignForABI = TyInfo.Align;
+
+ llvm::Type *ElementTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *BaseTy = llvm::PointerType::getUnqual(ElementTy);
+ llvm::Value *Addr =
+ CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
+ return Address(Addr, ElementTy, TyAlignForABI);
+ } else {
+ assert((AI.isDirect() || AI.isExtend()) &&
+ "Unexpected ArgInfo Kind in generic VAArg emitter!");
+
+ assert(!AI.getInReg() &&
+ "Unexpected InReg seen in arginfo in generic VAArg emitter!");
+ assert(!AI.getPaddingType() &&
+ "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
+ assert(!AI.getDirectOffset() &&
+ "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
+ assert(!AI.getCoerceToType() &&
+ "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
+
+ Address Temp = CGF.CreateMemTemp(Ty, "varet");
+ Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(),
+ CGF.ConvertTypeForMem(Ty));
+ CGF.Builder.CreateStore(Val, Temp);
+ return Temp;
+ }
+}
+
+bool CodeGen::isSIMDVectorType(ASTContext &Context, QualType Ty) {
+ return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
+}
+
+bool CodeGen::isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (const auto &I : CXXRD->bases())
+ if (!isRecordWithSIMDVectorType(Context, I.getType()))
+ return false;
+
+ for (const auto *i : RD->fields()) {
+ QualType FT = i->getType();
+
+ if (isSIMDVectorType(Context, FT))
+ return true;
+
+ if (isRecordWithSIMDVectorType(Context, FT))
+ return true;
+ }
+
+ return false;
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.h b/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.h
new file mode 100644
index 000000000000..5f0cc289af68
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.h
@@ -0,0 +1,152 @@
+//===- ABIInfoImpl.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_ABIINFOIMPL_H
+#define LLVM_CLANG_LIB_CODEGEN_ABIINFOIMPL_H
+
+#include "ABIInfo.h"
+#include "CGCXXABI.h"
+
+namespace clang::CodeGen {
+
+/// DefaultABIInfo - The default implementation for ABI specific
+/// details. This implementation provides information which results in
+/// self-consistent and sensible LLVM IR generation, but does not
+/// conform to any particular ABI.
+class DefaultABIInfo : public ABIInfo {
+public:
+ DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ virtual ~DefaultABIInfo();
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+// Helper for coercing an aggregate argument or return value into an integer
+// array of the same size (including padding) and alignment. This alternate
+// coercion happens only for the RenderScript ABI and can be removed after
+// runtimes that rely on it are no longer supported.
+//
+// RenderScript assumes that the size of the argument / return value in the IR
+// is the same as the size of the corresponding qualified type. This helper
+// coerces the aggregate type into an array of the same size (including
+// padding). This coercion is used in lieu of expansion of struct members or
+// other canonical coercions that return a coerced-type of larger size.
+//
+// Ty - The argument / return value type
+// Context - The associated ASTContext
+// LLVMContext - The associated LLVMContext
+ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context,
+ llvm::LLVMContext &LLVMContext);
+
+void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array,
+ llvm::Value *Value, unsigned FirstIndex,
+ unsigned LastIndex);
+
+bool isAggregateTypeForABI(QualType T);
+
+llvm::Type *getVAListElementType(CodeGenFunction &CGF);
+
+CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI);
+
+CGCXXABI::RecordArgABI getRecordArgABI(QualType T, CGCXXABI &CXXABI);
+
+bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
+ const ABIInfo &Info);
+
+/// Pass transparent unions as if they were the type of the first element. Sema
+/// should ensure that all elements of the union have the same "machine type".
+QualType useFirstFieldIfTransparentUnion(QualType Ty);
+
+// Dynamically round a pointer up to a multiple of the given alignment.
+llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
+ llvm::Value *Ptr, CharUnits Align);
+
+/// Emit va_arg for a platform using the common void* representation,
+/// where arguments are simply emitted in an array of slots on the stack.
+///
+/// This version implements the core direct-value passing rules.
+///
+/// \param SlotSize - The size and alignment of a stack slot.
+/// Each argument will be allocated to a multiple of this number of
+/// slots, and all the slots will be aligned to this value.
+/// \param AllowHigherAlign - The slot alignment is not a cap;
+/// an argument type with an alignment greater than the slot size
+/// will be emitted on a higher-alignment address, potentially
+/// leaving one or more empty slots behind as padding. If this
+/// is false, the returned address might be less-aligned than
+/// DirectAlign.
+/// \param ForceRightAdjust - Default is false. On big-endian platform and
+/// if the argument is smaller than a slot, set this flag will force
+/// right-adjust the argument in its slot irrespective of the type.
+Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ llvm::Type *DirectTy, CharUnits DirectSize,
+ CharUnits DirectAlign, CharUnits SlotSize,
+ bool AllowHigherAlign,
+ bool ForceRightAdjust = false);
+
+/// Emit va_arg for a platform using the common void* representation,
+/// where arguments are simply emitted in an array of slots on the stack.
+///
+/// \param IsIndirect - Values of this type are passed indirectly.
+/// \param ValueInfo - The size and alignment of this type, generally
+/// computed with getContext().getTypeInfoInChars(ValueTy).
+/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
+/// Each argument will be allocated to a multiple of this number of
+/// slots, and all the slots will be aligned to this value.
+/// \param AllowHigherAlign - The slot alignment is not a cap;
+/// an argument type with an alignment greater than the slot size
+/// will be emitted on a higher-alignment address, potentially
+/// leaving one or more empty slots behind as padding.
+/// \param ForceRightAdjust - Default is false. On big-endian platform and
+/// if the argument is smaller than a slot, set this flag will force
+/// right-adjust the argument in its slot irrespective of the type.
+Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType ValueTy, bool IsIndirect,
+ TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign,
+ bool AllowHigherAlign, bool ForceRightAdjust = false);
+
+Address emitMergePHI(CodeGenFunction &CGF, Address Addr1,
+ llvm::BasicBlock *Block1, Address Addr2,
+ llvm::BasicBlock *Block2, const llvm::Twine &Name = "");
+
+/// isEmptyField - Return true iff a the field is "empty", that is it
+/// is an unnamed bit-field or an (array of) empty record(s).
+bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays);
+
+/// isEmptyRecord - Return true iff a structure contains only empty
+/// fields. Note that a structure with a flexible array member is not
+/// considered empty.
+bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
+
+/// isSingleElementStruct - Determine if a structure is a "single
+/// element struct", i.e. it has exactly one non-empty field or
+/// exactly one field which is itself a single element
+/// struct. Structures with flexible array members are never
+/// considered single element structs.
+///
+/// \return The field declaration for the single non-empty field, if
+/// it exists.
+const Type *isSingleElementStruct(QualType T, ASTContext &Context);
+
+Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ const ABIArgInfo &AI);
+
+bool isSIMDVectorType(ASTContext &Context, QualType Ty);
+
+bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty);
+
+} // namespace clang::CodeGen
+
+#endif // LLVM_CLANG_LIB_CODEGEN_ABIINFOIMPL_H
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Address.h b/contrib/llvm-project/clang/lib/CodeGen/Address.h
index bddeac1d6dcb..cf48df8f5e73 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Address.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/Address.h
@@ -22,77 +22,35 @@
namespace clang {
namespace CodeGen {
-// We try to save some space by using 6 bits over two PointerIntPairs to store
-// the alignment. However, some arches don't support 3 bits in a PointerIntPair
-// so we fallback to storing the alignment separately.
-template <typename T, bool = alignof(llvm::Value *) >= 8> class AddressImpl {};
-
-template <typename T> class AddressImpl<T, false> {
- llvm::Value *Pointer;
- llvm::Type *ElementType;
- CharUnits Alignment;
-
-public:
- AddressImpl(llvm::Value *Pointer, llvm::Type *ElementType,
- CharUnits Alignment)
- : Pointer(Pointer), ElementType(ElementType), Alignment(Alignment) {}
- llvm::Value *getPointer() const { return Pointer; }
- llvm::Type *getElementType() const { return ElementType; }
- CharUnits getAlignment() const { return Alignment; }
-};
-
-template <typename T> class AddressImpl<T, true> {
- // Int portion stores upper 3 bits of the log of the alignment.
- llvm::PointerIntPair<llvm::Value *, 3, unsigned> Pointer;
- // Int portion stores lower 3 bits of the log of the alignment.
- llvm::PointerIntPair<llvm::Type *, 3, unsigned> ElementType;
-
-public:
- AddressImpl(llvm::Value *Pointer, llvm::Type *ElementType,
- CharUnits Alignment)
- : Pointer(Pointer), ElementType(ElementType) {
- if (Alignment.isZero())
- return;
- // Currently the max supported alignment is much less than 1 << 63 and is
- // guaranteed to be a power of 2, so we can store the log of the alignment
- // into 6 bits.
- assert(Alignment.isPowerOfTwo() && "Alignment cannot be zero");
- auto AlignLog = llvm::Log2_64(Alignment.getQuantity());
- assert(AlignLog < (1 << 6) && "cannot fit alignment into 6 bits");
- this->Pointer.setInt(AlignLog >> 3);
- this->ElementType.setInt(AlignLog & 7);
- }
- llvm::Value *getPointer() const { return Pointer.getPointer(); }
- llvm::Type *getElementType() const { return ElementType.getPointer(); }
- CharUnits getAlignment() const {
- unsigned AlignLog = (Pointer.getInt() << 3) | ElementType.getInt();
- return CharUnits::fromQuantity(CharUnits::QuantityType(1) << AlignLog);
- }
-};
+// Indicates whether a pointer is known not to be null.
+enum KnownNonNull_t { NotKnownNonNull, KnownNonNull };
/// An aligned address.
class Address {
- AddressImpl<void> A;
+ llvm::PointerIntPair<llvm::Value *, 1, bool> PointerAndKnownNonNull;
+ llvm::Type *ElementType;
+ CharUnits Alignment;
protected:
- Address(std::nullptr_t) : A(nullptr, nullptr, CharUnits::Zero()) {}
+ Address(std::nullptr_t) : ElementType(nullptr) {}
public:
- Address(llvm::Value *Pointer, llvm::Type *ElementType, CharUnits Alignment)
- : A(Pointer, ElementType, Alignment) {
+ Address(llvm::Value *Pointer, llvm::Type *ElementType, CharUnits Alignment,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull)
+ : PointerAndKnownNonNull(Pointer, IsKnownNonNull),
+ ElementType(ElementType), Alignment(Alignment) {
assert(Pointer != nullptr && "Pointer cannot be null");
assert(ElementType != nullptr && "Element type cannot be null");
- assert(llvm::cast<llvm::PointerType>(Pointer->getType())
- ->isOpaqueOrPointeeTypeMatches(ElementType) &&
- "Incorrect pointer element type");
}
static Address invalid() { return Address(nullptr); }
- bool isValid() const { return A.getPointer() != nullptr; }
+ bool isValid() const {
+ return PointerAndKnownNonNull.getPointer() != nullptr;
+ }
llvm::Value *getPointer() const {
assert(isValid());
- return A.getPointer();
+ return PointerAndKnownNonNull.getPointer();
}
/// Return the type of the pointer value.
@@ -103,7 +61,7 @@ public:
/// Return the type of the values stored in this address.
llvm::Type *getElementType() const {
assert(isValid());
- return A.getElementType();
+ return ElementType;
}
/// Return the address space that this address resides in.
@@ -119,19 +77,41 @@ public:
/// Return the alignment of this pointer.
CharUnits getAlignment() const {
assert(isValid());
- return A.getAlignment();
+ return Alignment;
}
/// Return address with different pointer, but same element type and
/// alignment.
- Address withPointer(llvm::Value *NewPointer) const {
- return Address(NewPointer, getElementType(), getAlignment());
+ Address withPointer(llvm::Value *NewPointer,
+ KnownNonNull_t IsKnownNonNull) const {
+ return Address(NewPointer, getElementType(), getAlignment(),
+ IsKnownNonNull);
}
/// Return address with different alignment, but same pointer and element
/// type.
Address withAlignment(CharUnits NewAlignment) const {
- return Address(getPointer(), getElementType(), NewAlignment);
+ return Address(getPointer(), getElementType(), NewAlignment,
+ isKnownNonNull());
+ }
+
+ /// Return address with different element type, but same pointer and
+ /// alignment.
+ Address withElementType(llvm::Type *ElemTy) const {
+ return Address(getPointer(), ElemTy, getAlignment(), isKnownNonNull());
+ }
+
+ /// Whether the pointer is known not to be null.
+ KnownNonNull_t isKnownNonNull() const {
+ assert(isValid());
+ return (KnownNonNull_t)PointerAndKnownNonNull.getInt();
+ }
+
+ /// Set the non-null bit.
+ Address setKnownNonNull() {
+ assert(isValid());
+ PointerAndKnownNonNull.setInt(true);
+ return *this;
}
};
@@ -153,10 +133,8 @@ public:
return llvm::cast<llvm::Constant>(Address::getPointer());
}
- ConstantAddress getElementBitCast(llvm::Type *ElemTy) const {
- llvm::Constant *BitCast = llvm::ConstantExpr::getBitCast(
- getPointer(), ElemTy->getPointerTo(getAddressSpace()));
- return ConstantAddress(BitCast, ElemTy, getAlignment());
+ ConstantAddress withElementType(llvm::Type *ElemTy) const {
+ return ConstantAddress(getPointer(), ElemTy, getAlignment());
}
static bool isaImpl(Address addr) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
index 10d6bff25e6d..cda03d69522d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
@@ -17,10 +17,8 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
-#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Bitcode/BitcodeReader.h"
@@ -39,7 +37,6 @@
#include "llvm/IRPrinter/IRPrintingPasses.h"
#include "llvm/LTO/LTOBackend.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/SubtargetFeature.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Object/OffloadBinary.h"
#include "llvm/Passes/PassBuilder.h"
@@ -52,15 +49,13 @@
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/Transforms/Coroutines/CoroCleanup.h"
-#include "llvm/Transforms/Coroutines/CoroEarly.h"
-#include "llvm/Transforms/Coroutines/CoroElide.h"
-#include "llvm/Transforms/Coroutines/CoroSplit.h"
-#include "llvm/Transforms/IPO.h"
-#include "llvm/Transforms/IPO/AlwaysInliner.h"
+#include "llvm/TargetParser/SubtargetFeature.h"
+#include "llvm/TargetParser/Triple.h"
+#include "llvm/Transforms/IPO/EmbedBitcodePass.h"
#include "llvm/Transforms/IPO/LowerTypeTests.h"
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
@@ -79,18 +74,12 @@
#include "llvm/Transforms/Instrumentation/SanitizerCoverage.h"
#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/Transforms/ObjCARC.h"
-#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Scalar/JumpThreading.h"
-#include "llvm/Transforms/Scalar/LowerMatrixIntrinsics.h"
-#include "llvm/Transforms/Utils.h"
-#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
#include "llvm/Transforms/Utils/Debugify.h"
#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
-#include "llvm/Transforms/Utils/NameAnonGlobals.h"
-#include "llvm/Transforms/Utils/SymbolRewriter.h"
#include <memory>
#include <optional>
using namespace clang;
@@ -123,6 +112,7 @@ class EmitAssemblyHelper {
const clang::TargetOptions &TargetOpts;
const LangOptions &LangOpts;
Module *TheModule;
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS;
Timer CodeGenerationTime;
@@ -187,9 +177,10 @@ public:
const HeaderSearchOptions &HeaderSearchOpts,
const CodeGenOptions &CGOpts,
const clang::TargetOptions &TOpts,
- const LangOptions &LOpts, Module *M)
+ const LangOptions &LOpts, Module *M,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS)
: Diags(_Diags), HSOpts(HeaderSearchOpts), CodeGenOpts(CGOpts),
- TargetOpts(TOpts), LangOpts(LOpts), TheModule(M),
+ TargetOpts(TOpts), LangOpts(LOpts), TheModule(M), VFS(std::move(VFS)),
CodeGenerationTime("codegen", "Code Generation Time"),
TargetTriple(TheModule->getTargetTriple()) {}
@@ -294,6 +285,10 @@ static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
TLII->addVectorizableFunctionsFromVecLib(
TargetLibraryInfoImpl::DarwinLibSystemM, TargetTriple);
break;
+ case CodeGenOptions::ArmPL:
+ TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::ArmPL,
+ TargetTriple);
+ break;
default:
break;
}
@@ -377,8 +372,6 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.BinutilsVersion =
llvm::TargetMachine::parseBinutilsVersion(CodeGenOpts.BinutilsVersion);
Options.UseInitArray = CodeGenOpts.UseInitArray;
- Options.LowerGlobalDtorsViaCxaAtExit =
- CodeGenOpts.RegisterGlobalDtorsWithAtExit;
Options.DisableIntegratedAS = CodeGenOpts.DisableIntegratedAS;
Options.CompressDebugSections = CodeGenOpts.getCompressDebugSections();
Options.RelaxELFRelocations = CodeGenOpts.RelaxELFRelocations;
@@ -434,20 +427,20 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
CodeGenOpts.UniqueBasicBlockSectionNames;
Options.TLSSize = CodeGenOpts.TLSSize;
Options.EmulatedTLS = CodeGenOpts.EmulatedTLS;
- Options.ExplicitEmulatedTLS = true;
Options.DebuggerTuning = CodeGenOpts.getDebuggerTuning();
Options.EmitStackSizeSection = CodeGenOpts.StackSizeSection;
Options.StackUsageOutput = CodeGenOpts.StackUsageOutput;
Options.EmitAddrsig = CodeGenOpts.Addrsig;
Options.ForceDwarfFrameSection = CodeGenOpts.ForceDwarfFrameSection;
Options.EmitCallSiteInfo = CodeGenOpts.EmitCallSiteInfo;
- Options.EnableAIXExtendedAltivecABI = CodeGenOpts.EnableAIXExtendedAltivecABI;
- Options.XRayOmitFunctionIndex = CodeGenOpts.XRayOmitFunctionIndex;
+ Options.EnableAIXExtendedAltivecABI = LangOpts.EnableAIXExtendedAltivecABI;
+ Options.XRayFunctionIndex = CodeGenOpts.XRayFunctionIndex;
Options.LoopAlignment = CodeGenOpts.LoopAlignment;
Options.DebugStrictDwarf = CodeGenOpts.DebugStrictDwarf;
Options.ObjectFilenameForDebug = CodeGenOpts.ObjectFilenameForDebug;
Options.Hotpatch = CodeGenOpts.HotPatch;
Options.JMCInstrument = CodeGenOpts.JMCInstrument;
+ Options.XCOFFReadOnlyPointers = CodeGenOpts.XCOFFReadOnlyPointers;
switch (CodeGenOpts.getSwiftAsyncFramePointer()) {
case CodeGenOptions::SwiftAsyncFramePointerKind::Auto:
@@ -466,6 +459,8 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile;
Options.MCOptions.EmitDwarfUnwind = CodeGenOpts.getEmitDwarfUnwind();
+ Options.MCOptions.EmitCompactUnwindNonCanonical =
+ CodeGenOpts.EmitCompactUnwindNonCanonical;
Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels;
Options.MCOptions.MCUseDwarfDirectory =
@@ -498,13 +493,14 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
static std::optional<GCOVOptions>
getGCOVOptions(const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts) {
- if (!CodeGenOpts.EmitGcovArcs && !CodeGenOpts.EmitGcovNotes)
+ if (CodeGenOpts.CoverageNotesFile.empty() &&
+ CodeGenOpts.CoverageDataFile.empty())
return std::nullopt;
// Not using 'GCOVOptions::getDefault' allows us to avoid exiting if
// LLVM's -default-gcov-version flag is set to something invalid.
GCOVOptions Options;
- Options.EmitNotes = CodeGenOpts.EmitGcovNotes;
- Options.EmitData = CodeGenOpts.EmitGcovArcs;
+ Options.EmitNotes = !CodeGenOpts.CoverageNotesFile.empty();
+ Options.EmitData = !CodeGenOpts.CoverageDataFile.empty();
llvm::copy(CodeGenOpts.CoverageVersion, std::begin(Options.Version));
Options.NoRedZone = CodeGenOpts.DisableRedZone;
Options.Filter = CodeGenOpts.ProfileFilterFiles;
@@ -640,7 +636,7 @@ static void addKCFIPass(const Triple &TargetTriple, const LangOptions &LangOpts,
PassBuilder &PB) {
// If the back-end supports KCFI operand bundle lowering, skip KCFIPass.
if (TargetTriple.getArch() == llvm::Triple::x86_64 ||
- TargetTriple.isAArch64(64))
+ TargetTriple.isAArch64(64) || TargetTriple.isRISCV())
return;
// Ensure we lower KCFI operand bundles with -O0.
@@ -675,7 +671,8 @@ static void addSanitizers(const Triple &TargetTriple,
if (CodeGenOpts.hasSanitizeBinaryMetadata()) {
MPM.addPass(SanitizerBinaryMetadataPass(
- getSanitizerBinaryMetadataOptions(CodeGenOpts)));
+ getSanitizerBinaryMetadataOptions(CodeGenOpts),
+ CodeGenOpts.SanitizeMetadataIgnorelistFiles));
}
auto MSanPass = [&](SanitizerMask Mask, bool CompileKernel) {
@@ -767,33 +764,40 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (CodeGenOpts.hasProfileIRInstr())
// -fprofile-generate.
- PGOOpt = PGOOptions(CodeGenOpts.InstrProfileOutput.empty()
- ? getDefaultProfileGenName()
- : CodeGenOpts.InstrProfileOutput,
- "", "", PGOOptions::IRInstr, PGOOptions::NoCSAction,
- CodeGenOpts.DebugInfoForProfiling);
+ PGOOpt = PGOOptions(
+ CodeGenOpts.InstrProfileOutput.empty() ? getDefaultProfileGenName()
+ : CodeGenOpts.InstrProfileOutput,
+ "", "", CodeGenOpts.MemoryProfileUsePath, nullptr, PGOOptions::IRInstr,
+ PGOOptions::NoCSAction, CodeGenOpts.DebugInfoForProfiling);
else if (CodeGenOpts.hasProfileIRUse()) {
// -fprofile-use.
auto CSAction = CodeGenOpts.hasProfileCSIRUse() ? PGOOptions::CSIRUse
: PGOOptions::NoCSAction;
- PGOOpt = PGOOptions(CodeGenOpts.ProfileInstrumentUsePath, "",
- CodeGenOpts.ProfileRemappingFile, PGOOptions::IRUse,
- CSAction, CodeGenOpts.DebugInfoForProfiling);
+ PGOOpt = PGOOptions(
+ CodeGenOpts.ProfileInstrumentUsePath, "",
+ CodeGenOpts.ProfileRemappingFile, CodeGenOpts.MemoryProfileUsePath, VFS,
+ PGOOptions::IRUse, CSAction, CodeGenOpts.DebugInfoForProfiling);
} else if (!CodeGenOpts.SampleProfileFile.empty())
// -fprofile-sample-use
PGOOpt = PGOOptions(
CodeGenOpts.SampleProfileFile, "", CodeGenOpts.ProfileRemappingFile,
- PGOOptions::SampleUse, PGOOptions::NoCSAction,
- CodeGenOpts.DebugInfoForProfiling, CodeGenOpts.PseudoProbeForProfiling);
+ CodeGenOpts.MemoryProfileUsePath, VFS, PGOOptions::SampleUse,
+ PGOOptions::NoCSAction, CodeGenOpts.DebugInfoForProfiling,
+ CodeGenOpts.PseudoProbeForProfiling);
+ else if (!CodeGenOpts.MemoryProfileUsePath.empty())
+ // -fmemory-profile-use (without any of the above options)
+ PGOOpt = PGOOptions("", "", "", CodeGenOpts.MemoryProfileUsePath, VFS,
+ PGOOptions::NoAction, PGOOptions::NoCSAction,
+ CodeGenOpts.DebugInfoForProfiling);
else if (CodeGenOpts.PseudoProbeForProfiling)
// -fpseudo-probe-for-profiling
- PGOOpt =
- PGOOptions("", "", "", PGOOptions::NoAction, PGOOptions::NoCSAction,
- CodeGenOpts.DebugInfoForProfiling, true);
+ PGOOpt = PGOOptions("", "", "", /*MemoryProfile=*/"", nullptr,
+ PGOOptions::NoAction, PGOOptions::NoCSAction,
+ CodeGenOpts.DebugInfoForProfiling, true);
else if (CodeGenOpts.DebugInfoForProfiling)
// -fdebug-info-for-profiling
- PGOOpt = PGOOptions("", "", "", PGOOptions::NoAction,
- PGOOptions::NoCSAction, true);
+ PGOOpt = PGOOptions("", "", "", /*MemoryProfile=*/"", nullptr,
+ PGOOptions::NoAction, PGOOptions::NoCSAction, true);
// Check to see if we want to generate a CS profile.
if (CodeGenOpts.hasProfileCSIRInstr()) {
@@ -810,12 +814,13 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
: CodeGenOpts.InstrProfileOutput;
PGOOpt->CSAction = PGOOptions::CSIRInstr;
} else
- PGOOpt = PGOOptions("",
- CodeGenOpts.InstrProfileOutput.empty()
- ? getDefaultProfileGenName()
- : CodeGenOpts.InstrProfileOutput,
- "", PGOOptions::NoAction, PGOOptions::CSIRInstr,
- CodeGenOpts.DebugInfoForProfiling);
+ PGOOpt =
+ PGOOptions("",
+ CodeGenOpts.InstrProfileOutput.empty()
+ ? getDefaultProfileGenName()
+ : CodeGenOpts.InstrProfileOutput,
+ "", /*MemoryProfile=*/"", nullptr, PGOOptions::NoAction,
+ PGOOptions::CSIRInstr, CodeGenOpts.DebugInfoForProfiling);
}
if (TM)
TM->setPGOOption(PGOOpt);
@@ -831,6 +836,7 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
// Only enable CGProfilePass when using integrated assembler, since
// non-integrated assemblers don't recognize .cgprofile section.
PTO.CallGraphProfile = !CodeGenOpts.DisableIntegratedAS;
+ PTO.UnifiedLTO = CodeGenOpts.UnifiedLTO;
LoopAnalysisManager LAM;
FunctionAnalysisManager FAM;
@@ -845,15 +851,33 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
StandardInstrumentations SI(
TheModule->getContext(),
(CodeGenOpts.DebugPassManager || DebugPassStructure),
- /*VerifyEach*/ false, PrintPassOpts);
- SI.registerCallbacks(PIC, &FAM);
+ CodeGenOpts.VerifyEach, PrintPassOpts);
+ SI.registerCallbacks(PIC, &MAM);
PassBuilder PB(TM.get(), PTO, PGOOpt, &PIC);
- if (CodeGenOpts.EnableAssignmentTracking) {
+ // Handle the assignment tracking feature options.
+ switch (CodeGenOpts.getAssignmentTrackingMode()) {
+ case CodeGenOptions::AssignmentTrackingOpts::Forced:
PB.registerPipelineStartEPCallback(
[&](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(AssignmentTrackingPass());
});
+ break;
+ case CodeGenOptions::AssignmentTrackingOpts::Enabled:
+ // Disable assignment tracking in LTO builds for now as the performance
+ // cost is too high. Disable for LLDB tuning due to llvm.org/PR43126.
+ if (!CodeGenOpts.PrepareForThinLTO && !CodeGenOpts.PrepareForLTO &&
+ CodeGenOpts.getDebuggerTuning() != llvm::DebuggerKind::LLDB) {
+ PB.registerPipelineStartEPCallback(
+ [&](ModulePassManager &MPM, OptimizationLevel Level) {
+ // Only use assignment tracking if optimisations are enabled.
+ if (Level != OptimizationLevel::O0)
+ MPM.addPass(AssignmentTrackingPass());
+ });
+ }
+ break;
+ case CodeGenOptions::AssignmentTrackingOpts::Disabled:
+ break;
}
// Enable verify-debuginfo-preserve-each for new PM.
@@ -866,7 +890,7 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (!CodeGenOpts.DIBugsReportFilePath.empty())
Debugify.setOrigDIVerifyBugsReportFilePath(
CodeGenOpts.DIBugsReportFilePath);
- Debugify.registerCallbacks(PIC);
+ Debugify.registerCallbacks(PIC, MAM);
}
// Attempt to load pass plugins and register their callbacks with PB.
for (auto &PluginFN : CodeGenOpts.PassPlugins) {
@@ -982,20 +1006,28 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
MPM.addPass(InstrProfiling(*Options, false));
});
- if (CodeGenOpts.OptimizationLevel == 0) {
- MPM = PB.buildO0DefaultPipeline(Level, IsLTO || IsThinLTO);
- } else if (IsThinLTO) {
+ // TODO: Consider passing the MemoryProfileOutput to the pass builder via
+ // the PGOOptions, and set this up there.
+ if (!CodeGenOpts.MemoryProfileOutput.empty()) {
+ PB.registerOptimizerLastEPCallback(
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
+ MPM.addPass(createModuleToFunctionPassAdaptor(MemProfilerPass()));
+ MPM.addPass(ModuleMemProfilerPass());
+ });
+ }
+
+ bool IsThinOrUnifiedLTO = IsThinLTO || (IsLTO && CodeGenOpts.UnifiedLTO);
+ if (CodeGenOpts.FatLTO) {
+ MPM = PB.buildFatLTODefaultPipeline(Level, IsThinOrUnifiedLTO,
+ IsThinOrUnifiedLTO ||
+ shouldEmitRegularLTOSummary());
+ } else if (IsThinOrUnifiedLTO) {
MPM = PB.buildThinLTOPreLinkDefaultPipeline(Level);
} else if (IsLTO) {
MPM = PB.buildLTOPreLinkDefaultPipeline(Level);
} else {
MPM = PB.buildPerModuleDefaultPipeline(Level);
}
-
- if (!CodeGenOpts.MemoryProfileOutput.empty()) {
- MPM.addPass(createModuleToFunctionPassAdaptor(MemProfilerPass()));
- MPM.addPass(ModuleMemProfilerPass());
- }
}
// Add a verifier pass if requested. We don't have to do this if the action
@@ -1015,8 +1047,10 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (!ThinLinkOS)
return;
}
- MPM.addPass(ThinLTOBitcodeWriterPass(*OS, ThinLinkOS ? &ThinLinkOS->os()
- : nullptr));
+ if (CodeGenOpts.UnifiedLTO)
+ TheModule->addModuleFlag(Module::Error, "UnifiedLTO", uint32_t(1));
+ MPM.addPass(ThinLTOBitcodeWriterPass(
+ *OS, ThinLinkOS ? &ThinLinkOS->os() : nullptr));
} else {
MPM.addPass(PrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists,
/*EmitLTOSummary=*/true));
@@ -1027,11 +1061,13 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
// targets
bool EmitLTOSummary = shouldEmitRegularLTOSummary();
if (EmitLTOSummary) {
- if (!TheModule->getModuleFlag("ThinLTO"))
+ if (!TheModule->getModuleFlag("ThinLTO") && !CodeGenOpts.UnifiedLTO)
TheModule->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
uint32_t(1));
+ if (CodeGenOpts.UnifiedLTO)
+ TheModule->addModuleFlag(Module::Error, "UnifiedLTO", uint32_t(1));
}
if (Action == Backend_EmitBC)
MPM.addPass(BitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists,
@@ -1041,6 +1077,21 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
EmitLTOSummary));
}
}
+ if (CodeGenOpts.FatLTO) {
+ // Set module flags, like EnableSplitLTOUnit and UnifiedLTO, since FatLTO
+ // uses a different action than Backend_EmitBC or Backend_EmitLL.
+ bool IsThinOrUnifiedLTO =
+ CodeGenOpts.PrepareForThinLTO ||
+ (CodeGenOpts.PrepareForLTO && CodeGenOpts.UnifiedLTO);
+ if (!TheModule->getModuleFlag("ThinLTO"))
+ TheModule->addModuleFlag(Module::Error, "ThinLTO",
+ uint32_t(IsThinOrUnifiedLTO));
+ if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
+ TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
+ uint32_t(CodeGenOpts.EnableSplitLTOUnit));
+ if (CodeGenOpts.UnifiedLTO && !TheModule->getModuleFlag("UnifiedLTO"))
+ TheModule->addModuleFlag(Module::Error, "UnifiedLTO", uint32_t(1));
+ }
// Now that we have all of the passes ready, run them.
{
@@ -1177,6 +1228,7 @@ static void runThinLTOBackend(
Conf.ProfileRemapping = std::move(ProfileRemapping);
Conf.DebugPassManager = CGOpts.DebugPassManager;
+ Conf.VerifyEach = CGOpts.VerifyEach;
Conf.RemarksWithHotness = CGOpts.DiagnosticsWithHotness;
Conf.RemarksFilename = CGOpts.OptRecordFile;
Conf.RemarksPasses = CGOpts.OptRecordPasses;
@@ -1219,9 +1271,9 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
const HeaderSearchOptions &HeaderOpts,
const CodeGenOptions &CGOpts,
const clang::TargetOptions &TOpts,
- const LangOptions &LOpts,
- StringRef TDesc, Module *M,
- BackendAction Action,
+ const LangOptions &LOpts, StringRef TDesc,
+ Module *M, BackendAction Action,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
std::unique_ptr<raw_pwrite_stream> OS) {
llvm::TimeTraceScope TimeScope("Backend");
@@ -1264,7 +1316,7 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
}
}
- EmitAssemblyHelper AsmHelper(Diags, HeaderOpts, CGOpts, TOpts, LOpts, M);
+ EmitAssemblyHelper AsmHelper(Diags, HeaderOpts, CGOpts, TOpts, LOpts, M, VFS);
AsmHelper.EmitAssembly(Action, std::move(OS));
// Verify clang's TargetInfo DataLayout against the LLVM TargetMachine's
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
index 8ef95bb80846..222b0a192c85 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
@@ -80,22 +80,23 @@ namespace {
AtomicSizeInBits = C.toBits(
C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
.alignTo(lvalue.getAlignment()));
- auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
+ llvm::Value *BitFieldPtr = lvalue.getBitFieldPointer();
auto OffsetInChars =
(C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
lvalue.getAlignment();
- VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
- CGF.Int8Ty, VoidPtrAddr, OffsetInChars.getQuantity());
- llvm::Type *IntTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
- auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- VoidPtrAddr, IntTy->getPointerTo(), "atomic_bitfield_base");
+ llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
+ CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
+ StoragePtr = CGF.Builder.CreateAddrSpaceCast(
+ StoragePtr, llvm::PointerType::getUnqual(CGF.getLLVMContext()),
+ "atomic_bitfield_base");
BFI = OrigBFI;
BFI.Offset = Offset;
BFI.StorageSize = AtomicSizeInBits;
BFI.StorageOffset += OffsetInChars;
- LVal = LValue::MakeBitfield(Address(Addr, IntTy, lvalue.getAlignment()),
- BFI, lvalue.getType(), lvalue.getBaseInfo(),
- lvalue.getTBAAInfo());
+ llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
+ LVal = LValue::MakeBitfield(
+ Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
+ lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
if (AtomicTy.isNull()) {
llvm::APInt Size(
@@ -161,7 +162,7 @@ namespace {
}
Address getAtomicAddressAsAtomicIntPointer() const {
- return emitCastToAtomicIntPointer(getAtomicAddress());
+ return castToAtomicIntPointer(getAtomicAddress());
}
/// Is the atomic size larger than the underlying value type?
@@ -183,7 +184,7 @@ namespace {
/// Cast the given pointer to an integer pointer suitable for atomic
/// operations if the source.
- Address emitCastToAtomicIntPointer(Address Addr) const;
+ Address castToAtomicIntPointer(Address Addr) const;
/// If Addr is compatible with the iN that will be used for an atomic
/// operation, bitcast it. Otherwise, create a temporary that is suitable
@@ -623,6 +624,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
: llvm::Instruction::Sub;
[[fallthrough]];
case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__hip_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
@@ -636,8 +638,11 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_min:
case AtomicExpr::AO__atomic_fetch_min:
- Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
- : llvm::AtomicRMWInst::UMin;
+ Op = E->getValueType()->isFloatingType()
+ ? llvm::AtomicRMWInst::FMin
+ : (E->getValueType()->isSignedIntegerType()
+ ? llvm::AtomicRMWInst::Min
+ : llvm::AtomicRMWInst::UMin);
break;
case AtomicExpr::AO__atomic_max_fetch:
@@ -647,8 +652,11 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_max:
- Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
- : llvm::AtomicRMWInst::UMax;
+ Op = E->getValueType()->isFloatingType()
+ ? llvm::AtomicRMWInst::FMax
+ : (E->getValueType()->isSignedIntegerType()
+ ? llvm::AtomicRMWInst::Max
+ : llvm::AtomicRMWInst::UMax);
break;
case AtomicExpr::AO__atomic_and_fetch:
@@ -789,8 +797,7 @@ AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
ValTy =
CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
llvm::Type *ITy = llvm::IntegerType::get(CGF.getLLVMContext(), SizeInBits);
- Address Ptr = Address(CGF.Builder.CreateBitCast(Val, ITy->getPointerTo()),
- ITy, Align);
+ Address Ptr = Address(Val, ITy, Align);
Val = CGF.EmitLoadOfScalar(Ptr, false,
CGF.getContext().getPointerType(ValTy),
Loc);
@@ -798,8 +805,7 @@ AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
Args.add(RValue::get(Val), ValTy);
} else {
// Non-optimized functions always take a reference.
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
- CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(Val), CGF.getContext().VoidPtrTy);
}
}
@@ -897,6 +903,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__hip_atomic_fetch_add:
+ case AtomicExpr::AO__hip_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
if (MemTy->isPointerType()) {
@@ -916,9 +923,19 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
}
[[fallthrough]];
case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__atomic_min_fetch:
case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__opencl_atomic_fetch_max:
+ case AtomicExpr::AO__opencl_atomic_fetch_min:
+ case AtomicExpr::AO__hip_atomic_fetch_max:
+ case AtomicExpr::AO__hip_atomic_fetch_min:
ShouldCastToIntPtrTy = !MemTy->isFloatingType();
[[fallthrough]];
@@ -934,13 +951,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_fetch_nand:
- case AtomicExpr::AO__c11_atomic_fetch_max:
- case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
- case AtomicExpr::AO__opencl_atomic_fetch_min:
- case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_or:
@@ -952,12 +965,6 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__atomic_nand_fetch:
- case AtomicExpr::AO__atomic_max_fetch:
- case AtomicExpr::AO__atomic_min_fetch:
- case AtomicExpr::AO__atomic_fetch_max:
- case AtomicExpr::AO__hip_atomic_fetch_max:
- case AtomicExpr::AO__atomic_fetch_min:
- case AtomicExpr::AO__hip_atomic_fetch_min:
Val1 = EmitValToTemp(*this, E->getVal1());
break;
}
@@ -971,7 +978,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
AtomicInfo Atomics(*this, AtomicVal);
if (ShouldCastToIntPtrTy) {
- Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
+ Ptr = Atomics.castToAtomicIntPointer(Ptr);
if (Val1.isValid())
Val1 = Atomics.convertToAtomicIntPointer(Val1);
if (Val2.isValid())
@@ -979,13 +986,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
}
if (Dest.isValid()) {
if (ShouldCastToIntPtrTy)
- Dest = Atomics.emitCastToAtomicIntPointer(Dest);
+ Dest = Atomics.castToAtomicIntPointer(Dest);
} else if (E->isCmpXChg())
Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
else if (!RValTy->isVoidType()) {
Dest = Atomics.CreateTempAlloca();
if (ShouldCastToIntPtrTy)
- Dest = Atomics.emitCastToAtomicIntPointer(Dest);
+ Dest = Atomics.castToAtomicIntPointer(Dest);
}
// Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
@@ -1013,6 +1020,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__hip_atomic_fetch_sub:
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_min:
@@ -1088,15 +1096,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (AS == LangAS::opencl_generic)
return V;
auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
- auto T = llvm::cast<llvm::PointerType>(V->getType());
- auto *DestType = llvm::PointerType::getWithSamePointeeType(T, DestAS);
+ auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);
return getTargetHooks().performAddrSpaceCast(
*this, V, AS, LangAS::opencl_generic, DestType, false);
};
- Args.add(RValue::get(CastToGenericAddrSpace(
- EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
+ Args.add(RValue::get(CastToGenericAddrSpace(Ptr.getPointer(),
+ E->getPtr()->getType())),
getContext().VoidPtrTy);
std::string LibCallName;
@@ -1129,10 +1136,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LibCallName = "__atomic_compare_exchange";
RetTy = getContext().BoolTy;
HaveRetTy = true;
- Args.add(
- RValue::get(CastToGenericAddrSpace(
- EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
- getContext().VoidPtrTy);
+ Args.add(RValue::get(CastToGenericAddrSpace(Val1.getPointer(),
+ E->getVal1()->getType())),
+ getContext().VoidPtrTy);
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
Args.add(RValue::get(Order), getContext().IntTy);
@@ -1218,6 +1224,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
[[fallthrough]];
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
+ case AtomicExpr::AO__hip_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
LibCallName = "__atomic_fetch_sub";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
@@ -1293,8 +1300,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
} else {
// Value is returned through parameter before the order.
RetTy = getContext().VoidTy;
- Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
- getContext().VoidPtrTy);
+ Args.add(RValue::get(Dest.getPointer()), getContext().VoidPtrTy);
}
}
// order is always the last parameter
@@ -1329,16 +1335,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
ResVal = Builder.CreateNot(ResVal);
- Builder.CreateStore(
- ResVal, Builder.CreateElementBitCast(Dest, ResVal->getType()));
+ Builder.CreateStore(ResVal, Dest.withElementType(ResVal->getType()));
}
if (RValTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(
- Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
- RValTy, E->getExprLoc());
+ return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
+ RValTy, E->getExprLoc());
}
bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
@@ -1389,9 +1393,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (RValTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(
- Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
- RValTy, E->getExprLoc());
+ return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
+ RValTy, E->getExprLoc());
}
// Long case, when Order isn't obviously constant.
@@ -1461,15 +1464,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
return RValue::get(nullptr);
assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
- return convertTempToRValue(
- Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
- RValTy, E->getExprLoc());
+ return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
+ RValTy, E->getExprLoc());
}
-Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
+Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
llvm::IntegerType *ty =
llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
- return CGF.Builder.CreateElementBitCast(addr, ty);
+ return addr.withElementType(ty);
}
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
@@ -1482,7 +1484,7 @@ Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
Addr = Tmp;
}
- return emitCastToAtomicIntPointer(Addr);
+ return castToAtomicIntPointer(Addr);
}
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
@@ -1554,7 +1556,7 @@ RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
}
// Slam the integer into the temporary.
- Address CastTemp = emitCastToAtomicIntPointer(Temp);
+ Address CastTemp = castToAtomicIntPointer(Temp);
CGF.Builder.CreateStore(IntVal, CastTemp)
->setVolatile(TempIsVolatile);
@@ -1566,10 +1568,8 @@ void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
// void __atomic_load(size_t size, void *mem, void *return, int order);
CallArgList Args;
Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
- CGF.getContext().VoidPtrTy);
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
- CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(AddForLoaded), CGF.getContext().VoidPtrTy);
Args.add(
RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
CGF.getContext().IntTy);
@@ -1732,7 +1732,7 @@ llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
Address Addr = materializeRValue(RVal);
// Cast the temporary to the atomic int type and pull a value out.
- Addr = emitCastToAtomicIntPointer(Addr);
+ Addr = castToAtomicIntPointer(Addr);
return CGF.Builder.CreateLoad(Addr);
}
@@ -1763,12 +1763,9 @@ AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
// void *desired, int success, int failure);
CallArgList Args;
Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
- CGF.getContext().VoidPtrTy);
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
- CGF.getContext().VoidPtrTy);
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
- CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(ExpectedAddr), CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(DesiredAddr), CGF.getContext().VoidPtrTy);
Args.add(RValue::get(
llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
CGF.getContext().IntTy);
@@ -1910,7 +1907,7 @@ void AtomicInfo::EmitAtomicUpdateOp(
/*NumReservedValues=*/2);
PHI->addIncoming(OldVal, CurBB);
Address NewAtomicAddr = CreateTempAlloca();
- Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
+ Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
requiresMemSetZero(getAtomicAddress().getElementType())) {
CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
@@ -1992,7 +1989,7 @@ void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
/*NumReservedValues=*/2);
PHI->addIncoming(OldVal, CurBB);
Address NewAtomicAddr = CreateTempAlloca();
- Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
+ Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
requiresMemSetZero(getAtomicAddress().getElementType())) {
CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
@@ -2071,10 +2068,8 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
CallArgList args;
args.add(RValue::get(atomics.getAtomicSizeValue()),
getContext().getSizeType());
- args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
- getContext().VoidPtrTy);
- args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
- getContext().VoidPtrTy);
+ args.add(RValue::get(atomics.getAtomicPointer()), getContext().VoidPtrTy);
+ args.add(RValue::get(srcAddr.getPointer()), getContext().VoidPtrTy);
args.add(
RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
getContext().IntTy);
@@ -2086,8 +2081,7 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
// Do the atomic store.
- Address addr =
- atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
+ Address addr = atomics.castToAtomicIntPointer(atomics.getAtomicAddress());
intValue = Builder.CreateIntCast(
intValue, addr.getElementType(), /*isSigned=*/false);
llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
index 6e4a0dbf2335..cfbe3272196e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
@@ -1259,9 +1259,8 @@ Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable) {
// to byref*.
auto &byrefInfo = getBlockByrefInfo(variable);
- addr = Address(Builder.CreateLoad(addr), Int8Ty, byrefInfo.ByrefAlignment);
-
- addr = Builder.CreateElementBitCast(addr, byrefInfo.Type, "byref.addr");
+ addr = Address(Builder.CreateLoad(addr), byrefInfo.Type,
+ byrefInfo.ByrefAlignment);
addr = emitBlockByrefAddress(addr, byrefInfo, /*follow*/ true,
variable->getName());
@@ -1427,7 +1426,8 @@ void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
// directly as BlockPointer.
BlockPointer = Builder.CreatePointerCast(
arg,
- BlockInfo->StructureType->getPointerTo(
+ llvm::PointerType::get(
+ getLLVMContext(),
getContext().getLangOpts().OpenCL
? getContext().getTargetAddressSpace(LangAS::opencl_generic)
: 0),
@@ -1934,14 +1934,12 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
auto AL = ApplyDebugLocation::CreateArtificial(*this);
Address src = GetAddrOfLocalVar(&SrcDecl);
- src = Address(Builder.CreateLoad(src), Int8Ty, blockInfo.BlockAlign);
- src = Builder.CreateElementBitCast(src, blockInfo.StructureType,
- "block.source");
+ src = Address(Builder.CreateLoad(src), blockInfo.StructureType,
+ blockInfo.BlockAlign);
Address dst = GetAddrOfLocalVar(&DstDecl);
- dst = Address(Builder.CreateLoad(dst), Int8Ty, blockInfo.BlockAlign);
- dst =
- Builder.CreateElementBitCast(dst, blockInfo.StructureType, "block.dest");
+ dst = Address(Builder.CreateLoad(dst), blockInfo.StructureType,
+ blockInfo.BlockAlign);
for (auto &capture : blockInfo.SortedCaptures) {
if (capture.isConstantOrTrivial())
@@ -2124,8 +2122,8 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
auto AL = ApplyDebugLocation::CreateArtificial(*this);
Address src = GetAddrOfLocalVar(&SrcDecl);
- src = Address(Builder.CreateLoad(src), Int8Ty, blockInfo.BlockAlign);
- src = Builder.CreateElementBitCast(src, blockInfo.StructureType, "block");
+ src = Address(Builder.CreateLoad(src), blockInfo.StructureType,
+ blockInfo.BlockAlign);
CodeGenFunction::RunCleanupsScope cleanups(*this);
@@ -2162,9 +2160,9 @@ public:
void emitCopy(CodeGenFunction &CGF, Address destField,
Address srcField) override {
- destField = CGF.Builder.CreateElementBitCast(destField, CGF.Int8Ty);
+ destField = destField.withElementType(CGF.Int8Ty);
- srcField = CGF.Builder.CreateElementBitCast(srcField, CGF.Int8PtrTy);
+ srcField = srcField.withElementType(CGF.Int8PtrTy);
llvm::Value *srcValue = CGF.Builder.CreateLoad(srcField);
unsigned flags = (Flags | BLOCK_BYREF_CALLER).getBitMask();
@@ -2177,7 +2175,7 @@ public:
}
void emitDispose(CodeGenFunction &CGF, Address field) override {
- field = CGF.Builder.CreateElementBitCast(field, CGF.Int8PtrTy);
+ field = field.withElementType(CGF.Int8PtrTy);
llvm::Value *value = CGF.Builder.CreateLoad(field);
CGF.BuildBlockRelease(value, Flags | BLOCK_BYREF_CALLER, false);
@@ -2369,17 +2367,15 @@ generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo,
if (generator.needsCopy()) {
// dst->x
Address destField = CGF.GetAddrOfLocalVar(&Dst);
- destField = Address(CGF.Builder.CreateLoad(destField), CGF.Int8Ty,
+ destField = Address(CGF.Builder.CreateLoad(destField), byrefInfo.Type,
byrefInfo.ByrefAlignment);
- destField = CGF.Builder.CreateElementBitCast(destField, byrefInfo.Type);
destField =
CGF.emitBlockByrefAddress(destField, byrefInfo, false, "dest-object");
// src->x
Address srcField = CGF.GetAddrOfLocalVar(&Src);
- srcField = Address(CGF.Builder.CreateLoad(srcField), CGF.Int8Ty,
+ srcField = Address(CGF.Builder.CreateLoad(srcField), byrefInfo.Type,
byrefInfo.ByrefAlignment);
- srcField = CGF.Builder.CreateElementBitCast(srcField, byrefInfo.Type);
srcField =
CGF.emitBlockByrefAddress(srcField, byrefInfo, false, "src-object");
@@ -2435,9 +2431,8 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
if (generator.needsDispose()) {
Address addr = CGF.GetAddrOfLocalVar(&Src);
- addr = Address(CGF.Builder.CreateLoad(addr), CGF.Int8Ty,
+ addr = Address(CGF.Builder.CreateLoad(addr), byrefInfo.Type,
byrefInfo.ByrefAlignment);
- addr = CGF.Builder.CreateElementBitCast(addr, byrefInfo.Type);
addr = CGF.emitBlockByrefAddress(addr, byrefInfo, false, "object");
generator.emitDispose(CGF, addr);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
index e8857d98894f..4ef1ae9f3365 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
@@ -287,12 +287,6 @@ public:
// This could be zero if no forced alignment is required.
CharUnits BlockHeaderForcedGapSize;
- /// The next block in the block-info chain. Invalid if this block
- /// info is not part of the CGF's block-info chain, which is true
- /// if it corresponds to a global block or a block whose expression
- /// has been encountered.
- CGBlockInfo *NextBlockInfo;
-
void buildCaptureMap() {
for (auto &C : SortedCaptures)
Captures[C.Cap->getVariable()] = &C;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
index 2fcfea64ede6..68535920088c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
@@ -89,8 +89,6 @@ public:
llvm::LoadInst *CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr,
CharUnits Align,
const llvm::Twine &Name = "") {
- assert(llvm::cast<llvm::PointerType>(Addr->getType())
- ->isOpaqueOrPointeeTypeMatches(Ty));
return CreateAlignedLoad(Ty, Addr, Align.getAsAlign(), Name);
}
@@ -120,15 +118,11 @@ public:
/// Emit a load from an i1 flag variable.
llvm::LoadInst *CreateFlagLoad(llvm::Value *Addr,
const llvm::Twine &Name = "") {
- assert(llvm::cast<llvm::PointerType>(Addr->getType())
- ->isOpaqueOrPointeeTypeMatches(getInt1Ty()));
return CreateAlignedLoad(getInt1Ty(), Addr, CharUnits::One(), Name);
}
/// Emit a store to an i1 flag variable.
llvm::StoreInst *CreateFlagStore(bool Value, llvm::Value *Addr) {
- assert(llvm::cast<llvm::PointerType>(Addr->getType())
- ->isOpaqueOrPointeeTypeMatches(getInt1Ty()));
return CreateAlignedStore(getInt1(Value), Addr, CharUnits::One());
}
@@ -157,19 +151,8 @@ public:
using CGBuilderBaseTy::CreateAddrSpaceCast;
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty,
const llvm::Twine &Name = "") {
- assert(cast<llvm::PointerType>(Ty)->isOpaqueOrPointeeTypeMatches(
- Addr.getElementType()) &&
- "Should not change the element type");
- return Addr.withPointer(CreateAddrSpaceCast(Addr.getPointer(), Ty, Name));
- }
-
- /// Cast the element type of the given address to a different type,
- /// preserving information like the alignment and address space.
- Address CreateElementBitCast(Address Addr, llvm::Type *Ty,
- const llvm::Twine &Name = "") {
- auto *PtrTy = Ty->getPointerTo(Addr.getAddressSpace());
- return Address(CreateBitCast(Addr.getPointer(), PtrTy, Name), Ty,
- Addr.getAlignment());
+ return Addr.withPointer(CreateAddrSpaceCast(Addr.getPointer(), Ty, Name),
+ Addr.isKnownNonNull());
}
using CGBuilderBaseTy::CreatePointerBitCastOrAddrSpaceCast;
@@ -178,7 +161,7 @@ public:
const llvm::Twine &Name = "") {
llvm::Value *Ptr =
CreatePointerBitCastOrAddrSpaceCast(Addr.getPointer(), Ty, Name);
- return Address(Ptr, ElementTy, Addr.getAlignment());
+ return Address(Ptr, ElementTy, Addr.getAlignment(), Addr.isKnownNonNull());
}
/// Given
@@ -199,7 +182,7 @@ public:
return Address(
CreateStructGEP(Addr.getElementType(), Addr.getPointer(), Index, Name),
ElTy->getElementType(Index),
- Addr.getAlignment().alignmentAtOffset(Offset));
+ Addr.getAlignment().alignmentAtOffset(Offset), Addr.isKnownNonNull());
}
/// Given
@@ -221,7 +204,8 @@ public:
CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
{getSize(CharUnits::Zero()), getSize(Index)}, Name),
ElTy->getElementType(),
- Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize),
+ Addr.isKnownNonNull());
}
/// Given
@@ -237,8 +221,8 @@ public:
return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Index), Name),
- ElTy,
- Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ ElTy, Addr.getAlignment().alignmentAtOffset(Index * EltSize),
+ Addr.isKnownNonNull());
}
/// Given
@@ -255,7 +239,8 @@ public:
return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Index), Name),
Addr.getElementType(),
- Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize),
+ NotKnownNonNull);
}
/// Create GEP with single dynamic index. The address alignment is reduced
@@ -270,7 +255,7 @@ public:
return Address(
CreateGEP(Addr.getElementType(), Addr.getPointer(), Index, Name),
Addr.getElementType(),
- Addr.getAlignment().alignmentOfArrayElement(EltSize));
+ Addr.getAlignment().alignmentOfArrayElement(EltSize), NotKnownNonNull);
}
/// Given a pointer to i8, adjust it by a given constant offset.
@@ -280,7 +265,8 @@ public:
return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Offset), Name),
Addr.getElementType(),
- Addr.getAlignment().alignmentAtOffset(Offset));
+ Addr.getAlignment().alignmentAtOffset(Offset),
+ Addr.isKnownNonNull());
}
Address CreateConstByteGEP(Address Addr, CharUnits Offset,
const llvm::Twine &Name = "") {
@@ -288,7 +274,8 @@ public:
return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Offset), Name),
Addr.getElementType(),
- Addr.getAlignment().alignmentAtOffset(Offset));
+ Addr.getAlignment().alignmentAtOffset(Offset),
+ NotKnownNonNull);
}
using CGBuilderBaseTy::CreateConstInBoundsGEP2_32;
@@ -305,7 +292,8 @@ public:
llvm_unreachable("offset of GEP with constants is always computable");
return Address(GEP, GEP->getResultElementType(),
Addr.getAlignment().alignmentAtOffset(
- CharUnits::fromQuantity(Offset.getSExtValue())));
+ CharUnits::fromQuantity(Offset.getSExtValue())),
+ Addr.isKnownNonNull());
}
using CGBuilderBaseTy::CreateMemCpy;
@@ -369,7 +357,8 @@ public:
using CGBuilderBaseTy::CreateLaunderInvariantGroup;
Address CreateLaunderInvariantGroup(Address Addr) {
- return Addr.withPointer(CreateLaunderInvariantGroup(Addr.getPointer()));
+ return Addr.withPointer(CreateLaunderInvariantGroup(Addr.getPointer()),
+ Addr.isKnownNonNull());
}
};
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
index f72e04a425d9..30f5f4e7061c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
@@ -28,8 +28,10 @@
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -52,10 +54,10 @@
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
-#include "llvm/Support/AArch64TargetParser.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
-#include "llvm/Support/X86TargetParser.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
+#include "llvm/TargetParser/X86TargetParser.h"
#include <optional>
#include <sstream>
@@ -98,13 +100,29 @@ llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
// TODO: This list should be expanded or refactored after all GCC-compatible
// std libcall builtins are implemented.
- static SmallDenseMap<unsigned, StringRef, 8> F128Builtins{
+ static SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
+ {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
+ {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
+ {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
+ {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
+ {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
+ {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
+ {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
+ {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
+ {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
{Builtin::BI__builtin_printf, "__printfieee128"},
+ {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
+ {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
+ {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
+ {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
{Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
{Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
- {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
- {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
- {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
+ {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
+ {Builtin::BI__builtin_scanf, "__scanfieee128"},
+ {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
+ {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
+ {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
+ {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
{Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
};
@@ -169,6 +187,21 @@ static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
return V;
}
+static llvm::Value *CheckAtomicAlignment(CodeGenFunction &CGF,
+ const CallExpr *E) {
+ ASTContext &Ctx = CGF.getContext();
+ Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
+ unsigned Bytes = Ptr.getElementType()->isPointerTy()
+ ? Ctx.getTypeSizeInChars(Ctx.VoidPtrTy).getQuantity()
+ : Ptr.getElementType()->getScalarSizeInBits() / 8;
+ unsigned Align = Ptr.getAlignment().getQuantity();
+ if (Align % Bytes != 0) {
+ DiagnosticsEngine &Diags = CGF.CGM.getDiags();
+ Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
+ }
+ return Ptr.getPointer();
+}
+
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static Value *MakeBinaryAtomicValue(
@@ -181,13 +214,14 @@ static Value *MakeBinaryAtomicValue(
E->getArg(0)->getType()->getPointeeType()));
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
- llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
+ llvm::Value *DestPtr = CheckAtomicAlignment(CGF, E);
unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
CGF.getContext().getTypeSize(T));
- llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ llvm::Type *IntPtrType =
+ llvm::PointerType::get(CGF.getLLVMContext(), AddrSpace);
llvm::Value *Args[2];
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
@@ -243,19 +277,16 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
E->getArg(0)->getType()->getPointeeType()));
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
- llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
+ llvm::Value *DestPtr = CheckAtomicAlignment(CGF, E);
- llvm::IntegerType *IntType =
- llvm::IntegerType::get(CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(T));
- llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ llvm::IntegerType *IntType = llvm::IntegerType::get(
+ CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
llvm::Value *Args[2];
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
- Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[0] = DestPtr;
llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
@@ -285,15 +316,13 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
bool ReturnBool) {
QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
- llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
+ llvm::Value *DestPtr = CheckAtomicAlignment(CGF, E);
llvm::IntegerType *IntType = llvm::IntegerType::get(
CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
- llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
Value *Args[3];
- Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[0] = DestPtr;
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
@@ -385,10 +414,8 @@ static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
// Convert to i128 pointers and values.
llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
- llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
- Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy);
- Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy),
- Int128Ty, CGF.getContext().toCharUnitsFromBits(128));
+ Address ComparandResult(ComparandPtr, Int128Ty,
+ CGF.getContext().toCharUnitsFromBits(128));
// (((i128)hi) << 64) | ((i128)lo)
ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
@@ -451,7 +478,6 @@ static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
llvm::Type *ITy =
llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
- Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
Load->setVolatile(true);
return Load;
@@ -463,9 +489,6 @@ static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
Value *Value = CGF.EmitScalarExpr(E->getArg(1));
QualType ElTy = E->getArg(0)->getType()->getPointeeType();
CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
- llvm::Type *ITy =
- llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
- Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
llvm::StoreInst *Store =
CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
Store->setVolatile(true);
@@ -508,6 +531,25 @@ static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
}
}
+// Has second type mangled argument.
+static Value *emitBinaryExpMaybeConstrainedFPBuiltin(
+ CodeGenFunction &CGF, const CallExpr *E, llvm::Intrinsic::ID IntrinsicID,
+ llvm::Intrinsic::ID ConstrainedIntrinsicID) {
+ llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
+
+ if (CGF.Builder.getIsFPConstrained()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
+ Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
+ {Src0->getType(), Src1->getType()});
+ return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
+ }
+
+ Function *F =
+ CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
+ return CGF.Builder.CreateCall(F, {Src0, Src1});
+}
+
// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
@@ -611,6 +653,24 @@ emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
}
}
+static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+ llvm::Intrinsic::ID IntrinsicID) {
+ llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
+
+ QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
+ llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
+ llvm::Function *F =
+ CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
+ llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);
+
+ llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
+ CGF.EmitStoreOfScalar(Exp, LV);
+
+ return CGF.Builder.CreateExtractValue(Call, 0);
+}
+
/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
@@ -923,7 +983,7 @@ static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
// Build the constraints. FIXME: We should support immediates when possible.
std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
- std::string MachineClobbers = CGF.getTarget().getClobbers();
+ std::string_view MachineClobbers = CGF.getTarget().getClobbers();
if (!MachineClobbers.empty()) {
Constraints += ',';
Constraints += MachineClobbers;
@@ -931,9 +991,9 @@ static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
llvm::IntegerType *IntType = llvm::IntegerType::get(
CGF.getLLVMContext(),
CGF.getContext().getTypeSize(E->getArg(1)->getType()));
- llvm::Type *IntPtrType = IntType->getPointerTo();
+ llvm::Type *PtrType = llvm::PointerType::getUnqual(CGF.getLLVMContext());
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);
+ llvm::FunctionType::get(CGF.Int8Ty, {PtrType, IntType}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
@@ -1066,15 +1126,14 @@ static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
AsmOS << "$0, ${1:y}";
std::string Constraints = "=r,*Z,~{memory}";
- std::string MachineClobbers = CGF.getTarget().getClobbers();
+ std::string_view MachineClobbers = CGF.getTarget().getClobbers();
if (!MachineClobbers.empty()) {
Constraints += ',';
Constraints += MachineClobbers;
}
- llvm::Type *IntPtrType = RetType->getPointerTo();
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(RetType, {IntPtrType}, false);
+ llvm::Type *PtrType = llvm::PointerType::getUnqual(CGF.getLLVMContext());
+ llvm::FunctionType *FTy = llvm::FunctionType::get(RetType, {PtrType}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
@@ -1709,7 +1768,7 @@ Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
&& "Unsupported builtin check kind");
Value *ArgValue = EmitScalarExpr(E);
- if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
+ if (!SanOpts.has(SanitizerKind::Builtin))
return ArgValue;
SanitizerScope SanScope(this);
@@ -1818,8 +1877,7 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
Address Arg = GetAddrOfLocalVar(Args[I]);
Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
- Addr =
- Builder.CreateElementBitCast(Addr, Arg.getElementType(), "argDataCast");
+ Addr = Addr.withElementType(Arg.getElementType());
Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
Offset += Size;
++I;
@@ -2182,6 +2240,17 @@ static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
}
}
+static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
+ Value *V) {
+ if (CGF.Builder.getIsFPConstrained() &&
+ CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
+ if (Value *Result =
+ CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
+ return Result;
+ }
+ return nullptr;
+}
+
RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue) {
@@ -2444,6 +2513,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Intrinsic::round,
Intrinsic::experimental_constrained_round));
+ case Builtin::BIroundeven:
+ case Builtin::BIroundevenf:
+ case Builtin::BIroundevenl:
+ case Builtin::BI__builtin_roundeven:
+ case Builtin::BI__builtin_roundevenf:
+ case Builtin::BI__builtin_roundevenf16:
+ case Builtin::BI__builtin_roundevenl:
+ case Builtin::BI__builtin_roundevenf128:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ Intrinsic::roundeven,
+ Intrinsic::experimental_constrained_roundeven));
+
case Builtin::BIsin:
case Builtin::BIsinf:
case Builtin::BIsinl:
@@ -2463,11 +2544,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_sqrtf:
case Builtin::BI__builtin_sqrtf16:
case Builtin::BI__builtin_sqrtl:
- case Builtin::BI__builtin_sqrtf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::sqrt,
- Intrinsic::experimental_constrained_sqrt));
-
+ case Builtin::BI__builtin_sqrtf128: {
+ llvm::Value *Call = emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
+ SetSqrtFPAccuracy(Call);
+ return RValue::get(Call);
+ }
case Builtin::BItrunc:
case Builtin::BItruncf:
case Builtin::BItruncl:
@@ -2523,7 +2605,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
*this, E, Intrinsic::llrint,
Intrinsic::experimental_constrained_llrint));
-
+ case Builtin::BI__builtin_ldexp:
+ case Builtin::BI__builtin_ldexpf:
+ case Builtin::BI__builtin_ldexpl:
+ case Builtin::BI__builtin_ldexpf16:
+ case Builtin::BI__builtin_ldexpf128: {
+ return RValue::get(emitBinaryExpMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::ldexp,
+ Intrinsic::experimental_constrained_ldexp));
+ }
default:
break;
}
@@ -2801,8 +2891,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_assume_aligned: {
const Expr *Ptr = E->getArg(0);
Value *PtrValue = EmitScalarExpr(Ptr);
- if (PtrValue->getType() != VoidPtrTy)
- PtrValue = EmitCastToVoidPtr(PtrValue);
Value *OffsetValue =
(E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
@@ -2827,6 +2915,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateCall(FnAssume, ArgValue);
return RValue::get(nullptr);
}
+ case Builtin::BI__builtin_assume_separate_storage: {
+ const Expr *Arg0 = E->getArg(0);
+ const Expr *Arg1 = E->getArg(1);
+
+ Value *Value0 = EmitScalarExpr(Arg0);
+ Value *Value1 = EmitScalarExpr(Arg1);
+
+ Value *Values[] = {Value0, Value1};
+ OperandBundleDefT<Value *> OBD("separate_storage", Values);
+ Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
+ return RValue::get(nullptr);
+ }
case Builtin::BI__arithmetic_fence: {
// Create the builtin call if FastMath is selected, and the target
// supports the builtin, otherwise just return the argument.
@@ -2981,6 +3081,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
if (Builder.getIsFPConstrained()) {
+ // FIXME: llvm.powi has 2 mangling types,
+ // llvm.experimental.constrained.powi has one.
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
Src0->getType());
@@ -2991,6 +3093,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
{ Src0->getType(), Src1->getType() });
return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
}
+ case Builtin::BI__builtin_frexp:
+ case Builtin::BI__builtin_frexpf:
+ case Builtin::BI__builtin_frexpl:
+ case Builtin::BI__builtin_frexpf128:
+ case Builtin::BI__builtin_frexpf16:
+ return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
case Builtin::BI__builtin_isgreater:
case Builtin::BI__builtin_isgreaterequal:
case Builtin::BI__builtin_isless:
@@ -3027,37 +3135,69 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// ZExt bool to int type.
return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
}
+
case Builtin::BI__builtin_isnan: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Value *V = EmitScalarExpr(E->getArg(0));
- llvm::Type *Ty = V->getType();
- const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
- if (!Builder.getIsFPConstrained() ||
- Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
- !Ty->isIEEE()) {
- V = Builder.CreateFCmpUNO(V, V, "cmp");
- return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
- }
+ if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
+ return RValue::get(Result);
+ return RValue::get(
+ Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
+ ConvertType(E->getType())));
+ }
- if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
+ case Builtin::BI__builtin_isinf: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ Value *V = EmitScalarExpr(E->getArg(0));
+ if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
return RValue::get(Result);
+ return RValue::get(
+ Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
+ ConvertType(E->getType())));
+ }
- // NaN has all exp bits set and a non zero significand. Therefore:
- // isnan(V) == ((exp mask - (abs(V) & exp mask)) < 0)
- unsigned bitsize = Ty->getScalarSizeInBits();
- llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
- Value *IntV = Builder.CreateBitCast(V, IntTy);
- APInt AndMask = APInt::getSignedMaxValue(bitsize);
- Value *AbsV =
- Builder.CreateAnd(IntV, llvm::ConstantInt::get(IntTy, AndMask));
- APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
- Value *Sub =
- Builder.CreateSub(llvm::ConstantInt::get(IntTy, ExpMask), AbsV);
- // V = sign bit (Sub) <=> V = (Sub < 0)
- V = Builder.CreateLShr(Sub, llvm::ConstantInt::get(IntTy, bitsize - 1));
- if (bitsize > 32)
- V = Builder.CreateTrunc(V, ConvertType(E->getType()));
- return RValue::get(V);
+ case Builtin::BIfinite:
+ case Builtin::BI__finite:
+ case Builtin::BIfinitef:
+ case Builtin::BI__finitef:
+ case Builtin::BIfinitel:
+ case Builtin::BI__finitel:
+ case Builtin::BI__builtin_isfinite: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ Value *V = EmitScalarExpr(E->getArg(0));
+ if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
+ return RValue::get(Result);
+ return RValue::get(
+ Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
+ ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isnormal: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ Value *V = EmitScalarExpr(E->getArg(0));
+ return RValue::get(
+ Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
+ ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isfpclass: {
+ Expr::EvalResult Result;
+ if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
+ break;
+ uint64_t Test = Result.Val.getInt().getLimitedValue();
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ Value *V = EmitScalarExpr(E->getArg(0));
+ return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
+ ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_nondeterministic_value: {
+ llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
+
+ Value *Result = PoisonValue::get(Ty);
+ Result = Builder.CreateFreeze(Result);
+
+ return RValue::get(Result);
}
case Builtin::BI__builtin_elementwise_abs: {
@@ -3079,6 +3219,24 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_elementwise_ceil:
return RValue::get(
emitUnaryBuiltin(*this, E, llvm::Intrinsic::ceil, "elt.ceil"));
+ case Builtin::BI__builtin_elementwise_exp:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::exp, "elt.exp"));
+ case Builtin::BI__builtin_elementwise_exp2:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::exp2, "elt.exp2"));
+ case Builtin::BI__builtin_elementwise_log:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::log, "elt.log"));
+ case Builtin::BI__builtin_elementwise_log2:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::log2, "elt.log2"));
+ case Builtin::BI__builtin_elementwise_log10:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::log10, "elt.log10"));
+ case Builtin::BI__builtin_elementwise_pow: {
+ return RValue::get(emitBinaryBuiltin(*this, E, llvm::Intrinsic::pow));
+ }
case Builtin::BI__builtin_elementwise_cos:
return RValue::get(
emitUnaryBuiltin(*this, E, llvm::Intrinsic::cos, "elt.cos"));
@@ -3088,6 +3246,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_elementwise_roundeven:
return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::roundeven,
"elt.roundeven"));
+ case Builtin::BI__builtin_elementwise_round:
+ return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::round,
+ "elt.round"));
+ case Builtin::BI__builtin_elementwise_rint:
+ return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::rint,
+ "elt.rint"));
+ case Builtin::BI__builtin_elementwise_nearbyint:
+ return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::nearbyint,
+ "elt.nearbyint"));
case Builtin::BI__builtin_elementwise_sin:
return RValue::get(
emitUnaryBuiltin(*this, E, llvm::Intrinsic::sin, "elt.sin"));
@@ -3097,9 +3264,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
emitUnaryBuiltin(*this, E, llvm::Intrinsic::trunc, "elt.trunc"));
case Builtin::BI__builtin_elementwise_canonicalize:
return RValue::get(
- emitUnaryBuiltin(*this, E, llvm::Intrinsic::canonicalize, "elt.trunc"));
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::canonicalize, "elt.canonicalize"));
case Builtin::BI__builtin_elementwise_copysign:
return RValue::get(emitBinaryBuiltin(*this, E, llvm::Intrinsic::copysign));
+ case Builtin::BI__builtin_elementwise_fma:
+ return RValue::get(emitTernaryBuiltin(*this, E, llvm::Intrinsic::fma));
case Builtin::BI__builtin_elementwise_add_sat:
case Builtin::BI__builtin_elementwise_sub_sat: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
@@ -3247,52 +3416,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Result);
}
- case Builtin::BIfinite:
- case Builtin::BI__finite:
- case Builtin::BIfinitef:
- case Builtin::BI__finitef:
- case Builtin::BIfinitel:
- case Builtin::BI__finitel:
- case Builtin::BI__builtin_isinf:
- case Builtin::BI__builtin_isfinite: {
- // isinf(x) --> fabs(x) == infinity
- // isfinite(x) --> fabs(x) != infinity
- // x != NaN via the ordered compare in either case.
- CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
- Value *V = EmitScalarExpr(E->getArg(0));
- llvm::Type *Ty = V->getType();
- if (!Builder.getIsFPConstrained() ||
- Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
- !Ty->isIEEE()) {
- Value *Fabs = EmitFAbs(*this, V);
- Constant *Infinity = ConstantFP::getInfinity(V->getType());
- CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
- ? CmpInst::FCMP_OEQ
- : CmpInst::FCMP_ONE;
- Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
- return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
- }
-
- if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
- return RValue::get(Result);
-
- // Inf values have all exp bits set and a zero significand. Therefore:
- // isinf(V) == ((V << 1) == ((exp mask) << 1))
- // isfinite(V) == ((V << 1) < ((exp mask) << 1)) using unsigned comparison
- unsigned bitsize = Ty->getScalarSizeInBits();
- llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
- Value *IntV = Builder.CreateBitCast(V, IntTy);
- Value *Shl1 = Builder.CreateShl(IntV, 1);
- const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
- APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
- Value *ExpMaskShl1 = llvm::ConstantInt::get(IntTy, ExpMask.shl(1));
- if (BuiltinID == Builtin::BI__builtin_isinf)
- V = Builder.CreateICmpEQ(Shl1, ExpMaskShl1);
- else
- V = Builder.CreateICmpULT(Shl1, ExpMaskShl1);
- return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
- }
-
case Builtin::BI__builtin_isinf_sign: {
// isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
@@ -3312,26 +3435,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Result);
}
- case Builtin::BI__builtin_isnormal: {
- // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
- CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
- // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
- Value *V = EmitScalarExpr(E->getArg(0));
- Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
-
- Value *Abs = EmitFAbs(*this, V);
- Value *IsLessThanInf =
- Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
- APFloat Smallest = APFloat::getSmallestNormalized(
- getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
- Value *IsNormal =
- Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
- "isnormal");
- V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
- V = Builder.CreateAnd(V, IsNormal, "and");
- return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
- }
-
case Builtin::BI__builtin_flt_rounds: {
Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
@@ -3343,6 +3446,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Result);
}
+ case Builtin::BI__builtin_set_flt_rounds: {
+ Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
+
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Builder.CreateCall(F, V);
+ return RValue::get(nullptr);
+ }
+
case Builtin::BI__builtin_fpclassify: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
// FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
@@ -3802,7 +3913,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Call LLVM's EH setjmp, which is lightweight.
Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
- Buf = Builder.CreateElementBitCast(Buf, Int8Ty);
return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
}
case Builtin::BI__builtin_longjmp: {
@@ -3970,12 +4080,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__sync_lock_release_4:
case Builtin::BI__sync_lock_release_8:
case Builtin::BI__sync_lock_release_16: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Value *Ptr = CheckAtomicAlignment(*this, E);
QualType ElTy = E->getArg(0)->getType()->getPointeeType();
CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
- llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
- StoreSize.getQuantity() * 8);
- Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::Type *ITy =
+ llvm::IntegerType::get(getLLVMContext(), StoreSize.getQuantity() * 8);
llvm::StoreInst *Store =
Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
StoreSize);
@@ -4030,8 +4139,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Value *Ptr = EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
- Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(1);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
@@ -4113,7 +4220,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Address Ptr = EmitPointerWithAlignment(E->getArg(0));
- Ptr = Builder.CreateElementBitCast(Ptr, Int8Ty);
+ Ptr = Ptr.withElementType(Int8Ty);
Value *NewVal = Builder.getInt8(0);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
@@ -4549,13 +4656,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI_InterlockedCompareExchangePointer:
case Builtin::BI_InterlockedCompareExchangePointer_nf: {
llvm::Type *RTy;
- llvm::IntegerType *IntType =
- IntegerType::get(getLLVMContext(),
- getContext().getTypeSize(E->getType()));
- llvm::Type *IntPtrType = IntType->getPointerTo();
+ llvm::IntegerType *IntType = IntegerType::get(
+ getLLVMContext(), getContext().getTypeSize(E->getType()));
- llvm::Value *Destination =
- Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
+ llvm::Value *Destination = EmitScalarExpr(E->getArg(0));
llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
RTy = Exchange->getType();
@@ -4674,6 +4778,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BImove:
case Builtin::BImove_if_noexcept:
case Builtin::BIforward:
+ case Builtin::BIforward_like:
case Builtin::BIas_const:
return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
case Builtin::BI__GetExceptionInfo: {
@@ -4922,7 +5027,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
llvm::Value *Kernel =
- Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
+ Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
llvm::Value *Block =
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
@@ -4976,7 +5081,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
llvm::Value *Kernel =
- Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
+ Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
@@ -5000,8 +5105,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
// Any calls now have event arguments passed.
if (NumArgs >= 7) {
- llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
- llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
+ llvm::PointerType *PtrTy = llvm::PointerType::get(
+ CGM.getLLVMContext(),
CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
llvm::Value *NumEvents =
@@ -5013,33 +5118,33 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *EventWaitList = nullptr;
if (E->getArg(4)->isNullPointerConstant(
getContext(), Expr::NPC_ValueDependentIsNotNull)) {
- EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
+ EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
} else {
EventWaitList = E->getArg(4)->getType()->isArrayType()
? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
: EmitScalarExpr(E->getArg(4));
// Convert to generic address space.
- EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
+ EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
}
llvm::Value *EventRet = nullptr;
if (E->getArg(5)->isNullPointerConstant(
getContext(), Expr::NPC_ValueDependentIsNotNull)) {
- EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
+ EventRet = llvm::ConstantPointerNull::get(PtrTy);
} else {
EventRet =
- Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
+ Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
}
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
llvm::Value *Kernel =
- Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
+ Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
llvm::Value *Block =
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
std::vector<llvm::Type *> ArgTys = {
- QueueTy, Int32Ty, RangeTy, Int32Ty,
- EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
+ QueueTy, Int32Ty, RangeTy, Int32Ty,
+ PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
std::vector<llvm::Value *> Args = {Queue, Flags, Range,
NumEvents, EventWaitList, EventRet,
@@ -5083,7 +5188,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
getContext().getTargetAddressSpace(LangAS::opencl_generic));
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
- Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
+ Value *Kernel =
+ Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
return RValue::get(EmitRuntimeCall(
CGM.CreateRuntimeFunction(
@@ -5097,7 +5203,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
getContext().getTargetAddressSpace(LangAS::opencl_generic));
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
- Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
+ Value *Kernel =
+ Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
return RValue::get(EmitRuntimeCall(
CGM.CreateRuntimeFunction(
@@ -5114,7 +5221,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
- Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
+ Value *Kernel =
+ Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
const char *Name =
BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
@@ -5150,7 +5258,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BIprintf:
if (getTarget().getTriple().isNVPTX() ||
getTarget().getTriple().isAMDGCN()) {
- if (getLangOpts().OpenMPIsDevice)
+ if (getLangOpts().OpenMPIsTargetDevice)
return EmitOpenMPDevicePrintfCallExpr(E);
if (getTarget().getTriple().isNVPTX())
return EmitNVPTXDevicePrintfCallExpr(E);
@@ -5354,8 +5462,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
if (PtrTy->getAddressSpace() !=
ArgValue->getType()->getPointerAddressSpace()) {
ArgValue = Builder.CreateAddrSpaceCast(
- ArgValue,
- ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
+ ArgValue, llvm::PointerType::get(getLLVMContext(),
+ PtrTy->getAddressSpace()));
}
}
@@ -5385,7 +5493,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
V = Builder.CreateAddrSpaceCast(
- V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
+ V, llvm::PointerType::get(getLLVMContext(),
+ PtrTy->getAddressSpace()));
}
}
@@ -6643,6 +6752,21 @@ static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] = {
{ NEON::BI__builtin_neon_vuzpq_f16, NEON::BI__builtin_neon_vuzpq_v, },
{ NEON::BI__builtin_neon_vzip_f16, NEON::BI__builtin_neon_vzip_v, },
{ NEON::BI__builtin_neon_vzipq_f16, NEON::BI__builtin_neon_vzipq_v, },
+ // The mangling rules cause us to have one ID for each type for vldap1(q)_lane
+ // and vstl1(q)_lane, but codegen is equivalent for all of them. Choose an
+ // arbitrary one to be handled as tha canonical variation.
+ { NEON::BI__builtin_neon_vldap1_lane_u64, NEON::BI__builtin_neon_vldap1_lane_s64 },
+ { NEON::BI__builtin_neon_vldap1_lane_f64, NEON::BI__builtin_neon_vldap1_lane_s64 },
+ { NEON::BI__builtin_neon_vldap1_lane_p64, NEON::BI__builtin_neon_vldap1_lane_s64 },
+ { NEON::BI__builtin_neon_vldap1q_lane_u64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
+ { NEON::BI__builtin_neon_vldap1q_lane_f64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
+ { NEON::BI__builtin_neon_vldap1q_lane_p64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
+ { NEON::BI__builtin_neon_vstl1_lane_u64, NEON::BI__builtin_neon_vstl1_lane_s64 },
+ { NEON::BI__builtin_neon_vstl1_lane_f64, NEON::BI__builtin_neon_vstl1_lane_s64 },
+ { NEON::BI__builtin_neon_vstl1_lane_p64, NEON::BI__builtin_neon_vstl1_lane_s64 },
+ { NEON::BI__builtin_neon_vstl1q_lane_u64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
+ { NEON::BI__builtin_neon_vstl1q_lane_f64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
+ { NEON::BI__builtin_neon_vstl1q_lane_p64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
};
#undef NEONMAP0
@@ -6667,11 +6791,29 @@ static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
#undef SVEMAP1
#undef SVEMAP2
+#define SMEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
+ { \
+ #NameBase, SME::BI__builtin_sme_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
+ TypeModifier \
+ }
+
+#define SMEMAP2(NameBase, TypeModifier) \
+ { #NameBase, SME::BI__builtin_sme_##NameBase, 0, 0, TypeModifier }
+static const ARMVectorIntrinsicInfo AArch64SMEIntrinsicMap[] = {
+#define GET_SME_LLVM_INTRINSIC_MAP
+#include "clang/Basic/arm_sme_builtin_cg.inc"
+#undef GET_SME_LLVM_INTRINSIC_MAP
+};
+
+#undef SMEMAP1
+#undef SMEMAP2
+
static bool NEONSIMDIntrinsicsProvenSorted = false;
static bool AArch64SIMDIntrinsicsProvenSorted = false;
static bool AArch64SISDIntrinsicsProvenSorted = false;
static bool AArch64SVEIntrinsicsProvenSorted = false;
+static bool AArch64SMEIntrinsicsProvenSorted = false;
static const ARMVectorIntrinsicInfo *
findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
@@ -7121,7 +7263,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
Value *V = PoisonValue::get(Ty);
- PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
+ PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
LoadInst *Ld = Builder.CreateLoad(PtrOp0);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
@@ -7768,6 +7910,17 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
+ if (BuiltinID == clang::ARM::BI__builtin_arm_clz ||
+ BuiltinID == clang::ARM::BI__builtin_arm_clz64) {
+ llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType());
+ Value *Res = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
+ if (BuiltinID == clang::ARM::BI__builtin_arm_clz64)
+ Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
+ return Res;
+ }
+
+
if (BuiltinID == clang::ARM::BI__builtin_arm_cls) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
@@ -7900,8 +8053,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::Type *RealResTy = ConvertType(Ty);
llvm::Type *IntTy =
llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
- llvm::Type *PtrTy = IntTy->getPointerTo();
- LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(getLLVMContext());
Function *F = CGM.getIntrinsic(
BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
@@ -7934,7 +8086,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Value *Val = EmitScalarExpr(E->getArg(0));
Builder.CreateStore(Val, Tmp);
- Address LdPtr = Builder.CreateElementBitCast(Tmp, STy);
+ Address LdPtr = Tmp.withElementType(STy);
Val = Builder.CreateLoad(LdPtr);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
@@ -7949,9 +8101,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Value *StoreAddr = EmitScalarExpr(E->getArg(1));
QualType Ty = E->getArg(0)->getType();
- llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
- getContext().getTypeSize(Ty));
- StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
+ llvm::Type *StoreTy =
+ llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
@@ -8309,7 +8460,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
[[fallthrough]];
case NEON::BI__builtin_neon_vld1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
+ PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
Value *Ld = Builder.CreateLoad(PtrOp0);
return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
}
@@ -8373,9 +8524,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vst1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
- auto St = Builder.CreateStore(
- Ops[1], Builder.CreateElementBitCast(PtrOp0, Ops[1]->getType()));
- return St;
+ return Builder.CreateStore(Ops[1],
+ PtrOp0.withElementType(Ops[1]->getType()));
}
case NEON::BI__builtin_neon_vtbl1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
@@ -8820,6 +8970,8 @@ llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) {
return Builder.getInt32Ty();
case SVETypeFlags::EltTyInt64:
return Builder.getInt64Ty();
+ case SVETypeFlags::EltTyInt128:
+ return Builder.getInt128Ty();
case SVETypeFlags::EltTyFloat16:
return Builder.getHalfTy();
@@ -8938,6 +9090,7 @@ Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
switch (VTy->getMinNumElements()) {
default:
llvm_unreachable("unsupported element count!");
+ case 1:
case 2:
case 4:
case 8:
@@ -9223,13 +9376,9 @@ Value *CodeGenFunction::EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
Value *BasePtr = Ops[1];
// Implement the index operand if not omitted.
- if (Ops.size() > 3) {
- BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo());
+ if (Ops.size() > 3)
BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
- }
- // Prefetch intriniscs always expect an i8*
- BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty));
Value *PrfOp = Ops.back();
Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
@@ -9251,13 +9400,12 @@ Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
- Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
+ Value *BasePtr = Ops[1];
// Does the load have an offset?
if (Ops.size() > 2)
BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
- BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
auto *Load =
cast<llvm::Instruction>(Builder.CreateCall(F, {Predicate, BasePtr}));
@@ -9281,7 +9429,7 @@ Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
- Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
+ Value *BasePtr = Ops[1];
// Does the store have an offset?
if (Ops.size() == 4)
@@ -9290,7 +9438,6 @@ Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
// Last value is always the data
llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
- BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
auto *Store =
cast<llvm::Instruction>(Builder.CreateCall(F, {Val, Predicate, BasePtr}));
@@ -9299,6 +9446,84 @@ Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
return Store;
}
+Value *CodeGenFunction::EmitTileslice(Value *Offset, Value *Base) {
+ llvm::Value *CastOffset = Builder.CreateIntCast(Offset, Int32Ty, false);
+ return Builder.CreateAdd(Base, CastOffset, "tileslice");
+}
+
+Value *CodeGenFunction::EmitSMELd1St1(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ Ops[3] = EmitSVEPredicateCast(
+ Ops[3], getSVEVectorForElementType(SVEBuiltinMemEltTy(TypeFlags)));
+
+ SmallVector<Value *> NewOps;
+ NewOps.push_back(Ops[3]);
+
+ llvm::Value *BasePtr = Ops[4];
+
+ // If the intrinsic contains the vnum parameter, multiply it with the vector
+ // size in bytes.
+ if (Ops.size() == 6) {
+ Function *StreamingVectorLength =
+ CGM.getIntrinsic(Intrinsic::aarch64_sme_cntsb);
+ llvm::Value *StreamingVectorLengthCall =
+ Builder.CreateCall(StreamingVectorLength);
+ llvm::Value *Mulvl =
+ Builder.CreateMul(StreamingVectorLengthCall, Ops[5], "mulvl");
+ // The type of the ptr parameter is void *, so use Int8Ty here.
+ BasePtr = Builder.CreateGEP(Int8Ty, Ops[4], Mulvl);
+ }
+ NewOps.push_back(BasePtr);
+ NewOps.push_back(Ops[0]);
+ NewOps.push_back(EmitTileslice(Ops[2], Ops[1]));
+ Function *F = CGM.getIntrinsic(IntID);
+ return Builder.CreateCall(F, NewOps);
+}
+
+Value *CodeGenFunction::EmitSMEReadWrite(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ auto *VecTy = getSVEType(TypeFlags);
+ Function *F = CGM.getIntrinsic(IntID, VecTy);
+ if (TypeFlags.isReadZA()) {
+ Ops[1] = EmitSVEPredicateCast(Ops[1], VecTy);
+ Ops[3] = EmitTileslice(Ops[4], Ops[3]);
+ Ops.erase(&Ops[4]);
+ } else if (TypeFlags.isWriteZA()) {
+ Ops[1] = EmitTileslice(Ops[2], Ops[1]);
+ Ops[2] = EmitSVEPredicateCast(Ops[3], VecTy);
+ Ops.erase(&Ops[3]);
+ }
+ return Builder.CreateCall(F, Ops);
+}
+
+Value *CodeGenFunction::EmitSMEZero(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ // svzero_za() intrinsic zeros the entire za tile and has no parameters.
+ if (Ops.size() == 0)
+ Ops.push_back(llvm::ConstantInt::get(Int32Ty, 255));
+ Function *F = CGM.getIntrinsic(IntID, {});
+ return Builder.CreateCall(F, Ops);
+}
+
+Value *CodeGenFunction::EmitSMELdrStr(SVETypeFlags TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ Function *Cntsb = CGM.getIntrinsic(Intrinsic::aarch64_sme_cntsb);
+ llvm::Value *CntsbCall = Builder.CreateCall(Cntsb, {}, "svlb");
+ llvm::Value *MulVL = Builder.CreateMul(
+ CntsbCall,
+ Builder.getInt64(cast<llvm::ConstantInt>(Ops[1])->getZExtValue()),
+ "mulvl");
+ Ops[2] = Builder.CreateGEP(Int8Ty, Ops[2], MulVL);
+ Ops[0] = EmitTileslice(Ops[1], Ops[0]);
+ Ops.erase(&Ops[1]);
+ Function *F = CGM.getIntrinsic(IntID, {});
+ return Builder.CreateCall(F, Ops);
+}
+
// Limit the usage of scalable llvm IR generated by the ACLE by using the
// sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
@@ -9475,9 +9700,14 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
if (TypeFlags.isReverseCompare())
std::swap(Ops[1], Ops[2]);
-
- if (TypeFlags.isReverseUSDOT())
+ else if (TypeFlags.isReverseUSDOT())
std::swap(Ops[1], Ops[2]);
+ else if (TypeFlags.isReverseMergeAnyBinOp() &&
+ TypeFlags.getMergeType() == SVETypeFlags::MergeAny)
+ std::swap(Ops[1], Ops[2]);
+ else if (TypeFlags.isReverseMergeAnyAccOp() &&
+ TypeFlags.getMergeType() == SVETypeFlags::MergeAny)
+ std::swap(Ops[1], Ops[3]);
// Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
@@ -9720,6 +9950,64 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
return nullptr;
}
+Value *CodeGenFunction::EmitAArch64SMEBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ // Find out if any arguments are required to be integer constant expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+ llvm::Type *Ty = ConvertType(E->getType());
+ llvm::SmallVector<Value *, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+ if ((ICEArguments & (1 << i)) == 0)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ else {
+ // If this is required to be a constant, constant fold it so that we know
+ // that the generated intrinsic gets a ConstantInt.
+ std::optional<llvm::APSInt> Result =
+ E->getArg(i)->getIntegerConstantExpr(getContext());
+ assert(Result && "Expected argument to be a constant");
+
+ // Immediates for SVE llvm intrinsics are always 32bit. We can safely
+ // truncate because the immediate has been range checked and no valid
+ // immediate requires more than a handful of bits.
+ *Result = Result->extOrTrunc(32);
+ Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
+ }
+ }
+
+ auto *Builtin = findARMVectorIntrinsicInMap(AArch64SMEIntrinsicMap, BuiltinID,
+ AArch64SMEIntrinsicsProvenSorted);
+ SVETypeFlags TypeFlags(Builtin->TypeModifier);
+ if (TypeFlags.isLoad() || TypeFlags.isStore())
+ return EmitSMELd1St1(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isReadZA() || TypeFlags.isWriteZA())
+ return EmitSMEReadWrite(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (BuiltinID == SME::BI__builtin_sme_svzero_mask_za ||
+ BuiltinID == SME::BI__builtin_sme_svzero_za)
+ return EmitSMEZero(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (BuiltinID == SME::BI__builtin_sme_svldr_vnum_za ||
+ BuiltinID == SME::BI__builtin_sme_svstr_vnum_za)
+ return EmitSMELdrStr(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (Builtin->LLVMIntrinsic != 0) {
+ // Predicates must match the main datatype.
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
+ if (PredTy->getElementType()->isIntegerTy(1))
+ Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
+
+ Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
+ getSVEOverloadTypes(TypeFlags, Ty, Ops));
+ Value *Call = Builder.CreateCall(F, Ops);
+ return Call;
+ }
+
+ /// Should not happen
+ return nullptr;
+}
+
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
llvm::Triple::ArchType Arch) {
@@ -9727,6 +10015,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
BuiltinID <= clang::AArch64::LastSVEBuiltin)
return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
+ if (BuiltinID >= clang::AArch64::FirstSMEBuiltin &&
+ BuiltinID <= clang::AArch64::LastSMEBuiltin)
+ return EmitAArch64SMEBuiltinExpr(BuiltinID, E);
+
unsigned HintID = static_cast<unsigned>(-1);
switch (BuiltinID) {
default: break;
@@ -9775,6 +10067,16 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_clz ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_clz64) {
+ llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType());
+ Value *Res = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_clz64)
+ Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
+ return Res;
+ }
+
if (BuiltinID == clang::AArch64::BI__builtin_arm_cls) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
@@ -9929,8 +10231,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Type *RealResTy = ConvertType(Ty);
llvm::Type *IntTy =
llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
- llvm::Type *PtrTy = IntTy->getPointerTo();
- LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(getLLVMContext());
Function *F =
CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
@@ -9962,7 +10263,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
- Tmp = Builder.CreateElementBitCast(Tmp, STy);
+ Tmp = Tmp.withElementType(STy);
llvm::Value *Val = Builder.CreateLoad(Tmp);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
@@ -9978,9 +10279,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Value *StoreAddr = EmitScalarExpr(E->getArg(1));
QualType Ty = E->getArg(0)->getType();
- llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
- getContext().getTypeSize(Ty));
- StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
+ llvm::Type *StoreTy =
+ llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
@@ -10358,6 +10658,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vst1q_v:
case NEON::BI__builtin_neon_vst1_lane_v:
case NEON::BI__builtin_neon_vst1q_lane_v:
+ case NEON::BI__builtin_neon_vldap1_lane_s64:
+ case NEON::BI__builtin_neon_vldap1q_lane_s64:
+ case NEON::BI__builtin_neon_vstl1_lane_s64:
+ case NEON::BI__builtin_neon_vstl1q_lane_s64:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
@@ -10880,14 +11184,12 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
*this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
{EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
case NEON::BI__builtin_neon_vfmsh_f16: {
- // FIXME: This should be an fneg instruction:
- Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
- Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
+ Value* Neg = Builder.CreateFNeg(EmitScalarExpr(E->getArg(1)), "vsubh");
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
return emitCallMaybeConstrainedFPBuiltin(
*this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
- {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
+ {Neg, EmitScalarExpr(E->getArg(2)), Ops[0]});
}
case NEON::BI__builtin_neon_vaddd_s64:
case NEON::BI__builtin_neon_vaddd_u64:
@@ -11958,6 +12260,17 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
PtrOp0.getAlignment());
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
}
+ case NEON::BI__builtin_neon_vldap1_lane_s64:
+ case NEON::BI__builtin_neon_vldap1q_lane_s64: {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ llvm::LoadInst *LI = Builder.CreateAlignedLoad(
+ VTy->getElementType(), Ops[0], PtrOp0.getAlignment());
+ LI->setAtomic(llvm::AtomicOrdering::Acquire);
+ Ops[0] = LI;
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vldap1_lane");
+ }
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
Value *V = PoisonValue::get(Ty);
@@ -11976,6 +12289,16 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
PtrOp0.getAlignment());
+ case NEON::BI__builtin_neon_vstl1_lane_s64:
+ case NEON::BI__builtin_neon_vstl1q_lane_s64: {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ llvm::StoreInst *SI = Builder.CreateAlignedStore(
+ Ops[1], Builder.CreateBitCast(Ops[0], Ty), PtrOp0.getAlignment());
+ SI->setAtomic(llvm::AtomicOrdering::Release);
+ return SI;
+ }
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
@@ -14312,7 +14635,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// Unaligned nontemporal store of the scalar value.
StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
- SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+ SI->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
SI->setAlignment(llvm::Align(1));
return SI;
}
@@ -15750,9 +16073,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// If the user wants the entire vector, just load the entire vector.
if (NumBytes == 16) {
- Value *BC = Builder.CreateBitCast(Op0, ResTy->getPointerTo());
Value *LD =
- Builder.CreateLoad(Address(BC, ResTy, CharUnits::fromQuantity(1)));
+ Builder.CreateLoad(Address(Op0, ResTy, CharUnits::fromQuantity(1)));
if (!IsLE)
return LD;
@@ -15805,7 +16127,6 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// Storing the whole vector, simply store it on BE and reverse bytes and
// store on LE.
if (Width == 16) {
- Value *BC = Builder.CreateBitCast(Op0, Op2->getType()->getPointerTo());
Value *StVec = Op2;
if (IsLE) {
SmallVector<int, 16> RevMask;
@@ -15814,7 +16135,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
StVec = Builder.CreateShuffleVector(Op2, Op2, RevMask);
}
return Builder.CreateStore(
- StVec, Address(BC, Op2->getType(), CharUnits::fromQuantity(1)));
+ StVec, Address(Op0, Op2->getType(), CharUnits::fromQuantity(1)));
}
auto *ConvTy = Int64Ty;
unsigned NumElts = 0;
@@ -15842,14 +16163,13 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Op2, llvm::FixedVectorType::get(ConvTy, NumElts));
Value *Ptr =
Builder.CreateGEP(Int8Ty, Op0, ConstantInt::get(Int64Ty, Offset));
- Value *PtrBC = Builder.CreateBitCast(Ptr, ConvTy->getPointerTo());
Value *Elt = Builder.CreateExtractElement(Vec, EltNo);
if (IsLE && Width > 1) {
Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy);
Elt = Builder.CreateCall(F, Elt);
}
return Builder.CreateStore(
- Elt, Address(PtrBC, ConvTy, CharUnits::fromQuantity(1)));
+ Elt, Address(Ptr, ConvTy, CharUnits::fromQuantity(1)));
};
unsigned Stored = 0;
unsigned RemainingBytes = NumBytes;
@@ -16469,7 +16789,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// use custom code generation to expand a builtin call with a pointer to a
// load (if the corresponding instruction accumulates its result) followed by
// the call to the intrinsic and a store of the result.
-#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate) \
+#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate, Feature) \
case PPC::BI__builtin_##Name:
#include "clang/Basic/BuiltinsPPC.def"
{
@@ -16497,7 +16817,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Value *Vec = Builder.CreateLoad(Addr);
Value *Call = Builder.CreateCall(F, {Vec});
llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo());
+ Value *Ptr = Ops[0];
for (unsigned i=0; i<NumVecs; i++) {
Value *Vec = Builder.CreateExtractValue(Call, i);
llvm::ConstantInt* Index = llvm::ConstantInt::get(IntTy, i);
@@ -16519,7 +16839,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
}
bool Accumulate;
switch (BuiltinID) {
- #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
+ #define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
case PPC::BI__builtin_##Name: \
ID = Intrinsic::ppc_##Intr; \
Accumulate = Acc; \
@@ -16790,11 +17110,8 @@ Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
}
auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
- auto *DstTy =
- CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
- auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
auto *LD = CGF.Builder.CreateLoad(
- Address(Cast, CGF.Int16Ty, CharUnits::fromQuantity(2)));
+ Address(GEP, CGF.Int16Ty, CharUnits::fromQuantity(2)));
llvm::MDBuilder MDHelper(CGF.getLLVMContext());
llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
@@ -16813,11 +17130,8 @@ Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
// Indexing the HSA kernel_dispatch_packet struct.
auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
- auto *DstTy =
- CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
- auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
auto *LD = CGF.Builder.CreateLoad(
- Address(Cast, CGF.Int32Ty, CharUnits::fromQuantity(4)));
+ Address(GEP, CGF.Int32Ty, CharUnits::fromQuantity(4)));
LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
return LD;
@@ -16950,12 +17264,21 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
return EmitAMDGPUDispatchPtr(*this, E);
+ case AMDGPU::BI__builtin_amdgcn_logf:
+ return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log);
+ case AMDGPU::BI__builtin_amdgcn_exp2f:
+ return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_exp2);
case AMDGPU::BI__builtin_amdgcn_log_clampf:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
case AMDGPU::BI__builtin_amdgcn_ldexp:
case AMDGPU::BI__builtin_amdgcn_ldexpf:
- case AMDGPU::BI__builtin_amdgcn_ldexph:
- return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
+ case AMDGPU::BI__builtin_amdgcn_ldexph: {
+ llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
+ llvm::Function *F =
+ CGM.getIntrinsic(Intrinsic::ldexp, {Src0->getType(), Src1->getType()});
+ return Builder.CreateCall(F, {Src0, Src1});
+ }
case AMDGPU::BI__builtin_amdgcn_frexp_mant:
case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
case AMDGPU::BI__builtin_amdgcn_frexp_manth:
@@ -17128,7 +17451,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Addr, Val});
}
case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
- case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32: {
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16: {
Intrinsic::ID IID;
llvm::Type *ArgTy;
switch (BuiltinID) {
@@ -17140,6 +17464,11 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
IID = Intrinsic::amdgcn_ds_fadd;
break;
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16:
+ ArgTy = llvm::FixedVectorType::get(
+ llvm::Type::getHalfTy(getLLVMContext()), 2);
+ IID = Intrinsic::amdgcn_ds_fadd;
+ break;
}
llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
llvm::Value *Val = EmitScalarExpr(E->getArg(1));
@@ -17319,40 +17648,33 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
- unsigned BuiltinAtomicOp;
- llvm::Type *ResultType = ConvertType(E->getType());
-
+ llvm::AtomicRMWInst::BinOp BinOp;
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
- BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
+ BinOp = llvm::AtomicRMWInst::UIncWrap;
break;
case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
- BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
+ BinOp = llvm::AtomicRMWInst::UDecWrap;
break;
}
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
- llvm::Function *F =
- CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
-
ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
EmitScalarExpr(E->getArg(3)), AO, SSID);
- // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expects ordering and
- // scope as unsigned values
- Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
- Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
-
QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
bool Volatile =
- PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
- Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
- return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
+ llvm::AtomicRMWInst *RMW =
+ Builder.CreateAtomicRMW(BinOp, Ptr, Val, AO, SSID);
+ if (Volatile)
+ RMW->setVolatile(true);
+ return RMW;
}
case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtn:
case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtnl: {
@@ -18071,27 +18393,76 @@ static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
#undef MMA_VARIANTS_B1_XOR
}
+static Value *MakeLdgLdu(unsigned IntrinsicID, CodeGenFunction &CGF,
+ const CallExpr *E) {
+ Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
+ QualType ArgType = E->getArg(0)->getType();
+ clang::CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(ArgType);
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(ArgType->getPointeeType());
+ return CGF.Builder.CreateCall(
+ CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
+ {Ptr, ConstantInt::get(CGF.Builder.getInt32Ty(), Align.getQuantity())});
+}
+
+static Value *MakeScopedAtomic(unsigned IntrinsicID, CodeGenFunction &CGF,
+ const CallExpr *E) {
+ Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
+ llvm::Type *ElemTy =
+ CGF.ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
+ return CGF.Builder.CreateCall(
+ CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
+ {Ptr, CGF.EmitScalarExpr(E->getArg(1))});
+}
+
+static Value *MakeCpAsync(unsigned IntrinsicID, unsigned IntrinsicIDS,
+ CodeGenFunction &CGF, const CallExpr *E,
+ int SrcSize) {
+ return E->getNumArgs() == 3
+ ? CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicIDS),
+ {CGF.EmitScalarExpr(E->getArg(0)),
+ CGF.EmitScalarExpr(E->getArg(1)),
+ CGF.EmitScalarExpr(E->getArg(2))})
+ : CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicID),
+ {CGF.EmitScalarExpr(E->getArg(0)),
+ CGF.EmitScalarExpr(E->getArg(1))});
+}
+
+static Value *MakeHalfType(unsigned IntrinsicID, unsigned BuiltinID,
+ const CallExpr *E, CodeGenFunction &CGF) {
+ auto &C = CGF.CGM.getContext();
+ if (!(C.getLangOpts().NativeHalfType ||
+ !C.getTargetInfo().useFP16ConversionIntrinsics())) {
+ CGF.CGM.Error(E->getExprLoc(), C.BuiltinInfo.getName(BuiltinID).str() +
+ " requires native half type support.");
+ return nullptr;
+ }
+
+ if (IntrinsicID == Intrinsic::nvvm_ldg_global_f ||
+ IntrinsicID == Intrinsic::nvvm_ldu_global_f)
+ return MakeLdgLdu(IntrinsicID, CGF, E);
+
+ SmallVector<Value *, 16> Args;
+ auto *F = CGF.CGM.getIntrinsic(IntrinsicID);
+ auto *FTy = F->getFunctionType();
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ C.GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ assert((ICEArguments & (1 << i)) == 0);
+ auto *ArgValue = CGF.EmitScalarExpr(E->getArg(i));
+ auto *PTy = FTy->getParamType(i);
+ if (PTy != ArgValue->getType())
+ ArgValue = CGF.Builder.CreateBitCast(ArgValue, PTy);
+ Args.push_back(ArgValue);
+ }
+
+ return CGF.Builder.CreateCall(F, Args);
+}
} // namespace
-Value *
-CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
- auto MakeLdg = [&](unsigned IntrinsicID) {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- QualType ArgType = E->getArg(0)->getType();
- clang::CharUnits Align = CGM.getNaturalPointeeTypeAlignment(ArgType);
- llvm::Type *ElemTy = ConvertTypeForMem(ArgType->getPointeeType());
- return Builder.CreateCall(
- CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
- {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
- };
- auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- llvm::Type *ElemTy =
- ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
- return Builder.CreateCall(
- CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
- {Ptr, EmitScalarExpr(E->getArg(1))});
- };
+Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
switch (BuiltinID) {
case NVPTX::BI__nvvm_atom_add_gen_i:
case NVPTX::BI__nvvm_atom_add_gen_l:
@@ -18175,8 +18546,11 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
}
case NVPTX::BI__nvvm_ldg_c:
+ case NVPTX::BI__nvvm_ldg_sc:
case NVPTX::BI__nvvm_ldg_c2:
+ case NVPTX::BI__nvvm_ldg_sc2:
case NVPTX::BI__nvvm_ldg_c4:
+ case NVPTX::BI__nvvm_ldg_sc4:
case NVPTX::BI__nvvm_ldg_s:
case NVPTX::BI__nvvm_ldg_s2:
case NVPTX::BI__nvvm_ldg_s4:
@@ -18184,6 +18558,7 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
case NVPTX::BI__nvvm_ldg_i2:
case NVPTX::BI__nvvm_ldg_i4:
case NVPTX::BI__nvvm_ldg_l:
+ case NVPTX::BI__nvvm_ldg_l2:
case NVPTX::BI__nvvm_ldg_ll:
case NVPTX::BI__nvvm_ldg_ll2:
case NVPTX::BI__nvvm_ldg_uc:
@@ -18196,101 +18571,139 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
case NVPTX::BI__nvvm_ldg_ui2:
case NVPTX::BI__nvvm_ldg_ui4:
case NVPTX::BI__nvvm_ldg_ul:
+ case NVPTX::BI__nvvm_ldg_ul2:
case NVPTX::BI__nvvm_ldg_ull:
case NVPTX::BI__nvvm_ldg_ull2:
// PTX Interoperability section 2.2: "For a vector with an even number of
// elements, its alignment is set to number of elements times the alignment
// of its member: n*alignof(t)."
- return MakeLdg(Intrinsic::nvvm_ldg_global_i);
+ return MakeLdgLdu(Intrinsic::nvvm_ldg_global_i, *this, E);
case NVPTX::BI__nvvm_ldg_f:
case NVPTX::BI__nvvm_ldg_f2:
case NVPTX::BI__nvvm_ldg_f4:
case NVPTX::BI__nvvm_ldg_d:
case NVPTX::BI__nvvm_ldg_d2:
- return MakeLdg(Intrinsic::nvvm_ldg_global_f);
+ return MakeLdgLdu(Intrinsic::nvvm_ldg_global_f, *this, E);
+
+ case NVPTX::BI__nvvm_ldu_c:
+ case NVPTX::BI__nvvm_ldu_sc:
+ case NVPTX::BI__nvvm_ldu_c2:
+ case NVPTX::BI__nvvm_ldu_sc2:
+ case NVPTX::BI__nvvm_ldu_c4:
+ case NVPTX::BI__nvvm_ldu_sc4:
+ case NVPTX::BI__nvvm_ldu_s:
+ case NVPTX::BI__nvvm_ldu_s2:
+ case NVPTX::BI__nvvm_ldu_s4:
+ case NVPTX::BI__nvvm_ldu_i:
+ case NVPTX::BI__nvvm_ldu_i2:
+ case NVPTX::BI__nvvm_ldu_i4:
+ case NVPTX::BI__nvvm_ldu_l:
+ case NVPTX::BI__nvvm_ldu_l2:
+ case NVPTX::BI__nvvm_ldu_ll:
+ case NVPTX::BI__nvvm_ldu_ll2:
+ case NVPTX::BI__nvvm_ldu_uc:
+ case NVPTX::BI__nvvm_ldu_uc2:
+ case NVPTX::BI__nvvm_ldu_uc4:
+ case NVPTX::BI__nvvm_ldu_us:
+ case NVPTX::BI__nvvm_ldu_us2:
+ case NVPTX::BI__nvvm_ldu_us4:
+ case NVPTX::BI__nvvm_ldu_ui:
+ case NVPTX::BI__nvvm_ldu_ui2:
+ case NVPTX::BI__nvvm_ldu_ui4:
+ case NVPTX::BI__nvvm_ldu_ul:
+ case NVPTX::BI__nvvm_ldu_ul2:
+ case NVPTX::BI__nvvm_ldu_ull:
+ case NVPTX::BI__nvvm_ldu_ull2:
+ return MakeLdgLdu(Intrinsic::nvvm_ldu_global_i, *this, E);
+ case NVPTX::BI__nvvm_ldu_f:
+ case NVPTX::BI__nvvm_ldu_f2:
+ case NVPTX::BI__nvvm_ldu_f4:
+ case NVPTX::BI__nvvm_ldu_d:
+ case NVPTX::BI__nvvm_ldu_d2:
+ return MakeLdgLdu(Intrinsic::nvvm_ldu_global_f, *this, E);
case NVPTX::BI__nvvm_atom_cta_add_gen_i:
case NVPTX::BI__nvvm_atom_cta_add_gen_l:
case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_add_gen_i:
case NVPTX::BI__nvvm_atom_sys_add_gen_l:
case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_add_gen_f:
case NVPTX::BI__nvvm_atom_cta_add_gen_d:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_add_gen_f:
case NVPTX::BI__nvvm_atom_sys_add_gen_d:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_max_gen_i:
case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
case NVPTX::BI__nvvm_atom_cta_max_gen_l:
case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_max_gen_i:
case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
case NVPTX::BI__nvvm_atom_sys_max_gen_l:
case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_min_gen_i:
case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
case NVPTX::BI__nvvm_atom_cta_min_gen_l:
case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_min_gen_i:
case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
case NVPTX::BI__nvvm_atom_sys_min_gen_l:
case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_and_gen_i:
case NVPTX::BI__nvvm_atom_cta_and_gen_l:
case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_and_gen_i:
case NVPTX::BI__nvvm_atom_sys_and_gen_l:
case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_or_gen_i:
case NVPTX::BI__nvvm_atom_cta_or_gen_l:
case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_or_gen_i:
case NVPTX::BI__nvvm_atom_sys_or_gen_l:
case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
@@ -18555,6 +18968,243 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
CharUnits::fromQuantity(4));
return Result;
}
+ // The following builtins require half type support
+ case NVPTX::BI__nvvm_ex2_approx_f16:
+ return MakeHalfType(Intrinsic::nvvm_ex2_approx_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ex2_approx_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_ex2_approx_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ff2f16x2_rn:
+ return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ff2f16x2_rn_relu:
+ return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn_relu, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ff2f16x2_rz:
+ return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ff2f16x2_rz_relu:
+ return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz_relu, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_f16:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_ftz_f16:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_ftz_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16x2, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16x2, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fma_rn_relu_f16:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_relu_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_sat_f16:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_sat_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_ftz_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_ftz_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_ftz_nan_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_ftz_nan_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16x2, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16, BuiltinID,
+ E, *this);
+ case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16x2,
+ BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16x2, BuiltinID,
+ E, *this);
+ case NVPTX::BI__nvvm_fmax_nan_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_nan_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16x2, BuiltinID,
+ E, *this);
+ case NVPTX::BI__nvvm_fmax_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmax_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16x2, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmin_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_ftz_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_ftz_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_ftz_nan_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_ftz_nan_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16x2, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16, BuiltinID,
+ E, *this);
+ case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16x2,
+ BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16x2, BuiltinID,
+ E, *this);
+ case NVPTX::BI__nvvm_fmin_nan_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_nan_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16x2, BuiltinID,
+ E, *this);
+ case NVPTX::BI__nvvm_fmin_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmin_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16x2, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_ldg_h:
+ return MakeHalfType(Intrinsic::nvvm_ldg_global_f, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ldg_h2:
+ return MakeHalfType(Intrinsic::nvvm_ldg_global_f, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ldu_h:
+ return MakeHalfType(Intrinsic::nvvm_ldu_global_f, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ldu_h2: {
+ return MakeHalfType(Intrinsic::nvvm_ldu_global_f, BuiltinID, E, *this);
+ }
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
+ return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_4,
+ Intrinsic::nvvm_cp_async_ca_shared_global_4_s, *this, E,
+ 4);
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
+ return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_8,
+ Intrinsic::nvvm_cp_async_ca_shared_global_8_s, *this, E,
+ 8);
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
+ return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_16,
+ Intrinsic::nvvm_cp_async_ca_shared_global_16_s, *this, E,
+ 16);
+ case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
+ return MakeCpAsync(Intrinsic::nvvm_cp_async_cg_shared_global_16,
+ Intrinsic::nvvm_cp_async_cg_shared_global_16_s, *this, E,
+ 16);
+ case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_x:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_x));
+ case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_y:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_y));
+ case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_z:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_z));
+ case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_w:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_w));
+ case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_x:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_x));
+ case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_y:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_y));
+ case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_z:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_z));
+ case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_w:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_w));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_x:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_x));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_y:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_y));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_z:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_z));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_w:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_w));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_x:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_x));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_y:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_y));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_z:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_z));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_w:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_w));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctarank:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctarank));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctarank:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctarank));
+ case NVPTX::BI__nvvm_is_explicit_cluster:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_is_explicit_cluster));
+ case NVPTX::BI__nvvm_isspacep_shared_cluster:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_isspacep_shared_cluster),
+ EmitScalarExpr(E->getArg(0)));
+ case NVPTX::BI__nvvm_mapa:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_mapa),
+ {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
+ case NVPTX::BI__nvvm_mapa_shared_cluster:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_mapa_shared_cluster),
+ {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
+ case NVPTX::BI__nvvm_getctarank:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_getctarank),
+ EmitScalarExpr(E->getArg(0)));
+ case NVPTX::BI__nvvm_getctarank_shared_cluster:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_getctarank_shared_cluster),
+ EmitScalarExpr(E->getArg(0)));
+ case NVPTX::BI__nvvm_barrier_cluster_arrive:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive));
+ case NVPTX::BI__nvvm_barrier_cluster_arrive_relaxed:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive_relaxed));
+ case NVPTX::BI__nvvm_barrier_cluster_wait:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_wait));
+ case NVPTX::BI__nvvm_fence_sc_cluster:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_fence_sc_cluster));
default:
return nullptr;
}
@@ -18633,15 +19283,14 @@ RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
// The result must point to the same underlying allocation. This means we
// can use an inbounds GEP to enable better optimization.
- Value *Base = EmitCastToVoidPtr(Args.Src);
if (getLangOpts().isSignedOverflowDefined())
- Result = Builder.CreateGEP(Int8Ty, Base, Difference, "aligned_result");
+ Result =
+ Builder.CreateGEP(Int8Ty, Args.Src, Difference, "aligned_result");
else
- Result = EmitCheckedInBoundsGEP(Int8Ty, Base, Difference,
+ Result = EmitCheckedInBoundsGEP(Int8Ty, Args.Src, Difference,
/*SignedIndices=*/true,
/*isSubtraction=*/!AlignUp,
E->getExprLoc(), "aligned_result");
- Result = Builder.CreatePointerCast(Result, Args.SrcType);
// Emit an alignment assumption to ensure that the new alignment is
// propagated to loads/stores, etc.
emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
@@ -18823,6 +19472,14 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
return Builder.CreateCall(Callee, Value);
}
+ case WebAssembly::BI__builtin_wasm_ref_null_extern: {
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_null_extern);
+ return Builder.CreateCall(Callee);
+ }
+ case WebAssembly::BI__builtin_wasm_ref_null_func: {
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_null_func);
+ return Builder.CreateCall(Callee);
+ }
case WebAssembly::BI__builtin_wasm_swizzle_i8x16: {
Value *Src = EmitScalarExpr(E->getArg(0));
Value *Indices = EmitScalarExpr(E->getArg(1));
@@ -19188,6 +19845,88 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_bf16x8_add_f32);
return Builder.CreateCall(Callee, {LHS, RHS, Acc});
}
+ case WebAssembly::BI__builtin_wasm_table_get: {
+ assert(E->getArg(0)->getType()->isArrayType());
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Index = EmitScalarExpr(E->getArg(1));
+ Function *Callee;
+ if (E->getType().isWebAssemblyExternrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_get_externref);
+ else if (E->getType().isWebAssemblyFuncrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_get_funcref);
+ else
+ llvm_unreachable(
+ "Unexpected reference type for __builtin_wasm_table_get");
+ return Builder.CreateCall(Callee, {Table, Index});
+ }
+ case WebAssembly::BI__builtin_wasm_table_set: {
+ assert(E->getArg(0)->getType()->isArrayType());
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Index = EmitScalarExpr(E->getArg(1));
+ Value *Val = EmitScalarExpr(E->getArg(2));
+ Function *Callee;
+ if (E->getArg(2)->getType().isWebAssemblyExternrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_set_externref);
+ else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_set_funcref);
+ else
+ llvm_unreachable(
+ "Unexpected reference type for __builtin_wasm_table_set");
+ return Builder.CreateCall(Callee, {Table, Index, Val});
+ }
+ case WebAssembly::BI__builtin_wasm_table_size: {
+ assert(E->getArg(0)->getType()->isArrayType());
+ Value *Value = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_table_size);
+ return Builder.CreateCall(Callee, Value);
+ }
+ case WebAssembly::BI__builtin_wasm_table_grow: {
+ assert(E->getArg(0)->getType()->isArrayType());
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Val = EmitScalarExpr(E->getArg(1));
+ Value *NElems = EmitScalarExpr(E->getArg(2));
+
+ Function *Callee;
+ if (E->getArg(1)->getType().isWebAssemblyExternrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_grow_externref);
+ else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_funcref);
+ else
+ llvm_unreachable(
+ "Unexpected reference type for __builtin_wasm_table_grow");
+
+ return Builder.CreateCall(Callee, {Table, Val, NElems});
+ }
+ case WebAssembly::BI__builtin_wasm_table_fill: {
+ assert(E->getArg(0)->getType()->isArrayType());
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Index = EmitScalarExpr(E->getArg(1));
+ Value *Val = EmitScalarExpr(E->getArg(2));
+ Value *NElems = EmitScalarExpr(E->getArg(3));
+
+ Function *Callee;
+ if (E->getArg(2)->getType().isWebAssemblyExternrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_externref);
+ else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_funcref);
+ else
+ llvm_unreachable(
+ "Unexpected reference type for __builtin_wasm_table_fill");
+
+ return Builder.CreateCall(Callee, {Table, Index, Val, NElems});
+ }
+ case WebAssembly::BI__builtin_wasm_table_copy: {
+ assert(E->getArg(0)->getType()->isArrayType());
+ Value *TableX = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *TableY = EmitArrayToPointerDecay(E->getArg(1)).getPointer();
+ Value *DstIdx = EmitScalarExpr(E->getArg(2));
+ Value *SrcIdx = EmitScalarExpr(E->getArg(3));
+ Value *NElems = EmitScalarExpr(E->getArg(4));
+
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_table_copy);
+
+ return Builder.CreateCall(Callee, {TableX, TableY, SrcIdx, DstIdx, NElems});
+ }
default:
return nullptr;
}
@@ -19278,8 +20017,7 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
// generate one (NewBase). The new base address needs to be stored.
llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
: Result;
- llvm::Value *LV = Builder.CreateBitCast(
- EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo());
+ llvm::Value *LV = EmitScalarExpr(E->getArg(0));
Address Dest = EmitPointerWithAlignment(E->getArg(0));
llvm::Value *RetVal =
Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
@@ -19320,9 +20058,7 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
// to be handled with stores of respective destination type.
DestVal = Builder.CreateTrunc(DestVal, DestTy);
- llvm::Value *DestForStore =
- Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
- Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
+ Builder.CreateAlignedStore(DestVal, DestAddress, DestAddr.getAlignment());
// The updated value of the base pointer is returned.
return Builder.CreateExtractValue(Result, 1);
};
@@ -19350,8 +20086,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
// Get the type from the 0-th argument.
llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
- Address PredAddr = Builder.CreateElementBitCast(
- EmitPointerWithAlignment(E->getArg(2)), VecType);
+ Address PredAddr =
+ EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType);
llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
{EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
@@ -19370,8 +20106,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
case Hexagon::BI__builtin_HEXAGON_V6_vsubcarryo_128B: {
// Get the type from the 0-th argument.
llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
- Address PredAddr = Builder.CreateElementBitCast(
- EmitPointerWithAlignment(E->getArg(2)), VecType);
+ Address PredAddr =
+ EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType);
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
{EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
@@ -19465,7 +20201,20 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
assert(Error == ASTContext::GE_None && "Unexpected error");
}
+ if (BuiltinID == RISCV::BI__builtin_riscv_ntl_load)
+ ICEArguments |= (1 << 1);
+ if (BuiltinID == RISCV::BI__builtin_riscv_ntl_store)
+ ICEArguments |= (1 << 2);
+
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+ // Handle aggregate argument, namely RVV tuple types in segment load/store
+ if (hasAggregateEvaluationKind(E->getArg(i)->getType())) {
+ LValue L = EmitAggExprToLValue(E->getArg(i));
+ llvm::Value *AggValue = Builder.CreateLoad(L.getAddress(*this));
+ Ops.push_back(AggValue);
+ continue;
+ }
+
// If this is a normal argument, just emit it as a scalar.
if ((ICEArguments & (1 << i)) == 0) {
Ops.push_back(EmitScalarExpr(E->getArg(i)));
@@ -19497,12 +20246,18 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_clz_64:
case RISCV::BI__builtin_riscv_ctz_32:
case RISCV::BI__builtin_riscv_ctz_64:
- case RISCV::BI__builtin_riscv_clmul:
- case RISCV::BI__builtin_riscv_clmulh:
- case RISCV::BI__builtin_riscv_clmulr:
- case RISCV::BI__builtin_riscv_xperm4:
- case RISCV::BI__builtin_riscv_xperm8:
- case RISCV::BI__builtin_riscv_brev8:
+ case RISCV::BI__builtin_riscv_clmul_32:
+ case RISCV::BI__builtin_riscv_clmul_64:
+ case RISCV::BI__builtin_riscv_clmulh_32:
+ case RISCV::BI__builtin_riscv_clmulh_64:
+ case RISCV::BI__builtin_riscv_clmulr_32:
+ case RISCV::BI__builtin_riscv_clmulr_64:
+ case RISCV::BI__builtin_riscv_xperm4_32:
+ case RISCV::BI__builtin_riscv_xperm4_64:
+ case RISCV::BI__builtin_riscv_xperm8_32:
+ case RISCV::BI__builtin_riscv_xperm8_64:
+ case RISCV::BI__builtin_riscv_brev8_32:
+ case RISCV::BI__builtin_riscv_brev8_64:
case RISCV::BI__builtin_riscv_zip_32:
case RISCV::BI__builtin_riscv_unzip_32: {
switch (BuiltinID) {
@@ -19515,35 +20270,49 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_clz_32:
case RISCV::BI__builtin_riscv_clz_64: {
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
+ Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return Result;
}
case RISCV::BI__builtin_riscv_ctz_32:
case RISCV::BI__builtin_riscv_ctz_64: {
Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
+ Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return Result;
}
// Zbc
- case RISCV::BI__builtin_riscv_clmul:
+ case RISCV::BI__builtin_riscv_clmul_32:
+ case RISCV::BI__builtin_riscv_clmul_64:
ID = Intrinsic::riscv_clmul;
break;
- case RISCV::BI__builtin_riscv_clmulh:
+ case RISCV::BI__builtin_riscv_clmulh_32:
+ case RISCV::BI__builtin_riscv_clmulh_64:
ID = Intrinsic::riscv_clmulh;
break;
- case RISCV::BI__builtin_riscv_clmulr:
+ case RISCV::BI__builtin_riscv_clmulr_32:
+ case RISCV::BI__builtin_riscv_clmulr_64:
ID = Intrinsic::riscv_clmulr;
break;
// Zbkx
- case RISCV::BI__builtin_riscv_xperm8:
+ case RISCV::BI__builtin_riscv_xperm8_32:
+ case RISCV::BI__builtin_riscv_xperm8_64:
ID = Intrinsic::riscv_xperm8;
break;
- case RISCV::BI__builtin_riscv_xperm4:
+ case RISCV::BI__builtin_riscv_xperm4_32:
+ case RISCV::BI__builtin_riscv_xperm4_64:
ID = Intrinsic::riscv_xperm4;
break;
// Zbkb
- case RISCV::BI__builtin_riscv_brev8:
+ case RISCV::BI__builtin_riscv_brev8_32:
+ case RISCV::BI__builtin_riscv_brev8_64:
ID = Intrinsic::riscv_brev8;
break;
case RISCV::BI__builtin_riscv_zip_32:
@@ -19560,115 +20329,88 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
// Zk builtins
- // Zknd
- case RISCV::BI__builtin_riscv_aes32dsi_32:
- ID = Intrinsic::riscv_aes32dsi;
- break;
- case RISCV::BI__builtin_riscv_aes32dsmi_32:
- ID = Intrinsic::riscv_aes32dsmi;
- break;
- case RISCV::BI__builtin_riscv_aes64ds_64:
- ID = Intrinsic::riscv_aes64ds;
- break;
- case RISCV::BI__builtin_riscv_aes64dsm_64:
- ID = Intrinsic::riscv_aes64dsm;
- break;
- case RISCV::BI__builtin_riscv_aes64im_64:
- ID = Intrinsic::riscv_aes64im;
- break;
-
- // Zkne
- case RISCV::BI__builtin_riscv_aes32esi_32:
- ID = Intrinsic::riscv_aes32esi;
- break;
- case RISCV::BI__builtin_riscv_aes32esmi_32:
- ID = Intrinsic::riscv_aes32esmi;
- break;
- case RISCV::BI__builtin_riscv_aes64es_64:
- ID = Intrinsic::riscv_aes64es;
- break;
- case RISCV::BI__builtin_riscv_aes64esm_64:
- ID = Intrinsic::riscv_aes64esm;
- break;
-
- // Zknd & Zkne
- case RISCV::BI__builtin_riscv_aes64ks1i_64:
- ID = Intrinsic::riscv_aes64ks1i;
- break;
- case RISCV::BI__builtin_riscv_aes64ks2_64:
- ID = Intrinsic::riscv_aes64ks2;
- break;
-
// Zknh
case RISCV::BI__builtin_riscv_sha256sig0:
ID = Intrinsic::riscv_sha256sig0;
- IntrinsicTypes = {ResultType};
break;
case RISCV::BI__builtin_riscv_sha256sig1:
ID = Intrinsic::riscv_sha256sig1;
- IntrinsicTypes = {ResultType};
break;
case RISCV::BI__builtin_riscv_sha256sum0:
ID = Intrinsic::riscv_sha256sum0;
- IntrinsicTypes = {ResultType};
break;
case RISCV::BI__builtin_riscv_sha256sum1:
ID = Intrinsic::riscv_sha256sum1;
- IntrinsicTypes = {ResultType};
- break;
- case RISCV::BI__builtin_riscv_sha512sig0_64:
- ID = Intrinsic::riscv_sha512sig0;
- break;
- case RISCV::BI__builtin_riscv_sha512sig0h_32:
- ID = Intrinsic::riscv_sha512sig0h;
- break;
- case RISCV::BI__builtin_riscv_sha512sig0l_32:
- ID = Intrinsic::riscv_sha512sig0l;
- break;
- case RISCV::BI__builtin_riscv_sha512sig1_64:
- ID = Intrinsic::riscv_sha512sig1;
- break;
- case RISCV::BI__builtin_riscv_sha512sig1h_32:
- ID = Intrinsic::riscv_sha512sig1h;
- break;
- case RISCV::BI__builtin_riscv_sha512sig1l_32:
- ID = Intrinsic::riscv_sha512sig1l;
- break;
- case RISCV::BI__builtin_riscv_sha512sum0_64:
- ID = Intrinsic::riscv_sha512sum0;
- break;
- case RISCV::BI__builtin_riscv_sha512sum0r_32:
- ID = Intrinsic::riscv_sha512sum0r;
- break;
- case RISCV::BI__builtin_riscv_sha512sum1_64:
- ID = Intrinsic::riscv_sha512sum1;
- break;
- case RISCV::BI__builtin_riscv_sha512sum1r_32:
- ID = Intrinsic::riscv_sha512sum1r;
break;
// Zksed
case RISCV::BI__builtin_riscv_sm4ks:
ID = Intrinsic::riscv_sm4ks;
- IntrinsicTypes = {ResultType};
break;
case RISCV::BI__builtin_riscv_sm4ed:
ID = Intrinsic::riscv_sm4ed;
- IntrinsicTypes = {ResultType};
break;
// Zksh
case RISCV::BI__builtin_riscv_sm3p0:
ID = Intrinsic::riscv_sm3p0;
- IntrinsicTypes = {ResultType};
break;
case RISCV::BI__builtin_riscv_sm3p1:
ID = Intrinsic::riscv_sm3p1;
- IntrinsicTypes = {ResultType};
break;
+ // Zihintntl
+ case RISCV::BI__builtin_riscv_ntl_load: {
+ llvm::Type *ResTy = ConvertType(E->getType());
+ ConstantInt *Mode = cast<ConstantInt>(Ops[1]);
+
+ llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
+ getLLVMContext(),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(Mode->getZExtValue())));
+ llvm::MDNode *NontemporalNode = llvm::MDNode::get(
+ getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
+
+ int Width;
+ if(ResTy->isScalableTy()) {
+ const ScalableVectorType *SVTy = cast<ScalableVectorType>(ResTy);
+ llvm::Type *ScalarTy = ResTy->getScalarType();
+ Width = ScalarTy->getPrimitiveSizeInBits() *
+ SVTy->getElementCount().getKnownMinValue();
+ } else
+ Width = ResTy->getPrimitiveSizeInBits();
+ LoadInst *Load = Builder.CreateLoad(
+ Address(Ops[0], ResTy, CharUnits::fromQuantity(Width / 8)));
+
+ Load->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
+ Load->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
+ RISCVDomainNode);
+
+ return Load;
+ }
+ case RISCV::BI__builtin_riscv_ntl_store: {
+ ConstantInt *Mode = cast<ConstantInt>(Ops[2]);
+
+ llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
+ getLLVMContext(),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(Mode->getZExtValue())));
+ llvm::MDNode *NontemporalNode = llvm::MDNode::get(
+ getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
+
+ Value *BC = Builder.CreateBitCast(
+ Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType()), "cast");
+
+ StoreInst *Store = Builder.CreateDefaultAlignedStore(Ops[1], BC);
+ Store->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
+ Store->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
+ RISCVDomainNode);
+
+ return Store;
+ }
+
// Vector builtins are handled from here.
#include "clang/Basic/riscv_vector_builtin_cg.inc"
+ // SiFive Vector builtins are handled from here.
+#include "clang/Basic/riscv_sifive_vector_builtin_cg.inc"
}
assert(ID != Intrinsic::not_intrinsic);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
index bb887df3e4e0..08769c98dc29 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
@@ -24,6 +24,7 @@
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/ReplaceConstant.h"
#include "llvm/Support/Format.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang;
using namespace CodeGen;
@@ -236,7 +237,7 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
- VoidPtrPtrTy = VoidPtrTy->getPointerTo();
+ VoidPtrPtrTy = llvm::PointerType::getUnqual(CGM.getLLVMContext());
}
llvm::FunctionCallee CGNVCUDARuntime::getSetupArgumentFn() const {
@@ -267,10 +268,8 @@ llvm::FunctionType *CGNVCUDARuntime::getCallbackFnTy() const {
}
llvm::FunctionType *CGNVCUDARuntime::getRegisterLinkedBinaryFnTy() const {
- auto *CallbackFnTy = getCallbackFnTy();
- auto *RegisterGlobalsFnTy = getRegisterGlobalsFnTy();
- llvm::Type *Params[] = {RegisterGlobalsFnTy->getPointerTo(), VoidPtrTy,
- VoidPtrTy, CallbackFnTy->getPointerTo()};
+ llvm::Type *Params[] = {llvm::PointerType::getUnqual(Context), VoidPtrTy,
+ VoidPtrTy, llvm::PointerType::getUnqual(Context)};
return llvm::FunctionType::get(VoidTy, Params, false);
}
@@ -359,9 +358,13 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl();
DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
std::string KernelLaunchAPI = "LaunchKernel";
- if (CGF.getLangOpts().HIP && CGF.getLangOpts().GPUDefaultStream ==
- LangOptions::GPUDefaultStreamKind::PerThread)
- KernelLaunchAPI = KernelLaunchAPI + "_spt";
+ if (CGF.getLangOpts().GPUDefaultStream ==
+ LangOptions::GPUDefaultStreamKind::PerThread) {
+ if (CGF.getLangOpts().HIP)
+ KernelLaunchAPI = KernelLaunchAPI + "_spt";
+ else if (CGF.getLangOpts().CUDA)
+ KernelLaunchAPI = KernelLaunchAPI + "_ptsz";
+ }
auto LaunchKernelName = addPrefixToName(KernelLaunchAPI);
IdentifierInfo &cudaLaunchKernelII =
CGM.getContext().Idents.get(LaunchKernelName);
@@ -536,8 +539,11 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
// void __cudaRegisterFunction(void **, const char *, char *, const char *,
// int, uint3*, uint3*, dim3*, dim3*, int*)
llvm::Type *RegisterFuncParams[] = {
- VoidPtrPtrTy, CharPtrTy, CharPtrTy, CharPtrTy, IntTy,
- VoidPtrTy, VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()};
+ VoidPtrPtrTy, CharPtrTy,
+ CharPtrTy, CharPtrTy,
+ IntTy, VoidPtrTy,
+ VoidPtrTy, VoidPtrTy,
+ VoidPtrTy, llvm::PointerType::getUnqual(Context)};
llvm::FunctionCallee RegisterFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, RegisterFuncParams, false),
addUnderscoredPrefixToName("RegisterFunction"));
@@ -560,7 +566,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
NullPtr,
NullPtr,
NullPtr,
- llvm::ConstantPointerNull::get(IntTy->getPointerTo())};
+ llvm::ConstantPointerNull::get(llvm::PointerType::getUnqual(Context))};
Builder.CreateCall(RegisterFunc, Args);
}
@@ -721,8 +727,9 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// handle so CUDA runtime can figure out what to call on the GPU side.
std::unique_ptr<llvm::MemoryBuffer> CudaGpuBinary = nullptr;
if (!CudaGpuBinaryFileName.empty()) {
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> CudaGpuBinaryOrErr =
- llvm::MemoryBuffer::getFileOrSTDIN(CudaGpuBinaryFileName);
+ auto VFS = CGM.getFileSystem();
+ auto CudaGpuBinaryOrErr =
+ VFS->getBufferForFile(CudaGpuBinaryFileName, -1, false);
if (std::error_code EC = CudaGpuBinaryOrErr.getError()) {
CGM.getDiags().Report(diag::err_cannot_open_file)
<< CudaGpuBinaryFileName << EC.message();
@@ -1195,8 +1202,23 @@ llvm::Function *CGNVCUDARuntime::finalizeModule() {
llvm::GlobalValue *CGNVCUDARuntime::getKernelHandle(llvm::Function *F,
GlobalDecl GD) {
auto Loc = KernelHandles.find(F->getName());
- if (Loc != KernelHandles.end())
- return Loc->second;
+ if (Loc != KernelHandles.end()) {
+ auto OldHandle = Loc->second;
+ if (KernelStubs[OldHandle] == F)
+ return OldHandle;
+
+ // We've found the function name, but F itself has changed, so we need to
+ // update the references.
+ if (CGM.getLangOpts().HIP) {
+ // For HIP compilation the handle itself does not change, so we only need
+ // to update the Stub value.
+ KernelStubs[OldHandle] = F;
+ return OldHandle;
+ }
+ // For non-HIP compilation, erase the old Stub and fall-through to creating
+ // new entries.
+ KernelStubs.erase(OldHandle);
+ }
if (!CGM.getLangOpts().HIP) {
KernelHandles[F->getName()] = F;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp
index 86f548191d65..110e21f7cb6d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp
@@ -131,17 +131,10 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
if (Replacements.count(MangledName))
return false;
- // Derive the type for the alias.
llvm::Type *AliasValueType = getTypes().GetFunctionType(AliasDecl);
- llvm::PointerType *AliasType = AliasValueType->getPointerTo();
- // Find the referent. Some aliases might require a bitcast, in
- // which case the caller is responsible for ensuring the soundness
- // of these semantics.
- auto *Ref = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
- llvm::Constant *Aliasee = Ref;
- if (Ref->getType() != AliasType)
- Aliasee = llvm::ConstantExpr::getBitCast(Ref, AliasType);
+ // Find the referent.
+ auto *Aliasee = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
// Instead of creating as alias to a linkonce_odr, replace all of the uses
// of the aliasee.
@@ -170,7 +163,7 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
// If we don't have a definition for the destructor yet or the definition is
// avaialable_externally, don't emit an alias. We can't emit aliases to
// declarations; that's just not how aliases work.
- if (Ref->isDeclarationForLinker())
+ if (Aliasee->isDeclarationForLinker())
return true;
// Don't create an alias to a linker weak symbol. This avoids producing
@@ -189,7 +182,8 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
// Switch any previous uses to the alias.
if (Entry) {
- assert(Entry->getType() == AliasType &&
+ assert(Entry->getValueType() == AliasValueType &&
+ Entry->getAddressSpace() == Alias->getAddressSpace() &&
"declaration exists with different type");
Alias->takeName(Entry);
Entry->replaceAllUsesWith(Alias);
@@ -252,8 +246,7 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
"No kext in Microsoft ABI");
CodeGenModule &CGM = CGF.CGM;
llvm::Value *VTable = CGM.getCXXABI().getAddrOfVTable(RD, CharUnits());
- Ty = Ty->getPointerTo();
- VTable = CGF.Builder.CreateBitCast(VTable, Ty->getPointerTo());
+ Ty = llvm::PointerType::getUnqual(CGM.getLLVMContext());
assert(VTable && "BuildVirtualCall = kext vtbl pointer is null");
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
const VTableLayout &VTLayout = CGM.getItaniumVTableContext().getVTableLayout(RD);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
index 42e6c916bed0..7b77dd7875bc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
@@ -46,11 +46,8 @@ CGCallee CGCXXABI::EmitLoadOfMemberFunctionPointer(
ThisPtrForCall = This.getPointer();
const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
- const auto *RD =
- cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
- CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
- llvm::Constant *FnPtr = llvm::Constant::getNullValue(FTy->getPointerTo());
+ llvm::Constant *FnPtr = llvm::Constant::getNullValue(
+ llvm::PointerType::getUnqual(CGM.getLLVMContext()));
return CGCallee::forDirect(FnPtr, FPT);
}
@@ -59,8 +56,8 @@ CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "loads of member pointers");
- llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())
- ->getPointerTo(Base.getAddressSpace());
+ llvm::Type *Ty =
+ llvm::PointerType::get(CGF.getLLVMContext(), Base.getAddressSpace());
return llvm::Constant::getNullValue(Ty);
}
@@ -250,7 +247,7 @@ void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, Address ptr,
llvm::Value *&numElements,
llvm::Value *&allocPtr, CharUnits &cookieSize) {
// Derive a char* in the same address space as the pointer.
- ptr = CGF.Builder.CreateElementBitCast(ptr, CGF.Int8Ty);
+ ptr = ptr.withElementType(CGF.Int8Ty);
// If we don't need an array cookie, bail out early.
if (!requiresArrayCookie(expr, eltTy)) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
index 78646996eac2..ad1ad08d0856 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
@@ -287,16 +287,26 @@ public:
virtual bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) = 0;
+ virtual bool shouldEmitExactDynamicCast(QualType DestRecordTy) = 0;
- virtual llvm::Value *
- EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
- QualType SrcRecordTy, QualType DestTy,
- QualType DestRecordTy, llvm::BasicBlock *CastEnd) = 0;
+ virtual llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy,
+ QualType DestTy,
+ QualType DestRecordTy,
+ llvm::BasicBlock *CastEnd) = 0;
- virtual llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF,
+ virtual llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF,
Address Value,
- QualType SrcRecordTy,
- QualType DestTy) = 0;
+ QualType SrcRecordTy) = 0;
+
+ /// Emit a dynamic_cast from SrcRecordTy to DestRecordTy. The cast fails if
+ /// the dynamic type of Value is not exactly DestRecordTy.
+ virtual llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy,
+ QualType DestTy,
+ QualType DestRecordTy,
+ llvm::BasicBlock *CastSuccess,
+ llvm::BasicBlock *CastFail) = 0;
virtual bool EmitBadCastCall(CodeGenFunction &CGF) = 0;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
index ee5b76ab2120..bd272e016e92 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
@@ -25,13 +25,13 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
-#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
+#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
@@ -1286,7 +1286,7 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
//
// FIXME: Assert that we aren't truncating non-padding bits when have access
// to that information.
- Src = CGF.Builder.CreateElementBitCast(Src, Ty);
+ Src = Src.withElementType(Ty);
return CGF.Builder.CreateLoad(Src);
}
@@ -1311,7 +1311,7 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
auto *UndefVec = llvm::UndefValue::get(ScalableDst);
auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
llvm::Value *Result = CGF.Builder.CreateInsertVector(
- ScalableDst, UndefVec, Load, Zero, "castScalableSve");
+ ScalableDst, UndefVec, Load, Zero, "cast.scalable");
if (NeedsBitcast)
Result = CGF.Builder.CreateBitCast(Result, OrigType);
return Result;
@@ -1396,7 +1396,7 @@ static void CreateCoercedStore(llvm::Value *Src,
if (isa<llvm::ScalableVectorType>(SrcTy) ||
isa<llvm::ScalableVectorType>(DstTy) ||
SrcSize.getFixedValue() <= DstSize.getFixedValue()) {
- Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
+ Dst = Dst.withElementType(SrcTy);
CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
} else {
// Otherwise do coercion through memory. This is stupid, but
@@ -1420,10 +1420,10 @@ static void CreateCoercedStore(llvm::Value *Src,
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
const ABIArgInfo &info) {
if (unsigned offset = info.getDirectOffset()) {
- addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
+ addr = addr.withElementType(CGF.Int8Ty);
addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
CharUnits::fromQuantity(offset));
- addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
+ addr = addr.withElementType(info.getCoerceToType());
}
return addr;
}
@@ -1638,9 +1638,8 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
if (retAI.getInAllocaSRet()) {
// sret things on win32 aren't void, they return the sret pointer.
QualType ret = FI.getReturnType();
- llvm::Type *ty = ConvertType(ret);
unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret);
- resultType = llvm::PointerType::get(ty, addressSpace);
+ resultType = llvm::PointerType::get(getLLVMContext(), addressSpace);
} else {
resultType = llvm::Type::getVoidTy(getLLVMContext());
}
@@ -1662,18 +1661,15 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
// Add type for sret argument.
if (IRFunctionArgs.hasSRetArg()) {
QualType Ret = FI.getReturnType();
- llvm::Type *Ty = ConvertType(Ret);
unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(Ret);
ArgTypes[IRFunctionArgs.getSRetArgNo()] =
- llvm::PointerType::get(Ty, AddressSpace);
+ llvm::PointerType::get(getLLVMContext(), AddressSpace);
}
// Add type for inalloca argument.
- if (IRFunctionArgs.hasInallocaArg()) {
- auto ArgStruct = FI.getArgStruct();
- assert(ArgStruct);
- ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
- }
+ if (IRFunctionArgs.hasInallocaArg())
+ ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
+ llvm::PointerType::getUnqual(getLLVMContext());
// Add in all of the required arguments.
unsigned ArgNo = 0;
@@ -1696,20 +1692,17 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
assert(NumIRArgs == 0);
break;
- case ABIArgInfo::Indirect: {
+ case ABIArgInfo::Indirect:
assert(NumIRArgs == 1);
// indirect arguments are always on the stack, which is alloca addr space.
- llvm::Type *LTy = ConvertTypeForMem(it->type);
- ArgTypes[FirstIRArg] = LTy->getPointerTo(
- CGM.getDataLayout().getAllocaAddrSpace());
+ ArgTypes[FirstIRArg] = llvm::PointerType::get(
+ getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace());
break;
- }
- case ABIArgInfo::IndirectAliased: {
+ case ABIArgInfo::IndirectAliased:
assert(NumIRArgs == 1);
- llvm::Type *LTy = ConvertTypeForMem(it->type);
- ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace());
+ ArgTypes[FirstIRArg] = llvm::PointerType::get(
+ getLLVMContext(), ArgInfo.getIndirectAddrSpace());
break;
- }
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
// Fast-isel and the optimizer generally like scalar values better than
@@ -1752,7 +1745,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
if (!isFuncTypeConvertible(FPT))
return llvm::StructType::get(getLLVMContext());
@@ -1830,10 +1823,33 @@ static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy,
Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
}
-void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
- bool HasOptnone,
- bool AttrOnCallSite,
- llvm::AttrBuilder &FuncAttrs) {
+/// Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the
+/// requested denormal behavior, accounting for the overriding behavior of the
+/// -f32 case.
+static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode,
+ llvm::DenormalMode FP32DenormalMode,
+ llvm::AttrBuilder &FuncAttrs) {
+ if (FPDenormalMode != llvm::DenormalMode::getDefault())
+ FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());
+
+ if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
+ FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());
+}
+
+/// Add default attributes to a function, which have merge semantics under
+/// -mlink-builtin-bitcode and should not simply overwrite any existing
+/// attributes in the linked library.
+static void
+addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts,
+ llvm::AttrBuilder &FuncAttrs) {
+ addDenormalModeAttrs(CodeGenOpts.FPDenormalMode, CodeGenOpts.FP32DenormalMode,
+ FuncAttrs);
+}
+
+static void getTrivialDefaultFunctionAttributes(
+ StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts,
+ const LangOptions &LangOpts, bool AttrOnCallSite,
+ llvm::AttrBuilder &FuncAttrs) {
// OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
if (!HasOptnone) {
if (CodeGenOpts.OptimizeSize)
@@ -1875,15 +1891,6 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
if (CodeGenOpts.NullPointerIsValid)
FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
- if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
- FuncAttrs.addAttribute("denormal-fp-math",
- CodeGenOpts.FPDenormalMode.str());
- if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
- FuncAttrs.addAttribute(
- "denormal-fp-math-f32",
- CodeGenOpts.FP32DenormalMode.str());
- }
-
if (LangOpts.getDefaultExceptionMode() == LangOptions::FPE_Ignore)
FuncAttrs.addAttribute("no-trapping-math", "true");
@@ -1962,7 +1969,7 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
}
}
- if (getLangOpts().assumeFunctionsAreConvergent()) {
+ if (LangOpts.assumeFunctionsAreConvergent()) {
// Conservatively, mark all functions and calls in CUDA and OpenCL as
// convergent (meaning, they may call an intrinsically convergent op, such
// as __syncthreads() / barrier(), and so can't have certain optimizations
@@ -1972,10 +1979,9 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
}
// TODO: NoUnwind attribute should be added for other GPU modes HIP,
- // SYCL, OpenMP offload. AFAIK, none of them support exceptions in device
- // code.
- if ((getLangOpts().CUDA && getLangOpts().CUDAIsDevice) ||
- getLangOpts().OpenCL) {
+ // OpenMP offload. AFAIK, neither of them support exceptions in device code.
+ if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
+ LangOpts.SYCLIsDevice) {
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
@@ -1986,6 +1992,98 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
}
}
+/// Adds attributes to \p F according to our \p CodeGenOpts and \p LangOpts, as
+/// though we had emitted it ourselves. We remove any attributes on F that
+/// conflict with the attributes we add here.
+static void mergeDefaultFunctionDefinitionAttributes(
+ llvm::Function &F, const CodeGenOptions CodeGenOpts,
+ const LangOptions &LangOpts, const TargetOptions &TargetOpts,
+ bool WillInternalize) {
+
+ llvm::AttrBuilder FuncAttrs(F.getContext());
+ // Here we only extract the options that are relevant compared to the version
+ // from GetCPUAndFeaturesAttributes.
+ if (!TargetOpts.CPU.empty())
+ FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU);
+ if (!TargetOpts.TuneCPU.empty())
+ FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU);
+
+ ::getTrivialDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
+ CodeGenOpts, LangOpts,
+ /*AttrOnCallSite=*/false, FuncAttrs);
+
+ if (!WillInternalize && F.isInterposable()) {
+ // Do not promote "dynamic" denormal-fp-math to this translation unit's
+ // setting for weak functions that won't be internalized. The user has no
+ // real control for how builtin bitcode is linked, so we shouldn't assume
+ // later copies will use a consistent mode.
+ F.addFnAttrs(FuncAttrs);
+ return;
+ }
+
+ llvm::AttributeMask AttrsToRemove;
+
+ llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
+ llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
+ llvm::DenormalMode Merged =
+ CodeGenOpts.FPDenormalMode.mergeCalleeMode(DenormModeToMerge);
+ llvm::DenormalMode MergedF32 = CodeGenOpts.FP32DenormalMode;
+
+ if (DenormModeToMergeF32.isValid()) {
+ MergedF32 =
+ CodeGenOpts.FP32DenormalMode.mergeCalleeMode(DenormModeToMergeF32);
+ }
+
+ if (Merged == llvm::DenormalMode::getDefault()) {
+ AttrsToRemove.addAttribute("denormal-fp-math");
+ } else if (Merged != DenormModeToMerge) {
+ // Overwrite existing attribute
+ FuncAttrs.addAttribute("denormal-fp-math",
+ CodeGenOpts.FPDenormalMode.str());
+ }
+
+ if (MergedF32 == llvm::DenormalMode::getDefault()) {
+ AttrsToRemove.addAttribute("denormal-fp-math-f32");
+ } else if (MergedF32 != DenormModeToMergeF32) {
+ // Overwrite existing attribute
+ FuncAttrs.addAttribute("denormal-fp-math-f32",
+ CodeGenOpts.FP32DenormalMode.str());
+ }
+
+ F.removeFnAttrs(AttrsToRemove);
+ addDenormalModeAttrs(Merged, MergedF32, FuncAttrs);
+ F.addFnAttrs(FuncAttrs);
+}
+
+void clang::CodeGen::mergeDefaultFunctionDefinitionAttributes(
+ llvm::Function &F, const CodeGenOptions CodeGenOpts,
+ const LangOptions &LangOpts, const TargetOptions &TargetOpts,
+ bool WillInternalize) {
+
+ ::mergeDefaultFunctionDefinitionAttributes(F, CodeGenOpts, LangOpts,
+ TargetOpts, WillInternalize);
+}
+
+void CodeGenModule::getTrivialDefaultFunctionAttributes(
+ StringRef Name, bool HasOptnone, bool AttrOnCallSite,
+ llvm::AttrBuilder &FuncAttrs) {
+ ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, getCodeGenOpts(),
+ getLangOpts(), AttrOnCallSite,
+ FuncAttrs);
+}
+
+void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
+ bool HasOptnone,
+ bool AttrOnCallSite,
+ llvm::AttrBuilder &FuncAttrs) {
+ getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,
+ FuncAttrs);
+ // If we're just getting the default, get the default values for mergeable
+ // attributes.
+ if (!AttrOnCallSite)
+ addMergableDefaultFunctionAttributes(CodeGenOpts, FuncAttrs);
+}
+
void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
llvm::AttrBuilder FuncAttrs(F.getContext());
getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
@@ -1994,8 +2092,17 @@ void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
F.addFnAttrs(FuncAttrs);
}
+/// Apply default attributes to \p F, accounting for merge semantics of
+/// attributes that should not overwrite existing attributes.
+void CodeGenModule::mergeDefaultFunctionDefinitionAttributes(
+ llvm::Function &F, bool WillInternalize) {
+ ::mergeDefaultFunctionDefinitionAttributes(F, getCodeGenOpts(), getLangOpts(),
+ getTarget().getTargetOpts(),
+ WillInternalize);
+}
+
void CodeGenModule::addDefaultFunctionDefinitionAttributes(
- llvm::AttrBuilder &attrs) {
+ llvm::AttrBuilder &attrs) {
getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
/*for call*/ false, attrs);
GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
@@ -2107,6 +2214,39 @@ static bool IsArgumentMaybeUndef(const Decl *TargetDecl,
return false;
}
+/// Test if it's legal to apply nofpclass for the given parameter type and it's
+/// lowered IR type.
+static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType,
+ bool IsReturn) {
+ // Should only apply to FP types in the source, not ABI promoted.
+ if (!ParamType->hasFloatingRepresentation())
+ return false;
+
+ // The promoted-to IR type also needs to support nofpclass.
+ llvm::Type *IRTy = AI.getCoerceToType();
+ if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
+ return true;
+
+ if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
+ return !IsReturn && AI.getCanBeFlattened() &&
+ llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
+ return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);
+ });
+ }
+
+ return false;
+}
+
+/// Return the nofpclass mask that can be applied to floating-point parameters.
+static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts) {
+ llvm::FPClassTest Mask = llvm::fcNone;
+ if (LangOpts.NoHonorInfs)
+ Mask |= llvm::fcInf;
+ if (LangOpts.NoHonorNaNs)
+ Mask |= llvm::fcNan;
+ return Mask;
+}
+
/// Construct the IR attribute list of a function or call.
///
/// When adding an attribute, please consider where it should be handled:
@@ -2202,6 +2342,9 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
NBA = Fn->getAttr<NoBuiltinAttr>();
}
+ }
+
+ if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {
// Only place nomerge attribute on call sites, never functions. This
// allows it to work on indirect virtual function calls.
if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
@@ -2374,6 +2517,10 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
case ABIArgInfo::Direct:
if (RetAI.getInReg())
RetAttrs.addAttribute(llvm::Attribute::InReg);
+
+ if (canApplyNoFPClass(RetAI, RetTy, true))
+ RetAttrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts()));
+
break;
case ABIArgInfo::Ignore:
break;
@@ -2512,8 +2659,10 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
else if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));
- break;
+ if (canApplyNoFPClass(AI, ParamType, false))
+ Attrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts()));
+ break;
case ABIArgInfo::Indirect: {
if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
@@ -2745,13 +2894,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If we're using inalloca, all the memory arguments are GEPs off of the last
// parameter, which is a pointer to the complete memory area.
Address ArgStruct = Address::invalid();
- if (IRFunctionArgs.hasInallocaArg()) {
+ if (IRFunctionArgs.hasInallocaArg())
ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
FI.getArgStruct(), FI.getArgStructAlignment());
- assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
- }
-
// Name the struct return parameter.
if (IRFunctionArgs.hasSRetArg()) {
auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
@@ -2807,7 +2953,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::IndirectAliased: {
assert(NumIRArgs == 1);
Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty),
- ArgI.getIndirectAlign());
+ ArgI.getIndirectAlign(), KnownNonNull);
if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
@@ -3000,7 +3146,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(NumIRArgs == 1);
Coerced->setName(Arg->getName() + ".coerce");
ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
- VecTyTo, Coerced, Zero, "castFixedSve")));
+ VecTyTo, Coerced, Zero, "cast.fixed")));
break;
}
}
@@ -3017,30 +3163,51 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
STy->getNumElements() > 1) {
- uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
- llvm::Type *DstTy = Ptr.getElementType();
- uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
+ llvm::TypeSize StructSize = CGM.getDataLayout().getTypeAllocSize(STy);
+ llvm::TypeSize PtrElementSize =
+ CGM.getDataLayout().getTypeAllocSize(Ptr.getElementType());
+ if (StructSize.isScalable()) {
+ assert(STy->containsHomogeneousScalableVectorTypes() &&
+ "ABI only supports structure with homogeneous scalable vector "
+ "type");
+ assert(StructSize == PtrElementSize &&
+ "Only allow non-fractional movement of structure with "
+ "homogeneous scalable vector type");
+ assert(STy->getNumElements() == NumIRArgs);
+
+ llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ auto *AI = Fn->getArg(FirstIRArg + i);
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ LoadedStructValue =
+ Builder.CreateInsertValue(LoadedStructValue, AI, i);
+ }
- Address AddrToStoreInto = Address::invalid();
- if (SrcSize <= DstSize) {
- AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
+ Builder.CreateStore(LoadedStructValue, Ptr);
} else {
- AddrToStoreInto =
- CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
- }
+ uint64_t SrcSize = StructSize.getFixedValue();
+ uint64_t DstSize = PtrElementSize.getFixedValue();
+
+ Address AddrToStoreInto = Address::invalid();
+ if (SrcSize <= DstSize) {
+ AddrToStoreInto = Ptr.withElementType(STy);
+ } else {
+ AddrToStoreInto =
+ CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
+ }
- assert(STy->getNumElements() == NumIRArgs);
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto AI = Fn->getArg(FirstIRArg + i);
- AI->setName(Arg->getName() + ".coerce" + Twine(i));
- Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
- Builder.CreateStore(AI, EltPtr);
- }
+ assert(STy->getNumElements() == NumIRArgs);
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ auto AI = Fn->getArg(FirstIRArg + i);
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
+ Builder.CreateStore(AI, EltPtr);
+ }
- if (SrcSize > DstSize) {
- Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
+ if (SrcSize > DstSize) {
+ Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
+ }
}
-
} else {
// Simple case, just do a coerced store of the argument into the alloca.
assert(NumIRArgs == 1);
@@ -3068,7 +3235,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
ArgVals.push_back(ParamValue::forIndirect(alloca));
auto coercionType = ArgI.getCoerceAndExpandType();
- alloca = Builder.CreateElementBitCast(alloca, coercionType);
+ alloca = alloca.withElementType(coercionType);
unsigned argIndex = FirstIRArg;
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
@@ -3325,8 +3492,9 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
// single-predecessors chain from the current insertion point.
llvm::BasicBlock *StoreBB = store->getParent();
llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ llvm::SmallPtrSet<llvm::BasicBlock *, 4> SeenBBs;
while (IP != StoreBB) {
- if (!(IP = IP->getSinglePredecessor()))
+ if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
return nullptr;
}
@@ -3669,7 +3837,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
// Load all of the coerced elements out into results.
llvm::SmallVector<llvm::Value*, 4> results;
- Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
+ Address addr = ReturnValue.withElementType(coercionType);
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
auto coercedEltType = coercionType->getElementType(i);
if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
@@ -3795,8 +3963,8 @@ static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
// FIXME: Generate IR in one pass, rather than going back and fixing up these
// placeholders.
llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
- llvm::Type *IRPtrTy = IRTy->getPointerTo();
- llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy->getPointerTo());
+ llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
+ llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
// FIXME: When we generate this IR in one pass, we shouldn't need
// this win32-specific alignment hack.
@@ -4764,7 +4932,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// the proper cpu features (and it won't cause code generation issues due to
// function based code generation).
if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
- TargetDecl->hasAttr<TargetAttr>())
+ (TargetDecl->hasAttr<TargetAttr>() ||
+ (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>())))
checkTargetFeatures(Loc, FD);
// Some architectures (such as x86-64) have the ABI changed based on
@@ -4773,25 +4942,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
}
-#ifndef NDEBUG
- if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
- // For an inalloca varargs function, we don't expect CallInfo to match the
- // function pointer's type, because the inalloca struct a will have extra
- // fields in it for the varargs parameters. Code later in this function
- // bitcasts the function pointer to the type derived from CallInfo.
- //
- // In other cases, we assert that the types match up (until pointers stop
- // having pointee types).
- if (Callee.isVirtual())
- assert(IRFuncTy == Callee.getVirtualFunctionType());
- else {
- llvm::PointerType *PtrTy =
- llvm::cast<llvm::PointerType>(Callee.getFunctionPointer()->getType());
- assert(PtrTy->isOpaqueOrPointeeTypeMatches(IRFuncTy));
- }
- }
-#endif
-
// 1. Set up the arguments.
// If we're using inalloca, insert the allocation after the stack save.
@@ -4913,10 +5063,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Store the RValue into the argument struct.
Address Addr =
Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
- // There are some cases where a trivial bitcast is not avoidable. The
- // definition of a type later in a translation unit may change it's type
- // from {}* to (%struct.foo*)*.
- Addr = Builder.CreateElementBitCast(Addr, ConvertTypeForMem(I->Ty));
+ Addr = Addr.withElementType(ConvertTypeForMem(I->Ty));
I->copyInto(*this, Addr);
}
break;
@@ -5010,9 +5157,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
I->copyInto(*this, AI);
} else {
// Skip the extra memcpy call.
- auto *T = llvm::PointerType::getWithSamePointeeType(
- cast<llvm::PointerType>(V->getType()),
- CGM.getDataLayout().getAllocaAddrSpace());
+ auto *T = llvm::PointerType::get(
+ CGM.getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace());
llvm::Value *Val = getTargetHooks().performAddrSpaceCast(
*this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
@@ -5112,7 +5258,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
Src = TempAlloca;
} else {
- Src = Builder.CreateElementBitCast(Src, STy);
+ Src = Src.withElementType(STy);
}
assert(NumIRArgs == STy->getNumElements());
@@ -5176,7 +5322,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Builder.CreateStore(RV.getScalarVal(), addr);
}
- addr = Builder.CreateElementBitCast(addr, coercionType);
+ addr = addr.withElementType(coercionType);
unsigned IRArgPos = FirstIRArg;
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
@@ -5212,35 +5358,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If we're using inalloca, set up that argument.
if (ArgMemory.isValid()) {
llvm::Value *Arg = ArgMemory.getPointer();
- if (CallInfo.isVariadic()) {
- // When passing non-POD arguments by value to variadic functions, we will
- // end up with a variadic prototype and an inalloca call site. In such
- // cases, we can't do any parameter mismatch checks. Give up and bitcast
- // the callee.
- unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
- CalleePtr =
- Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
- } else {
- llvm::Type *LastParamTy =
- IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
- if (Arg->getType() != LastParamTy) {
-#ifndef NDEBUG
- // Assert that these structs have equivalent element types.
- llvm::StructType *FullTy = CallInfo.getArgStruct();
- if (!LastParamTy->isOpaquePointerTy()) {
- llvm::StructType *DeclaredTy = cast<llvm::StructType>(
- LastParamTy->getNonOpaquePointerElementType());
- assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
- for (auto DI = DeclaredTy->element_begin(),
- DE = DeclaredTy->element_end(),
- FI = FullTy->element_begin();
- DI != DE; ++DI, ++FI)
- assert(*DI == *FI);
- }
-#endif
- Arg = Builder.CreateBitCast(Arg, LastParamTy);
- }
- }
assert(IRFunctionArgs.hasInallocaArg());
IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
}
@@ -5560,8 +5677,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::CoerceAndExpand: {
auto coercionType = RetAI.getCoerceAndExpandType();
- Address addr = SRetPtr;
- addr = Builder.CreateElementBitCast(addr, coercionType);
+ Address addr = SRetPtr.withElementType(coercionType);
assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
bool requiresExtract = isa<llvm::StructType>(CI->getType());
@@ -5578,7 +5694,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(unpaddedIndex == 0);
Builder.CreateStore(elt, eltAddr);
}
- // FALLTHROUGH
[[fallthrough]];
}
@@ -5628,6 +5743,20 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm_unreachable("bad evaluation kind");
}
+ // If coercing a fixed vector from a scalable vector for ABI
+ // compatibility, and the types match, use the llvm.vector.extract
+ // intrinsic to perform the conversion.
+ if (auto *FixedDst = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
+ llvm::Value *V = CI;
+ if (auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(V->getType())) {
+ if (FixedDst->getElementType() == ScalableSrc->getElementType()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
+ V = Builder.CreateExtractVector(FixedDst, V, Zero, "cast.fixed");
+ return RValue::get(V);
+ }
+ }
+ }
+
Address DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
index 59c3f304f59b..eaaf10c4eec6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
@@ -30,6 +30,7 @@ class Value;
namespace clang {
class Decl;
class FunctionDecl;
+class TargetOptions;
class VarDecl;
namespace CodeGen {
@@ -108,9 +109,6 @@ public:
AbstractInfo = abstractInfo;
assert(functionPtr && "configuring callee without function pointer");
assert(functionPtr->getType()->isPointerTy());
- assert(functionPtr->getType()->isOpaquePointerTy() ||
- functionPtr->getType()->getNonOpaquePointerElementType()
- ->isFunctionTy());
}
static CGCallee forBuiltin(unsigned builtinID,
@@ -377,6 +375,14 @@ public:
bool isExternallyDestructed() const { return IsExternallyDestructed; }
};
+/// Helper to add attributes to \p F according to the CodeGenOptions and
+/// LangOptions without requiring a CodeGenModule to be constructed.
+void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F,
+ const CodeGenOptions CodeGenOpts,
+ const LangOptions &LangOpts,
+ const TargetOptions &TargetOpts,
+ bool WillInternalize);
+
} // end namespace CodeGen
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
index 0795ea598411..93e7b54fca04 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
@@ -139,7 +139,7 @@ Address CodeGenFunction::LoadCXXThisAddress() {
}
llvm::Type *Ty = ConvertType(MD->getThisType()->getPointeeType());
- return Address(LoadCXXThis(), Ty, CXXThisAlignment);
+ return Address(LoadCXXThis(), Ty, CXXThisAlignment, KnownNonNull);
}
/// Emit the address of a field using a member data pointer.
@@ -236,12 +236,10 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(Address This,
// TODO: for complete types, this should be possible with a GEP.
Address V = This;
if (!Offset.isZero()) {
- V = Builder.CreateElementBitCast(V, Int8Ty);
+ V = V.withElementType(Int8Ty);
V = Builder.CreateConstInBoundsByteGEP(V, Offset);
}
- V = Builder.CreateElementBitCast(V, ConvertType(Base));
-
- return V;
+ return V.withElementType(ConvertType(Base));
}
static Address
@@ -272,8 +270,6 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
// Apply the base offset.
llvm::Value *ptr = addr.getPointer();
- unsigned AddrSpace = ptr->getType()->getPointerAddressSpace();
- ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8Ty->getPointerTo(AddrSpace));
ptr = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ptr, baseOffset, "add.ptr");
// If we have a virtual component, the alignment of the result will
@@ -329,8 +325,8 @@ Address CodeGenFunction::GetAddressOfBaseClass(
// Get the base pointer type.
llvm::Type *BaseValueTy = ConvertType((PathEnd[-1])->getType());
- llvm::Type *BasePtrTy =
- BaseValueTy->getPointerTo(Value.getType()->getPointerAddressSpace());
+ llvm::Type *PtrTy = llvm::PointerType::get(
+ CGM.getLLVMContext(), Value.getType()->getPointerAddressSpace());
QualType DerivedTy = getContext().getRecordType(Derived);
CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived);
@@ -344,7 +340,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(),
DerivedTy, DerivedAlign, SkippedChecks);
}
- return Builder.CreateElementBitCast(Value, BaseValueTy);
+ return Value.withElementType(BaseValueTy);
}
llvm::BasicBlock *origBB = nullptr;
@@ -381,7 +377,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
VirtualOffset, Derived, VBase);
// Cast to the destination type.
- Value = Builder.CreateElementBitCast(Value, BaseValueTy);
+ Value = Value.withElementType(BaseValueTy);
// Build a phi if we needed a null check.
if (NullCheckValue) {
@@ -389,10 +385,10 @@ Address CodeGenFunction::GetAddressOfBaseClass(
Builder.CreateBr(endBB);
EmitBlock(endBB);
- llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
+ llvm::PHINode *PHI = Builder.CreatePHI(PtrTy, 2, "cast.result");
PHI->addIncoming(Value.getPointer(), notNullBB);
- PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
- Value = Value.withPointer(PHI);
+ PHI->addIncoming(llvm::Constant::getNullValue(PtrTy), origBB);
+ Value = Value.withPointer(PHI, NotKnownNonNull);
}
return Value;
@@ -410,14 +406,15 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
getContext().getCanonicalType(getContext().getTagDeclType(Derived));
unsigned AddrSpace = BaseAddr.getAddressSpace();
llvm::Type *DerivedValueTy = ConvertType(DerivedTy);
- llvm::Type *DerivedPtrTy = DerivedValueTy->getPointerTo(AddrSpace);
+ llvm::Type *DerivedPtrTy =
+ llvm::PointerType::get(getLLVMContext(), AddrSpace);
llvm::Value *NonVirtualOffset =
CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
if (!NonVirtualOffset) {
// No offset, we can just cast back.
- return Builder.CreateElementBitCast(BaseAddr, DerivedValueTy);
+ return BaseAddr.withElementType(DerivedValueTy);
}
llvm::BasicBlock *CastNull = nullptr;
@@ -998,8 +995,8 @@ namespace {
private:
void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) {
- DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
- SrcPtr = CGF.Builder.CreateElementBitCast(SrcPtr, CGF.Int8Ty);
+ DestPtr = DestPtr.withElementType(CGF.Int8Ty);
+ SrcPtr = SrcPtr.withElementType(CGF.Int8Ty);
CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity());
}
@@ -2132,8 +2129,8 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
if (SlotAS != ThisAS) {
unsigned TargetThisAS = getContext().getTargetAddressSpace(ThisAS);
- llvm::Type *NewType = llvm::PointerType::getWithSamePointeeType(
- This.getType(), TargetThisAS);
+ llvm::Type *NewType =
+ llvm::PointerType::get(getLLVMContext(), TargetThisAS);
ThisPtr = getTargetHooks().performAddrSpaceCast(*this, This.getPointer(),
ThisAS, SlotAS, NewType);
}
@@ -2579,18 +2576,13 @@ void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
// Finally, store the address point. Use the same LLVM types as the field to
// support optimization.
unsigned GlobalsAS = CGM.getDataLayout().getDefaultGlobalsAddressSpace();
- unsigned ProgAS = CGM.getDataLayout().getProgramAddressSpace();
- llvm::Type *VTablePtrTy =
- llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true)
- ->getPointerTo(ProgAS)
- ->getPointerTo(GlobalsAS);
+ llvm::Type *PtrTy = llvm::PointerType::get(CGM.getLLVMContext(), GlobalsAS);
// vtable field is derived from `this` pointer, therefore they should be in
// the same addr space. Note that this might not be LLVM address space 0.
- VTableField = Builder.CreateElementBitCast(VTableField, VTablePtrTy);
- VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy);
+ VTableField = VTableField.withElementType(PtrTy);
llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
- TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(VTablePtrTy);
+ TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(PtrTy);
CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
CGM.getCodeGenOpts().StrictVTablePointers)
@@ -2683,7 +2675,7 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
llvm::Value *CodeGenFunction::GetVTablePtr(Address This,
llvm::Type *VTableTy,
const CXXRecordDecl *RD) {
- Address VTablePtrSrc = Builder.CreateElementBitCast(This, VTableTy);
+ Address VTablePtrSrc = This.withElementType(VTableTy);
llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(VTableTy);
CGM.DecorateInstructionWithTBAA(VTable, TBAAInfo);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
index 43758ac27e43..0bbab283603d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
@@ -782,7 +782,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
if (!RequiresNormalCleanup) {
// Mark CPP scope end for passed-by-value Arg temp
// per Windows ABI which is "normally" Cleanup in callee
- if (IsEHa && getInvokeDest()) {
+ if (IsEHa && getInvokeDest() && Builder.GetInsertBlock()) {
if (Personality.isMSVCXXPersonality())
EmitSehCppScopeEnd();
}
@@ -836,7 +836,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
EmitBlock(NormalEntry);
// intercept normal cleanup to mark SEH scope end
- if (IsEHa) {
+ if (IsEHa && getInvokeDest()) {
if (Personality.isMSVCXXPersonality())
EmitSehCppScopeEnd();
else
@@ -1031,6 +1031,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
if (!Personality.isMSVCPersonality()) {
EHStack.pushTerminate();
PushedTerminate = true;
+ } else if (IsEHa && getInvokeDest()) {
+ EmitSehCppScopeEnd();
}
// We only actually emit the cleanup code if the cleanup is either
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
index 775a4341558a..8437cda79beb 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
@@ -198,7 +198,9 @@ static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Co
auto *NullPtr = llvm::ConstantPointerNull::get(CGF.CGM.Int8PtrTy);
auto *SaveCall = Builder.CreateCall(CoroSave, {NullPtr});
+ CGF.CurCoro.InSuspendBlock = true;
auto *SuspendRet = CGF.EmitScalarExpr(S.getSuspendExpr());
+ CGF.CurCoro.InSuspendBlock = false;
if (SuspendRet != nullptr && SuspendRet->getType()->isIntegerTy(1)) {
// Veto suspension if requested by bool returning await_suspend.
BasicBlock *RealSuspendBlock =
@@ -465,6 +467,123 @@ struct CallCoroDelete final : public EHScopeStack::Cleanup {
};
}
+namespace {
+struct GetReturnObjectManager {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ const CoroutineBodyStmt &S;
+ // When true, performs RVO for the return object.
+ bool DirectEmit = false;
+
+ Address GroActiveFlag;
+ CodeGenFunction::AutoVarEmission GroEmission;
+
+ GetReturnObjectManager(CodeGenFunction &CGF, const CoroutineBodyStmt &S)
+ : CGF(CGF), Builder(CGF.Builder), S(S), GroActiveFlag(Address::invalid()),
+ GroEmission(CodeGenFunction::AutoVarEmission::invalid()) {
+ // The call to get_return_object is sequenced before the call to
+ // initial_suspend and is invoked at most once, but there are caveats
+ // regarding whether the prvalue result object may be initialized
+ // directly/eager or delayed, depending on the types involved.
+ //
+ // More info at https://github.com/cplusplus/papers/issues/1414
+ //
+ // The general cases:
+ // 1. Same type of get_return_object and coroutine return type (direct
+ // emission):
+ // - Constructed in the return slot.
+ // 2. Different types (delayed emission):
+ // - Constructed temporary object prior to initial suspend initialized with
+ // a call to get_return_object()
+ // - When coroutine needs to return to the caller and needs to construct
+ // return value for the coroutine it is initialized with expiring value of
+ // the temporary obtained above.
+ //
+ // Direct emission for void returning coroutines or GROs.
+ DirectEmit = [&]() {
+ auto *RVI = S.getReturnValueInit();
+ assert(RVI && "expected RVI");
+ auto GroType = RVI->getType();
+ return CGF.getContext().hasSameType(GroType, CGF.FnRetTy);
+ }();
+ }
+
+ // The gro variable has to outlive coroutine frame and coroutine promise, but,
+ // it can only be initialized after coroutine promise was created, thus, we
+ // split its emission in two parts. EmitGroAlloca emits an alloca and sets up
+ // cleanups. Later when coroutine promise is available we initialize the gro
+ // and sets the flag that the cleanup is now active.
+ void EmitGroAlloca() {
+ if (DirectEmit)
+ return;
+
+ auto *GroDeclStmt = dyn_cast_or_null<DeclStmt>(S.getResultDecl());
+ if (!GroDeclStmt) {
+ // If get_return_object returns void, no need to do an alloca.
+ return;
+ }
+
+ auto *GroVarDecl = cast<VarDecl>(GroDeclStmt->getSingleDecl());
+
+ // Set GRO flag that it is not initialized yet
+ GroActiveFlag = CGF.CreateTempAlloca(Builder.getInt1Ty(), CharUnits::One(),
+ "gro.active");
+ Builder.CreateStore(Builder.getFalse(), GroActiveFlag);
+
+ GroEmission = CGF.EmitAutoVarAlloca(*GroVarDecl);
+
+ // Remember the top of EHStack before emitting the cleanup.
+ auto old_top = CGF.EHStack.stable_begin();
+ CGF.EmitAutoVarCleanups(GroEmission);
+ auto top = CGF.EHStack.stable_begin();
+
+ // Make the cleanup conditional on gro.active
+ for (auto b = CGF.EHStack.find(top), e = CGF.EHStack.find(old_top); b != e;
+ b++) {
+ if (auto *Cleanup = dyn_cast<EHCleanupScope>(&*b)) {
+ assert(!Cleanup->hasActiveFlag() && "cleanup already has active flag?");
+ Cleanup->setActiveFlag(GroActiveFlag);
+ Cleanup->setTestFlagInEHCleanup();
+ Cleanup->setTestFlagInNormalCleanup();
+ }
+ }
+ }
+
+ void EmitGroInit() {
+ if (DirectEmit) {
+ // ReturnValue should be valid as long as the coroutine's return type
+ // is not void. The assertion could help us to reduce the check later.
+ assert(CGF.ReturnValue.isValid() == (bool)S.getReturnStmt());
+ // Now we have the promise, initialize the GRO.
+ // We need to emit `get_return_object` first. According to:
+ // [dcl.fct.def.coroutine]p7
+ // The call to get_return_object is sequenced before the call to
+ // initial_suspend and is invoked at most once.
+ //
+ // So we couldn't emit return value when we emit return statement,
+ // otherwise the call to get_return_object wouldn't be in front
+ // of initial_suspend.
+ if (CGF.ReturnValue.isValid()) {
+ CGF.EmitAnyExprToMem(S.getReturnValue(), CGF.ReturnValue,
+ S.getReturnValue()->getType().getQualifiers(),
+ /*IsInit*/ true);
+ }
+ return;
+ }
+
+ if (!GroActiveFlag.isValid()) {
+ // No Gro variable was allocated. Simply emit the call to
+ // get_return_object.
+ CGF.EmitStmt(S.getResultDecl());
+ return;
+ }
+
+ CGF.EmitAutoVarInit(GroEmission);
+ Builder.CreateStore(Builder.getTrue(), GroActiveFlag);
+ }
+};
+} // namespace
+
static void emitBodyAndFallthrough(CodeGenFunction &CGF,
const CoroutineBodyStmt &S, Stmt *Body) {
CGF.EmitStmt(Body);
@@ -511,6 +630,8 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
// See if allocation was successful.
auto *NullPtr = llvm::ConstantPointerNull::get(Int8PtrTy);
auto *Cond = Builder.CreateICmpNE(AllocateCall, NullPtr);
+ // Expect the allocation to be successful.
+ emitCondLikelihoodViaExpectIntrinsic(Cond, Stmt::LH_Likely);
Builder.CreateCondBr(Cond, InitBB, RetOnFailureBB);
// If not, return OnAllocFailure object.
@@ -531,6 +652,9 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
CGM.getIntrinsic(llvm::Intrinsic::coro_begin), {CoroId, Phi});
CurCoro.Data->CoroBegin = CoroBegin;
+ GetReturnObjectManager GroManager(*this, S);
+ GroManager.EmitGroAlloca();
+
CurCoro.Data->CleanupJD = getJumpDestInCurrentScope(RetBB);
{
CGDebugInfo *DI = getDebugInfo();
@@ -568,23 +692,8 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
// promise local variable was not emitted yet.
CoroId->setArgOperand(1, PromiseAddrVoidPtr);
- // ReturnValue should be valid as long as the coroutine's return type
- // is not void. The assertion could help us to reduce the check later.
- assert(ReturnValue.isValid() == (bool)S.getReturnStmt());
- // Now we have the promise, initialize the GRO.
- // We need to emit `get_return_object` first. According to:
- // [dcl.fct.def.coroutine]p7
- // The call to get_return_­object is sequenced before the call to
- // initial_suspend and is invoked at most once.
- //
- // So we couldn't emit return value when we emit return statment,
- // otherwise the call to get_return_object wouldn't be in front
- // of initial_suspend.
- if (ReturnValue.isValid()) {
- EmitAnyExprToMem(S.getReturnValue(), ReturnValue,
- S.getReturnValue()->getType().getQualifiers(),
- /*IsInit*/ true);
- }
+ // Now we have the promise, initialize the GRO
+ GroManager.EmitGroInit();
EHStack.pushCleanup<CallCoroEnd>(EHCleanup);
@@ -650,7 +759,8 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
if (Stmt *Ret = S.getReturnStmt()) {
// Since we already emitted the return value above, so we shouldn't
// emit it again here.
- cast<ReturnStmt>(Ret)->setRetValue(nullptr);
+ if (GroManager.DirectEmit)
+ cast<ReturnStmt>(Ret)->setRetValue(nullptr);
EmitStmt(Ret);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
index 3bde43cc1db3..f049a682cfed 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -18,6 +18,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
+#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclFriend.h"
@@ -72,8 +73,6 @@ CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
: CGM(CGM), DebugKind(CGM.getCodeGenOpts().getDebugInfo()),
DebugTypeExtRefs(CGM.getCodeGenOpts().DebugTypeExtRefs),
DBuilder(CGM.getModule()) {
- for (const auto &KV : CGM.getCodeGenOpts().DebugPrefixMap)
- DebugPrefixMap[KV.first] = KV.second;
CreateCompileUnit();
}
@@ -469,12 +468,9 @@ llvm::DIFile *CGDebugInfo::createFile(
}
std::string CGDebugInfo::remapDIPath(StringRef Path) const {
- if (DebugPrefixMap.empty())
- return Path.str();
-
SmallString<256> P = Path;
- for (const auto &Entry : DebugPrefixMap)
- if (llvm::sys::path::replace_path_prefix(P, Entry.first, Entry.second))
+ for (auto &[From, To] : llvm::reverse(CGM.getCodeGenOpts().DebugPrefixMap))
+ if (llvm::sys::path::replace_path_prefix(P, From, To))
break;
return P.str().str();
}
@@ -527,6 +523,7 @@ void CGDebugInfo::CreateCompileUnit() {
// Get absolute path name.
SourceManager &SM = CGM.getContext().getSourceManager();
auto &CGO = CGM.getCodeGenOpts();
+ const LangOptions &LO = CGM.getLangOpts();
std::string MainFileName = CGO.MainFileName;
if (MainFileName.empty())
MainFileName = "<stdin>";
@@ -541,9 +538,15 @@ void CGDebugInfo::CreateCompileUnit() {
MainFileDir = std::string(MainFile->getDir().getName());
if (!llvm::sys::path::is_absolute(MainFileName)) {
llvm::SmallString<1024> MainFileDirSS(MainFileDir);
- llvm::sys::path::append(MainFileDirSS, MainFileName);
- MainFileName =
- std::string(llvm::sys::path::remove_leading_dotslash(MainFileDirSS));
+ llvm::sys::path::Style Style =
+ LO.UseTargetPathSeparator
+ ? (CGM.getTarget().getTriple().isOSWindows()
+ ? llvm::sys::path::Style::windows_backslash
+ : llvm::sys::path::Style::posix)
+ : llvm::sys::path::Style::native;
+ llvm::sys::path::append(MainFileDirSS, Style, MainFileName);
+ MainFileName = std::string(
+ llvm::sys::path::remove_leading_dotslash(MainFileDirSS, Style));
}
// If the main file name provided is identical to the input file name, and
// if the input file is a preprocessed source, use the module name for
@@ -559,7 +562,6 @@ void CGDebugInfo::CreateCompileUnit() {
}
llvm::dwarf::SourceLanguage LangTag;
- const LangOptions &LO = CGM.getLangOpts();
if (LO.CPlusPlus) {
if (LO.ObjC)
LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
@@ -595,20 +597,20 @@ void CGDebugInfo::CreateCompileUnit() {
llvm::DICompileUnit::DebugEmissionKind EmissionKind;
switch (DebugKind) {
- case codegenoptions::NoDebugInfo:
- case codegenoptions::LocTrackingOnly:
+ case llvm::codegenoptions::NoDebugInfo:
+ case llvm::codegenoptions::LocTrackingOnly:
EmissionKind = llvm::DICompileUnit::NoDebug;
break;
- case codegenoptions::DebugLineTablesOnly:
+ case llvm::codegenoptions::DebugLineTablesOnly:
EmissionKind = llvm::DICompileUnit::LineTablesOnly;
break;
- case codegenoptions::DebugDirectivesOnly:
+ case llvm::codegenoptions::DebugDirectivesOnly:
EmissionKind = llvm::DICompileUnit::DebugDirectivesOnly;
break;
- case codegenoptions::DebugInfoConstructor:
- case codegenoptions::LimitedDebugInfo:
- case codegenoptions::FullDebugInfo:
- case codegenoptions::UnusedTypeInfo:
+ case llvm::codegenoptions::DebugInfoConstructor:
+ case llvm::codegenoptions::LimitedDebugInfo:
+ case llvm::codegenoptions::FullDebugInfo:
+ case llvm::codegenoptions::UnusedTypeInfo:
EmissionKind = llvm::DICompileUnit::FullDebug;
break;
}
@@ -635,17 +637,21 @@ void CGDebugInfo::CreateCompileUnit() {
SDK = *It;
}
+ llvm::DICompileUnit::DebugNameTableKind NameTableKind =
+ static_cast<llvm::DICompileUnit::DebugNameTableKind>(
+ CGOpts.DebugNameTable);
+ if (CGM.getTarget().getTriple().isNVPTX())
+ NameTableKind = llvm::DICompileUnit::DebugNameTableKind::None;
+ else if (CGM.getTarget().getTriple().getVendor() == llvm::Triple::Apple)
+ NameTableKind = llvm::DICompileUnit::DebugNameTableKind::Apple;
+
// Create new compile unit.
TheCU = DBuilder.createCompileUnit(
LangTag, CUFile, CGOpts.EmitVersionIdentMetadata ? Producer : "",
LO.Optimize || CGOpts.PrepareForLTO || CGOpts.PrepareForThinLTO,
CGOpts.DwarfDebugFlags, RuntimeVers, CGOpts.SplitDwarfFile, EmissionKind,
DwoId, CGOpts.SplitDwarfInlining, CGOpts.DebugInfoForProfiling,
- CGM.getTarget().getTriple().isNVPTX()
- ? llvm::DICompileUnit::DebugNameTableKind::None
- : static_cast<llvm::DICompileUnit::DebugNameTableKind>(
- CGOpts.DebugNameTable),
- CGOpts.DebugRangesBaseAddress, remapDIPath(Sysroot), SDK);
+ NameTableKind, CGOpts.DebugRangesBaseAddress, remapDIPath(Sysroot), SDK);
}
llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
@@ -727,24 +733,41 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
#include "clang/Basic/AArch64SVEACLETypes.def"
{
ASTContext::BuiltinVectorTypeInfo Info =
- CGM.getContext().getBuiltinVectorTypeInfo(BT);
- unsigned NumElemsPerVG = (Info.EC.getKnownMinValue() * Info.NumVectors) / 2;
+ // For svcount_t, only the lower 2 bytes are relevant.
+ BT->getKind() == BuiltinType::SveCount
+ ? ASTContext::BuiltinVectorTypeInfo(
+ CGM.getContext().BoolTy, llvm::ElementCount::getFixed(16),
+ 1)
+ : CGM.getContext().getBuiltinVectorTypeInfo(BT);
+
+ // A single vector of bytes may not suffice as the representation of
+      // svcount_t tuples because of the gap between the active 16 bits of
+ // successive tuple members. Currently no such tuples are defined for
+ // svcount_t, so assert that NumVectors is 1.
+ assert((BT->getKind() != BuiltinType::SveCount || Info.NumVectors == 1) &&
+ "Unsupported number of vectors for svcount_t");
// Debuggers can't extract 1bit from a vector, so will display a
- // bitpattern for svbool_t instead.
+ // bitpattern for predicates instead.
+ unsigned NumElems = Info.EC.getKnownMinValue() * Info.NumVectors;
if (Info.ElementType == CGM.getContext().BoolTy) {
- NumElemsPerVG /= 8;
+ NumElems /= 8;
Info.ElementType = CGM.getContext().UnsignedCharTy;
}
- auto *LowerBound =
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
- llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0));
- SmallVector<uint64_t, 9> Expr(
- {llvm::dwarf::DW_OP_constu, NumElemsPerVG, llvm::dwarf::DW_OP_bregx,
- /* AArch64::VG */ 46, 0, llvm::dwarf::DW_OP_mul,
- llvm::dwarf::DW_OP_constu, 1, llvm::dwarf::DW_OP_minus});
- auto *UpperBound = DBuilder.createExpression(Expr);
+ llvm::Metadata *LowerBound, *UpperBound;
+ LowerBound = llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0));
+ if (Info.EC.isScalable()) {
+ unsigned NumElemsPerVG = NumElems / 2;
+ SmallVector<uint64_t, 9> Expr(
+ {llvm::dwarf::DW_OP_constu, NumElemsPerVG, llvm::dwarf::DW_OP_bregx,
+ /* AArch64::VG */ 46, 0, llvm::dwarf::DW_OP_mul,
+ llvm::dwarf::DW_OP_constu, 1, llvm::dwarf::DW_OP_minus});
+ UpperBound = DBuilder.createExpression(Expr);
+ } else
+ UpperBound = llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), NumElems - 1));
llvm::Metadata *Subscript = DBuilder.getOrCreateSubrange(
/*count*/ nullptr, LowerBound, UpperBound, /*stride*/ nullptr);
@@ -817,6 +840,17 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
return DBuilder.createVectorType(/*Size=*/0, Align, ElemTy,
SubscriptArray);
}
+
+#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
+ case BuiltinType::Id: { \
+ if (!SingletonId) \
+ SingletonId = \
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, \
+ MangledName, TheCU, TheCU->getFile(), 0); \
+ return SingletonId; \
+ }
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+
case BuiltinType::UChar:
case BuiltinType::Char_U:
Encoding = llvm::dwarf::DW_ATE_unsigned_char;
@@ -1455,9 +1489,9 @@ llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty,
return F;
}
-llvm::DIType *CGDebugInfo::createBitFieldType(const FieldDecl *BitFieldDecl,
- llvm::DIScope *RecordTy,
- const RecordDecl *RD) {
+llvm::DIDerivedType *
+CGDebugInfo::createBitFieldType(const FieldDecl *BitFieldDecl,
+ llvm::DIScope *RecordTy, const RecordDecl *RD) {
StringRef Name = BitFieldDecl->getName();
QualType Ty = BitFieldDecl->getType();
SourceLocation Loc = BitFieldDecl->getLocation();
@@ -1488,6 +1522,78 @@ llvm::DIType *CGDebugInfo::createBitFieldType(const FieldDecl *BitFieldDecl,
Flags, DebugType, Annotations);
}
+llvm::DIDerivedType *CGDebugInfo::createBitFieldSeparatorIfNeeded(
+ const FieldDecl *BitFieldDecl, const llvm::DIDerivedType *BitFieldDI,
+ llvm::ArrayRef<llvm::Metadata *> PreviousFieldsDI, const RecordDecl *RD) {
+
+ if (!CGM.getTargetCodeGenInfo().shouldEmitDWARFBitFieldSeparators())
+ return nullptr;
+
+ /*
+ Add a *single* zero-bitfield separator between two non-zero bitfields
+ separated by one or more zero-bitfields. This is used to distinguish between
+    structures such as the ones below, where the memory layout is the same, but how
+ the ABI assigns fields to registers differs.
+
+ struct foo {
+ int space[4];
+ char a : 8; // on amdgpu, passed on v4
+ char b : 8;
+ char x : 8;
+ char y : 8;
+ };
+ struct bar {
+ int space[4];
+ char a : 8; // on amdgpu, passed on v4
+ char b : 8;
+ char : 0;
+ char x : 8; // passed on v5
+ char y : 8;
+ };
+ */
+ if (PreviousFieldsDI.empty())
+ return nullptr;
+
+ // If we already emitted metadata for a 0-length bitfield, nothing to do here.
+ auto *PreviousMDEntry =
+ PreviousFieldsDI.empty() ? nullptr : PreviousFieldsDI.back();
+ auto *PreviousMDField =
+ dyn_cast_or_null<llvm::DIDerivedType>(PreviousMDEntry);
+ if (!PreviousMDField || !PreviousMDField->isBitField() ||
+ PreviousMDField->getSizeInBits() == 0)
+ return nullptr;
+
+ auto PreviousBitfield = RD->field_begin();
+ std::advance(PreviousBitfield, BitFieldDecl->getFieldIndex() - 1);
+
+ assert(PreviousBitfield->isBitField());
+
+ ASTContext &Context = CGM.getContext();
+ if (!PreviousBitfield->isZeroLengthBitField(Context))
+ return nullptr;
+
+ QualType Ty = PreviousBitfield->getType();
+ SourceLocation Loc = PreviousBitfield->getLocation();
+ llvm::DIFile *VUnit = getOrCreateFile(Loc);
+ llvm::DIType *DebugType = getOrCreateType(Ty, VUnit);
+ llvm::DIScope *RecordTy = BitFieldDI->getScope();
+
+ llvm::DIFile *File = getOrCreateFile(Loc);
+ unsigned Line = getLineNumber(Loc);
+
+ uint64_t StorageOffsetInBits =
+ cast<llvm::ConstantInt>(BitFieldDI->getStorageOffsetInBits())
+ ->getZExtValue();
+
+ llvm::DINode::DIFlags Flags =
+ getAccessFlag(PreviousBitfield->getAccess(), RD);
+ llvm::DINodeArray Annotations =
+ CollectBTFDeclTagAnnotations(*PreviousBitfield);
+ return DBuilder.createBitFieldMemberType(
+ RecordTy, "", File, Line, 0, StorageOffsetInBits, StorageOffsetInBits,
+ Flags, DebugType, Annotations);
+}
+
llvm::DIType *CGDebugInfo::createFieldType(
StringRef name, QualType type, SourceLocation loc, AccessSpecifier AS,
uint64_t offsetInBits, uint32_t AlignInBits, llvm::DIFile *tunit,
@@ -1596,7 +1702,11 @@ void CGDebugInfo::CollectRecordNormalField(
llvm::DIType *FieldType;
if (field->isBitField()) {
- FieldType = createBitFieldType(field, RecordTy, RD);
+ llvm::DIDerivedType *BitFieldType;
+ FieldType = BitFieldType = createBitFieldType(field, RecordTy, RD);
+ if (llvm::DIType *Separator =
+ createBitFieldSeparatorIfNeeded(field, BitFieldType, elements, RD))
+ elements.push_back(Separator);
} else {
auto Align = getDeclAlignIfRequired(field, CGM.getContext());
llvm::DINodeArray Annotations = CollectBTFDeclTagAnnotations(field);
@@ -1835,27 +1945,8 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
ContainingType = RecordTy;
}
- // We're checking for deleted C++ special member functions
- // [Ctors,Dtors, Copy/Move]
- auto checkAttrDeleted = [&](const auto *Method) {
- if (Method->getCanonicalDecl()->isDeleted())
- SPFlags |= llvm::DISubprogram::SPFlagDeleted;
- };
-
- switch (Method->getKind()) {
-
- case Decl::CXXConstructor:
- case Decl::CXXDestructor:
- checkAttrDeleted(Method);
- break;
- case Decl::CXXMethod:
- if (Method->isCopyAssignmentOperator() ||
- Method->isMoveAssignmentOperator())
- checkAttrDeleted(Method);
- break;
- default:
- break;
- }
+ if (Method->getCanonicalDecl()->isDeleted())
+ SPFlags |= llvm::DISubprogram::SPFlagDeleted;
if (Method->isNoReturn())
Flags |= llvm::DINode::FlagNoReturn;
@@ -1885,7 +1976,7 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
// In this debug mode, emit type info for a class when its constructor type
// info is emitted.
- if (DebugKind == codegenoptions::DebugInfoConstructor)
+ if (DebugKind == llvm::codegenoptions::DebugInfoConstructor)
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Method))
completeUnusedClass(*CD->getParent());
@@ -2010,15 +2101,10 @@ CGDebugInfo::CollectTemplateParams(std::optional<TemplateArgs> OArgs,
for (unsigned i = 0, e = Args.Args.size(); i != e; ++i) {
const TemplateArgument &TA = Args.Args[i];
StringRef Name;
- bool defaultParameter = false;
- if (Args.TList) {
+ const bool defaultParameter = TA.getIsDefaulted();
+ if (Args.TList)
Name = Args.TList->getParam(i)->getName();
- NamedDecl const *ND = Args.TList->getParam(i);
- defaultParameter = clang::isSubstitutedDefaultArgument(
- CGM.getContext(), TA, ND, Args.Args, Args.TList->getDepth());
- }
-
switch (TA.getKind()) {
case TemplateArgument::Type: {
llvm::DIType *TTy = getOrCreateType(TA.getAsType(), Unit);
@@ -2362,7 +2448,7 @@ void CGDebugInfo::addHeapAllocSiteMetadata(llvm::CallBase *CI,
QualType AllocatedTy,
SourceLocation Loc) {
if (CGM.getCodeGenOpts().getDebugInfo() <=
- codegenoptions::DebugLineTablesOnly)
+ llvm::codegenoptions::DebugLineTablesOnly)
return;
llvm::MDNode *node;
if (AllocatedTy->isVoidType())
@@ -2374,7 +2460,7 @@ void CGDebugInfo::addHeapAllocSiteMetadata(llvm::CallBase *CI,
}
void CGDebugInfo::completeType(const EnumDecl *ED) {
- if (DebugKind <= codegenoptions::DebugLineTablesOnly)
+ if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
QualType Ty = CGM.getContext().getEnumType(ED);
void *TyPtr = Ty.getAsOpaquePtr();
@@ -2387,7 +2473,7 @@ void CGDebugInfo::completeType(const EnumDecl *ED) {
}
void CGDebugInfo::completeType(const RecordDecl *RD) {
- if (DebugKind > codegenoptions::LimitedDebugInfo ||
+ if (DebugKind > llvm::codegenoptions::LimitedDebugInfo ||
!CGM.getLangOpts().CPlusPlus)
completeRequiredType(RD);
}
@@ -2449,14 +2535,18 @@ void CGDebugInfo::completeClassData(const RecordDecl *RD) {
}
void CGDebugInfo::completeClass(const RecordDecl *RD) {
- if (DebugKind <= codegenoptions::DebugLineTablesOnly)
+ if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
QualType Ty = CGM.getContext().getRecordType(RD);
void *TyPtr = Ty.getAsOpaquePtr();
auto I = TypeCache.find(TyPtr);
if (I != TypeCache.end() && !cast<llvm::DIType>(I->second)->isForwardDecl())
return;
- llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<RecordType>());
+
+  // We want the canonical definition of the structure not to
+  // be the typedef, since that would lead to circular typedef
+  // metadata.
+ auto [Res, PrefRes] = CreateTypeDefinition(Ty->castAs<RecordType>());
assert(!Res->isForwardDecl());
TypeCache[TyPtr].reset(Res);
}
@@ -2483,12 +2573,21 @@ static bool canUseCtorHoming(const CXXRecordDecl *RD) {
if (isClassOrMethodDLLImport(RD))
return false;
- return !RD->isLambda() && !RD->isAggregate() &&
- !RD->hasTrivialDefaultConstructor() &&
- !RD->hasConstexprNonCopyMoveConstructor();
+ if (RD->isLambda() || RD->isAggregate() ||
+ RD->hasTrivialDefaultConstructor() ||
+ RD->hasConstexprNonCopyMoveConstructor())
+ return false;
+
+ for (const CXXConstructorDecl *Ctor : RD->ctors()) {
+ if (Ctor->isCopyOrMoveConstructor())
+ continue;
+ if (!Ctor->isDeleted())
+ return true;
+ }
+ return false;
}
-static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
+static bool shouldOmitDefinition(llvm::codegenoptions::DebugInfoKind DebugKind,
bool DebugTypeExtRefs, const RecordDecl *RD,
const LangOptions &LangOpts) {
if (DebugTypeExtRefs && isDefinedInClangModule(RD->getDefinition()))
@@ -2501,10 +2600,10 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
// Only emit forward declarations in line tables only to keep debug info size
// small. This only applies to CodeView, since we don't emit types in DWARF
// line tables only.
- if (DebugKind == codegenoptions::DebugLineTablesOnly)
+ if (DebugKind == llvm::codegenoptions::DebugLineTablesOnly)
return true;
- if (DebugKind > codegenoptions::LimitedDebugInfo ||
+ if (DebugKind > llvm::codegenoptions::LimitedDebugInfo ||
RD->hasAttr<StandaloneDebugAttr>())
return false;
@@ -2540,7 +2639,7 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
// In constructor homing mode, only emit complete debug info for a class
// when its constructor is emitted.
- if ((DebugKind == codegenoptions::DebugInfoConstructor) &&
+ if ((DebugKind == llvm::codegenoptions::DebugInfoConstructor) &&
canUseCtorHoming(CXXDecl))
return true;
@@ -2567,10 +2666,25 @@ llvm::DIType *CGDebugInfo::CreateType(const RecordType *Ty) {
return T;
}
- return CreateTypeDefinition(Ty);
+ auto [Def, Pref] = CreateTypeDefinition(Ty);
+
+ return Pref ? Pref : Def;
}
-llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
+llvm::DIType *CGDebugInfo::GetPreferredNameType(const CXXRecordDecl *RD,
+ llvm::DIFile *Unit) {
+ if (!RD)
+ return nullptr;
+
+ auto const *PNA = RD->getAttr<PreferredNameAttr>();
+ if (!PNA)
+ return nullptr;
+
+ return getOrCreateType(PNA->getTypedefType(), Unit);
+}
+
+std::pair<llvm::DIType *, llvm::DIType *>
+CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
RecordDecl *RD = Ty->getDecl();
// Get overall information about the record type for the debug info.
@@ -2586,7 +2700,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
const RecordDecl *D = RD->getDefinition();
if (!D || !D->isCompleteDefinition())
- return FwdDecl;
+ return {FwdDecl, nullptr};
if (const auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD))
CollectContainingType(CXXDecl, FwdDecl);
@@ -2625,7 +2739,12 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
llvm::MDNode::replaceWithPermanent(llvm::TempDICompositeType(FwdDecl));
RegionMap[Ty->getDecl()].reset(FwdDecl);
- return FwdDecl;
+
+ if (CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB)
+ if (auto *PrefDI = GetPreferredNameType(CXXDecl, DefUnit))
+ return {FwdDecl, PrefDI};
+
+ return {FwdDecl, nullptr};
}
llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectType *Ty,
@@ -3173,7 +3292,7 @@ llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
Flags);
const FunctionProtoType *FPT =
- Ty->getPointeeType()->getAs<FunctionProtoType>();
+ Ty->getPointeeType()->castAs<FunctionProtoType>();
return DBuilder.createMemberPointerType(
getOrCreateInstanceMethodType(
CXXMethodDecl::getThisType(FPT, Ty->getMostRecentCXXRecordDecl()),
@@ -3368,7 +3487,8 @@ void CGDebugInfo::completeTemplateDefinition(
}
void CGDebugInfo::completeUnusedClass(const CXXRecordDecl &D) {
- if (DebugKind <= codegenoptions::DebugLineTablesOnly || D.isDynamicClass())
+ if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly ||
+ D.isDynamicClass())
return;
completeClassData(&D);
@@ -3653,7 +3773,7 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
llvm::DICompositeType *RealDecl) {
// A class's primary base or the class itself contains the vtable.
- llvm::DICompositeType *ContainingType = nullptr;
+ llvm::DIType *ContainingType = nullptr;
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
// Seek non-virtual primary base root.
@@ -3665,9 +3785,8 @@ void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
else
break;
}
- ContainingType = cast<llvm::DICompositeType>(
- getOrCreateType(QualType(PBase->getTypeForDecl(), 0),
- getOrCreateFile(RD->getLocation())));
+ ContainingType = getOrCreateType(QualType(PBase->getTypeForDecl(), 0),
+ getOrCreateFile(RD->getLocation()));
} else if (RD->isDynamicClass())
ContainingType = RealDecl;
@@ -3702,17 +3821,18 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
// No need to replicate the linkage name if it isn't different from the
// subprogram name, no need to have it at all unless coverage is enabled or
// debug is set to more than just line tables or extra debug info is needed.
- if (LinkageName == Name || (!CGM.getCodeGenOpts().EmitGcovArcs &&
- !CGM.getCodeGenOpts().EmitGcovNotes &&
- !CGM.getCodeGenOpts().DebugInfoForProfiling &&
- !CGM.getCodeGenOpts().PseudoProbeForProfiling &&
- DebugKind <= codegenoptions::DebugLineTablesOnly))
+ if (LinkageName == Name ||
+ (CGM.getCodeGenOpts().CoverageNotesFile.empty() &&
+ CGM.getCodeGenOpts().CoverageDataFile.empty() &&
+ !CGM.getCodeGenOpts().DebugInfoForProfiling &&
+ !CGM.getCodeGenOpts().PseudoProbeForProfiling &&
+ DebugKind <= llvm::codegenoptions::DebugLineTablesOnly))
LinkageName = StringRef();
// Emit the function scope in line tables only mode (if CodeView) to
// differentiate between function names.
if (CGM.getCodeGenOpts().hasReducedDebugInfo() ||
- (DebugKind == codegenoptions::DebugLineTablesOnly &&
+ (DebugKind == llvm::codegenoptions::DebugLineTablesOnly &&
CGM.getCodeGenOpts().EmitCodeView)) {
if (const NamespaceDecl *NSDecl =
dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
@@ -3904,7 +4024,7 @@ llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
}
llvm::DISubprogram *CGDebugInfo::getFunctionDeclaration(const Decl *D) {
- if (!D || DebugKind <= codegenoptions::DebugLineTablesOnly)
+ if (!D || DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return nullptr;
const auto *FD = dyn_cast<FunctionDecl>(D);
@@ -3941,7 +4061,7 @@ llvm::DISubprogram *CGDebugInfo::getFunctionDeclaration(const Decl *D) {
llvm::DISubprogram *CGDebugInfo::getObjCMethodDeclaration(
const Decl *D, llvm::DISubroutineType *FnType, unsigned LineNo,
llvm::DINode::DIFlags Flags, llvm::DISubprogram::DISPFlags SPFlags) {
- if (!D || DebugKind <= codegenoptions::DebugLineTablesOnly)
+ if (!D || DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return nullptr;
const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
@@ -3981,7 +4101,7 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
llvm::DIFile *F) {
// In CodeView, we emit the function types in line tables only because the
// only way to distinguish between functions is by display name and type.
- if (!D || (DebugKind <= codegenoptions::DebugLineTablesOnly &&
+ if (!D || (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly &&
!CGM.getCodeGenOpts().EmitCodeView))
// Create fake but valid subroutine type. Otherwise -verify would fail, and
// subprogram DIE will miss DW_AT_decl_file and DW_AT_decl_line fields.
@@ -4219,10 +4339,9 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
llvm::DINodeArray Annotations = CollectBTFDeclTagAnnotations(D);
llvm::DISubroutineType *STy = getOrCreateFunctionType(D, FnType, Unit);
- llvm::DISubprogram *SP =
- DBuilder.createFunction(FDContext, Name, LinkageName, Unit, LineNo, STy,
- ScopeLine, Flags, SPFlags, TParamsArray.get(),
- getFunctionDeclaration(D), nullptr, Annotations);
+ llvm::DISubprogram *SP = DBuilder.createFunction(
+ FDContext, Name, LinkageName, Unit, LineNo, STy, ScopeLine, Flags,
+ SPFlags, TParamsArray.get(), nullptr, nullptr, Annotations);
// Preserve btf_decl_tag attributes for parameters of extern functions
// for BPF target. The parameters created in this loop are attached as
@@ -4337,7 +4456,7 @@ void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder,
CGM.getLLVMContext(), getLineNumber(Loc), getColumnNumber(Loc),
LexicalBlockStack.back(), CurInlinedAt));
- if (DebugKind <= codegenoptions::DebugLineTablesOnly)
+ if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
// Create a new lexical block and push it on the stack.
@@ -4351,7 +4470,7 @@ void CGDebugInfo::EmitLexicalBlockEnd(CGBuilderTy &Builder,
// Provide an entry in the line table for the end of the block.
EmitLocation(Builder, Loc);
- if (DebugKind <= codegenoptions::DebugLineTablesOnly)
+ if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
LexicalBlockStack.pop_back();
@@ -4834,9 +4953,10 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
llvm::DILocalVariable *
CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
- unsigned ArgNo, CGBuilderTy &Builder) {
+ unsigned ArgNo, CGBuilderTy &Builder,
+ bool UsePointerValue) {
assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
- return EmitDeclare(VD, AI, ArgNo, Builder);
+ return EmitDeclare(VD, AI, ArgNo, Builder, UsePointerValue);
}
namespace {
@@ -5167,11 +5287,11 @@ std::string CGDebugInfo::GetName(const Decl *D, bool Qualified) const {
const NamedDecl *ND = dyn_cast<NamedDecl>(D);
if (!ND)
return Name;
- codegenoptions::DebugTemplateNamesKind TemplateNamesKind =
+ llvm::codegenoptions::DebugTemplateNamesKind TemplateNamesKind =
CGM.getCodeGenOpts().getDebugSimpleTemplateNames();
if (!CGM.getCodeGenOpts().hasReducedDebugInfo())
- TemplateNamesKind = codegenoptions::DebugTemplateNamesKind::Full;
+ TemplateNamesKind = llvm::codegenoptions::DebugTemplateNamesKind::Full;
std::optional<TemplateArgs> Args;
@@ -5254,12 +5374,12 @@ std::string CGDebugInfo::GetName(const Decl *D, bool Qualified) const {
PrintingPolicy PP = getPrintingPolicy();
- if (TemplateNamesKind == codegenoptions::DebugTemplateNamesKind::Full ||
+ if (TemplateNamesKind == llvm::codegenoptions::DebugTemplateNamesKind::Full ||
!Reconstitutable) {
ND->getNameForDiagnostic(OS, PP, Qualified);
} else {
- bool Mangled =
- TemplateNamesKind == codegenoptions::DebugTemplateNamesKind::Mangled;
+ bool Mangled = TemplateNamesKind ==
+ llvm::codegenoptions::DebugTemplateNamesKind::Mangled;
// check if it's a template
if (Mangled)
OS << "_STN|";
@@ -5747,8 +5867,9 @@ llvm::DebugLoc CGDebugInfo::SourceLocToDebugLoc(SourceLocation Loc) {
llvm::DINode::DIFlags CGDebugInfo::getCallSiteRelatedAttrs() const {
// Call site-related attributes are only useful in optimized programs, and
// when there's a possibility of debugging backtraces.
- if (!CGM.getLangOpts().Optimize || DebugKind == codegenoptions::NoDebugInfo ||
- DebugKind == codegenoptions::LocTrackingOnly)
+ if (!CGM.getLangOpts().Optimize ||
+ DebugKind == llvm::codegenoptions::NoDebugInfo ||
+ DebugKind == llvm::codegenoptions::LocTrackingOnly)
return llvm::DINode::FlagZero;
// Call site-related attributes are available in DWARF v5. Some debuggers,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
index 95484a060cd8..1fd08626358b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
@@ -56,7 +56,7 @@ class CGDebugInfo {
friend class ApplyDebugLocation;
friend class SaveAndRestoreLocation;
CodeGenModule &CGM;
- const codegenoptions::DebugInfoKind DebugKind;
+ const llvm::codegenoptions::DebugInfoKind DebugKind;
bool DebugTypeExtRefs;
llvm::DIBuilder DBuilder;
llvm::DICompileUnit *TheCU = nullptr;
@@ -80,13 +80,12 @@ class CGDebugInfo {
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
llvm::DIType *Id##Ty = nullptr;
#include "clang/Basic/OpenCLExtensionTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) llvm::DIType *SingletonId = nullptr;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
/// Cache of previously constructed Types.
llvm::DenseMap<const void *, llvm::TrackingMDRef> TypeCache;
- std::map<llvm::StringRef, llvm::StringRef, std::greater<llvm::StringRef>>
- DebugPrefixMap;
-
/// Cache that maps VLA types to size expressions for that type,
/// represented by instantiated Metadata nodes.
llvm::SmallDenseMap<QualType, llvm::Metadata *> SizeExprCache;
@@ -149,7 +148,7 @@ class CGDebugInfo {
llvm::BumpPtrAllocator DebugInfoNames;
StringRef CWDName;
- llvm::DenseMap<const char *, llvm::TrackingMDRef> DIFileCache;
+ llvm::StringMap<llvm::TrackingMDRef> DIFileCache;
llvm::DenseMap<const FunctionDecl *, llvm::TrackingMDRef> SPCache;
/// Cache declarations relevant to DW_TAG_imported_declarations (C++
/// using declarations and global alias variables) that aren't covered
@@ -190,7 +189,15 @@ class CGDebugInfo {
llvm::DIType *CreateType(const FunctionType *Ty, llvm::DIFile *F);
/// Get structure or union type.
llvm::DIType *CreateType(const RecordType *Tyg);
- llvm::DIType *CreateTypeDefinition(const RecordType *Ty);
+
+ /// Create definition for the specified 'Ty'.
+ ///
+ /// \returns A pair of 'llvm::DIType's. The first is the definition
+ /// of the 'Ty'. The second is the type specified by the preferred_name
+ /// attribute on 'Ty', which can be a nullptr if no such attribute
+ /// exists.
+ std::pair<llvm::DIType *, llvm::DIType *>
+ CreateTypeDefinition(const RecordType *Ty);
llvm::DICompositeType *CreateLimitedType(const RecordType *Ty);
void CollectContainingType(const CXXRecordDecl *RD,
llvm::DICompositeType *CT);
@@ -274,6 +281,12 @@ class CGDebugInfo {
llvm::DenseSet<CanonicalDeclPtr<const CXXRecordDecl>> &SeenTypes,
llvm::DINode::DIFlags StartingFlags);
+ /// Helper function that returns the llvm::DIType that the
+ /// PreferredNameAttr attribute on \ref RD refers to. If no such
+ /// attribute exists, returns nullptr.
+ llvm::DIType *GetPreferredNameType(const CXXRecordDecl *RD,
+ llvm::DIFile *Unit);
+
struct TemplateArgs {
const TemplateParameterList *TList;
llvm::ArrayRef<TemplateArgument> Args;
@@ -320,9 +333,15 @@ class CGDebugInfo {
}
/// Create new bit field member.
- llvm::DIType *createBitFieldType(const FieldDecl *BitFieldDecl,
- llvm::DIScope *RecordTy,
- const RecordDecl *RD);
+ llvm::DIDerivedType *createBitFieldType(const FieldDecl *BitFieldDecl,
+ llvm::DIScope *RecordTy,
+ const RecordDecl *RD);
+
+  /// Create an anonymous zero-size separator for bit-field-decl if needed on
+ /// the target.
+ llvm::DIDerivedType *createBitFieldSeparatorIfNeeded(
+ const FieldDecl *BitFieldDecl, const llvm::DIDerivedType *BitFieldDI,
+ llvm::ArrayRef<llvm::Metadata *> PreviousFieldsDI, const RecordDecl *RD);
/// Helpers for collecting fields of a record.
/// @{
@@ -487,10 +506,9 @@ public:
/// Emit call to \c llvm.dbg.declare for an argument variable
/// declaration.
- llvm::DILocalVariable *EmitDeclareOfArgVariable(const VarDecl *Decl,
- llvm::Value *AI,
- unsigned ArgNo,
- CGBuilderTy &Builder);
+ llvm::DILocalVariable *
+ EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI, unsigned ArgNo,
+ CGBuilderTy &Builder, bool UsePointerValue = false);
/// Emit call to \c llvm.dbg.declare for the block-literal argument
/// to a block invocation function.
@@ -811,7 +829,13 @@ public:
ApplyDebugLocation(ApplyDebugLocation &&Other) : CGF(Other.CGF) {
Other.CGF = nullptr;
}
- ApplyDebugLocation &operator=(ApplyDebugLocation &&) = default;
+
+  // Define move assignment operator.
+ ApplyDebugLocation &operator=(ApplyDebugLocation &&Other) {
+ CGF = Other.CGF;
+ Other.CGF = nullptr;
+ return *this;
+ }
~ApplyDebugLocation();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
index ceaddc4e694a..b0d6eb05acc2 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
@@ -292,7 +292,8 @@ llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
if (AS != ExpectedAS) {
Addr = getTargetCodeGenInfo().performAddrSpaceCast(
*this, GV, AS, ExpectedAS,
- LTy->getPointerTo(getContext().getTargetAddressSpace(ExpectedAS)));
+ llvm::PointerType::get(getLLVMContext(),
+ getContext().getTargetAddressSpace(ExpectedAS)));
}
setStaticLocalDeclAddress(&D, Addr);
@@ -394,13 +395,15 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
OldGV->eraseFromParent();
}
- GV->setConstant(CGM.isTypeConstant(D.getType(), true));
+ bool NeedsDtor =
+ D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
+
+ GV->setConstant(CGM.isTypeConstant(D.getType(), true, !NeedsDtor));
GV->setInitializer(Init);
emitter.finalize(GV);
- if (D.needsDestruction(getContext()) == QualType::DK_cxx_destructor &&
- HaveInsertPoint()) {
+ if (NeedsDtor && HaveInsertPoint()) {
// We have a constant initializer, but a nontrivial destructor. We still
// need to perform a guarded "initialization" in order to register the
// destructor.
@@ -467,6 +470,9 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
else if (D.hasAttr<UsedAttr>())
CGM.addUsedOrCompilerUsedGlobal(var);
+ if (CGM.getCodeGenOpts().KeepPersistentStorageVariables)
+ CGM.addUsedOrCompilerUsedGlobal(var);
+
// We may have to cast the constant because of the initializer
// mismatch above.
//
@@ -578,6 +584,16 @@ namespace {
}
};
+ struct KmpcAllocFree final : EHScopeStack::Cleanup {
+ std::pair<llvm::Value *, llvm::Value *> AddrSizePair;
+ KmpcAllocFree(const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair)
+ : AddrSizePair(AddrSizePair) {}
+ void Emit(CodeGenFunction &CGF, Flags EmissionFlags) override {
+ auto &RT = CGF.CGM.getOpenMPRuntime();
+ RT.getKmpcFreeShared(CGF, AddrSizePair);
+ }
+ };
+
struct ExtendGCLifetime final : EHScopeStack::Cleanup {
const VarDecl &Var;
ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
@@ -724,8 +740,8 @@ static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
// Handle a formal type change to avoid asserting.
auto srcAddr = srcLV.getAddress(CGF);
if (needsCast) {
- srcAddr = CGF.Builder.CreateElementBitCast(
- srcAddr, destLV.getAddress(CGF).getElementType());
+ srcAddr =
+ srcAddr.withElementType(destLV.getAddress(CGF).getElementType());
}
// If it was an l-value, use objc_copyWeak.
@@ -1170,7 +1186,7 @@ static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
llvm::Constant *Constant,
CharUnits Align) {
Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
- return Builder.CreateElementBitCast(SrcPtr, CGM.Int8Ty);
+ return SrcPtr.withElementType(CGM.Int8Ty);
}
static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
@@ -1204,7 +1220,7 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
bool valueAlreadyCorrect =
constant->isNullValue() || isa<llvm::UndefValue>(constant);
if (!valueAlreadyCorrect) {
- Loc = Builder.CreateElementBitCast(Loc, Ty);
+ Loc = Loc.withElementType(Ty);
emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
IsAutoInit);
}
@@ -1403,9 +1419,6 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
else {
// Create an artificial VarDecl to generate debug info for.
IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
- assert(cast<llvm::PointerType>(VlaSize.NumElts->getType())
- ->isOpaqueOrPointeeTypeMatches(SizeTy) &&
- "Number of VLA elements must be SizeTy");
auto QT = getContext().getIntTypeForBitwidth(
SizeTy->getScalarSizeInBits(), false);
auto *ArtificialDecl = VarDecl::Create(
@@ -1481,10 +1494,12 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// emit it as a global instead.
// Exception is if a variable is located in non-constant address space
// in OpenCL.
+ bool NeedsDtor =
+ D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
if ((!getLangOpts().OpenCL ||
Ty.getAddressSpace() == LangAS::opencl_constant) &&
(CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
- !isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
+ !isEscapingByRef && CGM.isTypeConstant(Ty, true, !NeedsDtor))) {
EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
// Signal this condition to later callbacks.
@@ -1581,28 +1596,59 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
} else {
EnsureInsertPoint();
- if (!DidCallStackSave) {
- // Save the stack.
- Address Stack =
- CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");
+ // Delayed globalization for variable length declarations. This ensures that
+ // the expression representing the length has been emitted and can be used
+ // by the definition of the VLA. Since this is an escaped declaration, in
+ // OpenMP we have to use a call to __kmpc_alloc_shared(). The matching
+ // deallocation call to __kmpc_free_shared() is emitted later.
+ bool VarAllocated = false;
+ if (getLangOpts().OpenMPIsTargetDevice) {
+ auto &RT = CGM.getOpenMPRuntime();
+ if (RT.isDelayedVariableLengthDecl(*this, &D)) {
+ // Emit call to __kmpc_alloc_shared() instead of the alloca.
+ std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
+ RT.getKmpcAllocShared(*this, &D);
+
+ // Save the address of the allocation:
+ LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
+ CGM.getContext().getDeclAlign(&D),
+ AlignmentSource::Decl);
+ address = Base.getAddress(*this);
+
+ // Push a cleanup block to emit the call to __kmpc_free_shared in the
+ // appropriate location at the end of the scope of the
+ // __kmpc_alloc_shared functions:
+ pushKmpcAllocFree(NormalCleanup, AddrSizePair);
+
+ // Mark variable as allocated:
+ VarAllocated = true;
+ }
+ }
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
- llvm::Value *V = Builder.CreateCall(F);
- Builder.CreateStore(V, Stack);
+ if (!VarAllocated) {
+ if (!DidCallStackSave) {
+ // Save the stack.
+ Address Stack =
+ CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");
- DidCallStackSave = true;
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
+ llvm::Value *V = Builder.CreateCall(F);
+ Builder.CreateStore(V, Stack);
- // Push a cleanup block and restore the stack there.
- // FIXME: in general circumstances, this should be an EH cleanup.
- pushStackRestore(NormalCleanup, Stack);
- }
+ DidCallStackSave = true;
- auto VlaSize = getVLASize(Ty);
- llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
+ // Push a cleanup block and restore the stack there.
+ // FIXME: in general circumstances, this should be an EH cleanup.
+ pushStackRestore(NormalCleanup, Stack);
+ }
+
+ auto VlaSize = getVLASize(Ty);
+ llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
- // Allocate memory for the array.
- address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
- &AllocaAddr);
+ // Allocate memory for the array.
+ address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
+ &AllocaAddr);
+ }
// If we have debug info enabled, properly describe the VLA dimensions for
// this type by registering the vla size expression for each of the
@@ -1788,7 +1834,7 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
llvm::Value *BaseSizeInChars =
llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
- Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
+ Address Begin = Loc.withElementType(Int8Ty);
llvm::Value *End = Builder.CreateInBoundsGEP(
Begin.getElementType(), Begin.getPointer(), SizeVal, "vla.end");
llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
@@ -1919,7 +1965,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
return EmitStoreThroughLValue(RValue::get(constant), lv, true);
}
- emitStoresForConstant(CGM, D, Builder.CreateElementBitCast(Loc, CGM.Int8Ty),
+ emitStoresForConstant(CGM, D, Loc.withElementType(CGM.Int8Ty),
type.isVolatileQualified(), Builder, constant,
/*IsAutoInit=*/false);
}
@@ -2139,6 +2185,11 @@ void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}
+void CodeGenFunction::pushKmpcAllocFree(
+ CleanupKind Kind, std::pair<llvm::Value *, llvm::Value *> AddrSizePair) {
+ EHStack.pushCleanup<KmpcAllocFree>(Kind, AddrSizePair);
+}
+
void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
Address addr, QualType type,
Destroyer *destroyer,
@@ -2451,7 +2502,10 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
"Invalid argument to EmitParmDecl");
- Arg.getAnyValue()->setName(D.getName());
+ // Set the name of the parameter's initial value to make IR easier to
+ // read. Don't modify the names of globals.
+ if (!isa<llvm::GlobalValue>(Arg.getAnyValue()))
+ Arg.getAnyValue()->setName(D.getName());
QualType Ty = D.getType();
@@ -2476,17 +2530,30 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
Address AllocaPtr = Address::invalid();
bool DoStore = false;
bool IsScalar = hasScalarEvaluationKind(Ty);
+ bool UseIndirectDebugAddress = false;
+
// If we already have a pointer to the argument, reuse the input pointer.
if (Arg.isIndirect()) {
- // If we have a prettier pointer type at this point, bitcast to that.
DeclPtr = Arg.getIndirectAddress();
- DeclPtr = Builder.CreateElementBitCast(DeclPtr, ConvertTypeForMem(Ty),
- D.getName());
+ DeclPtr = DeclPtr.withElementType(ConvertTypeForMem(Ty));
// Indirect argument is in alloca address space, which may be different
// from the default address space.
auto AllocaAS = CGM.getASTAllocaAddressSpace();
auto *V = DeclPtr.getPointer();
AllocaPtr = DeclPtr;
+
+ // For truly ABI indirect arguments -- those that are not `byval` -- store
+ // the address of the argument on the stack to preserve debug information.
+ ABIArgInfo ArgInfo = CurFnInfo->arguments()[ArgNo - 1].info;
+ if (ArgInfo.isIndirect())
+ UseIndirectDebugAddress = !ArgInfo.getIndirectByVal();
+ if (UseIndirectDebugAddress) {
+ auto PtrTy = getContext().getPointerType(Ty);
+ AllocaPtr = CreateMemTemp(PtrTy, getContext().getTypeAlignInChars(PtrTy),
+ D.getName() + ".indirect_addr");
+ EmitStoreOfScalar(V, AllocaPtr, /* Volatile */ false, PtrTy);
+ }
+
auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
auto DestLangAS =
getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
@@ -2494,9 +2561,11 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
assert(getContext().getTargetAddressSpace(SrcLangAS) ==
CGM.getDataLayout().getAllocaAddrSpace());
auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
- auto *T = DeclPtr.getElementType()->getPointerTo(DestAS);
- DeclPtr = DeclPtr.withPointer(getTargetHooks().performAddrSpaceCast(
- *this, V, SrcLangAS, DestLangAS, T, true));
+ auto *T = llvm::PointerType::get(getLLVMContext(), DestAS);
+ DeclPtr =
+ DeclPtr.withPointer(getTargetHooks().performAddrSpaceCast(
+ *this, V, SrcLangAS, DestLangAS, T, true),
+ DeclPtr.isKnownNonNull());
}
// Push a destructor cleanup for this parameter if the ABI requires it.
@@ -2603,7 +2672,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk &&
!NoDebugInfo) {
llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
- &D, AllocaPtr.getPointer(), ArgNo, Builder);
+ &D, AllocaPtr.getPointer(), ArgNo, Builder, UseIndirectDebugAddress);
if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
DI->getParamDbgMappings().insert({Var, DILocalVar});
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
index dcd811ea257b..be8fb6c274db 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -122,8 +122,8 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
if (CGF.getContext().getLangOpts().OpenCL) {
auto DestAS =
CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
- auto DestTy = CGF.getTypes().ConvertType(Type)->getPointerTo(
- CGM.getContext().getTargetAddressSpace(DestAS));
+ auto DestTy = llvm::PointerType::get(
+ CGM.getLLVMContext(), CGM.getContext().getTargetAddressSpace(DestAS));
auto SrcAS = D.getType().getQualifiers().getAddressSpace();
if (DestAS == SrcAS)
Argument = llvm::ConstantExpr::getBitCast(Addr.getPointer(), DestTy);
@@ -132,12 +132,11 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
// of the global destructor function should be adjusted accordingly.
Argument = llvm::ConstantPointerNull::get(DestTy);
} else {
- Argument = llvm::ConstantExpr::getBitCast(
- Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());
+ Argument = Addr.getPointer();
}
// Otherwise, the standard logic requires a helper function.
} else {
- Addr = Addr.getElementBitCast(CGF.ConvertTypeForMem(Type));
+ Addr = Addr.withElementType(CGF.ConvertTypeForMem(Type));
Func = CodeGenFunction(CGM)
.generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
CGF.needsEHCleanup(DtorKind), &D);
@@ -199,8 +198,8 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
unsigned ActualAddrSpace = GV->getAddressSpace();
llvm::Constant *DeclPtr = GV;
if (ActualAddrSpace != ExpectedAddrSpace) {
- llvm::PointerType *PTy = llvm::PointerType::getWithSamePointeeType(
- GV->getType(), ExpectedAddrSpace);
+ llvm::PointerType *PTy =
+ llvm::PointerType::get(getLLVMContext(), ExpectedAddrSpace);
DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
}
@@ -214,9 +213,11 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
&D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
PerformInit, this);
}
+ bool NeedsDtor =
+ D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
if (PerformInit)
EmitDeclInit(*this, D, DeclAddr);
- if (CGM.isTypeConstant(D.getType(), true))
+ if (CGM.isTypeConstant(D.getType(), true, !NeedsDtor))
EmitDeclInvariant(*this, D, DeclPtr);
else
EmitDeclDestroy(*this, D, DeclAddr);
@@ -642,7 +643,7 @@ void CodeGenModule::EmitCXXThreadLocalInitFunc() {
This is arranged to be run only once regardless of how many times the module
might be included transitively. This arranged by using a guard variable.
- If there are no initalizers at all (and also no imported modules) we reduce
+ If there are no initializers at all (and also no imported modules) we reduce
this to an empty function (since the Itanium ABI requires that this function
be available to a caller, which might be produced by a different
implementation).
@@ -878,13 +879,15 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
// Include the filename in the symbol name. Including "sub_" matches gcc
// and makes sure these symbols appear lexicographically behind the symbols
- // with priority emitted above.
+ // with priority emitted above. Module implementation units behave the same
+ // way as a non-modular TU with imports.
llvm::Function *Fn;
- if (CXX20ModuleInits && getContext().getModuleForCodeGen()) {
+ if (CXX20ModuleInits && getContext().getCurrentNamedModule() &&
+ !getContext().getCurrentNamedModule()->isModuleImplementation()) {
SmallString<256> InitFnName;
llvm::raw_svector_ostream Out(InitFnName);
cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
- .mangleModuleInitializer(getContext().getModuleForCodeGen(), Out);
+ .mangleModuleInitializer(getContext().getCurrentNamedModule(), Out);
Fn = CreateGlobalInitOrCleanUpFunction(
FTy, llvm::Twine(InitFnName), FI, SourceLocation(), false,
llvm::GlobalVariable::ExternalLinkage);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
index 6fa7871588f7..9cb7d4c7731d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
@@ -401,7 +401,7 @@ void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
// __cxa_allocate_exception returns a void*; we need to cast this
// to the appropriate type for the object.
llvm::Type *ty = ConvertTypeForMem(e->getType());
- Address typedAddr = Builder.CreateElementBitCast(addr, ty);
+ Address typedAddr = addr.withElementType(ty);
// FIXME: this isn't quite right! If there's a final unelided call
// to a copy constructor, then according to [except.terminate]p1 we
@@ -646,7 +646,7 @@ void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// Under async exceptions, catch(...) need to catch HW exception too
// Mark scope with SehTryBegin as a SEH __try scope
if (getLangOpts().EHAsynch)
- EmitRuntimeCallOrInvoke(getSehTryBeginFn(CGM));
+ EmitSehTryScopeBegin();
}
}
}
@@ -1842,7 +1842,7 @@ Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
llvm::Value *ChildVar =
Builder.CreateBitCast(RecoverCall, ParentVar.getType());
ChildVar->setName(ParentVar.getName());
- return ParentVar.withPointer(ChildVar);
+ return ParentVar.withPointer(ChildVar, KnownNonNull);
}
void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
@@ -2101,7 +2101,6 @@ void CodeGenFunction::EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
// pointer is stored in the second field. So, GEP 20 bytes backwards and
// load the pointer.
SEHInfo = Builder.CreateConstInBoundsGEP1_32(Int8Ty, EntryFP, -20);
- SEHInfo = Builder.CreateBitCast(SEHInfo, Int8PtrTy->getPointerTo());
SEHInfo = Builder.CreateAlignedLoad(Int8PtrTy, SEHInfo, getPointerAlign());
SEHCodeSlotStack.push_back(recoverAddrOfEscapedLocal(
ParentCGF, ParentCGF.SEHCodeSlotStack.back(), ParentFP));
@@ -2114,10 +2113,9 @@ void CodeGenFunction::EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
// CONTEXT *ContextRecord;
// };
// int exceptioncode = exception_pointers->ExceptionRecord->ExceptionCode;
- llvm::Type *RecordTy = CGM.Int32Ty->getPointerTo();
+ llvm::Type *RecordTy = llvm::PointerType::getUnqual(getLLVMContext());
llvm::Type *PtrsTy = llvm::StructType::get(RecordTy, CGM.VoidPtrTy);
- llvm::Value *Ptrs = Builder.CreateBitCast(SEHInfo, PtrsTy->getPointerTo());
- llvm::Value *Rec = Builder.CreateStructGEP(PtrsTy, Ptrs, 0);
+ llvm::Value *Rec = Builder.CreateStructGEP(PtrsTy, SEHInfo, 0);
Rec = Builder.CreateAlignedLoad(RecordTy, Rec, getPointerAlign());
llvm::Value *Code = Builder.CreateAlignedLoad(Int32Ty, Rec, getIntAlign());
assert(!SEHCodeSlotStack.empty() && "emitting EH code outside of __except");
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
index c26dd1b23321..ed6095f7cfeb 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
@@ -33,13 +33,16 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
+#include "llvm/Passes/OptimizationLevel.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
#include <optional>
@@ -52,18 +55,6 @@ using namespace CodeGen;
// Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//
-llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
- unsigned addressSpace =
- cast<llvm::PointerType>(value->getType())->getAddressSpace();
-
- llvm::PointerType *destType = Int8PtrTy;
- if (addressSpace)
- destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
-
- if (value->getType() == destType) return value;
- return Builder.CreateBitCast(value, destType);
-}
-
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
/// block.
Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
@@ -72,7 +63,7 @@ Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
llvm::Value *ArraySize) {
auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
Alloca->setAlignment(Align.getAsAlign());
- return Address(Alloca, Ty, Align);
+ return Address(Alloca, Ty, Align, KnownNonNull);
}
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
@@ -102,7 +93,7 @@ Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
}
- return Address(V, Ty, Align);
+ return Address(V, Ty, Align, KnownNonNull);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
@@ -151,7 +142,7 @@ Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
Result = Address(
Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()),
- VectorTy, Result.getAlignment());
+ VectorTy, Result.getAlignment(), KnownNonNull);
}
return Result;
}
@@ -401,7 +392,7 @@ static Address createReferenceTemporary(CodeGenFunction &CGF,
QualType Ty = Inner->getType();
if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
(Ty->isArrayType() || Ty->isRecordType()) &&
- CGF.CGM.isTypeConstant(Ty, true))
+ CGF.CGM.isTypeConstant(Ty, true, false))
if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
auto *GV = new llvm::GlobalVariable(
@@ -541,13 +532,17 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
// Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
// marker. Instead, start the lifetime of a conditional temporary earlier
// so that it's unconditional. Don't do this with sanitizers which need
- // more precise lifetime marks.
+ // more precise lifetime marks. However when inside an "await.suspend"
+ // block, we should always avoid conditional cleanup because it creates
+ // boolean marker that lives across await_suspend, which can destroy coro
+ // frame.
ConditionalEvaluation *OldConditional = nullptr;
CGBuilderTy::InsertPoint OldIP;
if (isInConditionalBranch() && !E->getType().isDestructedType() &&
- !SanOpts.has(SanitizerKind::HWAddress) &&
- !SanOpts.has(SanitizerKind::Memory) &&
- !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) {
+ ((!SanOpts.has(SanitizerKind::HWAddress) &&
+ !SanOpts.has(SanitizerKind::Memory) &&
+ !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
+ inSuspendBlock())) {
OldConditional = OutermostConditional;
OutermostConditional = nullptr;
@@ -1035,11 +1030,10 @@ void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
// LValue Expression Emission
//===----------------------------------------------------------------------===//
-/// EmitPointerWithAlignment - Given an expression of pointer type, try to
-/// derive a more accurate bound on the alignment of the pointer.
-Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
- LValueBaseInfo *BaseInfo,
- TBAAAccessInfo *TBAAInfo) {
+static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
+ TBAAAccessInfo *TBAAInfo,
+ KnownNonNull_t IsKnownNonNull,
+ CodeGenFunction &CGF) {
// We allow this with ObjC object pointers because of fragile ABIs.
assert(E->getType()->isPointerType() ||
E->getType()->isObjCObjectPointerType());
@@ -1048,7 +1042,7 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
// Casts:
if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
- CGM.EmitExplicitCastExprType(ECE, this);
+ CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
switch (CE->getCastKind()) {
// Non-converting casts (but not C's implicit conversion from void*).
@@ -1061,49 +1055,51 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
LValueBaseInfo InnerBaseInfo;
TBAAAccessInfo InnerTBAAInfo;
- Address Addr = EmitPointerWithAlignment(CE->getSubExpr(),
- &InnerBaseInfo,
- &InnerTBAAInfo);
+ Address Addr = CGF.EmitPointerWithAlignment(
+ CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
if (BaseInfo) *BaseInfo = InnerBaseInfo;
if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
if (isa<ExplicitCastExpr>(CE)) {
LValueBaseInfo TargetTypeBaseInfo;
TBAAAccessInfo TargetTypeTBAAInfo;
- CharUnits Align = CGM.getNaturalPointeeTypeAlignment(
+ CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(
E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
if (TBAAInfo)
- *TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
- TargetTypeTBAAInfo);
+ *TBAAInfo =
+ CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
// If the source l-value is opaque, honor the alignment of the
// casted-to type.
if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
if (BaseInfo)
BaseInfo->mergeForCast(TargetTypeBaseInfo);
- Addr = Address(Addr.getPointer(), Addr.getElementType(), Align);
+ Addr = Address(Addr.getPointer(), Addr.getElementType(), Align,
+ IsKnownNonNull);
}
}
- if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
+ if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
CE->getCastKind() == CK_BitCast) {
if (auto PT = E->getType()->getAs<PointerType>())
- EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
- /*MayBeNull=*/true,
- CodeGenFunction::CFITCK_UnrelatedCast,
- CE->getBeginLoc());
+ CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
+ /*MayBeNull=*/true,
+ CodeGenFunction::CFITCK_UnrelatedCast,
+ CE->getBeginLoc());
}
- llvm::Type *ElemTy = ConvertTypeForMem(E->getType()->getPointeeType());
- Addr = Builder.CreateElementBitCast(Addr, ElemTy);
+ llvm::Type *ElemTy =
+ CGF.ConvertTypeForMem(E->getType()->getPointeeType());
+ Addr = Addr.withElementType(ElemTy);
if (CE->getCastKind() == CK_AddressSpaceConversion)
- Addr = Builder.CreateAddrSpaceCast(Addr, ConvertType(E->getType()));
+ Addr = CGF.Builder.CreateAddrSpaceCast(Addr,
+ CGF.ConvertType(E->getType()));
return Addr;
}
break;
// Array-to-pointer decay.
case CK_ArrayToPointerDecay:
- return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
+ return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
// Derived-to-base conversions.
case CK_UncheckedDerivedToBase:
@@ -1112,13 +1108,15 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
// conservatively pretend that the complete object is of the base class
// type.
if (TBAAInfo)
- *TBAAInfo = CGM.getTBAAAccessInfo(E->getType());
- Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo);
+ *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
+ Address Addr = CGF.EmitPointerWithAlignment(
+ CE->getSubExpr(), BaseInfo, nullptr,
+ (KnownNonNull_t)(IsKnownNonNull ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase));
auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
- return GetAddressOfBaseClass(Addr, Derived,
- CE->path_begin(), CE->path_end(),
- ShouldNullCheckClassCastValue(CE),
- CE->getExprLoc());
+ return CGF.GetAddressOfBaseClass(
+ Addr, Derived, CE->path_begin(), CE->path_end(),
+ CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
}
// TODO: Is there any reason to treat base-to-derived conversions
@@ -1131,10 +1129,10 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
// Unary &.
if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
if (UO->getOpcode() == UO_AddrOf) {
- LValue LV = EmitLValue(UO->getSubExpr());
+ LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
if (BaseInfo) *BaseInfo = LV.getBaseInfo();
if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
- return LV.getAddress(*this);
+ return LV.getAddress(CGF);
}
}
@@ -1146,10 +1144,10 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
case Builtin::BIaddressof:
case Builtin::BI__addressof:
case Builtin::BI__builtin_addressof: {
- LValue LV = EmitLValue(Call->getArg(0));
+ LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
if (BaseInfo) *BaseInfo = LV.getBaseInfo();
if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
- return LV.getAddress(*this);
+ return LV.getAddress(CGF);
}
}
}
@@ -1158,9 +1156,21 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
// Otherwise, use the alignment of the type.
CharUnits Align =
- CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
- llvm::Type *ElemTy = ConvertTypeForMem(E->getType()->getPointeeType());
- return Address(EmitScalarExpr(E), ElemTy, Align);
+ CGF.CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(E->getType()->getPointeeType());
+ return Address(CGF.EmitScalarExpr(E), ElemTy, Align, IsKnownNonNull);
+}
+
+/// EmitPointerWithAlignment - Given an expression of pointer type, try to
+/// derive a more accurate bound on the alignment of the pointer.
+Address CodeGenFunction::EmitPointerWithAlignment(
+ const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
+ KnownNonNull_t IsKnownNonNull) {
+ Address Addr =
+ ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
+ if (IsKnownNonNull && !Addr.isKnownNonNull())
+ Addr.setKnownNonNull();
+ return Addr;
}
llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
@@ -1270,7 +1280,16 @@ LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
-LValue CodeGenFunction::EmitLValue(const Expr *E) {
+LValue CodeGenFunction::EmitLValue(const Expr *E,
+ KnownNonNull_t IsKnownNonNull) {
+ LValue LV = EmitLValueHelper(E, IsKnownNonNull);
+ if (IsKnownNonNull && !LV.isKnownNonNull())
+ LV.setKnownNonNull();
+ return LV;
+}
+
+LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
+ KnownNonNull_t IsKnownNonNull) {
ApplyDebugLocation DL(*this, E);
switch (E->getStmtClass()) {
default: return EmitUnsupportedLValue(E, "l-value expression");
@@ -1298,7 +1317,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::UserDefinedLiteralClass:
return EmitCallExprLValue(cast<CallExpr>(E));
case Expr::CXXRewrittenBinaryOperatorClass:
- return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm());
+ return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
+ IsKnownNonNull);
case Expr::VAArgExprClass:
return EmitVAArgExprLValue(cast<VAArgExpr>(E));
case Expr::DeclRefExprClass:
@@ -1311,12 +1331,13 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
->getPointeeType();
return MakeNaturalAlignAddrLValue(Result, RetType);
}
- return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
+ return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
}
case Expr::ParenExprClass:
- return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
+ return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
case Expr::GenericSelectionExprClass:
- return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
+ return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
+ IsKnownNonNull);
case Expr::PredefinedExprClass:
return EmitPredefinedLValue(cast<PredefinedExpr>(E));
case Expr::StringLiteralClass:
@@ -1340,15 +1361,16 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::ExprWithCleanupsClass: {
const auto *cleanups = cast<ExprWithCleanups>(E);
RunCleanupsScope Scope(*this);
- LValue LV = EmitLValue(cleanups->getSubExpr());
+ LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
if (LV.isSimple()) {
// Defend against branches out of gnu statement expressions surrounded by
// cleanups.
Address Addr = LV.getAddress(*this);
llvm::Value *V = Addr.getPointer();
Scope.ForceCleanup({&V});
- return LValue::MakeAddr(Addr.withPointer(V), LV.getType(), getContext(),
- LV.getBaseInfo(), LV.getTBAAInfo());
+ return LValue::MakeAddr(Addr.withPointer(V, Addr.isKnownNonNull()),
+ LV.getType(), getContext(), LV.getBaseInfo(),
+ LV.getTBAAInfo());
}
// FIXME: Is it possible to create an ExprWithCleanups that produces a
// bitfield lvalue or some other non-simple lvalue?
@@ -1358,12 +1380,12 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::CXXDefaultArgExprClass: {
auto *DAE = cast<CXXDefaultArgExpr>(E);
CXXDefaultArgExprScope Scope(*this, DAE);
- return EmitLValue(DAE->getExpr());
+ return EmitLValue(DAE->getExpr(), IsKnownNonNull);
}
case Expr::CXXDefaultInitExprClass: {
auto *DIE = cast<CXXDefaultInitExpr>(E);
CXXDefaultInitExprScope Scope(*this, DIE);
- return EmitLValue(DIE->getExpr());
+ return EmitLValue(DIE->getExpr(), IsKnownNonNull);
}
case Expr::CXXTypeidExprClass:
return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
@@ -1395,11 +1417,12 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::BinaryConditionalOperatorClass:
return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
case Expr::ChooseExprClass:
- return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
+ return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
case Expr::OpaqueValueExprClass:
return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
case Expr::SubstNonTypeTemplateParmExprClass:
- return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
+ return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
+ IsKnownNonNull);
case Expr::ImplicitCastExprClass:
case Expr::CStyleCastExprClass:
case Expr::CXXFunctionalCastExprClass:
@@ -1691,7 +1714,8 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
bool isNontemporal) {
if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
if (GV->isThreadLocal())
- Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV));
+ Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
+ NotKnownNonNull);
if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
// Boolean vectors use `iN` as storage type.
@@ -1719,10 +1743,9 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {
- // Bitcast to vec4 type.
llvm::VectorType *vec4Ty =
llvm::FixedVectorType::get(VTy->getElementType(), 4);
- Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
+ Address Cast = Addr.withElementType(vec4Ty);
// Now load value.
llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
@@ -1743,7 +1766,7 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
if (isNontemporal) {
llvm::MDNode *Node = llvm::MDNode::get(
Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
- Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+ Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
}
CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
@@ -1806,7 +1829,7 @@ static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
ArrayTy->getNumElements());
- return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy));
+ return Addr.withElementType(VectorTy);
}
auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
if (VectorTy && !IsVector) {
@@ -1814,7 +1837,7 @@ static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
VectorTy->getElementType(),
cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
- return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
+ return Addr.withElementType(ArrayTy);
}
return Addr;
@@ -1839,7 +1862,8 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
bool isInit, bool isNontemporal) {
if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
if (GV->isThreadLocal())
- Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV));
+ Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
+ NotKnownNonNull);
llvm::Type *SrcTy = Value->getType();
if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
@@ -1861,7 +1885,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
}
if (Addr.getElementType() != SrcTy) {
- Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
+ Addr = Addr.withElementType(SrcTy);
}
}
}
@@ -1881,7 +1905,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
llvm::MDNode *Node =
llvm::MDNode::get(Store->getContext(),
llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
- Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+ Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
}
CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
@@ -2043,9 +2067,7 @@ Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
- Address CastToPointerElement =
- Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
- "conv.ptr.element");
+ Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
const llvm::Constant *Elts = LV.getExtVectorElts();
unsigned ix = getAccessedFieldNo(0, Elts);
@@ -2488,7 +2510,7 @@ static LValue EmitThreadPrivateVarDeclLValue(
Addr =
CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
- Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
+ Addr = Addr.withElementType(RealVarTy);
return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
@@ -2566,7 +2588,7 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
// Check if the variable is marked as declare target with link clause in
// device codegen.
- if (CGF.getLangOpts().OpenMPIsDevice) {
+ if (CGF.getLangOpts().OpenMPIsTargetDevice) {
Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
if (Addr.isValid())
return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
@@ -2848,8 +2870,8 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// Handle threadlocal function locals.
if (VD->getTLSKind() != VarDecl::TLS_None)
- addr =
- addr.withPointer(Builder.CreateThreadLocalAddress(addr.getPointer()));
+ addr = addr.withPointer(
+ Builder.CreateThreadLocalAddress(addr.getPointer()), NotKnownNonNull);
// Check for OpenMP threadprivate variables.
if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
@@ -3188,7 +3210,7 @@ enum class CheckRecoverableKind {
static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
assert(Kind.countPopulation() == 1);
- if (Kind == SanitizerKind::Function || Kind == SanitizerKind::Vptr)
+ if (Kind == SanitizerKind::Vptr)
return CheckRecoverableKind::AlwaysRecoverable;
else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
return CheckRecoverableKind::Unrecoverable;
@@ -3333,7 +3355,7 @@ void CodeGenFunction::EmitCheck(
CGM.getDataLayout().getDefaultGlobalsAddressSpace());
InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
- Args.push_back(EmitCastToVoidPtr(InfoPtr));
+ Args.push_back(InfoPtr);
ArgTypes.push_back(Args.back()->getType());
}
@@ -3606,7 +3628,7 @@ Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
// If the array type was an incomplete type, we need to make sure
// the decay ends up being the right type.
llvm::Type *NewTy = ConvertType(E->getType());
- Addr = Builder.CreateElementBitCast(Addr, NewTy);
+ Addr = Addr.withElementType(NewTy);
// Note that VLA pointers are always decayed, so we don't need to do
// anything here.
@@ -3625,7 +3647,7 @@ Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
if (BaseInfo) *BaseInfo = LV.getBaseInfo();
if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
- return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType));
+ return Addr.withElementType(ConvertTypeForMem(EltType));
}
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
@@ -3871,18 +3893,14 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// correctly, so we need to cast to i8*. FIXME: is this actually
// true? A lot of other things in the fragile ABI would break...
llvm::Type *OrigBaseElemTy = Addr.getElementType();
- Addr = Builder.CreateElementBitCast(Addr, Int8Ty);
// Do the GEP.
CharUnits EltAlign =
getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
llvm::Value *EltPtr =
- emitArraySubscriptGEP(*this, Addr.getElementType(), Addr.getPointer(),
- ScaledIdx, false, SignedIndices, E->getExprLoc());
- Addr = Address(EltPtr, Addr.getElementType(), EltAlign);
-
- // Cast back.
- Addr = Builder.CreateElementBitCast(Addr, OrigBaseElemTy);
+ emitArraySubscriptGEP(*this, Int8Ty, Addr.getPointer(), ScaledIdx,
+ false, SignedIndices, E->getExprLoc());
+ Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
// base to be a ArrayToPointerDecay implicit cast. While correct, it is
@@ -3960,7 +3978,7 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
// If the array type was an incomplete type, we need to make sure
// the decay ends up being the right type.
llvm::Type *NewTy = CGF.ConvertType(BaseTy);
- Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy);
+ Addr = Addr.withElementType(NewTy);
// Note that VLA pointers are always decayed, so we don't need to do
// anything here.
@@ -3970,8 +3988,7 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
}
- return CGF.Builder.CreateElementBitCast(Addr,
- CGF.ConvertTypeForMem(ElTy));
+ return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
}
LValueBaseInfo TypeBaseInfo;
TBAAAccessInfo TypeTBAAInfo;
@@ -4067,6 +4084,7 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
}
} else {
auto *CAT = C.getAsConstantArrayType(ArrayTy);
+ assert(CAT && "unexpected type for array initializer");
ConstLength = CAT->getSize();
}
if (Length) {
@@ -4287,7 +4305,7 @@ static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
CGF.getContext().getFieldOffset(Field));
if (Offset.isZero())
return Base;
- Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty);
+ Base = Base.withElementType(CGF.Int8Ty);
return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
}
@@ -4375,8 +4393,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
// Get the access type.
llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
- if (Addr.getElementType() != FieldIntTy)
- Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
+ Addr = Addr.withElementType(FieldIntTy);
if (UseVolatile) {
const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
if (VolatileOffset)
@@ -4463,8 +4480,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
}
if (FieldType->isReferenceType())
- addr = Builder.CreateElementBitCast(
- addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
+ addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
} else {
if (!IsInPreservedAIRegion &&
(!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
@@ -4489,11 +4505,8 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
}
// Make sure that the address is pointing to the right type. This is critical
- // for both unions and structs. A union needs a bitcast, a struct element
- // will need a bitcast if the LLVM type laid out doesn't match the desired
- // type.
- addr = Builder.CreateElementBitCast(
- addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
+ // for both unions and structs.
+ addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
if (field->hasAttr<AnnotateAttr>())
addr = EmitFieldAnnotations(field, addr);
@@ -4520,7 +4533,7 @@ CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
// Make sure that the address is pointing to the right type.
llvm::Type *llvmType = ConvertTypeForMem(FieldType);
- V = Builder.CreateElementBitCast(V, llvmType, Field->getName());
+ V = V.withElementType(llvmType);
// TODO: Generate TBAA information that describes this access as a structure
// member access and not just an access to an object of the field's type. This
@@ -4811,7 +4824,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
if (V.isValid()) {
llvm::Type *T = ConvertTypeForMem(E->getType());
if (V.getElementType() != T)
- LV.setAddress(Builder.CreateElementBitCast(V, T));
+ LV.setAddress(V.withElementType(T));
}
}
return LV;
@@ -4870,8 +4883,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
CGM.EmitExplicitCastExprType(CE, this);
LValue LV = EmitLValue(E->getSubExpr());
- Address V = Builder.CreateElementBitCast(
- LV.getAddress(*this),
+ Address V = LV.getAddress(*this).withElementType(
ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
@@ -4895,8 +4907,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
}
case CK_ObjCObjectLValueCast: {
LValue LV = EmitLValue(E->getSubExpr());
- Address V = Builder.CreateElementBitCast(LV.getAddress(*this),
- ConvertType(E->getType()));
+ Address V = LV.getAddress(*this).withElementType(ConvertType(E->getType()));
return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
}
@@ -5106,7 +5117,7 @@ CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
functionType = ptrType->getPointeeType();
} else {
functionType = E->getType();
- calleePtr = EmitLValue(E).getPointer(*this);
+ calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
}
assert(functionType->isFunctionType());
@@ -5206,8 +5217,8 @@ CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
}
Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
- return Builder.CreateElementBitCast(CGM.GetAddrOfMSGuidDecl(E->getGuidDecl()),
- ConvertType(E->getType()));
+ return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
+ .withElementType(ConvertType(E->getType()));
}
LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
@@ -5308,33 +5319,56 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
const Decl *TargetDecl =
OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
+ assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
+ !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
+ "trying to emit a call to an immediate function");
+
CalleeType = getContext().getCanonicalType(CalleeType);
auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
CGCallee Callee = OrigCallee;
- if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
- (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
+ if (SanOpts.has(SanitizerKind::Function) &&
+ (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
+ !isa<FunctionNoProtoType>(PointeeType)) {
if (llvm::Constant *PrefixSig =
CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
SanitizerScope SanScope(this);
- // Remove any (C++17) exception specifications, to allow calling e.g. a
- // noexcept function through a non-noexcept pointer.
- auto ProtoTy =
- getContext().getFunctionTypeWithExceptionSpec(PointeeType, EST_None);
- llvm::Constant *FTRTTIConst =
- CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
+ auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
+
llvm::Type *PrefixSigType = PrefixSig->getType();
llvm::StructType *PrefixStructTy = llvm::StructType::get(
CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
llvm::Value *CalleePtr = Callee.getFunctionPointer();
+ // On 32-bit Arm, the low bit of a function pointer indicates whether
+ // it's using the Arm or Thumb instruction set. The actual first
+ // instruction lives at the same address either way, so we must clear
+ // that low bit before using the function address to find the prefix
+ // structure.
+ //
+ // This applies to both Arm and Thumb target triples, because
+ // either one could be used in an interworking context where it
+ // might be passed function pointers of both types.
+ llvm::Value *AlignedCalleePtr;
+ if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
+ llvm::Value *CalleeAddress =
+ Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
+ llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
+ llvm::Value *AlignedCalleeAddress =
+ Builder.CreateAnd(CalleeAddress, Mask);
+ AlignedCalleePtr =
+ Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
+ } else {
+ AlignedCalleePtr = CalleePtr;
+ }
+
llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
- CalleePtr, llvm::PointerType::getUnqual(PrefixStructTy));
+ AlignedCalleePtr, llvm::PointerType::getUnqual(PrefixStructTy));
llvm::Value *CalleeSigPtr =
- Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
+ Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
llvm::Value *CalleeSig =
Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
@@ -5344,19 +5378,17 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
EmitBlock(TypeCheck);
- llvm::Value *CalleeRTTIPtr =
- Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
- llvm::Value *CalleeRTTIEncoded =
- Builder.CreateAlignedLoad(Int32Ty, CalleeRTTIPtr, getPointerAlign());
- llvm::Value *CalleeRTTI =
- DecodeAddrUsedInPrologue(CalleePtr, CalleeRTTIEncoded);
- llvm::Value *CalleeRTTIMatch =
- Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
+ llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
+ Int32Ty,
+ Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
+ getPointerAlign());
+ llvm::Value *CalleeTypeHashMatch =
+ Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
EmitCheckTypeDescriptor(CalleeType)};
- EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
+ EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::Function),
SanitizerHandler::FunctionTypeMismatch, StaticData,
- {CalleePtr, CalleeRTTI, FTRTTIConst});
+ {CalleePtr});
Builder.CreateBr(Cont);
EmitBlock(Cont);
@@ -5549,6 +5581,48 @@ void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}
+void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
+ llvm::Type *EltTy = Val->getType()->getScalarType();
+ if (!EltTy->isFloatTy())
+ return;
+
+ if ((getLangOpts().OpenCL &&
+ !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
+ (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
+ !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
+ // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 3ulp
+ //
+ // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
+ // build option allows an application to specify that single precision
+ // floating-point divide (x/y and 1/x) and sqrt used in the program
+ // source are correctly rounded.
+ //
+ // TODO: CUDA has a prec-sqrt flag
+ SetFPAccuracy(Val, 3.0f);
+ }
+}
+
+void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
+ llvm::Type *EltTy = Val->getType()->getScalarType();
+ if (!EltTy->isFloatTy())
+ return;
+
+ if ((getLangOpts().OpenCL &&
+ !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
+ (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
+ !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
+ // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
+ //
+ // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
+ // build option allows an application to specify that single precision
+ // floating-point divide (x/y and 1/x) and sqrt used in the program
+ // source are correctly rounded.
+ //
+ // TODO: CUDA has a prec-div flag
+ SetFPAccuracy(Val, 2.5f);
+ }
+}
+
namespace {
struct LValueOrRValue {
LValue LV;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
index 34e535a78dd6..810b28f25fa1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
@@ -85,8 +85,6 @@ public:
void EmitCopy(QualType type, const AggValueSlot &dest,
const AggValueSlot &src);
- void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
-
void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
Expr *ExprToVisit, ArrayRef<Expr *> Args,
Expr *ArrayFiller);
@@ -131,7 +129,14 @@ public:
EnsureDest(E->getType());
if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
- CGF.EmitAggregateStore(Result, Dest.getAddress(),
+ Address StoreDest = Dest.getAddress();
+ // The emitted value is guaranteed to have the same size as the
+ // destination but can have a different type. Just do a bitcast in this
+ // case to avoid incorrect GEPs.
+ if (Result->getType() != StoreDest.getType())
+ StoreDest = StoreDest.withElementType(Result->getType());
+
+ CGF.EmitAggregateStore(Result, StoreDest,
E->getType().isVolatileQualified());
return;
}
@@ -525,8 +530,8 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
Emitter.tryEmitForInitializer(ExprToVisit, AS, ArrayQTy)) {
auto GV = new llvm::GlobalVariable(
CGM.getModule(), C->getType(),
- CGM.isTypeConstant(ArrayQTy, /* ExcludeCtorDtor= */ true),
- llvm::GlobalValue::PrivateLinkage, C, "constinit",
+ /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
+ "constinit",
/* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(AS));
Emitter.finalize(GV);
@@ -746,8 +751,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
// GCC union extension
QualType Ty = E->getSubExpr()->getType();
- Address CastPtr =
- Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
+ Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
EmitInitializationToLValue(E->getSubExpr(),
CGF.MakeAddrLValue(CastPtr, Ty));
break;
@@ -762,9 +766,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
Address SourceAddress =
- Builder.CreateElementBitCast(SourceLV.getAddress(CGF), CGF.Int8Ty);
- Address DestAddress =
- Builder.CreateElementBitCast(Dest.getAddress(), CGF.Int8Ty);
+ SourceLV.getAddress(CGF).withElementType(CGF.Int8Ty);
+ Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
llvm::Value *SizeVal = llvm::ConstantInt::get(
CGF.SizeTy,
CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
@@ -1651,11 +1654,19 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());
// Handle initialization of an array.
- if (ExprToVisit->getType()->isArrayType()) {
+ if (ExprToVisit->getType()->isConstantArrayType()) {
auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
InitExprs, ArrayFiller);
return;
+ } else if (ExprToVisit->getType()->isVariableArrayType()) {
+ // A variable array type that has an initializer can only do empty
+ // initialization. And because this feature is not exposed as an extension
+ // in C++, we can safely memset the array memory to zero.
+ assert(InitExprs.size() == 0 &&
+ "you can only use an empty initializer with VLAs");
+ CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
+ return;
}
assert(ExprToVisit->getType()->isRecordType() &&
@@ -2011,8 +2022,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
// Okay, it seems like a good idea to use an initial memset, emit the call.
llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
- Address Loc = Slot.getAddress();
- Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
+ Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty);
CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
// Tell the AggExprEmitter that the slot is known zero.
@@ -2176,8 +2186,8 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
// we need to use a different call here. We use isVolatile to indicate when
// either the source or the destination is volatile.
- DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
- SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);
+ DestPtr = DestPtr.withElementType(Int8Ty);
+ SrcPtr = SrcPtr.withElementType(Int8Ty);
// Don't do any of the memmove_collectable tests if GC isn't set.
if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
index a9f3434589f2..4d3f3e9603d9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
@@ -446,9 +446,9 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
// Emit the 'this' pointer.
Address This = Address::invalid();
if (BO->getOpcode() == BO_PtrMemI)
- This = EmitPointerWithAlignment(BaseExpr);
+ This = EmitPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull);
else
- This = EmitLValue(BaseExpr).getAddress(*this);
+ This = EmitLValue(BaseExpr, KnownNonNull).getAddress(*this);
EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
QualType(MPT->getClass(), 0));
@@ -502,7 +502,7 @@ static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
if (Base->isEmpty())
return;
- DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
+ DestPtr = DestPtr.withElementType(CGF.Int8Ty);
const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
CharUnits NVSize = Layout.getNonVirtualSize();
@@ -555,8 +555,7 @@ static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
std::max(Layout.getNonVirtualAlignment(), DestPtr.getAlignment());
NullVariable->setAlignment(Align.getAsAlign());
- Address SrcPtr =
- Address(CGF.EmitCastToVoidPtr(NullVariable), CGF.Int8Ty, Align);
+ Address SrcPtr(NullVariable, CGF.Int8Ty, Align);
// Get and call the appropriate llvm.memcpy overload.
for (std::pair<CharUnits, CharUnits> Store : Stores) {
@@ -768,7 +767,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// wider than that, check whether it's already too big, and if so,
// overflow.
else if (numElementsWidth > sizeWidth &&
- numElementsWidth - sizeWidth > count.countLeadingZeros())
+ numElementsWidth - sizeWidth > count.countl_zero())
hasAnyOverflow = true;
// Okay, compute a count at the right width.
@@ -829,8 +828,8 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// going to have to do a comparison for (2), and this happens to
// take care of (1), too.
if (numElementsWidth > sizeWidth) {
- llvm::APInt threshold(numElementsWidth, 1);
- threshold <<= sizeWidth;
+ llvm::APInt threshold =
+ llvm::APInt::getOneBitSet(numElementsWidth, sizeWidth);
llvm::Value *thresholdV
= llvm::ConstantInt::get(numElementsType, threshold);
@@ -1077,7 +1076,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
AllocType->getAsArrayTypeUnsafe())) {
ElementTy = ConvertTypeForMem(AllocType);
- CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
+ CurPtr = CurPtr.withElementType(ElementTy);
InitListElements *= getContext().getConstantArrayElementCount(CAT);
}
@@ -1134,7 +1133,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
}
// Switch back to initializing one base element at a time.
- CurPtr = Builder.CreateElementBitCast(CurPtr, BeginPtr.getElementType());
+ CurPtr = CurPtr.withElementType(BeginPtr.getElementType());
}
// If all elements have already been initialized, skip any further
@@ -1657,7 +1656,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
CharUnits allocationAlign = allocAlign;
if (!E->passAlignment() &&
allocator->isReplaceableGlobalAllocationFunction()) {
- unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
+ unsigned AllocatorAlign = llvm::bit_floor(std::min<uint64_t>(
Target.getNewAlign(), getContext().getTypeSize(allocType)));
allocationAlign = std::max(
allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
@@ -1716,7 +1715,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
}
llvm::Type *elementTy = ConvertTypeForMem(allocType);
- Address result = Builder.CreateElementBitCast(allocation, elementTy);
+ Address result = allocation.withElementType(elementTy);
// Passing pointer through launder.invariant.group to avoid propagation of
// vptrs information which may be included in previous type.
@@ -2074,6 +2073,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
EmitBlock(DeleteNotNull);
+ Ptr.setKnownNonNull();
QualType DeleteTy = E->getDestroyedType();
@@ -2106,7 +2106,8 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getElementType(),
Ptr.getPointer(), GEP, "del.first"),
- ConvertTypeForMem(DeleteTy), Ptr.getAlignment());
+ ConvertTypeForMem(DeleteTy), Ptr.getAlignment(),
+ Ptr.isKnownNonNull());
}
assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
@@ -2193,13 +2194,12 @@ static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
}
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
- llvm::Type *StdTypeInfoPtrTy =
- ConvertType(E->getType())->getPointerTo();
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(getLLVMContext());
if (E->isTypeOperand()) {
llvm::Constant *TypeInfo =
CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
- return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
+ return TypeInfo;
}
// C++ [expr.typeid]p2:
@@ -2209,12 +2209,10 @@ llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
// type) to which the glvalue refers.
// If the operand is already most derived object, no need to look up vtable.
if (E->isPotentiallyEvaluated() && !E->isMostDerived(getContext()))
- return EmitTypeidFromVTable(*this, E->getExprOperand(),
- StdTypeInfoPtrTy);
+ return EmitTypeidFromVTable(*this, E->getExprOperand(), PtrTy);
QualType OperandTy = E->getExprOperand()->getType();
- return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
- StdTypeInfoPtrTy);
+ return CGM.GetAddrOfRTTIDescriptor(OperandTy);
}
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
@@ -2228,8 +2226,8 @@ static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
return nullptr;
- CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
- return llvm::UndefValue::get(DestLTy);
+ CGF.Builder.ClearInsertionPoint();
+ return llvm::PoisonValue::get(DestLTy);
}
llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
@@ -2242,17 +2240,16 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
// C++ [expr.dynamic.cast]p7:
// If T is "pointer to cv void," then the result is a pointer to the most
// derived object pointed to by v.
- const PointerType *DestPTy = DestTy->getAs<PointerType>();
-
- bool isDynamicCastToVoid;
+ bool IsDynamicCastToVoid = DestTy->isVoidPointerType();
QualType SrcRecordTy;
QualType DestRecordTy;
- if (DestPTy) {
- isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
+ if (IsDynamicCastToVoid) {
+ SrcRecordTy = SrcTy->getPointeeType();
+ // No DestRecordTy.
+ } else if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
DestRecordTy = DestPTy->getPointeeType();
} else {
- isDynamicCastToVoid = false;
SrcRecordTy = SrcTy;
DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
}
@@ -2265,18 +2262,30 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
SrcRecordTy);
- if (DCE->isAlwaysNull())
- if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
+ if (DCE->isAlwaysNull()) {
+ if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy)) {
+ // Expression emission is expected to retain a valid insertion point.
+ if (!Builder.GetInsertBlock())
+ EmitBlock(createBasicBlock("dynamic_cast.unreachable"));
return T;
+ }
+ }
assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
+ // If the destination is effectively final, the cast succeeds if and only
+ // if the dynamic type of the pointer is exactly the destination type.
+ bool IsExact = !IsDynamicCastToVoid &&
+ CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ DestRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() &&
+ CGM.getCXXABI().shouldEmitExactDynamicCast(DestRecordTy);
+
// C++ [expr.dynamic.cast]p4:
// If the value of v is a null pointer value in the pointer case, the result
// is the null pointer value of type T.
bool ShouldNullCheckSrcValue =
- CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
- SrcRecordTy);
+ IsExact || CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(
+ SrcTy->isPointerType(), SrcRecordTy);
llvm::BasicBlock *CastNull = nullptr;
llvm::BasicBlock *CastNotNull = nullptr;
@@ -2292,30 +2301,38 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
}
llvm::Value *Value;
- if (isDynamicCastToVoid) {
- Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
- DestTy);
+ if (IsDynamicCastToVoid) {
+ Value = CGM.getCXXABI().emitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy);
+ } else if (IsExact) {
+ // If the destination type is effectively final, this pointer points to the
+ // right type if and only if its vptr has the right value.
+ Value = CGM.getCXXABI().emitExactDynamicCast(
+ *this, ThisAddr, SrcRecordTy, DestTy, DestRecordTy, CastEnd, CastNull);
} else {
assert(DestRecordTy->isRecordType() &&
"destination type must be a record type!");
- Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
+ Value = CGM.getCXXABI().emitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
DestTy, DestRecordTy, CastEnd);
- CastNotNull = Builder.GetInsertBlock();
}
+ CastNotNull = Builder.GetInsertBlock();
+ llvm::Value *NullValue = nullptr;
if (ShouldNullCheckSrcValue) {
EmitBranch(CastEnd);
EmitBlock(CastNull);
+ NullValue = EmitDynamicCastToNull(*this, DestTy);
+ CastNull = Builder.GetInsertBlock();
+
EmitBranch(CastEnd);
}
EmitBlock(CastEnd);
- if (ShouldNullCheckSrcValue) {
+ if (CastNull) {
llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
PHI->addIncoming(Value, CastNotNull);
- PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
+ PHI->addIncoming(NullValue, CastNull);
Value = PHI;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp
index 7a14a418c7b6..2dd1a991ec97 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp
@@ -488,15 +488,14 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_LValueBitCast: {
LValue origLV = CGF.EmitLValue(Op);
- Address V = origLV.getAddress(CGF);
- V = Builder.CreateElementBitCast(V, CGF.ConvertType(DestTy));
+ Address V = origLV.getAddress(CGF).withElementType(CGF.ConvertType(DestTy));
return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), Op->getExprLoc());
}
case CK_LValueToRValueBitCast: {
LValue SourceLVal = CGF.EmitLValue(Op);
- Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
- CGF.ConvertTypeForMem(DestTy));
+ Address Addr = SourceLVal.getAddress(CGF).withElementType(
+ CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, Op->getExprLoc());
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
index 8dacc8cbd613..353ee56839f3 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
@@ -932,12 +932,12 @@ tryEmitGlobalCompoundLiteral(ConstantEmitter &emitter,
return ConstantAddress::invalid();
}
- auto GV = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
- CGM.isTypeConstant(E->getType(), true),
- llvm::GlobalValue::InternalLinkage,
- C, ".compoundliteral", nullptr,
- llvm::GlobalVariable::NotThreadLocal,
- CGM.getContext().getTargetAddressSpace(addressSpace));
+ auto GV = new llvm::GlobalVariable(
+ CGM.getModule(), C->getType(),
+ CGM.isTypeConstant(E->getType(), true, false),
+ llvm::GlobalValue::InternalLinkage, C, ".compoundliteral", nullptr,
+ llvm::GlobalVariable::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(addressSpace));
emitter.finalize(GV);
GV->setAlignment(Align.getAsAlign());
CGM.setAddrOfConstantCompoundLiteral(E, GV);
@@ -1215,11 +1215,6 @@ public:
return Visit(E->getSubExpr(), T);
}
- llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E,
- QualType T) {
- return Visit(E->getSubExpr(), T);
- }
-
llvm::Constant *EmitArrayInitialization(InitListExpr *ILE, QualType T) {
auto *CAT = CGM.getContext().getAsConstantArrayType(ILE->getType());
assert(CAT && "can't emit array init for non-constant-bound array");
@@ -1322,7 +1317,12 @@ public:
assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
"argument to copy ctor is of wrong type");
- return Visit(Arg, Ty);
+ // Look through the temporary; it's just converting the value to an
+ // lvalue to pass it to the constructor.
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Arg))
+ return Visit(MTE->getSubExpr(), Ty);
+ // Don't try to support arbitrary lvalue-to-rvalue conversions for now.
+ return nullptr;
}
return CGM.EmitNullConstant(Ty);
@@ -1340,6 +1340,7 @@ public:
std::string Str;
CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
const ConstantArrayType *CAT = CGM.getContext().getAsConstantArrayType(T);
+ assert(CAT && "String data not of constant array type!");
// Resize the string to the right size, adding zeros at the end, or
// truncating as needed.
@@ -1570,7 +1571,7 @@ namespace {
}
void setLocation(llvm::GlobalVariable *placeholder) {
- assert(Locations.find(placeholder) == Locations.end() &&
+ assert(!Locations.contains(placeholder) &&
"already found location for placeholder!");
// Lazily fill in IndexValues with the values from Indices.
@@ -1649,33 +1650,26 @@ llvm::Constant *ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) {
if (CD->isTrivial() && CD->isDefaultConstructor())
return CGM.EmitNullConstant(D.getType());
}
- InConstantContext = true;
}
+ InConstantContext = D.hasConstantInitialization();
QualType destType = D.getType();
+ const Expr *E = D.getInit();
+ assert(E && "No initializer to emit");
+
+ if (!destType->isReferenceType()) {
+ QualType nonMemoryDestType = getNonMemoryType(CGM, destType);
+ if (llvm::Constant *C = ConstExprEmitter(*this).Visit(const_cast<Expr *>(E),
+ nonMemoryDestType))
+ return emitForMemory(C, destType);
+ }
// Try to emit the initializer. Note that this can allow some things that
// are not allowed by tryEmitPrivateForMemory alone.
- if (auto value = D.evaluateValue()) {
+ if (APValue *value = D.evaluateValue())
return tryEmitPrivateForMemory(*value, destType);
- }
-
- // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a
- // reference is a constant expression, and the reference binds to a temporary,
- // then constant initialization is performed. ConstExprEmitter will
- // incorrectly emit a prvalue constant in this case, and the calling code
- // interprets that as the (pointer) value of the reference, rather than the
- // desired value of the referee.
- if (destType->isReferenceType())
- return nullptr;
- const Expr *E = D.getInit();
- assert(E && "No initializer to emit");
-
- auto nonMemoryDestType = getNonMemoryType(CGM, destType);
- auto C =
- ConstExprEmitter(*this).Visit(const_cast<Expr*>(E), nonMemoryDestType);
- return (C ? emitForMemory(C, destType) : nullptr);
+ return nullptr;
}
llvm::Constant *
@@ -1730,7 +1724,7 @@ llvm::Constant *ConstantEmitter::emitForMemory(CodeGenModule &CGM,
}
// Zero-extend bool.
- if (C->getType()->isIntegerTy(1)) {
+ if (C->getType()->isIntegerTy(1) && !destType->isBitIntType()) {
llvm::Type *boolTy = CGM.getTypes().ConvertTypeForMem(destType);
return llvm::ConstantExpr::getZExt(C, boolTy);
}
@@ -1742,6 +1736,10 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const Expr *E,
QualType destType) {
assert(!destType->isVoidType() && "can't emit a void constant");
+ if (llvm::Constant *C =
+ ConstExprEmitter(*this).Visit(const_cast<Expr *>(E), destType))
+ return C;
+
Expr::EvalResult Result;
bool Success = false;
@@ -1751,13 +1749,10 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const Expr *E,
else
Success = E->EvaluateAsRValue(Result, CGM.getContext(), InConstantContext);
- llvm::Constant *C;
if (Success && !Result.HasSideEffects)
- C = tryEmitPrivate(Result.Val, destType);
- else
- C = ConstExprEmitter(*this).Visit(const_cast<Expr*>(E), destType);
+ return tryEmitPrivate(Result.Val, destType);
- return C;
+ return nullptr;
}
llvm::Constant *CodeGenModule::getNullPointer(llvm::PointerType *T, QualType QT) {
@@ -1832,9 +1827,6 @@ private:
return C;
llvm::Type *origPtrTy = C->getType();
- unsigned AS = origPtrTy->getPointerAddressSpace();
- llvm::Type *charPtrTy = CGM.Int8Ty->getPointerTo(AS);
- C = llvm::ConstantExpr::getBitCast(C, charPtrTy);
C = llvm::ConstantExpr::getGetElementPtr(CGM.Int8Ty, C, getOffset());
C = llvm::ConstantExpr::getPointerCast(C, origPtrTy);
return C;
@@ -1944,15 +1936,8 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
}
// Handle typeid(T).
- if (TypeInfoLValue TI = base.dyn_cast<TypeInfoLValue>()) {
- llvm::Type *StdTypeInfoPtrTy =
- CGM.getTypes().ConvertType(base.getTypeInfoType())->getPointerTo();
- llvm::Constant *TypeInfo =
- CGM.GetAddrOfRTTIDescriptor(QualType(TI.getType(), 0));
- if (TypeInfo->getType() != StdTypeInfoPtrTy)
- TypeInfo = llvm::ConstantExpr::getBitCast(TypeInfo, StdTypeInfoPtrTy);
- return TypeInfo;
- }
+ if (TypeInfoLValue TI = base.dyn_cast<TypeInfoLValue>())
+ return CGM.GetAddrOfRTTIDescriptor(QualType(TI.getType(), 0));
// Otherwise, it must be an expression.
return Visit(base.get<const Expr*>());
@@ -1986,7 +1971,7 @@ static ConstantLValue emitConstantObjCStringLiteral(const StringLiteral *S,
QualType T,
CodeGenModule &CGM) {
auto C = CGM.getObjCRuntime().GenerateConstantString(S);
- return C.getElementBitCast(CGM.getTypes().ConvertTypeForMem(T));
+ return C.withElementType(CGM.getTypes().ConvertTypeForMem(T));
}
ConstantLValue
@@ -2189,6 +2174,11 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const APValue &Value,
llvm::ArrayType *Desired =
cast<llvm::ArrayType>(CGM.getTypes().ConvertType(DestType));
+
+ // Fix the type of incomplete arrays if the initializer isn't empty.
+ if (DestType->isIncompleteArrayType() && !Elts.empty())
+ Desired = llvm::ArrayType::get(Desired->getElementType(), Elts.size());
+
return EmitArrayConstant(CGM, Desired, CommonElementType, NumElements, Elts,
Filler);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
index ba8b5ab502d2..fe1a59b21f38 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
@@ -814,13 +814,21 @@ public:
Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
QualType getPromotionType(QualType Ty) {
+ const auto &Ctx = CGF.getContext();
if (auto *CT = Ty->getAs<ComplexType>()) {
QualType ElementType = CT->getElementType();
- if (ElementType.UseExcessPrecision(CGF.getContext()))
- return CGF.getContext().getComplexType(CGF.getContext().FloatTy);
+ if (ElementType.UseExcessPrecision(Ctx))
+ return Ctx.getComplexType(Ctx.FloatTy);
}
- if (Ty.UseExcessPrecision(CGF.getContext()))
- return CGF.getContext().FloatTy;
+
+ if (Ty.UseExcessPrecision(Ctx)) {
+ if (auto *VT = Ty->getAs<VectorType>()) {
+ unsigned NumElements = VT->getNumElements();
+ return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
+ }
+ return Ctx.FloatTy;
+ }
+
return QualType();
}
@@ -2055,15 +2063,15 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
Address Addr = EmitLValue(E).getAddress(CGF);
- Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
+ Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
return EmitLoadOfLValue(LV, CE->getExprLoc());
}
case CK_LValueToRValueBitCast: {
LValue SourceLVal = CGF.EmitLValue(E);
- Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
- CGF.ConvertTypeForMem(DestTy));
+ Address Addr = SourceLVal.getAddress(CGF).withElementType(
+ CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, CE->getExprLoc());
@@ -2115,7 +2123,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// Update heapallocsite metadata when there is an explicit pointer cast.
if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
- if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
+ if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
+ !isa<CastExpr>(E)) {
QualType PointeeType = DestTy->getPointeeType();
if (!PointeeType.isNull())
CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
@@ -2143,7 +2152,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
llvm::Value *Result = Builder.CreateInsertVector(
- DstTy, UndefVec, Src, Zero, "castScalableSve");
+ DstTy, UndefVec, Src, Zero, "cast.scalable");
if (NeedsBitCast)
Result = Builder.CreateBitCast(Result, OrigType);
return Result;
@@ -2167,7 +2176,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
- return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
+ return Builder.CreateExtractVector(DstTy, Src, Zero, "cast.fixed");
}
}
}
@@ -2185,8 +2194,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
CGF.EmitStoreOfScalar(Src, LV);
- Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
- "castFixedSve");
+ Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, CE->getExprLoc());
@@ -2698,15 +2706,13 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
} else if (type->isFunctionType()) {
llvm::Value *amt = Builder.getInt32(amount);
- value = CGF.EmitCastToVoidPtr(value);
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
else
- value = CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
- /*SignedIndices=*/false,
- isSubtraction, E->getExprLoc(),
- "incdec.funcptr");
- value = Builder.CreateBitCast(value, input->getType());
+ value =
+ CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
+ /*SignedIndices=*/false, isSubtraction,
+ E->getExprLoc(), "incdec.funcptr");
// For everything else, we can just do a simple increment.
} else {
@@ -2817,7 +2823,6 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// Objective-C pointer types.
} else {
const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
- value = CGF.EmitCastToVoidPtr(value);
CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
if (!isInc) size = -size;
@@ -3473,21 +3478,7 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
llvm::Value *Val;
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
- if ((CGF.getLangOpts().OpenCL &&
- !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
- (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice &&
- !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
- // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
- // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
- // build option allows an application to specify that single precision
- // floating-point divide (x/y and 1/x) and sqrt used in the program
- // source are correctly rounded.
- llvm::Type *ValTy = Val->getType();
- if (ValTy->isFloatTy() ||
- (isa<llvm::VectorType>(ValTy) &&
- cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
- CGF.SetFPAccuracy(Val, 2.5);
- }
+ CGF.SetDivFPAccuracy(Val);
return Val;
}
else if (Ops.isFixedPointOp())
@@ -3728,11 +3719,8 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// Explicitly handle GNU void* and function pointer arithmetic extensions. The
// GNU void* casts amount to no-ops since our void* type is i8*, but this is
// future proof.
- if (elementType->isVoidType() || elementType->isFunctionType()) {
- Value *result = CGF.EmitCastToVoidPtr(pointer);
- result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
- return CGF.Builder.CreateBitCast(result, pointer->getType());
- }
+ if (elementType->isVoidType() || elementType->isFunctionType())
+ return CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
llvm::Type *elemTy = CGF.ConvertTypeForMem(elementType);
if (CGF.getLangOpts().isSignedOverflowDefined())
@@ -3751,8 +3739,6 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
const CodeGenFunction &CGF, CGBuilderTy &Builder,
bool negMul, bool negAdd) {
- assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
-
Value *MulOp0 = MulOp->getOperand(0);
Value *MulOp1 = MulOp->getOperand(1);
if (negMul)
@@ -3797,31 +3783,70 @@ static Value* tryEmitFMulAdd(const BinOpInfo &op,
if (!op.FPFeatures.allowFPContractWithinStatement())
return nullptr;
+ Value *LHS = op.LHS;
+ Value *RHS = op.RHS;
+
+ // Peek through fneg to look for fmul. Make sure fneg has no users, and that
+ // it is the only use of its operand.
+ bool NegLHS = false;
+ if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
+ if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
+ LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
+ LHS = LHSUnOp->getOperand(0);
+ NegLHS = true;
+ }
+ }
+
+ bool NegRHS = false;
+ if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
+ if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
+ RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
+ RHS = RHSUnOp->getOperand(0);
+ NegRHS = true;
+ }
+ }
+
// We have a potentially fusable op. Look for a mul on one of the operands.
// Also, make sure that the mul result isn't used directly. In that case,
// there's no point creating a muladd operation.
- if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
+ if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
- LHSBinOp->use_empty())
- return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
+ (LHSBinOp->use_empty() || NegLHS)) {
+ // If we looked through fneg, erase it.
+ if (NegLHS)
+ cast<llvm::Instruction>(op.LHS)->eraseFromParent();
+ return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
+ }
}
- if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
+ if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
- RHSBinOp->use_empty())
- return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
+ (RHSBinOp->use_empty() || NegRHS)) {
+ // If we looked through fneg, erase it.
+ if (NegRHS)
+ cast<llvm::Instruction>(op.RHS)->eraseFromParent();
+ return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
+ }
}
- if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
+ if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
if (LHSBinOp->getIntrinsicID() ==
llvm::Intrinsic::experimental_constrained_fmul &&
- LHSBinOp->use_empty())
- return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
+ (LHSBinOp->use_empty() || NegLHS)) {
+ // If we looked through fneg, erase it.
+ if (NegLHS)
+ cast<llvm::Instruction>(op.LHS)->eraseFromParent();
+ return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
+ }
}
- if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
+ if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
if (RHSBinOp->getIntrinsicID() ==
llvm::Intrinsic::experimental_constrained_fmul &&
- RHSBinOp->use_empty())
- return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
+ (RHSBinOp->use_empty() || NegRHS)) {
+ // If we looked through fneg, erase it.
+ if (NegRHS)
+ cast<llvm::Instruction>(op.RHS)->eraseFromParent();
+ return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
+ }
}
return nullptr;
@@ -5115,7 +5140,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
}
// Cast the address to Class*.
- Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
+ Addr = Addr.withElementType(ConvertType(E->getType()));
return MakeAddrLValue(Addr, E->getType());
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp
index c39e0cc75f2d..75fb06de9384 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp
@@ -125,7 +125,7 @@ packArgsIntoNVPTXFormatBuffer(CodeGenFunction *CGF, const CallArgList &Args) {
}
}
-bool containsNonScalarVarargs(CodeGenFunction *CGF, CallArgList Args) {
+bool containsNonScalarVarargs(CodeGenFunction *CGF, const CallArgList &Args) {
return llvm::any_of(llvm::drop_begin(Args), [&](const CallArg &A) {
return !A.getRValue(*CGF).isScalar();
});
@@ -189,7 +189,7 @@ RValue CodeGenFunction::EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E) {
/* ParamsToSkip = */ 0);
SmallVector<llvm::Value *, 8> Args;
- for (auto A : CallArgs) {
+ for (const auto &A : CallArgs) {
// We don't know how to emit non-scalar varargs.
if (!A.getRValue(*this).isScalar()) {
CGM.ErrorUnsupported(E, "non-scalar arg to printf");
@@ -202,7 +202,10 @@ RValue CodeGenFunction::EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E) {
llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
- auto Printf = llvm::emitAMDGPUPrintfCall(IRB, Args);
+
+ bool isBuffered = (CGM.getTarget().getTargetOpts().AMDGPUPrintfKindVal ==
+ clang::TargetOptions::AMDGPUPrintfKind::Buffered);
+ auto Printf = llvm::emitAMDGPUPrintfCall(IRB, Args, isBuffered);
Builder.SetInsertPoint(IRB.GetInsertBlock(), IRB.GetInsertPoint());
return RValue::get(Printf);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp
index 5882f491d597..e9fa273f21cc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -175,7 +175,7 @@ void CGHLSLRuntime::finishCodeGen() {
for (auto &Buf : Buffers) {
layoutBuffer(Buf, DL);
GlobalVariable *GV = replaceBuffer(Buf);
- M.getGlobalList().push_back(GV);
+ M.insertGlobalVariable(GV);
llvm::hlsl::ResourceClass RC = Buf.IsCBuffer
? llvm::hlsl::ResourceClass::CBuffer
: llvm::hlsl::ResourceClass::SRV;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index 0abf39ad1f28..3d2b1b8b2f78 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -323,11 +323,11 @@ static const CGFunctionInfo &getFunctionInfo(CodeGenModule &CGM,
template <size_t N, size_t... Ints>
static std::array<Address, N> getParamAddrs(std::index_sequence<Ints...> IntSeq,
std::array<CharUnits, N> Alignments,
- FunctionArgList Args,
+ const FunctionArgList &Args,
CodeGenFunction *CGF) {
return std::array<Address, N>{
{Address(CGF->Builder.CreateLoad(CGF->GetAddrOfLocalVar(Args[Ints])),
- CGF->VoidPtrTy, Alignments[Ints])...}};
+ CGF->VoidPtrTy, Alignments[Ints], KnownNonNull)...}};
}
// Template classes that are used as bases for classes that emit special
@@ -365,9 +365,8 @@ template <class Derived> struct GenFuncBase {
llvm::ConstantInt::get(NumElts->getType(), BaseEltSize);
llvm::Value *SizeInBytes =
CGF.Builder.CreateNUWMul(BaseEltSizeVal, NumElts);
- Address BC = CGF.Builder.CreateElementBitCast(DstAddr, CGF.CGM.Int8Ty);
- llvm::Value *DstArrayEnd =
- CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BC.getPointer(), SizeInBytes);
+ llvm::Value *DstArrayEnd = CGF.Builder.CreateInBoundsGEP(
+ CGF.Int8Ty, DstAddr.getPointer(), SizeInBytes);
DstArrayEnd = CGF.Builder.CreateBitCast(
DstArrayEnd, CGF.CGM.Int8PtrPtrTy, "dstarray.end");
llvm::BasicBlock *PreheaderBB = CGF.Builder.GetInsertBlock();
@@ -426,9 +425,9 @@ template <class Derived> struct GenFuncBase {
assert(Addr.isValid() && "invalid address");
if (Offset.getQuantity() == 0)
return Addr;
- Addr = CGF->Builder.CreateElementBitCast(Addr, CGF->CGM.Int8Ty);
+ Addr = Addr.withElementType(CGF->CGM.Int8Ty);
Addr = CGF->Builder.CreateConstInBoundsGEP(Addr, Offset.getQuantity());
- return CGF->Builder.CreateElementBitCast(Addr, CGF->CGM.Int8PtrTy);
+ return Addr.withElementType(CGF->CGM.Int8PtrTy);
}
Address getAddrWithOffset(Address Addr, CharUnits StructFieldOffset,
@@ -491,8 +490,7 @@ template <class Derived> struct GenFuncBase {
for (unsigned I = 0; I < N; ++I) {
Alignments[I] = Addrs[I].getAlignment();
- Ptrs[I] = CallerCGF.Builder.CreateElementBitCast(
- Addrs[I], CallerCGF.CGM.Int8PtrTy).getPointer();
+ Ptrs[I] = Addrs[I].getPointer();
}
if (llvm::Function *F =
@@ -522,20 +520,19 @@ struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
Address SrcAddr = this->getAddrWithOffset(Addrs[SrcIdx], this->Start);
// Emit memcpy.
- if (Size.getQuantity() >= 16 || !llvm::isPowerOf2_32(Size.getQuantity())) {
+ if (Size.getQuantity() >= 16 ||
+ !llvm::has_single_bit<uint32_t>(Size.getQuantity())) {
llvm::Value *SizeVal =
llvm::ConstantInt::get(this->CGF->SizeTy, Size.getQuantity());
- DstAddr =
- this->CGF->Builder.CreateElementBitCast(DstAddr, this->CGF->Int8Ty);
- SrcAddr =
- this->CGF->Builder.CreateElementBitCast(SrcAddr, this->CGF->Int8Ty);
+ DstAddr = DstAddr.withElementType(this->CGF->Int8Ty);
+ SrcAddr = SrcAddr.withElementType(this->CGF->Int8Ty);
this->CGF->Builder.CreateMemCpy(DstAddr, SrcAddr, SizeVal, false);
} else {
llvm::Type *Ty = llvm::Type::getIntNTy(
this->CGF->getLLVMContext(),
Size.getQuantity() * this->CGF->getContext().getCharWidth());
- DstAddr = this->CGF->Builder.CreateElementBitCast(DstAddr, Ty);
- SrcAddr = this->CGF->Builder.CreateElementBitCast(SrcAddr, Ty);
+ DstAddr = DstAddr.withElementType(Ty);
+ SrcAddr = SrcAddr.withElementType(Ty);
llvm::Value *SrcVal = this->CGF->Builder.CreateLoad(SrcAddr, false);
this->CGF->Builder.CreateStore(SrcVal, DstAddr, false);
}
@@ -555,19 +552,17 @@ struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
QualType RT = QualType(FD->getParent()->getTypeForDecl(), 0);
llvm::Type *Ty = this->CGF->ConvertType(RT);
Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], Offset);
- LValue DstBase = this->CGF->MakeAddrLValue(
- this->CGF->Builder.CreateElementBitCast(DstAddr, Ty), FT);
+ LValue DstBase =
+ this->CGF->MakeAddrLValue(DstAddr.withElementType(Ty), FT);
DstLV = this->CGF->EmitLValueForField(DstBase, FD);
Address SrcAddr = this->getAddrWithOffset(Addrs[SrcIdx], Offset);
- LValue SrcBase = this->CGF->MakeAddrLValue(
- this->CGF->Builder.CreateElementBitCast(SrcAddr, Ty), FT);
+ LValue SrcBase =
+ this->CGF->MakeAddrLValue(SrcAddr.withElementType(Ty), FT);
SrcLV = this->CGF->EmitLValueForField(SrcBase, FD);
} else {
llvm::Type *Ty = this->CGF->ConvertTypeForMem(FT);
- Address DstAddr =
- this->CGF->Builder.CreateElementBitCast(Addrs[DstIdx], Ty);
- Address SrcAddr =
- this->CGF->Builder.CreateElementBitCast(Addrs[SrcIdx], Ty);
+ Address DstAddr = Addrs[DstIdx].withElementType(Ty);
+ Address SrcAddr = Addrs[SrcIdx].withElementType(Ty);
DstLV = this->CGF->MakeAddrLValue(DstAddr, FT);
SrcLV = this->CGF->MakeAddrLValue(SrcAddr, FT);
}
@@ -665,7 +660,7 @@ struct GenDefaultInitialize
llvm::Constant *SizeVal = CGF->Builder.getInt64(Size.getQuantity());
Address DstAddr = getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD);
- Address Loc = CGF->Builder.CreateElementBitCast(DstAddr, CGF->Int8Ty);
+ Address Loc = DstAddr.withElementType(CGF->Int8Ty);
CGF->Builder.CreateMemSet(Loc, CGF->Builder.getInt8(0), SizeVal,
IsVolatile);
}
@@ -817,8 +812,7 @@ void CodeGenFunction::destroyNonTrivialCStruct(CodeGenFunction &CGF,
// such structure.
void CodeGenFunction::defaultInitNonTrivialCStructVar(LValue Dst) {
GenDefaultInitialize Gen(getContext());
- Address DstPtr =
- Builder.CreateElementBitCast(Dst.getAddress(*this), CGM.Int8PtrTy);
+ Address DstPtr = Dst.getAddress(*this).withElementType(CGM.Int8PtrTy);
Gen.setCGF(this);
QualType QT = Dst.getType();
QT = Dst.isVolatile() ? QT.withVolatile() : QT;
@@ -831,7 +825,7 @@ static void callSpecialFunction(G &&Gen, StringRef FuncName, QualType QT,
std::array<Address, N> Addrs) {
auto SetArtificialLoc = ApplyDebugLocation::CreateArtificial(CGF);
for (unsigned I = 0; I < N; ++I)
- Addrs[I] = CGF.Builder.CreateElementBitCast(Addrs[I], CGF.CGM.Int8PtrTy);
+ Addrs[I] = Addrs[I].withElementType(CGF.CGM.Int8PtrTy);
QT = IsVolatile ? QT.withVolatile() : QT;
Gen.callFunc(FuncName, QT, Addrs, CGF);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
index 7df2088a81d7..46c37eaea82b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
@@ -140,7 +140,7 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc());
cast<llvm::LoadInst>(Ptr)->setMetadata(
- CGM.getModule().getMDKindID("invariant.load"),
+ llvm::LLVMContext::MD_invariant_load,
llvm::MDNode::get(getLLVMContext(), std::nullopt));
return Builder.CreateBitCast(Ptr, ConvertType(E->getType()));
}
@@ -1190,7 +1190,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// Perform an atomic load. This does not impose ordering constraints.
Address ivarAddr = LV.getAddress(*this);
- ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);
+ ivarAddr = ivarAddr.withElementType(bitcastType);
llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
load->setAtomic(llvm::AtomicOrdering::Unordered);
@@ -1204,8 +1204,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
bitcastType = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
ivarVal = Builder.CreateTrunc(load, bitcastType);
}
- Builder.CreateStore(ivarVal,
- Builder.CreateElementBitCast(ReturnValue, bitcastType));
+ Builder.CreateStore(ivarVal, ReturnValue.withElementType(bitcastType));
// Make sure we don't do an autorelease.
AutoreleaseResult = false;
@@ -1485,15 +1484,13 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
// Currently, all atomic accesses have to be through integer
// types, so there's no point in trying to pick a prettier type.
- llvm::Type *bitcastType =
- llvm::Type::getIntNTy(getLLVMContext(),
- getContext().toBits(strategy.getIvarSize()));
+ llvm::Type *castType = llvm::Type::getIntNTy(
+ getLLVMContext(), getContext().toBits(strategy.getIvarSize()));
// Cast both arguments to the chosen operation type.
- argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
- ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);
+ argAddr = argAddr.withElementType(castType);
+ ivarAddr = ivarAddr.withElementType(castType);
- // This bitcast load is likely to cause some nasty IR.
llvm::Value *load = Builder.CreateLoad(argAddr);
// Perform an atomic store. There are no memory ordering requirements.
@@ -2205,18 +2202,7 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr,
if (!fn)
fn = getARCIntrinsic(IntID, CGF.CGM);
- // Cast the argument to 'id*'.
- llvm::Type *origType = addr.getElementType();
- addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8PtrTy);
-
- // Call the function.
- llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
-
- // Cast the result back to a dereference of the original type.
- if (origType != CGF.Int8PtrTy)
- result = CGF.Builder.CreateBitCast(result, origType);
-
- return result;
+ return CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
}
/// Perform an operation having the following signature:
@@ -2661,9 +2647,6 @@ void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
if (!fn)
fn = getARCIntrinsic(llvm::Intrinsic::objc_destroyWeak, CGM);
- // Cast the argument to 'id*'.
- addr = Builder.CreateElementBitCast(addr, Int8PtrTy);
-
EmitNounwindRuntimeCall(fn, addr.getPointer());
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
index c7b193e34ea0..09b6c3ac6adf 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -46,17 +46,13 @@ namespace {
/// types and the function declaration into a module if they're not used, and
/// avoids constructing the type more than once if it's used more than once.
class LazyRuntimeFunction {
- CodeGenModule *CGM;
- llvm::FunctionType *FTy;
- const char *FunctionName;
- llvm::FunctionCallee Function;
+ CodeGenModule *CGM = nullptr;
+ llvm::FunctionType *FTy = nullptr;
+ const char *FunctionName = nullptr;
+ llvm::FunctionCallee Function = nullptr;
public:
- /// Constructor leaves this class uninitialized, because it is intended to
- /// be used as a field in another class and not all of the types that are
- /// used as arguments will necessarily be available at construction time.
- LazyRuntimeFunction()
- : CGM(nullptr), FunctionName(nullptr), Function(nullptr) {}
+ LazyRuntimeFunction() = default;
/// Initialises the lazy function with the name, return type, and the types
/// of the arguments.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
index c739d3742f80..32f4f411347a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
@@ -3809,15 +3809,9 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
ivarList.fillPlaceholderWithInt(countSlot, ObjCTypes.IntTy, count);
llvm::GlobalVariable *GV;
- if (ForClass)
- GV =
- CreateMetadataVar("OBJC_CLASS_VARIABLES_" + ID->getName(), ivarList,
- "__OBJC,__class_vars,regular,no_dead_strip",
- CGM.getPointerAlign(), true);
- else
- GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), ivarList,
- "__OBJC,__instance_vars,regular,no_dead_strip",
- CGM.getPointerAlign(), true);
+ GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), ivarList,
+ "__OBJC,__instance_vars,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
}
@@ -5023,11 +5017,8 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
}
void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
- Address DestPtr,
- Address SrcPtr,
+ Address DestPtr, Address SrcPtr,
llvm::Value *size) {
- SrcPtr = CGF.Builder.CreateElementBitCast(SrcPtr, CGF.Int8Ty);
- DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), size };
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
@@ -5293,12 +5284,7 @@ llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
}
llvm::Function *CGObjCCommonMac::GetMethodDefinition(const ObjCMethodDecl *MD) {
- llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*>::iterator
- I = MethodDefinitions.find(MD);
- if (I != MethodDefinitions.end())
- return I->second;
-
- return nullptr;
+ return MethodDefinitions.lookup(MD);
}
/// GetIvarLayoutName - Returns a unique constant for the given
@@ -7229,7 +7215,7 @@ CGObjCNonFragileABIMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
CGF.getSizeAlign(), "ivar");
if (IsIvarOffsetKnownIdempotent(CGF, Ivar))
cast<llvm::LoadInst>(IvarOffsetValue)
- ->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ ->setMetadata(llvm::LLVMContext::MD_invariant_load,
llvm::MDNode::get(VMContext, std::nullopt));
}
@@ -7431,7 +7417,7 @@ CGObjCNonFragileABIMac::GetClassGlobal(StringRef Name,
GV->eraseFromParent();
}
GV = NewGV;
- CGM.getModule().getGlobalList().push_back(GV);
+ CGM.getModule().insertGlobalVariable(GV);
}
assert(GV->getLinkage() == L);
@@ -7629,7 +7615,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CodeGenFunction &CGF,
Address Addr = EmitSelectorAddr(Sel);
llvm::LoadInst* LI = CGF.Builder.CreateLoad(Addr);
- LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ LI->setMetadata(llvm::LLVMContext::MD_invariant_load,
llvm::MDNode::get(VMContext, std::nullopt));
return LI;
}
@@ -7701,12 +7687,8 @@ void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
}
void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
- CodeGen::CodeGenFunction &CGF,
- Address DestPtr,
- Address SrcPtr,
- llvm::Value *Size) {
- SrcPtr = CGF.Builder.CreateElementBitCast(SrcPtr, CGF.Int8Ty);
- DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
+ CodeGen::CodeGenFunction &CGF, Address DestPtr, Address SrcPtr,
+ llvm::Value *Size) {
llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), Size };
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
index 9097a8cf7009..634a3d5a938d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -107,10 +107,10 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
CGF.CGM.getContext().toBits(StorageSize),
CharUnits::fromQuantity(0)));
- Address Addr = Address(V, CGF.Int8Ty, Alignment);
- Addr = CGF.Builder.CreateElementBitCast(Addr,
- llvm::Type::getIntNTy(CGF.getLLVMContext(),
- Info->StorageSize));
+ Address Addr =
+ Address(V, llvm::Type::getIntNTy(CGF.getLLVMContext(), Info->StorageSize),
+ Alignment);
+
return LValue::MakeBitfield(Addr, *Info, IvarTy,
LValueBaseInfo(AlignmentSource::Decl),
TBAAAccessInfo());
@@ -364,14 +364,14 @@ CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
CallArgList &callArgs) {
unsigned ProgramAS = CGM.getDataLayout().getProgramAddressSpace();
+ llvm::PointerType *signatureType =
+ llvm::PointerType::get(CGM.getLLVMContext(), ProgramAS);
+
// If there's a method, use information from that.
if (method) {
const CGFunctionInfo &signature =
CGM.getTypes().arrangeObjCMessageSendSignature(method, callArgs[0].Ty);
- llvm::PointerType *signatureType =
- CGM.getTypes().GetFunctionType(signature)->getPointerTo(ProgramAS);
-
const CGFunctionInfo &signatureForCall =
CGM.getTypes().arrangeCall(signature, callArgs);
@@ -382,9 +382,6 @@ CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
const CGFunctionInfo &argsInfo =
CGM.getTypes().arrangeUnprototypedObjCMessageSend(resultType, callArgs);
- // Derive the signature to call from that.
- llvm::PointerType *signatureType =
- CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo(ProgramAS);
return MessageSendInfo(argsInfo, signatureType);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp
index ab8de7ecf50c..dc2330a29976 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -31,8 +31,11 @@ void CGOpenCLRuntime::EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
}
llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
- assert(T->isOpenCLSpecificType() &&
- "Not an OpenCL specific type!");
+ assert(T->isOpenCLSpecificType() && "Not an OpenCL specific type!");
+
+ // Check if the target has a specific translation for this type first.
+ if (llvm::Type *TransTy = CGM.getTargetCodeGenInfo().getOpenCLType(CGM, T))
+ return TransTy;
switch (cast<BuiltinType>(T)->getKind()) {
default:
@@ -75,6 +78,9 @@ llvm::PointerType *CGOpenCLRuntime::getPointerType(const Type *T,
}
llvm::Type *CGOpenCLRuntime::getPipeType(const PipeType *T) {
+ if (llvm::Type *PipeTy = CGM.getTargetCodeGenInfo().getOpenCLType(CGM, T))
+ return PipeTy;
+
if (T->isReadOnly())
return getPipeType(T, "opencl.pipe_ro_t", PipeROTy);
else
@@ -91,12 +97,18 @@ llvm::Type *CGOpenCLRuntime::getPipeType(const PipeType *T, StringRef Name,
return PipeTy;
}
-llvm::PointerType *CGOpenCLRuntime::getSamplerType(const Type *T) {
- if (!SamplerTy)
- SamplerTy = llvm::PointerType::get(llvm::StructType::create(
- CGM.getLLVMContext(), "opencl.sampler_t"),
- CGM.getContext().getTargetAddressSpace(
- CGM.getContext().getOpenCLTypeAddrSpace(T)));
+llvm::Type *CGOpenCLRuntime::getSamplerType(const Type *T) {
+ if (SamplerTy)
+ return SamplerTy;
+
+ if (llvm::Type *TransTy = CGM.getTargetCodeGenInfo().getOpenCLType(
+ CGM, CGM.getContext().OCLSamplerTy.getTypePtr()))
+ SamplerTy = TransTy;
+ else
+ SamplerTy = llvm::PointerType::get(
+ llvm::StructType::create(CGM.getLLVMContext(), "opencl.sampler_t"),
+ CGM.getContext().getTargetAddressSpace(
+ CGM.getContext().getOpenCLTypeAddrSpace(T)));
return SamplerTy;
}
@@ -149,14 +161,13 @@ static const BlockExpr *getBlockExpr(const Expr *E) {
void CGOpenCLRuntime::recordBlockInfo(const BlockExpr *E,
llvm::Function *InvokeF,
llvm::Value *Block, llvm::Type *BlockTy) {
- assert(EnqueuedBlockMap.find(E) == EnqueuedBlockMap.end() &&
- "Block expression emitted twice");
+ assert(!EnqueuedBlockMap.contains(E) && "Block expression emitted twice");
assert(isa<llvm::Function>(InvokeF) && "Invalid invoke function");
assert(Block->getType()->isPointerTy() && "Invalid block literal type");
EnqueuedBlockMap[E].InvokeFunc = InvokeF;
EnqueuedBlockMap[E].BlockArg = Block;
EnqueuedBlockMap[E].BlockTy = BlockTy;
- EnqueuedBlockMap[E].Kernel = nullptr;
+ EnqueuedBlockMap[E].KernelHandle = nullptr;
}
llvm::Function *CGOpenCLRuntime::getInvokeFunction(const Expr *E) {
@@ -171,11 +182,10 @@ CGOpenCLRuntime::emitOpenCLEnqueuedBlock(CodeGenFunction &CGF, const Expr *E) {
// to get the block literal.
const BlockExpr *Block = getBlockExpr(E);
- assert(EnqueuedBlockMap.find(Block) != EnqueuedBlockMap.end() &&
- "Block expression not emitted");
+ assert(EnqueuedBlockMap.contains(Block) && "Block expression not emitted");
// Do not emit the block wrapper again if it has been emitted.
- if (EnqueuedBlockMap[Block].Kernel) {
+ if (EnqueuedBlockMap[Block].KernelHandle) {
return EnqueuedBlockMap[Block];
}
@@ -183,9 +193,6 @@ CGOpenCLRuntime::emitOpenCLEnqueuedBlock(CodeGenFunction &CGF, const Expr *E) {
CGF, EnqueuedBlockMap[Block].InvokeFunc, EnqueuedBlockMap[Block].BlockTy);
// The common part of the post-processing of the kernel goes here.
- F->addFnAttr(llvm::Attribute::NoUnwind);
- F->setCallingConv(
- CGF.getTypes().ClangCallConvToLLVMCallConv(CallingConv::CC_OpenCLKernel));
- EnqueuedBlockMap[Block].Kernel = F;
+ EnqueuedBlockMap[Block].KernelHandle = F;
return EnqueuedBlockMap[Block];
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h
index 900644b3b93b..df8084d6008b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h
@@ -38,13 +38,13 @@ protected:
CodeGenModule &CGM;
llvm::Type *PipeROTy;
llvm::Type *PipeWOTy;
- llvm::PointerType *SamplerTy;
+ llvm::Type *SamplerTy;
llvm::StringMap<llvm::PointerType *> CachedTys;
/// Structure for enqueued block information.
struct EnqueuedBlockInfo {
llvm::Function *InvokeFunc; /// Block invoke function.
- llvm::Function *Kernel; /// Enqueued block kernel.
+ llvm::Value *KernelHandle; /// Enqueued block kernel reference.
llvm::Value *BlockArg; /// The first argument to enqueued block kernel.
llvm::Type *BlockTy; /// Type of the block argument.
};
@@ -70,7 +70,7 @@ public:
virtual llvm::Type *getPipeType(const PipeType *T);
- llvm::PointerType *getSamplerType(const Type *T);
+ llvm::Type *getSamplerType(const Type *T);
// Returns a value which indicates the size in bytes of the pipe
// element.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 2284aa1d1eb6..a52ec8909b12 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -498,11 +498,6 @@ enum OpenMPOffloadingRequiresDirFlags : int64_t {
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
};
-enum OpenMPOffloadingReservedDeviceIDs {
- /// Device ID if the device was not defined, runtime should get it
- /// from environment variables in the spec.
- OMP_DEVICEID_UNDEF = -1,
-};
} // anonymous namespace
/// Describes ident structure that describes a source location.
@@ -689,8 +684,7 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
if (DRD)
- SrcAddr =
- CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
+ SrcAddr = SrcAddr.withElementType(DestAddr.getElementType());
llvm::Value *SrcBegin = nullptr;
if (DRD)
@@ -911,8 +905,8 @@ void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
QualType PrivateType = getPrivateType(N);
QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
if (needCleanups(N)) {
- PrivateAddr = CGF.Builder.CreateElementBitCast(
- PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
+ PrivateAddr =
+ PrivateAddr.withElementType(CGF.ConvertTypeForMem(PrivateType));
CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
}
}
@@ -931,8 +925,7 @@ static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
BaseTy = BaseTy->getPointeeType();
}
return CGF.MakeAddrLValue(
- CGF.Builder.CreateElementBitCast(BaseLV.getAddress(CGF),
- CGF.ConvertTypeForMem(ElTy)),
+ BaseLV.getAddress(CGF).withElementType(CGF.ConvertTypeForMem(ElTy)),
BaseLV.getType(), BaseLV.getBaseInfo(),
CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
}
@@ -963,7 +956,7 @@ static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Addr, OriginalBaseAddress.getType());
- return OriginalBaseAddress.withPointer(Addr);
+ return OriginalBaseAddress.withPointer(Addr, NotKnownNonNull);
}
static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
@@ -1059,16 +1052,15 @@ static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
}
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
- : CGM(CGM), OMPBuilder(CGM.getModule()), OffloadEntriesInfoManager() {
+ : CGM(CGM), OMPBuilder(CGM.getModule()) {
KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
- llvm::OpenMPIRBuilderConfig Config(CGM.getLangOpts().OpenMPIsDevice, false,
- hasRequiresUnifiedSharedMemory(),
+ llvm::OpenMPIRBuilderConfig Config(CGM.getLangOpts().OpenMPIsTargetDevice,
+ isGPU(), hasRequiresUnifiedSharedMemory(),
CGM.getLangOpts().OpenMPOffloadMandatory);
- // Initialize Types used in OpenMPIRBuilder from OMPKinds.def
- OMPBuilder.initialize();
+ OMPBuilder.initialize(CGM.getLangOpts().OpenMPIsTargetDevice
+ ? CGM.getLangOpts().OMPHostIRFile
+ : StringRef{});
OMPBuilder.setConfig(Config);
- OffloadEntriesInfoManager.setConfig(Config);
- loadOffloadInfoMetadata();
}
void CGOpenMPRuntime::clear() {
@@ -1262,20 +1254,38 @@ static llvm::Function *emitParallelOrTeamsOutlinedFunction(
return CGF.GenerateOpenMPCapturedStmtFunction(*CS, D.getBeginLoc());
}
+std::string CGOpenMPRuntime::getOutlinedHelperName(StringRef Name) const {
+ std::string Suffix = getName({"omp_outlined"});
+ return (Name + Suffix).str();
+}
+
+std::string CGOpenMPRuntime::getOutlinedHelperName(CodeGenFunction &CGF) const {
+ return getOutlinedHelperName(CGF.CurFn->getName());
+}
+
+std::string CGOpenMPRuntime::getReductionFuncName(StringRef Name) const {
+ std::string Suffix = getName({"omp", "reduction", "reduction_func"});
+ return (Name + Suffix).str();
+}
+
llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) {
const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
return emitParallelOrTeamsOutlinedFunction(
- CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
+ CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(CGF),
+ CodeGen);
}
llvm::Function *CGOpenMPRuntime::emitTeamsOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) {
const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
return emitParallelOrTeamsOutlinedFunction(
- CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
+ CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(CGF),
+ CodeGen);
}
llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
@@ -1368,8 +1378,8 @@ llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
unsigned Flags, bool EmitLoc) {
uint32_t SrcLocStrSize;
llvm::Constant *SrcLocStr;
- if ((!EmitLoc &&
- CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo) ||
+ if ((!EmitLoc && CGM.getCodeGenOpts().getDebugInfo() ==
+ llvm::codegenoptions::NoDebugInfo) ||
Loc.isInvalid()) {
SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
} else {
@@ -1589,71 +1599,94 @@ CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, bool IVSigned) {
return CGM.CreateRuntimeFunction(FnTy, Name);
}
-/// Obtain information that uniquely identifies a target entry. This
-/// consists of the file and device IDs as well as line number associated with
-/// the relevant entry source location.
-static llvm::TargetRegionEntryInfo
-getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
- StringRef ParentName = "") {
- SourceManager &SM = C.getSourceManager();
+llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseKind
+convertDeviceClause(const VarDecl *VD) {
+ std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
+ OMPDeclareTargetDeclAttr::getDeviceType(VD);
+ if (!DevTy)
+ return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseNone;
+
+ switch ((int)*DevTy) { // Avoid -Wcovered-switch-default
+ case OMPDeclareTargetDeclAttr::DT_Host:
+ return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseHost;
+ break;
+ case OMPDeclareTargetDeclAttr::DT_NoHost:
+ return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseNoHost;
+ break;
+ case OMPDeclareTargetDeclAttr::DT_Any:
+ return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseAny;
+ break;
+ default:
+ return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseNone;
+ break;
+ }
+}
+
+llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind
+convertCaptureClause(const VarDecl *VD) {
+ std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> MapType =
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
+ if (!MapType)
+ return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryNone;
+ switch ((int)*MapType) { // Avoid -Wcovered-switch-default
+ case OMPDeclareTargetDeclAttr::MapTypeTy::MT_To:
+ return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryTo;
+ break;
+ case OMPDeclareTargetDeclAttr::MapTypeTy::MT_Enter:
+ return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryEnter;
+ break;
+ case OMPDeclareTargetDeclAttr::MapTypeTy::MT_Link:
+ return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryLink;
+ break;
+ default:
+ return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryNone;
+ break;
+ }
+}
- // The loc should be always valid and have a file ID (the user cannot use
- // #pragma directives in macros)
+static llvm::TargetRegionEntryInfo getEntryInfoFromPresumedLoc(
+ CodeGenModule &CGM, llvm::OpenMPIRBuilder &OMPBuilder,
+ SourceLocation BeginLoc, llvm::StringRef ParentName = "") {
- assert(Loc.isValid() && "Source location is expected to be always valid.");
+ auto FileInfoCallBack = [&]() {
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(BeginLoc);
- PresumedLoc PLoc = SM.getPresumedLoc(Loc);
- assert(PLoc.isValid() && "Source location is expected to be always valid.");
+ llvm::sys::fs::UniqueID ID;
+ if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
+ PLoc = SM.getPresumedLoc(BeginLoc, /*UseLineDirectives=*/false);
+ }
- llvm::sys::fs::UniqueID ID;
- if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
- PLoc = SM.getPresumedLoc(Loc, /*UseLineDirectives=*/false);
- assert(PLoc.isValid() && "Source location is expected to be always valid.");
- if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
- SM.getDiagnostics().Report(diag::err_cannot_open_file)
- << PLoc.getFilename() << EC.message();
- }
+ return std::pair<std::string, uint64_t>(PLoc.getFilename(), PLoc.getLine());
+ };
- return llvm::TargetRegionEntryInfo(ParentName, ID.getDevice(), ID.getFile(),
- PLoc.getLine());
+ return OMPBuilder.getTargetEntryUniqueInfo(FileInfoCallBack, ParentName);
}
Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
- if (CGM.getLangOpts().OpenMPSimd)
- return Address::invalid();
- std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
- if (Res && (*Res == OMPDeclareTargetDeclAttr::MT_Link ||
- ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
- *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
- HasRequiresUnifiedSharedMemory))) {
- SmallString<64> PtrName;
- {
- llvm::raw_svector_ostream OS(PtrName);
- OS << CGM.getMangledName(GlobalDecl(VD));
- if (!VD->isExternallyVisible()) {
- auto EntryInfo = getTargetEntryUniqueInfo(
- CGM.getContext(), VD->getCanonicalDecl()->getBeginLoc());
- OS << llvm::format("_%x", EntryInfo.FileID);
- }
- OS << "_decl_tgt_ref_ptr";
- }
- llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
- QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
- llvm::Type *LlvmPtrTy = CGM.getTypes().ConvertTypeForMem(PtrTy);
- if (!Ptr) {
- Ptr = OMPBuilder.getOrCreateInternalVariable(LlvmPtrTy, PtrName);
+ auto AddrOfGlobal = [&VD, this]() { return CGM.GetAddrOfGlobal(VD); };
- auto *GV = cast<llvm::GlobalVariable>(Ptr);
- GV->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
+ auto LinkageForVariable = [&VD, this]() {
+ return CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
+ };
- if (!CGM.getLangOpts().OpenMPIsDevice)
- GV->setInitializer(CGM.GetAddrOfGlobal(VD));
- registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
- }
- return Address(Ptr, LlvmPtrTy, CGM.getContext().getDeclAlign(VD));
- }
- return Address::invalid();
+ std::vector<llvm::GlobalVariable *> GeneratedRefs;
+
+ llvm::Type *LlvmPtrTy = CGM.getTypes().ConvertTypeForMem(
+ CGM.getContext().getPointerType(VD->getType()));
+ llvm::Constant *addr = OMPBuilder.getAddrOfDeclareTargetVar(
+ convertCaptureClause(VD), convertDeviceClause(VD),
+ VD->hasDefinition(CGM.getContext()) == VarDecl::DeclarationOnly,
+ VD->isExternallyVisible(),
+ getEntryInfoFromPresumedLoc(CGM, OMPBuilder,
+ VD->getCanonicalDecl()->getBeginLoc()),
+ CGM.getMangledName(VD), GeneratedRefs, CGM.getLangOpts().OpenMPSimd,
+ CGM.getLangOpts().OMPTargetTriples, LlvmPtrTy, AddrOfGlobal,
+ LinkageForVariable);
+
+ if (!addr)
+ return Address::invalid();
+ return Address(addr, LlvmPtrTy, CGM.getContext().getDeclAlign(VD));
}
llvm::Constant *
@@ -1742,9 +1775,8 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
CGM.getContext().VoidPtrTy, Dst.getLocation());
- Address Arg(ArgVal, CtorCGF.Int8Ty, VDAddr.getAlignment());
- Arg = CtorCGF.Builder.CreateElementBitCast(
- Arg, CtorCGF.ConvertTypeForMem(ASTTy));
+ Address Arg(ArgVal, CtorCGF.ConvertTypeForMem(ASTTy),
+ VDAddr.getAlignment());
CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
/*IsInitializer=*/true);
ArgVal = CtorCGF.EmitLoadOfScalar(
@@ -1833,7 +1865,7 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit) {
if (CGM.getLangOpts().OMPTargetTriples.empty() &&
- !CGM.getLangOpts().OpenMPIsDevice)
+ !CGM.getLangOpts().OpenMPIsTargetDevice)
return false;
std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
@@ -1841,12 +1873,12 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
((*Res == OMPDeclareTargetDeclAttr::MT_To ||
*Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
HasRequiresUnifiedSharedMemory))
- return CGM.getLangOpts().OpenMPIsDevice;
+ return CGM.getLangOpts().OpenMPIsTargetDevice;
VD = VD->getDefinition(CGM.getContext());
assert(VD && "Unknown VarDecl");
if (!DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
- return CGM.getLangOpts().OpenMPIsDevice;
+ return CGM.getLangOpts().OpenMPIsTargetDevice;
QualType ASTTy = VD->getType();
SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc();
@@ -1854,16 +1886,16 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
// Produce the unique prefix to identify the new target regions. We use
// the source location of the variable declaration which we know to not
// conflict with any target region.
- auto EntryInfo =
- getTargetEntryUniqueInfo(CGM.getContext(), Loc, VD->getName());
+ llvm::TargetRegionEntryInfo EntryInfo =
+ getEntryInfoFromPresumedLoc(CGM, OMPBuilder, Loc, VD->getName());
SmallString<128> Buffer, Out;
- OffloadEntriesInfoManager.getTargetRegionEntryFnName(Buffer, EntryInfo);
+ OMPBuilder.OffloadInfoManager.getTargetRegionEntryFnName(Buffer, EntryInfo);
const Expr *Init = VD->getAnyInitializer();
if (CGM.getLangOpts().CPlusPlus && PerformInit) {
llvm::Constant *Ctor;
llvm::Constant *ID;
- if (CGM.getLangOpts().OpenMPIsDevice) {
+ if (CGM.getLangOpts().OpenMPIsTargetDevice) {
// Generate function that re-emits the declaration's initializer into
// the threadprivate copy of the variable VD
CodeGenFunction CtorCGF(CGM);
@@ -1883,8 +1915,7 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::Constant *AddrInAS0 = Addr;
if (Addr->getAddressSpace() != 0)
AddrInAS0 = llvm::ConstantExpr::getAddrSpaceCast(
- Addr, llvm::PointerType::getWithSamePointeeType(
- cast<llvm::PointerType>(Addr->getType()), 0));
+ Addr, llvm::PointerType::get(CGM.getLLVMContext(), 0));
CtorCGF.EmitAnyExprToMem(Init,
Address(AddrInAS0, Addr->getValueType(),
CGM.getContext().getDeclAlign(VD)),
@@ -1905,14 +1936,14 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
Out.clear();
auto CtorEntryInfo = EntryInfo;
CtorEntryInfo.ParentName = Twine(Buffer, "_ctor").toStringRef(Out);
- OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
+ OMPBuilder.OffloadInfoManager.registerTargetRegionEntryInfo(
CtorEntryInfo, Ctor, ID,
llvm::OffloadEntriesInfoManager::OMPTargetRegionEntryCtor);
}
if (VD->getType().isDestructedType() != QualType::DK_none) {
llvm::Constant *Dtor;
llvm::Constant *ID;
- if (CGM.getLangOpts().OpenMPIsDevice) {
+ if (CGM.getLangOpts().OpenMPIsTargetDevice) {
// Generate function that emits destructor call for the threadprivate
// copy of the variable VD
CodeGenFunction DtorCGF(CGM);
@@ -1934,8 +1965,7 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::Constant *AddrInAS0 = Addr;
if (Addr->getAddressSpace() != 0)
AddrInAS0 = llvm::ConstantExpr::getAddrSpaceCast(
- Addr, llvm::PointerType::getWithSamePointeeType(
- cast<llvm::PointerType>(Addr->getType()), 0));
+ Addr, llvm::PointerType::get(CGM.getLLVMContext(), 0));
DtorCGF.emitDestroy(Address(AddrInAS0, Addr->getValueType(),
CGM.getContext().getDeclAlign(VD)),
ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
@@ -1954,11 +1984,11 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
Out.clear();
auto DtorEntryInfo = EntryInfo;
DtorEntryInfo.ParentName = Twine(Buffer, "_dtor").toStringRef(Out);
- OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
+ OMPBuilder.OffloadInfoManager.registerTargetRegionEntryInfo(
DtorEntryInfo, Dtor, ID,
llvm::OffloadEntriesInfoManager::OMPTargetRegionEntryDtor);
}
- return CGM.getLangOpts().OpenMPIsDevice;
+ return CGM.getLangOpts().OpenMPIsTargetDevice;
}
Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
@@ -2131,7 +2161,11 @@ Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
std::string Name = getName({Prefix, "var"});
- return OMPBuilder.getOrCreateInternalVariable(KmpCriticalNameTy, Name);
+ llvm::GlobalVariable *G = OMPBuilder.getOrCreateInternalVariable(KmpCriticalNameTy, Name);
+ llvm::Align PtrAlign = OMPBuilder.M.getDataLayout().getPointerABIAlignment(G->getAddressSpace());
+ if (PtrAlign > llvm::Align(G->getAlignment()))
+ G->setAlignment(PtrAlign);
+ return G;
}
namespace {
@@ -2778,7 +2812,7 @@ void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
const StaticRTInput &Values) {
OpenMPSchedType ScheduleNum = getRuntimeSchedule(
ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
- assert(isOpenMPWorksharingDirective(DKind) &&
+ assert((isOpenMPWorksharingDirective(DKind) || (DKind == OMPD_loop)) &&
"Expected loop-based or sections-based directive.");
llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
isOpenMPLoopDirective(DKind)
@@ -2803,7 +2837,7 @@ void CGOpenMPRuntime::emitDistributeStaticInit(
llvm::Value *ThreadId = getThreadID(CGF, Loc);
llvm::FunctionCallee StaticInitFunction;
bool isGPUDistribute =
- CGM.getLangOpts().OpenMPIsDevice &&
+ CGM.getLangOpts().OpenMPIsTargetDevice &&
(CGM.getTriple().isAMDGCN() || CGM.getTriple().isNVPTX());
StaticInitFunction = createForStaticInitFunction(
Values.IVSize, Values.IVSigned, isGPUDistribute);
@@ -2828,7 +2862,8 @@ void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
: OMP_IDENT_WORK_SECTIONS),
getThreadID(CGF, Loc)};
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
- if (isOpenMPDistributeDirective(DKind) && CGM.getLangOpts().OpenMPIsDevice &&
+ if (isOpenMPDistributeDirective(DKind) &&
+ CGM.getLangOpts().OpenMPIsTargetDevice &&
(CGM.getTriple().isAMDGCN() || CGM.getTriple().isNVPTX()))
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
@@ -2947,7 +2982,7 @@ enum KmpTaskTFields {
void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
// If we are in simd mode or there are no entries, we don't need to do
// anything.
- if (CGM.getLangOpts().OpenMPSimd || OffloadEntriesInfoManager.empty())
+ if (CGM.getLangOpts().OpenMPSimd || OMPBuilder.OffloadInfoManager.empty())
return;
llvm::OpenMPIRBuilder::EmitMetadataErrorReportFunctionTy &&ErrorReportFn =
@@ -2991,42 +3026,7 @@ void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
}
};
- OMPBuilder.createOffloadEntriesAndInfoMetadata(OffloadEntriesInfoManager,
- ErrorReportFn);
-}
-
-/// Loads all the offload entries information from the host IR
-/// metadata.
-void CGOpenMPRuntime::loadOffloadInfoMetadata() {
- // If we are in target mode, load the metadata from the host IR. This code has
- // to match the metadaata creation in createOffloadEntriesAndInfoMetadata().
-
- if (!CGM.getLangOpts().OpenMPIsDevice)
- return;
-
- if (CGM.getLangOpts().OMPHostIRFile.empty())
- return;
-
- auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
- if (auto EC = Buf.getError()) {
- CGM.getDiags().Report(diag::err_cannot_open_file)
- << CGM.getLangOpts().OMPHostIRFile << EC.message();
- return;
- }
-
- llvm::LLVMContext C;
- auto ME = expectedToErrorOrAndEmitErrors(
- C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
-
- if (auto EC = ME.getError()) {
- unsigned DiagID = CGM.getDiags().getCustomDiagID(
- DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
- CGM.getDiags().Report(DiagID)
- << CGM.getLangOpts().OMPHostIRFile << EC.message();
- return;
- }
-
- OMPBuilder.loadOffloadInfoMetadata(*ME.get(), OffloadEntriesInfoManager);
+ OMPBuilder.createOffloadEntriesAndInfoMetadata(ErrorReportFn);
}
void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
@@ -4242,8 +4242,7 @@ CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
LValue Base = CGF.EmitLoadOfPointerLValue(
- CGF.Builder.CreateElementBitCast(
- DepobjLVal.getAddress(CGF),
+ DepobjLVal.getAddress(CGF).withElementType(
CGF.ConvertTypeForMem(KmpDependInfoPtrTy)),
KmpDependInfoPtrTy->castAs<PointerType>());
Address DepObjAddr = CGF.Builder.CreateGEP(
@@ -4670,7 +4669,7 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
llvm::PHINode *ElementPHI =
CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast");
ElementPHI->addIncoming(Begin.getPointer(), EntryBB);
- Begin = Begin.withPointer(ElementPHI);
+ Begin = Begin.withPointer(ElementPHI, KnownNonNull);
Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(),
Base.getTBAAInfo());
// deps[i].flags = NewDepKind;
@@ -5005,7 +5004,7 @@ static void emitReductionCombiner(CodeGenFunction &CGF,
}
llvm::Function *CGOpenMPRuntime::emitReductionFunction(
- SourceLocation Loc, llvm::Type *ArgsElemType,
+ StringRef ReducerName, SourceLocation Loc, llvm::Type *ArgsElemType,
ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
ASTContext &C = CGM.getContext();
@@ -5020,7 +5019,7 @@ llvm::Function *CGOpenMPRuntime::emitReductionFunction(
Args.push_back(&RHSArg);
const auto &CGFI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- std::string Name = getName({"omp", "reduction", "reduction_func"});
+ std::string Name = getReductionFuncName(ReducerName);
auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
llvm::GlobalValue::InternalLinkage, Name,
&CGM.getModule());
@@ -5215,9 +5214,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
}
// 2. Emit reduce_func().
- llvm::Function *ReductionFn =
- emitReductionFunction(Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
- Privates, LHSExprs, RHSExprs, ReductionOps);
+ llvm::Function *ReductionFn = emitReductionFunction(
+ CGF.CurFn->getName(), Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
+ Privates, LHSExprs, RHSExprs, ReductionOps);
// 3. Create static kmp_critical_name lock = { 0 };
std::string Name = getName({"reduction"});
@@ -5469,8 +5468,7 @@ static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
QualType PrivateType = RCG.getPrivateType(N);
Address PrivateAddr = CGF.EmitLoadOfPointer(
- CGF.Builder.CreateElementBitCast(
- CGF.GetAddrOfLocalVar(&Param),
+ CGF.GetAddrOfLocalVar(&Param).withElementType(
CGF.ConvertTypeForMem(PrivateType)->getPointerTo()),
C.getPointerType(PrivateType)->castAs<PointerType>());
llvm::Value *Size = nullptr;
@@ -5558,17 +5556,16 @@ static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
LHSVD,
// Pull out the pointer to the variable.
CGF.EmitLoadOfPointer(
- CGF.Builder.CreateElementBitCast(
- CGF.GetAddrOfLocalVar(&ParamInOut),
- CGF.ConvertTypeForMem(LHSVD->getType())->getPointerTo()),
+ CGF.GetAddrOfLocalVar(&ParamInOut)
+ .withElementType(
+ CGF.ConvertTypeForMem(LHSVD->getType())->getPointerTo()),
C.getPointerType(LHSVD->getType())->castAs<PointerType>()));
PrivateScope.addPrivate(
RHSVD,
// Pull out the pointer to the variable.
CGF.EmitLoadOfPointer(
- CGF.Builder.CreateElementBitCast(
- CGF.GetAddrOfLocalVar(&ParamIn),
- CGF.ConvertTypeForMem(RHSVD->getType())->getPointerTo()),
+ CGF.GetAddrOfLocalVar(&ParamIn).withElementType(
+ CGF.ConvertTypeForMem(RHSVD->getType())->getPointerTo()),
C.getPointerType(RHSVD->getType())->castAs<PointerType>()));
PrivateScope.Privatize();
// Emit the combiner body:
@@ -5678,14 +5675,12 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
// ElemLVal.reduce_shar = &Shareds[Cnt];
LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
RCG.emitSharedOrigLValue(CGF, Cnt);
- llvm::Value *CastedShared =
- CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer(CGF));
- CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
+ llvm::Value *Shared = RCG.getSharedLValue(Cnt).getPointer(CGF);
+ CGF.EmitStoreOfScalar(Shared, SharedLVal);
// ElemLVal.reduce_orig = &Origs[Cnt];
LValue OrigLVal = CGF.EmitLValueForField(ElemLVal, OrigFD);
- llvm::Value *CastedOrig =
- CGF.EmitCastToVoidPtr(RCG.getOrigLValue(Cnt).getPointer(CGF));
- CGF.EmitStoreOfScalar(CastedOrig, OrigLVal);
+ llvm::Value *Orig = RCG.getOrigLValue(Cnt).getPointer(CGF);
+ CGF.EmitStoreOfScalar(Orig, OrigLVal);
RCG.emitAggregateType(CGF, Cnt);
llvm::Value *SizeValInChars;
llvm::Value *SizeVal;
@@ -5702,21 +5697,19 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
// ElemLVal.reduce_init = init;
LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
- llvm::Value *InitAddr =
- CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
+ llvm::Value *InitAddr = emitReduceInitFunction(CGM, Loc, RCG, Cnt);
CGF.EmitStoreOfScalar(InitAddr, InitLVal);
// ElemLVal.reduce_fini = fini;
LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
- llvm::Value *FiniAddr = Fini
- ? CGF.EmitCastToVoidPtr(Fini)
- : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
+ llvm::Value *FiniAddr =
+ Fini ? Fini : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
// ElemLVal.reduce_comb = comb;
LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
- llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
+ llvm::Value *CombAddr = emitReduceCombFunction(
CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
- RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
+ RHSExprs[Cnt], Data.ReductionCopies[Cnt]);
CGF.EmitStoreOfScalar(CombAddr, CombLVal);
// ElemLVal.flags = 0;
LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
@@ -6057,15 +6050,14 @@ void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
AllocatorTraitsLVal.getBaseInfo(),
AllocatorTraitsLVal.getTBAAInfo());
- llvm::Value *Traits =
- CGF.EmitLoadOfScalar(AllocatorTraitsLVal, AllocatorTraits->getExprLoc());
+ llvm::Value *Traits = Addr.getPointer();
llvm::Value *AllocatorVal =
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_init_allocator),
{ThreadId, MemSpaceHandle, NumTraits, Traits});
// Store to allocator.
- CGF.EmitVarDecl(*cast<VarDecl>(
+ CGF.EmitAutoVarAlloca(*cast<VarDecl>(
cast<DeclRefExpr>(Allocator->IgnoreParenImpCasts())->getDecl()));
LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
AllocatorVal =
@@ -6095,8 +6087,8 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
- auto EntryInfo =
- getTargetEntryUniqueInfo(CGM.getContext(), D.getBeginLoc(), ParentName);
+ llvm::TargetRegionEntryInfo EntryInfo =
+ getEntryInfoFromPresumedLoc(CGM, OMPBuilder, D.getBeginLoc(), ParentName);
CodeGenFunction CGF(CGM, true);
llvm::OpenMPIRBuilder::FunctionGenCallback &&GenerateOutlinedFunction =
@@ -6114,10 +6106,9 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
getNumTeamsExprForTargetDirective(CGF, D, DefaultValTeams);
getNumThreadsExprForTargetDirective(CGF, D, DefaultValThreads);
- OMPBuilder.emitTargetRegionFunction(OffloadEntriesInfoManager, EntryInfo,
- GenerateOutlinedFunction, DefaultValTeams,
- DefaultValThreads, IsOffloadEntry,
- OutlinedFn, OutlinedFnID);
+ OMPBuilder.emitTargetRegionFunction(EntryInfo, GenerateOutlinedFunction,
+ DefaultValTeams, DefaultValThreads,
+ IsOffloadEntry, OutlinedFn, OutlinedFnID);
if (OutlinedFn != nullptr)
CGM.getTargetCodeGenInfo().setTargetAttributes(nullptr, OutlinedFn, CGM);
@@ -6216,6 +6207,7 @@ const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
DefaultVal = -1;
return nullptr;
}
+ case OMPD_target_teams_loop:
case OMPD_target_teams:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
@@ -6235,12 +6227,14 @@ const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
case OMPD_target_simd:
DefaultVal = 1;
return nullptr;
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
+ case OMPD_parallel_loop:
case OMPD_parallel_master:
case OMPD_parallel_sections:
case OMPD_for_simd:
@@ -6306,7 +6300,7 @@ const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
llvm::Value *CGOpenMPRuntime::emitNumTeamsForTargetDirective(
CodeGenFunction &CGF, const OMPExecutableDirective &D) {
- assert(!CGF.getLangOpts().OpenMPIsDevice &&
+ assert(!CGF.getLangOpts().OpenMPIsTargetDevice &&
"Clauses associated with the teams directive expected to be emitted "
"only for the host!");
CGBuilderTy &Bld = CGF.Builder;
@@ -6457,6 +6451,8 @@ const Expr *CGOpenMPRuntime::getNumThreadsExprForTargetDirective(
return ThreadLimit;
}
return nullptr;
+ case OMPD_target_teams_loop:
+ case OMPD_target_parallel_loop:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
@@ -6558,7 +6554,7 @@ const Expr *CGOpenMPRuntime::getNumThreadsExprForTargetDirective(
llvm::Value *CGOpenMPRuntime::emitNumThreadsForTargetDirective(
CodeGenFunction &CGF, const OMPExecutableDirective &D) {
- assert(!CGF.getLangOpts().OpenMPIsDevice &&
+ assert(!CGF.getLangOpts().OpenMPIsTargetDevice &&
"Clauses associated with the teams directive expected to be emitted "
"only for the host!");
OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
@@ -6659,6 +6655,8 @@ llvm::Value *CGOpenMPRuntime::emitNumThreadsForTargetDirective(
getNumThreads(CGF, D.getInnermostCapturedStmt(), ThreadLimitVal))
return NumThreads;
return Bld.getInt32(0);
+ case OMPD_target_teams_loop:
+ case OMPD_target_parallel_loop:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
@@ -6821,67 +6819,31 @@ public:
const Expr *getMapExpr() const { return MapExpr; }
};
- /// Class that associates information with a base pointer to be passed to the
- /// runtime library.
- class BasePointerInfo {
- /// The base pointer.
- llvm::Value *Ptr = nullptr;
- /// The base declaration that refers to this device pointer, or null if
- /// there is none.
- const ValueDecl *DevPtrDecl = nullptr;
-
- public:
- BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
- : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
- llvm::Value *operator*() const { return Ptr; }
- const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
- void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
- };
-
+ using DeviceInfoTy = llvm::OpenMPIRBuilder::DeviceInfoTy;
+ using MapBaseValuesArrayTy = llvm::OpenMPIRBuilder::MapValuesArrayTy;
+ using MapValuesArrayTy = llvm::OpenMPIRBuilder::MapValuesArrayTy;
+ using MapFlagsArrayTy = llvm::OpenMPIRBuilder::MapFlagsArrayTy;
+ using MapDimArrayTy = llvm::OpenMPIRBuilder::MapDimArrayTy;
+ using MapNonContiguousArrayTy =
+ llvm::OpenMPIRBuilder::MapNonContiguousArrayTy;
using MapExprsArrayTy = SmallVector<MappingExprInfo, 4>;
- using MapBaseValuesArrayTy = SmallVector<BasePointerInfo, 4>;
- using MapValuesArrayTy = SmallVector<llvm::Value *, 4>;
- using MapFlagsArrayTy = SmallVector<OpenMPOffloadMappingFlags, 4>;
- using MapMappersArrayTy = SmallVector<const ValueDecl *, 4>;
- using MapDimArrayTy = SmallVector<uint64_t, 4>;
- using MapNonContiguousArrayTy = SmallVector<MapValuesArrayTy, 4>;
+ using MapValueDeclsArrayTy = SmallVector<const ValueDecl *, 4>;
/// This structure contains combined information generated for mappable
/// clauses, including base pointers, pointers, sizes, map types, user-defined
/// mappers, and non-contiguous information.
- struct MapCombinedInfoTy {
- struct StructNonContiguousInfo {
- bool IsNonContiguous = false;
- MapDimArrayTy Dims;
- MapNonContiguousArrayTy Offsets;
- MapNonContiguousArrayTy Counts;
- MapNonContiguousArrayTy Strides;
- };
+ struct MapCombinedInfoTy : llvm::OpenMPIRBuilder::MapInfosTy {
MapExprsArrayTy Exprs;
- MapBaseValuesArrayTy BasePointers;
- MapValuesArrayTy Pointers;
- MapValuesArrayTy Sizes;
- MapFlagsArrayTy Types;
- MapMappersArrayTy Mappers;
- StructNonContiguousInfo NonContigInfo;
+ MapValueDeclsArrayTy Mappers;
+ MapValueDeclsArrayTy DevicePtrDecls;
/// Append arrays in \a CurInfo.
void append(MapCombinedInfoTy &CurInfo) {
Exprs.append(CurInfo.Exprs.begin(), CurInfo.Exprs.end());
- BasePointers.append(CurInfo.BasePointers.begin(),
- CurInfo.BasePointers.end());
- Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end());
- Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end());
- Types.append(CurInfo.Types.begin(), CurInfo.Types.end());
+ DevicePtrDecls.append(CurInfo.DevicePtrDecls.begin(),
+ CurInfo.DevicePtrDecls.end());
Mappers.append(CurInfo.Mappers.begin(), CurInfo.Mappers.end());
- NonContigInfo.Dims.append(CurInfo.NonContigInfo.Dims.begin(),
- CurInfo.NonContigInfo.Dims.end());
- NonContigInfo.Offsets.append(CurInfo.NonContigInfo.Offsets.begin(),
- CurInfo.NonContigInfo.Offsets.end());
- NonContigInfo.Counts.append(CurInfo.NonContigInfo.Counts.begin(),
- CurInfo.NonContigInfo.Counts.end());
- NonContigInfo.Strides.append(CurInfo.NonContigInfo.Strides.begin(),
- CurInfo.NonContigInfo.Strides.end());
+ llvm::OpenMPIRBuilder::MapInfosTy::append(CurInfo);
}
};
@@ -7163,6 +7125,7 @@ private:
// double d;
// int i[100];
// float *p;
+ // int **a = &i;
//
// struct S1 {
// int i;
@@ -7196,6 +7159,14 @@ private:
// in unified shared memory mode or for local pointers
// p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM
//
+ // map((*a)[0:3])
+ // &(*a), &(*a), sizeof(pointer), TARGET_PARAM | TO | FROM
+ // &(*a), &(*a)[0], 3*sizeof(int), PTR_AND_OBJ | TO | FROM
+ //
+ // map(**a)
+ // &(*a), &(*a), sizeof(pointer), TARGET_PARAM | TO | FROM
+ // &(*a), &(**a), sizeof(int), PTR_AND_OBJ | TO | FROM
+ //
// map(s)
// &s, &s, sizeof(S2), TARGET_PARAM | TO | FROM
//
@@ -7488,7 +7459,9 @@ private:
bool IsMemberReference = isa<MemberExpr>(I->getAssociatedExpression()) &&
MapDecl &&
MapDecl->getType()->isLValueReferenceType();
- bool IsNonDerefPointer = IsPointer && !UO && !BO && !IsNonContiguous;
+ bool IsNonDerefPointer = IsPointer &&
+ !(UO && UO->getOpcode() != UO_Deref) && !BO &&
+ !IsNonContiguous;
if (OASE)
++DimSize;
@@ -7609,14 +7582,15 @@ private:
.getAddress(CGF);
}
Size = CGF.Builder.CreatePtrDiff(
- CGF.Int8Ty, CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
- CGF.EmitCastToVoidPtr(LB.getPointer()));
+ CGF.Int8Ty, ComponentLB.getPointer(), LB.getPointer());
break;
}
}
assert(Size && "Failed to determine structure size");
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(LB.getPointer());
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
Size, CGF.Int64Ty, /*isSigned=*/true));
@@ -7628,10 +7602,12 @@ private:
}
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(LB.getPointer());
Size = CGF.Builder.CreatePtrDiff(
CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
- CGF.EmitCastToVoidPtr(LB.getPointer()));
+ LB.getPointer());
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
CombinedInfo.Types.push_back(Flags);
@@ -7645,6 +7621,8 @@ private:
(Next == CE && MapType != OMPC_MAP_unknown)) {
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(LB.getPointer());
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
@@ -8145,9 +8123,12 @@ private:
auto &&UseDeviceDataCombinedInfoGen =
[&UseDeviceDataCombinedInfo](const ValueDecl *VD, llvm::Value *Ptr,
- CodeGenFunction &CGF) {
+ CodeGenFunction &CGF, bool IsDevAddr) {
UseDeviceDataCombinedInfo.Exprs.push_back(VD);
- UseDeviceDataCombinedInfo.BasePointers.emplace_back(Ptr, VD);
+ UseDeviceDataCombinedInfo.BasePointers.emplace_back(Ptr);
+ UseDeviceDataCombinedInfo.DevicePtrDecls.emplace_back(VD);
+ UseDeviceDataCombinedInfo.DevicePointers.emplace_back(
+ IsDevAddr ? DeviceInfoTy::Address : DeviceInfoTy::Pointer);
UseDeviceDataCombinedInfo.Pointers.push_back(Ptr);
UseDeviceDataCombinedInfo.Sizes.push_back(
llvm::Constant::getNullValue(CGF.Int64Ty));
@@ -8187,7 +8168,7 @@ private:
} else {
Ptr = CGF.EmitLoadOfScalar(CGF.EmitLValue(IE), IE->getExprLoc());
}
- UseDeviceDataCombinedInfoGen(VD, Ptr, CGF);
+ UseDeviceDataCombinedInfoGen(VD, Ptr, CGF, IsDevAddr);
}
};
@@ -8214,6 +8195,7 @@ private:
// item.
if (CI != Data.end()) {
if (IsDevAddr) {
+ CI->ForDeviceAddr = IsDevAddr;
CI->ReturnDevicePointer = true;
Found = true;
break;
@@ -8226,6 +8208,7 @@ private:
PrevCI == CI->Components.rend() ||
isa<MemberExpr>(PrevCI->getAssociatedExpression()) || !VarD ||
VarD->hasLocalStorage()) {
+ CI->ForDeviceAddr = IsDevAddr;
CI->ReturnDevicePointer = true;
Found = true;
break;
@@ -8316,8 +8299,9 @@ private:
assert(RelevantVD &&
"No relevant declaration related with device pointer??");
- CurInfo.BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(
- RelevantVD);
+ CurInfo.DevicePtrDecls[CurrentBasePointersIdx] = RelevantVD;
+ CurInfo.DevicePointers[CurrentBasePointersIdx] =
+ L.ForDeviceAddr ? DeviceInfoTy::Address : DeviceInfoTy::Pointer;
CurInfo.Types[CurrentBasePointersIdx] |=
OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM;
}
@@ -8356,7 +8340,10 @@ private:
OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF);
}
CurInfo.Exprs.push_back(L.VD);
- CurInfo.BasePointers.emplace_back(BasePtr, L.VD);
+ CurInfo.BasePointers.emplace_back(BasePtr);
+ CurInfo.DevicePtrDecls.emplace_back(L.VD);
+ CurInfo.DevicePointers.emplace_back(
+ L.ForDeviceAddr ? DeviceInfoTy::Address : DeviceInfoTy::Pointer);
CurInfo.Pointers.push_back(Ptr);
CurInfo.Sizes.push_back(
llvm::Constant::getNullValue(this->CGF.Int64Ty));
@@ -8367,7 +8354,8 @@ private:
// individual members mapped. Emit an extra combined entry.
if (PartialStruct.Base.isValid()) {
CurInfo.NonContigInfo.Dims.push_back(0);
- emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct, VD);
+ emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct,
+ /*IsMapThis*/ !VD, VD);
}
// We need to append the results of this capture to what we already
@@ -8433,7 +8421,7 @@ public:
/// individual struct members.
void emitCombinedEntry(MapCombinedInfoTy &CombinedInfo,
MapFlagsArrayTy &CurTypes,
- const StructRangeInfoTy &PartialStruct,
+ const StructRangeInfoTy &PartialStruct, bool IsMapThis,
const ValueDecl *VD = nullptr,
bool NotTargetParams = true) const {
if (CurTypes.size() == 1 &&
@@ -8450,12 +8438,14 @@ public:
CombinedInfo.Exprs.push_back(VD);
// Base is the base of the struct
CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer());
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
// Pointer is the address of the lowest element
llvm::Value *LB = LBAddr.getPointer();
const CXXMethodDecl *MD =
CGF.CurFuncDecl ? dyn_cast<CXXMethodDecl>(CGF.CurFuncDecl) : nullptr;
const CXXRecordDecl *RD = MD ? MD->getParent() : nullptr;
- bool HasBaseClass = RD ? RD->getNumBases() > 0 : false;
+ bool HasBaseClass = RD && IsMapThis ? RD->getNumBases() > 0 : false;
// There should not be a mapper for a combined entry.
if (HasBaseClass) {
// OpenMP 5.2 148:21:
@@ -8571,6 +8561,8 @@ public:
VDLVal.getPointer(CGF));
CombinedInfo.Exprs.push_back(VD);
CombinedInfo.BasePointers.push_back(ThisLVal.getPointer(CGF));
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(ThisLValVal.getPointer(CGF));
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy),
@@ -8597,6 +8589,8 @@ public:
VDLVal.getPointer(CGF));
CombinedInfo.Exprs.push_back(VD);
CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(VarLValVal.getPointer(CGF));
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(
@@ -8608,6 +8602,8 @@ public:
VDLVal.getPointer(CGF));
CombinedInfo.Exprs.push_back(VD);
CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(VarRVal.getScalarVal());
CombinedInfo.Sizes.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 0));
}
@@ -8632,7 +8628,7 @@ public:
OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF |
OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT))
continue;
- llvm::Value *BasePtr = LambdaPointers.lookup(*BasePointers[I]);
+ llvm::Value *BasePtr = LambdaPointers.lookup(BasePointers[I]);
assert(BasePtr && "Unable to find base lambda address.");
int TgtIdx = -1;
for (unsigned J = I; J > 0; --J) {
@@ -8674,15 +8670,15 @@ public:
// pass its value.
if (VD && (DevPointersMap.count(VD) || HasDevAddrsMap.count(VD))) {
CombinedInfo.Exprs.push_back(VD);
- CombinedInfo.BasePointers.emplace_back(Arg, VD);
+ CombinedInfo.BasePointers.emplace_back(Arg);
+ CombinedInfo.DevicePtrDecls.emplace_back(VD);
+ CombinedInfo.DevicePointers.emplace_back(DeviceInfoTy::Pointer);
CombinedInfo.Pointers.push_back(Arg);
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(CGF.getContext().VoidPtrTy), CGF.Int64Ty,
/*isSigned=*/true));
CombinedInfo.Types.push_back(
- (Cap->capturesVariable()
- ? OpenMPOffloadMappingFlags::OMP_MAP_TO
- : OpenMPOffloadMappingFlags::OMP_MAP_LITERAL) |
+ OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM);
CombinedInfo.Mappers.push_back(nullptr);
return;
@@ -8916,6 +8912,8 @@ public:
if (CI.capturesThis()) {
CombinedInfo.Exprs.push_back(nullptr);
CombinedInfo.BasePointers.push_back(CV);
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(CV);
const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
CombinedInfo.Sizes.push_back(
@@ -8928,6 +8926,8 @@ public:
const VarDecl *VD = CI.getCapturedVar();
CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
CombinedInfo.BasePointers.push_back(CV);
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(CV);
if (!RI.getType()->isAnyPointerType()) {
// We have to signal to the runtime captures passed by value that are
@@ -8959,6 +8959,8 @@ public:
auto I = FirstPrivateDecls.find(VD);
CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
CombinedInfo.BasePointers.push_back(CV);
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
if (I != FirstPrivateDecls.end() && ElementType->isAnyPointerType()) {
Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
CV, ElementType, CGF.getContext().getDeclAlign(VD),
@@ -8984,74 +8986,6 @@ public:
};
} // anonymous namespace
-static void emitNonContiguousDescriptor(
- CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
- CGOpenMPRuntime::TargetDataInfo &Info) {
- CodeGenModule &CGM = CGF.CGM;
- MappableExprsHandler::MapCombinedInfoTy::StructNonContiguousInfo
- &NonContigInfo = CombinedInfo.NonContigInfo;
-
- // Build an array of struct descriptor_dim and then assign it to
- // offload_args.
- //
- // struct descriptor_dim {
- // uint64_t offset;
- // uint64_t count;
- // uint64_t stride
- // };
- ASTContext &C = CGF.getContext();
- QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
- RecordDecl *RD;
- RD = C.buildImplicitRecord("descriptor_dim");
- RD->startDefinition();
- addFieldToRecordDecl(C, RD, Int64Ty);
- addFieldToRecordDecl(C, RD, Int64Ty);
- addFieldToRecordDecl(C, RD, Int64Ty);
- RD->completeDefinition();
- QualType DimTy = C.getRecordType(RD);
-
- enum { OffsetFD = 0, CountFD, StrideFD };
- // We need two index variable here since the size of "Dims" is the same as the
- // size of Components, however, the size of offset, count, and stride is equal
- // to the size of base declaration that is non-contiguous.
- for (unsigned I = 0, L = 0, E = NonContigInfo.Dims.size(); I < E; ++I) {
- // Skip emitting ir if dimension size is 1 since it cannot be
- // non-contiguous.
- if (NonContigInfo.Dims[I] == 1)
- continue;
- llvm::APInt Size(/*numBits=*/32, NonContigInfo.Dims[I]);
- QualType ArrayTy =
- C.getConstantArrayType(DimTy, Size, nullptr, ArrayType::Normal, 0);
- Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
- for (unsigned II = 0, EE = NonContigInfo.Dims[I]; II < EE; ++II) {
- unsigned RevIdx = EE - II - 1;
- LValue DimsLVal = CGF.MakeAddrLValue(
- CGF.Builder.CreateConstArrayGEP(DimsAddr, II), DimTy);
- // Offset
- LValue OffsetLVal = CGF.EmitLValueForField(
- DimsLVal, *std::next(RD->field_begin(), OffsetFD));
- CGF.EmitStoreOfScalar(NonContigInfo.Offsets[L][RevIdx], OffsetLVal);
- // Count
- LValue CountLVal = CGF.EmitLValueForField(
- DimsLVal, *std::next(RD->field_begin(), CountFD));
- CGF.EmitStoreOfScalar(NonContigInfo.Counts[L][RevIdx], CountLVal);
- // Stride
- LValue StrideLVal = CGF.EmitLValueForField(
- DimsLVal, *std::next(RD->field_begin(), StrideFD));
- CGF.EmitStoreOfScalar(NonContigInfo.Strides[L][RevIdx], StrideLVal);
- }
- // args[I] = &dims
- Address DAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- DimsAddr, CGM.Int8PtrTy, CGM.Int8Ty);
- llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.RTArgs.PointersArray, 0, I);
- Address PAddr(P, CGM.VoidPtrTy, CGF.getPointerAlign());
- CGF.Builder.CreateStore(DAddr.getPointer(), PAddr);
- ++L;
- }
-}
-
// Try to extract the base declaration from a `this->x` expression if possible.
static ValueDecl *getDeclFromThisExpr(const Expr *E) {
if (!E)
@@ -9108,196 +9042,45 @@ static void emitOffloadingArrays(
CGOpenMPRuntime::TargetDataInfo &Info, llvm::OpenMPIRBuilder &OMPBuilder,
bool IsNonContiguous = false) {
CodeGenModule &CGM = CGF.CGM;
- ASTContext &Ctx = CGF.getContext();
// Reset the array information.
Info.clearArrayInfo();
Info.NumberOfPtrs = CombinedInfo.BasePointers.size();
- if (Info.NumberOfPtrs) {
- // Detect if we have any capture size requiring runtime evaluation of the
- // size so that a constant array could be eventually used.
-
- llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
- QualType PointerArrayType = Ctx.getConstantArrayType(
- Ctx.VoidPtrTy, PointerNumAP, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
-
- Info.RTArgs.BasePointersArray =
- CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
- Info.RTArgs.PointersArray =
- CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
- Address MappersArray =
- CGF.CreateMemTemp(PointerArrayType, ".offload_mappers");
- Info.RTArgs.MappersArray = MappersArray.getPointer();
-
- // If we don't have any VLA types or other types that require runtime
- // evaluation, we can use a constant array for the map sizes, otherwise we
- // need to fill up the arrays as we do for the pointers.
- QualType Int64Ty =
- Ctx.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
- SmallVector<llvm::Constant *> ConstSizes(
- CombinedInfo.Sizes.size(), llvm::ConstantInt::get(CGF.Int64Ty, 0));
- llvm::SmallBitVector RuntimeSizes(CombinedInfo.Sizes.size());
- for (unsigned I = 0, E = CombinedInfo.Sizes.size(); I < E; ++I) {
- if (auto *CI = dyn_cast<llvm::Constant>(CombinedInfo.Sizes[I])) {
- if (!isa<llvm::ConstantExpr>(CI) && !isa<llvm::GlobalValue>(CI)) {
- if (IsNonContiguous &&
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- CombinedInfo.Types[I] &
- OpenMPOffloadMappingFlags::OMP_MAP_NON_CONTIG))
- ConstSizes[I] = llvm::ConstantInt::get(
- CGF.Int64Ty, CombinedInfo.NonContigInfo.Dims[I]);
- else
- ConstSizes[I] = CI;
- continue;
- }
- }
- RuntimeSizes.set(I);
- }
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+ InsertPointTy AllocaIP(CGF.AllocaInsertPt->getParent(),
+ CGF.AllocaInsertPt->getIterator());
+ InsertPointTy CodeGenIP(CGF.Builder.GetInsertBlock(),
+ CGF.Builder.GetInsertPoint());
- if (RuntimeSizes.all()) {
- QualType SizeArrayType = Ctx.getConstantArrayType(
- Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- Info.RTArgs.SizesArray =
- CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
- } else {
- auto *SizesArrayInit = llvm::ConstantArray::get(
- llvm::ArrayType::get(CGM.Int64Ty, ConstSizes.size()), ConstSizes);
- std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
- auto *SizesArrayGbl = new llvm::GlobalVariable(
- CGM.getModule(), SizesArrayInit->getType(), /*isConstant=*/true,
- llvm::GlobalValue::PrivateLinkage, SizesArrayInit, Name);
- SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- if (RuntimeSizes.any()) {
- QualType SizeArrayType = Ctx.getConstantArrayType(
- Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- Address Buffer = CGF.CreateMemTemp(SizeArrayType, ".offload_sizes");
- llvm::Value *GblConstPtr =
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- SizesArrayGbl, CGM.Int64Ty->getPointerTo());
- CGF.Builder.CreateMemCpy(
- Buffer,
- Address(GblConstPtr, CGM.Int64Ty,
- CGM.getNaturalTypeAlignment(Ctx.getIntTypeForBitwidth(
- /*DestWidth=*/64, /*Signed=*/false))),
- CGF.getTypeSize(SizeArrayType));
- Info.RTArgs.SizesArray = Buffer.getPointer();
- } else {
- Info.RTArgs.SizesArray = SizesArrayGbl;
- }
- }
+ auto FillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
+ return emitMappingInformation(CGF, OMPBuilder, MapExpr);
+ };
+ if (CGM.getCodeGenOpts().getDebugInfo() !=
+ llvm::codegenoptions::NoDebugInfo) {
+ CombinedInfo.Names.resize(CombinedInfo.Exprs.size());
+ llvm::transform(CombinedInfo.Exprs, CombinedInfo.Names.begin(),
+ FillInfoMap);
+ }
- // The map types are always constant so we don't need to generate code to
- // fill arrays. Instead, we create an array constant.
- SmallVector<uint64_t, 4> Mapping;
- for (auto mapFlag : CombinedInfo.Types)
- Mapping.push_back(
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- mapFlag));
- std::string MaptypesName =
- CGM.getOpenMPRuntime().getName({"offload_maptypes"});
- auto *MapTypesArrayGbl =
- OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
- Info.RTArgs.MapTypesArray = MapTypesArrayGbl;
-
- // The information types are only built if there is debug information
- // requested.
- if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo) {
- Info.RTArgs.MapNamesArray = llvm::Constant::getNullValue(
- llvm::Type::getInt8Ty(CGF.Builder.getContext())->getPointerTo());
- } else {
- auto fillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
- return emitMappingInformation(CGF, OMPBuilder, MapExpr);
- };
- SmallVector<llvm::Constant *, 4> InfoMap(CombinedInfo.Exprs.size());
- llvm::transform(CombinedInfo.Exprs, InfoMap.begin(), fillInfoMap);
- std::string MapnamesName =
- CGM.getOpenMPRuntime().getName({"offload_mapnames"});
- auto *MapNamesArrayGbl =
- OMPBuilder.createOffloadMapnames(InfoMap, MapnamesName);
- Info.RTArgs.MapNamesArray = MapNamesArrayGbl;
- }
-
- // If there's a present map type modifier, it must not be applied to the end
- // of a region, so generate a separate map type array in that case.
- if (Info.separateBeginEndCalls()) {
- bool EndMapTypesDiffer = false;
- for (uint64_t &Type : Mapping) {
- if (Type &
- static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_PRESENT)) {
- Type &=
- ~static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
- OpenMPOffloadMappingFlags::OMP_MAP_PRESENT);
- EndMapTypesDiffer = true;
- }
- }
- if (EndMapTypesDiffer) {
- MapTypesArrayGbl =
- OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
- Info.RTArgs.MapTypesArrayEnd = MapTypesArrayGbl;
- }
+ auto DeviceAddrCB = [&](unsigned int I, llvm::Value *NewDecl) {
+ if (const ValueDecl *DevVD = CombinedInfo.DevicePtrDecls[I]) {
+ Info.CaptureDeviceAddrMap.try_emplace(DevVD, NewDecl);
}
+ };
- for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
- llvm::Value *BPVal = *CombinedInfo.BasePointers[I];
- llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.RTArgs.BasePointersArray, 0, I);
- BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
- Address BPAddr(BP, BPVal->getType(),
- Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
- CGF.Builder.CreateStore(BPVal, BPAddr);
-
- if (Info.requiresDevicePointerInfo())
- if (const ValueDecl *DevVD =
- CombinedInfo.BasePointers[I].getDevicePtrDecl())
- Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);
-
- llvm::Value *PVal = CombinedInfo.Pointers[I];
- llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.RTArgs.PointersArray, 0, I);
- P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
- Address PAddr(P, PVal->getType(), Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
- CGF.Builder.CreateStore(PVal, PAddr);
-
- if (RuntimeSizes.test(I)) {
- llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
- Info.RTArgs.SizesArray,
- /*Idx0=*/0,
- /*Idx1=*/I);
- Address SAddr(S, CGM.Int64Ty, Ctx.getTypeAlignInChars(Int64Ty));
- CGF.Builder.CreateStore(CGF.Builder.CreateIntCast(CombinedInfo.Sizes[I],
- CGM.Int64Ty,
- /*isSigned=*/true),
- SAddr);
- }
-
- // Fill up the mapper array.
- llvm::Value *MFunc = llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
- if (CombinedInfo.Mappers[I]) {
- MFunc = CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(
- cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I]));
- MFunc = CGF.Builder.CreatePointerCast(MFunc, CGM.VoidPtrTy);
- Info.HasMapper = true;
- }
- Address MAddr = CGF.Builder.CreateConstArrayGEP(MappersArray, I);
- CGF.Builder.CreateStore(MFunc, MAddr);
+ auto CustomMapperCB = [&](unsigned int I) {
+ llvm::Value *MFunc = nullptr;
+ if (CombinedInfo.Mappers[I]) {
+ Info.HasMapper = true;
+ MFunc = CGF.CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(
+ cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I]));
}
- }
-
- if (!IsNonContiguous || CombinedInfo.NonContigInfo.Offsets.empty() ||
- Info.NumberOfPtrs == 0)
- return;
-
- emitNonContiguousDescriptor(CGF, CombinedInfo, Info);
+ return MFunc;
+ };
+ OMPBuilder.emitOffloadingArrays(AllocaIP, CodeGenIP, CombinedInfo, Info,
+ /*IsNonContiguous=*/true, DeviceAddrCB,
+ CustomMapperCB);
}
/// Check for inner distribute directive.
@@ -9314,7 +9097,8 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
switch (D.getDirectiveKind()) {
case OMPD_target:
- if (isOpenMPDistributeDirective(DKind))
+ // For now, just treat 'target teams loop' as if it's distributed.
+ if (isOpenMPDistributeDirective(DKind) || DKind == OMPD_teams_loop)
return NestedDir;
if (DKind == OMPD_teams) {
Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
@@ -9569,12 +9353,13 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
// Fill up the runtime mapper handle for all components.
for (unsigned I = 0; I < Info.BasePointers.size(); ++I) {
llvm::Value *CurBaseArg = MapperCGF.Builder.CreateBitCast(
- *Info.BasePointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
+ Info.BasePointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
llvm::Value *CurBeginArg = MapperCGF.Builder.CreateBitCast(
Info.Pointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
llvm::Value *CurSizeArg = Info.Sizes[I];
llvm::Value *CurNameArg =
- (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
+ (CGM.getCodeGenOpts().getDebugInfo() ==
+ llvm::codegenoptions::NoDebugInfo)
? llvm::ConstantPointerNull::get(CGM.VoidPtrTy)
: emitMappingInformation(MapperCGF, OMPBuilder, Info.Exprs[I]);
@@ -9797,7 +9582,8 @@ llvm::Value *CGOpenMPRuntime::emitTargetNumIterationsCall(
OpenMPDirectiveKind Kind = D.getDirectiveKind();
const OMPExecutableDirective *TD = &D;
// Get nested teams distribute kind directive, if any.
- if (!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind))
+ if ((!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind)) &&
+ Kind != OMPD_target_teams_loop)
TD = getNestedDistributeDirective(CGM.getContext(), D);
if (!TD)
return llvm::ConstantInt::get(CGF.Int64Ty, 0);
@@ -9808,6 +9594,255 @@ llvm::Value *CGOpenMPRuntime::emitTargetNumIterationsCall(
return llvm::ConstantInt::get(CGF.Int64Ty, 0);
}
+static void
+emitTargetCallFallback(CGOpenMPRuntime *OMPRuntime, llvm::Function *OutlinedFn,
+ const OMPExecutableDirective &D,
+ llvm::SmallVectorImpl<llvm::Value *> &CapturedVars,
+ bool RequiresOuterTask, const CapturedStmt &CS,
+ bool OffloadingMandatory, CodeGenFunction &CGF) {
+ if (OffloadingMandatory) {
+ CGF.Builder.CreateUnreachable();
+ } else {
+ if (RequiresOuterTask) {
+ CapturedVars.clear();
+ CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
+ }
+ OMPRuntime->emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn,
+ CapturedVars);
+ }
+}
+
+static llvm::Value *emitDeviceID(
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
+ CodeGenFunction &CGF) {
+ // Emit device ID if any.
+ llvm::Value *DeviceID;
+ if (Device.getPointer()) {
+ assert((Device.getInt() == OMPC_DEVICE_unknown ||
+ Device.getInt() == OMPC_DEVICE_device_num) &&
+ "Expected device_num modifier.");
+ llvm::Value *DevVal = CGF.EmitScalarExpr(Device.getPointer());
+ DeviceID =
+ CGF.Builder.CreateIntCast(DevVal, CGF.Int64Ty, /*isSigned=*/true);
+ } else {
+ DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
+ }
+ return DeviceID;
+}
+
+llvm::Value *emitDynCGGroupMem(const OMPExecutableDirective &D,
+ CodeGenFunction &CGF) {
+ llvm::Value *DynCGroupMem = CGF.Builder.getInt32(0);
+
+ if (auto *DynMemClause = D.getSingleClause<OMPXDynCGroupMemClause>()) {
+ CodeGenFunction::RunCleanupsScope DynCGroupMemScope(CGF);
+ llvm::Value *DynCGroupMemVal = CGF.EmitScalarExpr(
+ DynMemClause->getSize(), /*IgnoreResultAssign=*/true);
+ DynCGroupMem = CGF.Builder.CreateIntCast(DynCGroupMemVal, CGF.Int32Ty,
+ /*isSigned=*/false);
+ }
+ return DynCGroupMem;
+}
+
+static void emitTargetCallKernelLaunch(
+ CGOpenMPRuntime *OMPRuntime, llvm::Function *OutlinedFn,
+ const OMPExecutableDirective &D,
+ llvm::SmallVectorImpl<llvm::Value *> &CapturedVars, bool RequiresOuterTask,
+ const CapturedStmt &CS, bool OffloadingMandatory,
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
+ llvm::Value *OutlinedFnID, CodeGenFunction::OMPTargetDataInfo &InputInfo,
+ llvm::Value *&MapTypesArray, llvm::Value *&MapNamesArray,
+ llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
+ const OMPLoopDirective &D)>
+ SizeEmitter,
+ CodeGenFunction &CGF, CodeGenModule &CGM) {
+ llvm::OpenMPIRBuilder &OMPBuilder = OMPRuntime->getOMPBuilder();
+
+ // Fill up the arrays with all the captured variables.
+ MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
+
+ // Get mappable expression information.
+ MappableExprsHandler MEHandler(D, CGF);
+ llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>> MappedVarSet;
+
+ auto RI = CS.getCapturedRecordDecl()->field_begin();
+ auto *CV = CapturedVars.begin();
+ for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
+ CE = CS.capture_end();
+ CI != CE; ++CI, ++RI, ++CV) {
+ MappableExprsHandler::MapCombinedInfoTy CurInfo;
+ MappableExprsHandler::StructRangeInfoTy PartialStruct;
+
+ // VLA sizes are passed to the outlined region by copy and do not have map
+ // information associated.
+ if (CI->capturesVariableArrayType()) {
+ CurInfo.Exprs.push_back(nullptr);
+ CurInfo.BasePointers.push_back(*CV);
+ CurInfo.DevicePtrDecls.push_back(nullptr);
+ CurInfo.DevicePointers.push_back(
+ MappableExprsHandler::DeviceInfoTy::None);
+ CurInfo.Pointers.push_back(*CV);
+ CurInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
+ CGF.getTypeSize(RI->getType()), CGF.Int64Ty, /*isSigned=*/true));
+ // Copy to the device as an argument. No need to retrieve it.
+ CurInfo.Types.push_back(OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
+ OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM |
+ OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT);
+ CurInfo.Mappers.push_back(nullptr);
+ } else {
+ // If we have any information in the map clause, we use it, otherwise we
+ // just do a default mapping.
+ MEHandler.generateInfoForCapture(CI, *CV, CurInfo, PartialStruct);
+ if (!CI->capturesThis())
+ MappedVarSet.insert(CI->getCapturedVar());
+ else
+ MappedVarSet.insert(nullptr);
+ if (CurInfo.BasePointers.empty() && !PartialStruct.Base.isValid())
+ MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurInfo);
+ // Generate correct mapping for variables captured by reference in
+ // lambdas.
+ if (CI->capturesVariable())
+ MEHandler.generateInfoForLambdaCaptures(CI->getCapturedVar(), *CV,
+ CurInfo, LambdaPointers);
+ }
+ // We expect to have at least an element of information for this capture.
+ assert((!CurInfo.BasePointers.empty() || PartialStruct.Base.isValid()) &&
+ "Non-existing map pointer for capture!");
+ assert(CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
+ CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
+ CurInfo.BasePointers.size() == CurInfo.Types.size() &&
+ CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
+ "Inconsistent map information sizes!");
+
+ // If there is an entry in PartialStruct it means we have a struct with
+ // individual members mapped. Emit an extra combined entry.
+ if (PartialStruct.Base.isValid()) {
+ CombinedInfo.append(PartialStruct.PreliminaryMapData);
+ MEHandler.emitCombinedEntry(
+ CombinedInfo, CurInfo.Types, PartialStruct, CI->capturesThis(),
+ nullptr, !PartialStruct.PreliminaryMapData.BasePointers.empty());
+ }
+
+ // We need to append the results of this capture to what we already have.
+ CombinedInfo.append(CurInfo);
+ }
+ // Adjust MEMBER_OF flags for the lambdas captures.
+ MEHandler.adjustMemberOfForLambdaCaptures(
+ LambdaPointers, CombinedInfo.BasePointers, CombinedInfo.Pointers,
+ CombinedInfo.Types);
+ // Map any list items in a map clause that were not captures because they
+ // weren't referenced within the construct.
+ MEHandler.generateAllInfo(CombinedInfo, MappedVarSet);
+
+ CGOpenMPRuntime::TargetDataInfo Info;
+ // Fill up the arrays and create the arguments.
+ emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder);
+ bool EmitDebug = CGF.CGM.getCodeGenOpts().getDebugInfo() !=
+ llvm::codegenoptions::NoDebugInfo;
+ OMPBuilder.emitOffloadingArraysArgument(CGF.Builder, Info.RTArgs, Info,
+ EmitDebug,
+ /*ForEndCall=*/false);
+
+ InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
+ InputInfo.BasePointersArray = Address(Info.RTArgs.BasePointersArray,
+ CGF.VoidPtrTy, CGM.getPointerAlign());
+ InputInfo.PointersArray =
+ Address(Info.RTArgs.PointersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
+ InputInfo.SizesArray =
+ Address(Info.RTArgs.SizesArray, CGF.Int64Ty, CGM.getPointerAlign());
+ InputInfo.MappersArray =
+ Address(Info.RTArgs.MappersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
+ MapTypesArray = Info.RTArgs.MapTypesArray;
+ MapNamesArray = Info.RTArgs.MapNamesArray;
+
+ auto &&ThenGen = [&OMPRuntime, OutlinedFn, &D, &CapturedVars,
+ RequiresOuterTask, &CS, OffloadingMandatory, Device,
+ OutlinedFnID, &InputInfo, &MapTypesArray, &MapNamesArray,
+ SizeEmitter](CodeGenFunction &CGF, PrePostActionTy &) {
+ bool IsReverseOffloading = Device.getInt() == OMPC_DEVICE_ancestor;
+
+ if (IsReverseOffloading) {
+ // Reverse offloading is not supported, so just execute on the host.
+ // FIXME: This fallback solution is incorrect since it ignores the
+ // OMP_TARGET_OFFLOAD environment variable. Instead it would be better to
+ // assert here and ensure SEMA emits an error.
+ emitTargetCallFallback(OMPRuntime, OutlinedFn, D, CapturedVars,
+ RequiresOuterTask, CS, OffloadingMandatory, CGF);
+ return;
+ }
+
+ bool HasNoWait = D.hasClausesOfKind<OMPNowaitClause>();
+ unsigned NumTargetItems = InputInfo.NumberOfTargetItems;
+
+ llvm::Value *BasePointersArray = InputInfo.BasePointersArray.getPointer();
+ llvm::Value *PointersArray = InputInfo.PointersArray.getPointer();
+ llvm::Value *SizesArray = InputInfo.SizesArray.getPointer();
+ llvm::Value *MappersArray = InputInfo.MappersArray.getPointer();
+
+ auto &&EmitTargetCallFallbackCB =
+ [&OMPRuntime, OutlinedFn, &D, &CapturedVars, RequiresOuterTask, &CS,
+ OffloadingMandatory, &CGF](llvm::OpenMPIRBuilder::InsertPointTy IP)
+ -> llvm::OpenMPIRBuilder::InsertPointTy {
+ CGF.Builder.restoreIP(IP);
+ emitTargetCallFallback(OMPRuntime, OutlinedFn, D, CapturedVars,
+ RequiresOuterTask, CS, OffloadingMandatory, CGF);
+ return CGF.Builder.saveIP();
+ };
+
+ llvm::Value *DeviceID = emitDeviceID(Device, CGF);
+ llvm::Value *NumTeams = OMPRuntime->emitNumTeamsForTargetDirective(CGF, D);
+ llvm::Value *NumThreads =
+ OMPRuntime->emitNumThreadsForTargetDirective(CGF, D);
+ llvm::Value *RTLoc = OMPRuntime->emitUpdateLocation(CGF, D.getBeginLoc());
+ llvm::Value *NumIterations =
+ OMPRuntime->emitTargetNumIterationsCall(CGF, D, SizeEmitter);
+ llvm::Value *DynCGGroupMem = emitDynCGGroupMem(D, CGF);
+ llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
+ CGF.AllocaInsertPt->getParent(), CGF.AllocaInsertPt->getIterator());
+
+ llvm::OpenMPIRBuilder::TargetDataRTArgs RTArgs(
+ BasePointersArray, PointersArray, SizesArray, MapTypesArray,
+ nullptr /* MapTypesArrayEnd */, MappersArray, MapNamesArray);
+
+ llvm::OpenMPIRBuilder::TargetKernelArgs Args(
+ NumTargetItems, RTArgs, NumIterations, NumTeams, NumThreads,
+ DynCGGroupMem, HasNoWait);
+
+ CGF.Builder.restoreIP(OMPRuntime->getOMPBuilder().emitKernelLaunch(
+ CGF.Builder, OutlinedFn, OutlinedFnID, EmitTargetCallFallbackCB, Args,
+ DeviceID, RTLoc, AllocaIP));
+ };
+
+ if (RequiresOuterTask)
+ CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
+ else
+ OMPRuntime->emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
+}
+
+static void
+emitTargetCallElse(CGOpenMPRuntime *OMPRuntime, llvm::Function *OutlinedFn,
+ const OMPExecutableDirective &D,
+ llvm::SmallVectorImpl<llvm::Value *> &CapturedVars,
+ bool RequiresOuterTask, const CapturedStmt &CS,
+ bool OffloadingMandatory, CodeGenFunction &CGF) {
+
+ // Notify that the host version must be executed.
+ auto &&ElseGen =
+ [&OMPRuntime, OutlinedFn, &D, &CapturedVars, RequiresOuterTask, &CS,
+ OffloadingMandatory](CodeGenFunction &CGF, PrePostActionTy &) {
+ emitTargetCallFallback(OMPRuntime, OutlinedFn, D, CapturedVars,
+ RequiresOuterTask, CS, OffloadingMandatory, CGF);
+ };
+
+ if (RequiresOuterTask) {
+ CodeGenFunction::OMPTargetDataInfo InputInfo;
+ CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
+ } else {
+ OMPRuntime->emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
+ }
+}
+
void CGOpenMPRuntime::emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
@@ -9818,7 +9853,7 @@ void CGOpenMPRuntime::emitTargetCall(
if (!CGF.HaveInsertPoint())
return;
- const bool OffloadingMandatory = !CGM.getLangOpts().OpenMPIsDevice &&
+ const bool OffloadingMandatory = !CGM.getLangOpts().OpenMPIsTargetDevice &&
CGM.getLangOpts().OpenMPOffloadMandatory;
assert((OffloadingMandatory || OutlinedFn) && "Invalid outlined function!");
@@ -9837,259 +9872,24 @@ void CGOpenMPRuntime::emitTargetCall(
CodeGenFunction::OMPTargetDataInfo InputInfo;
llvm::Value *MapTypesArray = nullptr;
llvm::Value *MapNamesArray = nullptr;
- // Generate code for the host fallback function.
- auto &&FallbackGen = [this, OutlinedFn, &D, &CapturedVars, RequiresOuterTask,
- &CS, OffloadingMandatory](CodeGenFunction &CGF) {
- if (OffloadingMandatory) {
- CGF.Builder.CreateUnreachable();
- } else {
- if (RequiresOuterTask) {
- CapturedVars.clear();
- CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
- }
- emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
- }
- };
- // Fill up the pointer arrays and transfer execution to the device.
- auto &&ThenGen = [this, Device, OutlinedFnID, &D, &InputInfo, &MapTypesArray,
- &MapNamesArray, SizeEmitter,
- FallbackGen](CodeGenFunction &CGF, PrePostActionTy &) {
- if (Device.getInt() == OMPC_DEVICE_ancestor) {
- // Reverse offloading is not supported, so just execute on the host.
- FallbackGen(CGF);
- return;
- }
-
- // On top of the arrays that were filled up, the target offloading call
- // takes as arguments the device id as well as the host pointer. The host
- // pointer is used by the runtime library to identify the current target
- // region, so it only has to be unique and not necessarily point to
- // anything. It could be the pointer to the outlined function that
- // implements the target region, but we aren't using that so that the
- // compiler doesn't need to keep that, and could therefore inline the host
- // function if proven worthwhile during optimization.
-
- // From this point on, we need to have an ID of the target region defined.
- assert(OutlinedFnID && "Invalid outlined function ID!");
- (void)OutlinedFnID;
-
- // Emit device ID if any.
- llvm::Value *DeviceID;
- if (Device.getPointer()) {
- assert((Device.getInt() == OMPC_DEVICE_unknown ||
- Device.getInt() == OMPC_DEVICE_device_num) &&
- "Expected device_num modifier.");
- llvm::Value *DevVal = CGF.EmitScalarExpr(Device.getPointer());
- DeviceID =
- CGF.Builder.CreateIntCast(DevVal, CGF.Int64Ty, /*isSigned=*/true);
- } else {
- DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
- }
-
- // Emit the number of elements in the offloading arrays.
- llvm::Value *PointerNum =
- CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
-
- // Return value of the runtime offloading call.
- llvm::Value *Return;
-
- llvm::Value *NumTeams = emitNumTeamsForTargetDirective(CGF, D);
- llvm::Value *NumThreads = emitNumThreadsForTargetDirective(CGF, D);
-
- // Source location for the ident struct
- llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
-
- // Get tripcount for the target loop-based directive.
- llvm::Value *NumIterations =
- emitTargetNumIterationsCall(CGF, D, SizeEmitter);
-
- llvm::Value *DynCGroupMem = CGF.Builder.getInt32(0);
- if (auto *DynMemClause = D.getSingleClause<OMPXDynCGroupMemClause>()) {
- CodeGenFunction::RunCleanupsScope DynCGroupMemScope(CGF);
- llvm::Value *DynCGroupMemVal = CGF.EmitScalarExpr(
- DynMemClause->getSize(), /*IgnoreResultAssign=*/true);
- DynCGroupMem = CGF.Builder.CreateIntCast(DynCGroupMemVal, CGF.Int32Ty,
- /*isSigned=*/false);
- }
-
- llvm::Value *ZeroArray =
- llvm::Constant::getNullValue(llvm::ArrayType::get(CGF.CGM.Int32Ty, 3));
-
- bool HasNoWait = D.hasClausesOfKind<OMPNowaitClause>();
- llvm::Value *Flags = CGF.Builder.getInt64(HasNoWait);
-
- llvm::Value *NumTeams3D =
- CGF.Builder.CreateInsertValue(ZeroArray, NumTeams, {0});
- llvm::Value *NumThreads3D =
- CGF.Builder.CreateInsertValue(ZeroArray, NumThreads, {0});
-
- // Arguments for the target kernel.
- SmallVector<llvm::Value *> KernelArgs{
- CGF.Builder.getInt32(/* Version */ 2),
- PointerNum,
- InputInfo.BasePointersArray.getPointer(),
- InputInfo.PointersArray.getPointer(),
- InputInfo.SizesArray.getPointer(),
- MapTypesArray,
- MapNamesArray,
- InputInfo.MappersArray.getPointer(),
- NumIterations,
- Flags,
- NumTeams3D,
- NumThreads3D,
- DynCGroupMem,
- };
-
- // The target region is an outlined function launched by the runtime
- // via calls to __tgt_target_kernel().
- //
- // Note that on the host and CPU targets, the runtime implementation of
- // these calls simply call the outlined function without forking threads.
- // The outlined functions themselves have runtime calls to
- // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
- // the compiler in emitTeamsCall() and emitParallelCall().
- //
- // In contrast, on the NVPTX target, the implementation of
- // __tgt_target_teams() launches a GPU kernel with the requested number
- // of teams and threads so no additional calls to the runtime are required.
- // Check the error code and execute the host version if required.
- CGF.Builder.restoreIP(OMPBuilder.emitTargetKernel(
- CGF.Builder, Return, RTLoc, DeviceID, NumTeams, NumThreads,
- OutlinedFnID, KernelArgs));
-
- llvm::BasicBlock *OffloadFailedBlock =
- CGF.createBasicBlock("omp_offload.failed");
- llvm::BasicBlock *OffloadContBlock =
- CGF.createBasicBlock("omp_offload.cont");
- llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
- CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
-
- CGF.EmitBlock(OffloadFailedBlock);
- FallbackGen(CGF);
-
- CGF.EmitBranch(OffloadContBlock);
-
- CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
- };
-
- // Notify that the host version must be executed.
- auto &&ElseGen = [FallbackGen](CodeGenFunction &CGF, PrePostActionTy &) {
- FallbackGen(CGF);
- };
-
- auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
- &MapNamesArray, &CapturedVars, RequiresOuterTask,
- &CS](CodeGenFunction &CGF, PrePostActionTy &) {
- // Fill up the arrays with all the captured variables.
- MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
-
- // Get mappable expression information.
- MappableExprsHandler MEHandler(D, CGF);
- llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;
- llvm::DenseSet<CanonicalDeclPtr<const Decl>> MappedVarSet;
-
- auto RI = CS.getCapturedRecordDecl()->field_begin();
- auto *CV = CapturedVars.begin();
- for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
- CE = CS.capture_end();
- CI != CE; ++CI, ++RI, ++CV) {
- MappableExprsHandler::MapCombinedInfoTy CurInfo;
- MappableExprsHandler::StructRangeInfoTy PartialStruct;
-
- // VLA sizes are passed to the outlined region by copy and do not have map
- // information associated.
- if (CI->capturesVariableArrayType()) {
- CurInfo.Exprs.push_back(nullptr);
- CurInfo.BasePointers.push_back(*CV);
- CurInfo.Pointers.push_back(*CV);
- CurInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
- CGF.getTypeSize(RI->getType()), CGF.Int64Ty, /*isSigned=*/true));
- // Copy to the device as an argument. No need to retrieve it.
- CurInfo.Types.push_back(
- OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
- OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM |
- OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT);
- CurInfo.Mappers.push_back(nullptr);
- } else {
- // If we have any information in the map clause, we use it, otherwise we
- // just do a default mapping.
- MEHandler.generateInfoForCapture(CI, *CV, CurInfo, PartialStruct);
- if (!CI->capturesThis())
- MappedVarSet.insert(CI->getCapturedVar());
- else
- MappedVarSet.insert(nullptr);
- if (CurInfo.BasePointers.empty() && !PartialStruct.Base.isValid())
- MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurInfo);
- // Generate correct mapping for variables captured by reference in
- // lambdas.
- if (CI->capturesVariable())
- MEHandler.generateInfoForLambdaCaptures(CI->getCapturedVar(), *CV,
- CurInfo, LambdaPointers);
- }
- // We expect to have at least an element of information for this capture.
- assert((!CurInfo.BasePointers.empty() || PartialStruct.Base.isValid()) &&
- "Non-existing map pointer for capture!");
- assert(CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
- CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
- CurInfo.BasePointers.size() == CurInfo.Types.size() &&
- CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
- "Inconsistent map information sizes!");
-
- // If there is an entry in PartialStruct it means we have a struct with
- // individual members mapped. Emit an extra combined entry.
- if (PartialStruct.Base.isValid()) {
- CombinedInfo.append(PartialStruct.PreliminaryMapData);
- MEHandler.emitCombinedEntry(
- CombinedInfo, CurInfo.Types, PartialStruct, nullptr,
- !PartialStruct.PreliminaryMapData.BasePointers.empty());
- }
-
- // We need to append the results of this capture to what we already have.
- CombinedInfo.append(CurInfo);
- }
- // Adjust MEMBER_OF flags for the lambdas captures.
- MEHandler.adjustMemberOfForLambdaCaptures(
- LambdaPointers, CombinedInfo.BasePointers, CombinedInfo.Pointers,
- CombinedInfo.Types);
- // Map any list items in a map clause that were not captures because they
- // weren't referenced within the construct.
- MEHandler.generateAllInfo(CombinedInfo, MappedVarSet);
-
- CGOpenMPRuntime::TargetDataInfo Info;
- // Fill up the arrays and create the arguments.
- emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder);
- bool EmitDebug =
- CGF.CGM.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo;
- OMPBuilder.emitOffloadingArraysArgument(CGF.Builder, Info.RTArgs, Info,
- EmitDebug,
- /*ForEndCall=*/false);
- InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
- InputInfo.BasePointersArray = Address(Info.RTArgs.BasePointersArray,
- CGF.VoidPtrTy, CGM.getPointerAlign());
- InputInfo.PointersArray = Address(Info.RTArgs.PointersArray, CGF.VoidPtrTy,
- CGM.getPointerAlign());
- InputInfo.SizesArray =
- Address(Info.RTArgs.SizesArray, CGF.Int64Ty, CGM.getPointerAlign());
- InputInfo.MappersArray =
- Address(Info.RTArgs.MappersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
- MapTypesArray = Info.RTArgs.MapTypesArray;
- MapNamesArray = Info.RTArgs.MapNamesArray;
- if (RequiresOuterTask)
- CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
- else
- emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
+ auto &&TargetThenGen = [this, OutlinedFn, &D, &CapturedVars,
+ RequiresOuterTask, &CS, OffloadingMandatory, Device,
+ OutlinedFnID, &InputInfo, &MapTypesArray,
+ &MapNamesArray, SizeEmitter](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+ emitTargetCallKernelLaunch(this, OutlinedFn, D, CapturedVars,
+ RequiresOuterTask, CS, OffloadingMandatory,
+ Device, OutlinedFnID, InputInfo, MapTypesArray,
+ MapNamesArray, SizeEmitter, CGF, CGM);
};
- auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
- CodeGenFunction &CGF, PrePostActionTy &) {
- if (RequiresOuterTask) {
- CodeGenFunction::OMPTargetDataInfo InputInfo;
- CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
- } else {
- emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
- }
- };
+ auto &&TargetElseGen =
+ [this, OutlinedFn, &D, &CapturedVars, RequiresOuterTask, &CS,
+ OffloadingMandatory](CodeGenFunction &CGF, PrePostActionTy &) {
+ emitTargetCallElse(this, OutlinedFn, D, CapturedVars, RequiresOuterTask,
+ CS, OffloadingMandatory, CGF);
+ };
// If we have a target function ID it means that we need to support
// offloading, otherwise, just execute on the host. We need to execute on host
@@ -10121,12 +9921,13 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
if (RequiresDeviceCodegen) {
const auto &E = *cast<OMPExecutableDirective>(S);
- auto EntryInfo =
- getTargetEntryUniqueInfo(CGM.getContext(), E.getBeginLoc(), ParentName);
+
+ llvm::TargetRegionEntryInfo EntryInfo = getEntryInfoFromPresumedLoc(
+ CGM, OMPBuilder, E.getBeginLoc(), ParentName);
// Is this a target region that should not be emitted as an entry point? If
// so just signal we are done with this target region.
- if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(EntryInfo))
+ if (!OMPBuilder.OffloadInfoManager.hasTargetRegionEntryInfo(EntryInfo))
return;
switch (E.getDirectiveKind()) {
@@ -10173,6 +9974,14 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
CGM, ParentName,
cast<OMPTargetTeamsDistributeParallelForSimdDirective>(E));
break;
+ case OMPD_target_teams_loop:
+ CodeGenFunction::EmitOMPTargetTeamsGenericLoopDeviceFunction(
+ CGM, ParentName, cast<OMPTargetTeamsGenericLoopDirective>(E));
+ break;
+ case OMPD_target_parallel_loop:
+ CodeGenFunction::EmitOMPTargetParallelGenericLoopDeviceFunction(
+ CGM, ParentName, cast<OMPTargetParallelGenericLoopDirective>(E));
+ break;
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
@@ -10272,10 +10081,10 @@ static bool isAssumedToBeNotEmitted(const ValueDecl *VD, bool IsDevice) {
bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
// If emitting code for the host, we do not process FD here. Instead we do
// the normal code generation.
- if (!CGM.getLangOpts().OpenMPIsDevice) {
+ if (!CGM.getLangOpts().OpenMPIsTargetDevice) {
if (const auto *FD = dyn_cast<FunctionDecl>(GD.getDecl()))
if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
- CGM.getLangOpts().OpenMPIsDevice))
+ CGM.getLangOpts().OpenMPIsTargetDevice))
return true;
return false;
}
@@ -10286,7 +10095,7 @@ bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
StringRef Name = CGM.getMangledName(GD);
scanForTargetRegionsFunctions(FD->getBody(), Name);
if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
- CGM.getLangOpts().OpenMPIsDevice))
+ CGM.getLangOpts().OpenMPIsTargetDevice))
return true;
}
@@ -10297,10 +10106,10 @@ bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
if (isAssumedToBeNotEmitted(cast<ValueDecl>(GD.getDecl()),
- CGM.getLangOpts().OpenMPIsDevice))
+ CGM.getLangOpts().OpenMPIsTargetDevice))
return true;
- if (!CGM.getLangOpts().OpenMPIsDevice)
+ if (!CGM.getLangOpts().OpenMPIsTargetDevice)
return false;
// Check if there are Ctors/Dtors in this declaration and look for target
@@ -10337,19 +10146,13 @@ bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr) {
if (CGM.getLangOpts().OMPTargetTriples.empty() &&
- !CGM.getLangOpts().OpenMPIsDevice)
- return;
-
- // If we have host/nohost variables, they do not need to be registered.
- std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(VD);
- if (DevTy && *DevTy != OMPDeclareTargetDeclAttr::DT_Any)
+ !CGM.getLangOpts().OpenMPIsTargetDevice)
return;
std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (!Res) {
- if (CGM.getLangOpts().OpenMPIsDevice) {
+ if (CGM.getLangOpts().OpenMPIsTargetDevice) {
// Register non-target variables being emitted in device code (debug info
// may cause this).
StringRef VarName = CGM.getMangledName(VD);
@@ -10357,66 +10160,29 @@ void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
}
return;
}
- // Register declare target variables.
- llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind Flags;
- StringRef VarName;
- int64_t VarSize;
- llvm::GlobalValue::LinkageTypes Linkage;
-
- if ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
- *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
- !HasRequiresUnifiedSharedMemory) {
- Flags = llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryTo;
- VarName = CGM.getMangledName(VD);
- if (VD->hasDefinition(CGM.getContext()) != VarDecl::DeclarationOnly) {
- VarSize =
- CGM.getContext().getTypeSizeInChars(VD->getType()).getQuantity();
- assert(VarSize != 0 && "Expected non-zero size of the variable");
- } else {
- VarSize = 0;
- }
- Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
- // Temp solution to prevent optimizations of the internal variables.
- if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
- // Do not create a "ref-variable" if the original is not also available
- // on the host.
- if (!OffloadEntriesInfoManager.hasDeviceGlobalVarEntryInfo(VarName))
- return;
- std::string RefName = getName({VarName, "ref"});
- if (!CGM.GetGlobalValue(RefName)) {
- llvm::Constant *AddrRef =
- OMPBuilder.getOrCreateInternalVariable(Addr->getType(), RefName);
- auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
- GVAddrRef->setConstant(/*Val=*/true);
- GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
- GVAddrRef->setInitializer(Addr);
- CGM.addCompilerUsedGlobal(GVAddrRef);
- }
- }
- } else {
- assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
- ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
- *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
- HasRequiresUnifiedSharedMemory)) &&
- "Declare target attribute must link or to with unified memory.");
- if (*Res == OMPDeclareTargetDeclAttr::MT_Link)
- Flags = llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryLink;
- else
- Flags = llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryTo;
- if (CGM.getLangOpts().OpenMPIsDevice) {
- VarName = Addr->getName();
- Addr = nullptr;
- } else {
- VarName = getAddrOfDeclareTargetVar(VD).getName();
- Addr = cast<llvm::Constant>(getAddrOfDeclareTargetVar(VD).getPointer());
- }
- VarSize = CGM.getPointerSize().getQuantity();
- Linkage = llvm::GlobalValue::WeakAnyLinkage;
- }
+ auto AddrOfGlobal = [&VD, this]() { return CGM.GetAddrOfGlobal(VD); };
+ auto LinkageForVariable = [&VD, this]() {
+ return CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
+ };
+
+ std::vector<llvm::GlobalVariable *> GeneratedRefs;
+ OMPBuilder.registerTargetGlobalVariable(
+ convertCaptureClause(VD), convertDeviceClause(VD),
+ VD->hasDefinition(CGM.getContext()) == VarDecl::DeclarationOnly,
+ VD->isExternallyVisible(),
+ getEntryInfoFromPresumedLoc(CGM, OMPBuilder,
+ VD->getCanonicalDecl()->getBeginLoc()),
+ CGM.getMangledName(VD), GeneratedRefs, CGM.getLangOpts().OpenMPSimd,
+ CGM.getLangOpts().OMPTargetTriples, AddrOfGlobal, LinkageForVariable,
+ CGM.getTypes().ConvertTypeForMem(
+ CGM.getContext().getPointerType(VD->getType())),
+ Addr);
+
+ for (auto *ref : GeneratedRefs)
+ CGM.addCompilerUsedGlobal(ref);
- OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
- VarName, Addr, VarSize, Flags, Linkage);
+ return;
}
bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
@@ -10514,19 +10280,19 @@ bool CGOpenMPRuntime::hasRequiresUnifiedSharedMemory() const {
CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
CodeGenModule &CGM)
: CGM(CGM) {
- if (CGM.getLangOpts().OpenMPIsDevice) {
+ if (CGM.getLangOpts().OpenMPIsTargetDevice) {
SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
}
}
CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
- if (CGM.getLangOpts().OpenMPIsDevice)
+ if (CGM.getLangOpts().OpenMPIsTargetDevice)
CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
}
bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
- if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
+ if (!CGM.getLangOpts().OpenMPIsTargetDevice || !ShouldMarkAsGlobal)
return true;
const auto *D = cast<FunctionDecl>(GD.getDecl());
@@ -10549,10 +10315,9 @@ llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
// If we don't have entries or if we are emitting code for the device, we
// don't need to do anything.
if (CGM.getLangOpts().OMPTargetTriples.empty() ||
- CGM.getLangOpts().OpenMPSimd || CGM.getLangOpts().OpenMPIsDevice ||
- (OffloadEntriesInfoManager.empty() &&
- !HasEmittedDeclareTargetRegion &&
- !HasEmittedTargetRegion))
+ CGM.getLangOpts().OpenMPSimd || CGM.getLangOpts().OpenMPIsTargetDevice ||
+ (OMPBuilder.OffloadInfoManager.empty() &&
+ !HasEmittedDeclareTargetRegion && !HasEmittedTargetRegion))
return nullptr;
// Create and register the function that handles the requires directives.
@@ -10573,9 +10338,8 @@ llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
// passed to the runtime. This avoids the runtime from throwing an error
// for mismatching requires clauses across compilation units that don't
// contain at least 1 target region.
- assert((HasEmittedTargetRegion ||
- HasEmittedDeclareTargetRegion ||
- !OffloadEntriesInfoManager.empty()) &&
+ assert((HasEmittedTargetRegion || HasEmittedDeclareTargetRegion ||
+ !OMPBuilder.OffloadInfoManager.empty()) &&
"Target or declare target region expected.");
if (HasRequiresUnifiedSharedMemory)
Flags = OMP_REQ_UNIFIED_SHARED_MEMORY;
@@ -10652,140 +10416,94 @@ void CGOpenMPRuntime::emitTargetDataCalls(
// off.
PrePostActionTy NoPrivAction;
- // Generate the code for the opening of the data environment. Capture all the
- // arguments of the runtime call by reference because they are used in the
- // closing of the region.
- auto &&BeginThenGen = [this, &D, Device, &Info,
- &CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
- // Fill up the arrays with all the mapped variables.
- MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+ InsertPointTy AllocaIP(CGF.AllocaInsertPt->getParent(),
+ CGF.AllocaInsertPt->getIterator());
+ InsertPointTy CodeGenIP(CGF.Builder.GetInsertBlock(),
+ CGF.Builder.GetInsertPoint());
+ llvm::OpenMPIRBuilder::LocationDescription OmpLoc(CodeGenIP);
+
+ llvm::Value *IfCondVal = nullptr;
+ if (IfCond)
+ IfCondVal = CGF.EvaluateExprAsBool(IfCond);
+
+ // Emit device ID if any.
+ llvm::Value *DeviceID = nullptr;
+ if (Device) {
+ DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
+ CGF.Int64Ty, /*isSigned=*/true);
+ } else {
+ DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
+ }
+ // Fill up the arrays with all the mapped variables.
+ MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
+ auto GenMapInfoCB =
+ [&](InsertPointTy CodeGenIP) -> llvm::OpenMPIRBuilder::MapInfosTy & {
+ CGF.Builder.restoreIP(CodeGenIP);
// Get map clause information.
MappableExprsHandler MEHandler(D, CGF);
MEHandler.generateAllInfo(CombinedInfo);
- // Fill up the arrays and create the arguments.
- emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
- /*IsNonContiguous=*/true);
-
- llvm::OpenMPIRBuilder::TargetDataRTArgs RTArgs;
- bool EmitDebug =
- CGF.CGM.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo;
- OMPBuilder.emitOffloadingArraysArgument(CGF.Builder, RTArgs, Info,
- EmitDebug);
-
- // Emit device ID if any.
- llvm::Value *DeviceID = nullptr;
- if (Device) {
- DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
- CGF.Int64Ty, /*isSigned=*/true);
- } else {
- DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
+ auto FillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
+ return emitMappingInformation(CGF, OMPBuilder, MapExpr);
+ };
+ if (CGM.getCodeGenOpts().getDebugInfo() !=
+ llvm::codegenoptions::NoDebugInfo) {
+ CombinedInfo.Names.resize(CombinedInfo.Exprs.size());
+ llvm::transform(CombinedInfo.Exprs, CombinedInfo.Names.begin(),
+ FillInfoMap);
}
- // Emit the number of elements in the offloading arrays.
- llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
- //
- // Source location for the ident struct
- llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
-
- llvm::Value *OffloadingArgs[] = {RTLoc,
- DeviceID,
- PointerNum,
- RTArgs.BasePointersArray,
- RTArgs.PointersArray,
- RTArgs.SizesArray,
- RTArgs.MapTypesArray,
- RTArgs.MapNamesArray,
- RTArgs.MappersArray};
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___tgt_target_data_begin_mapper),
- OffloadingArgs);
-
- // If device pointer privatization is required, emit the body of the region
- // here. It will have to be duplicated: with and without privatization.
- if (!Info.CaptureDeviceAddrMap.empty())
- CodeGen(CGF);
+ return CombinedInfo;
};
-
- // Generate code for the closing of the data region.
- auto &&EndThenGen = [this, Device, &Info, &D](CodeGenFunction &CGF,
- PrePostActionTy &) {
- assert(Info.isValid() && "Invalid data environment closing arguments.");
-
- llvm::OpenMPIRBuilder::TargetDataRTArgs RTArgs;
- bool EmitDebug =
- CGF.CGM.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo;
- OMPBuilder.emitOffloadingArraysArgument(CGF.Builder, RTArgs, Info,
- EmitDebug,
- /*ForEndCall=*/true);
-
- // Emit device ID if any.
- llvm::Value *DeviceID = nullptr;
- if (Device) {
- DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
- CGF.Int64Ty, /*isSigned=*/true);
- } else {
- DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
+ using BodyGenTy = llvm::OpenMPIRBuilder::BodyGenTy;
+ auto BodyCB = [&](InsertPointTy CodeGenIP, BodyGenTy BodyGenType) {
+ CGF.Builder.restoreIP(CodeGenIP);
+ switch (BodyGenType) {
+ case BodyGenTy::Priv:
+ if (!Info.CaptureDeviceAddrMap.empty())
+ CodeGen(CGF);
+ break;
+ case BodyGenTy::DupNoPriv:
+ if (!Info.CaptureDeviceAddrMap.empty()) {
+ CodeGen.setAction(NoPrivAction);
+ CodeGen(CGF);
+ }
+ break;
+ case BodyGenTy::NoPriv:
+ if (Info.CaptureDeviceAddrMap.empty()) {
+ CodeGen.setAction(NoPrivAction);
+ CodeGen(CGF);
+ }
+ break;
}
-
- // Emit the number of elements in the offloading arrays.
- llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
-
- // Source location for the ident struct
- llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
-
- llvm::Value *OffloadingArgs[] = {RTLoc,
- DeviceID,
- PointerNum,
- RTArgs.BasePointersArray,
- RTArgs.PointersArray,
- RTArgs.SizesArray,
- RTArgs.MapTypesArray,
- RTArgs.MapNamesArray,
- RTArgs.MappersArray};
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___tgt_target_data_end_mapper),
- OffloadingArgs);
+ return InsertPointTy(CGF.Builder.GetInsertBlock(),
+ CGF.Builder.GetInsertPoint());
};
- // If we need device pointer privatization, we need to emit the body of the
- // region with no privatization in the 'else' branch of the conditional.
- // Otherwise, we don't have to do anything.
- auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
- PrePostActionTy &) {
- if (!Info.CaptureDeviceAddrMap.empty()) {
- CodeGen.setAction(NoPrivAction);
- CodeGen(CGF);
+ auto DeviceAddrCB = [&](unsigned int I, llvm::Value *NewDecl) {
+ if (const ValueDecl *DevVD = CombinedInfo.DevicePtrDecls[I]) {
+ Info.CaptureDeviceAddrMap.try_emplace(DevVD, NewDecl);
}
};
- // We don't have to do anything to close the region if the if clause evaluates
- // to false.
- auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};
-
- if (IfCond) {
- emitIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
- } else {
- RegionCodeGenTy RCG(BeginThenGen);
- RCG(CGF);
- }
+ auto CustomMapperCB = [&](unsigned int I) {
+ llvm::Value *MFunc = nullptr;
+ if (CombinedInfo.Mappers[I]) {
+ Info.HasMapper = true;
+ MFunc = CGF.CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(
+ cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I]));
+ }
+ return MFunc;
+ };
- // If we don't require privatization of device pointers, we emit the body in
- // between the runtime calls. This avoids duplicating the body code.
- if (Info.CaptureDeviceAddrMap.empty()) {
- CodeGen.setAction(NoPrivAction);
- CodeGen(CGF);
- }
+ // Source location for the ident struct
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
- if (IfCond) {
- emitIfClause(CGF, IfCond, EndThenGen, EndElseGen);
- } else {
- RegionCodeGenTy RCG(EndThenGen);
- RCG(CGF);
- }
+ CGF.Builder.restoreIP(OMPBuilder.createTargetData(
+ OmpLoc, AllocaIP, CodeGenIP, DeviceID, IfCondVal, Info, GenMapInfoCB,
+ /*MapperFunc=*/nullptr, BodyCB, DeviceAddrCB, CustomMapperCB, RTLoc));
}
void CGOpenMPRuntime::emitTargetDataStandAloneCall(
@@ -10939,8 +10657,8 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
/*IsNonContiguous=*/true);
bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
D.hasClausesOfKind<OMPNowaitClause>();
- bool EmitDebug =
- CGF.CGM.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo;
+ bool EmitDebug = CGF.CGM.getCodeGenOpts().getDebugInfo() !=
+ llvm::codegenoptions::NoDebugInfo;
OMPBuilder.emitOffloadingArraysArgument(CGF.Builder, Info.RTArgs, Info,
EmitDebug,
/*ForEndCall=*/false);
@@ -11158,7 +10876,7 @@ static bool getAArch64MTV(QualType QT, ParamKindTy Kind) {
if (Kind == ParamKindTy::Uniform)
return false;
- if (Kind == ParamKindTy::LinearUVal || ParamKindTy::LinearRef)
+ if (Kind == ParamKindTy::LinearUVal || Kind == ParamKindTy::LinearRef)
return false;
if ((Kind == ParamKindTy::Linear || Kind == ParamKindTy::LinearVal) &&
@@ -11654,8 +11372,10 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
llvm::ArrayRef(FiniArgs));
}
-void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
- const OMPDependClause *C) {
+template <typename T>
+static void EmitDoacrossOrdered(CodeGenFunction &CGF, CodeGenModule &CGM,
+ const T *C, llvm::Value *ULoc,
+ llvm::Value *ThreadID) {
QualType Int64Ty =
CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
llvm::APInt Size(/*numBits=*/32, C->getNumLoops());
@@ -11672,21 +11392,35 @@ void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
/*Volatile=*/false, Int64Ty);
}
llvm::Value *Args[] = {
- emitUpdateLocation(CGF, C->getBeginLoc()),
- getThreadID(CGF, C->getBeginLoc()),
- CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
+ ULoc, ThreadID, CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
llvm::FunctionCallee RTLFn;
- if (C->getDependencyKind() == OMPC_DEPEND_source) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ OMPDoacrossKind<T> ODK;
+ if (ODK.isSource(C)) {
RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___kmpc_doacross_post);
} else {
- assert(C->getDependencyKind() == OMPC_DEPEND_sink);
+ assert(ODK.isSink(C) && "Expect sink modifier.");
RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___kmpc_doacross_wait);
}
CGF.EmitRuntimeCall(RTLFn, Args);
}
+void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
+ const OMPDependClause *C) {
+ return EmitDoacrossOrdered<OMPDependClause>(
+ CGF, CGM, C, emitUpdateLocation(CGF, C->getBeginLoc()),
+ getThreadID(CGF, C->getBeginLoc()));
+}
+
+void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
+ const OMPDoacrossClause *C) {
+ return EmitDoacrossOrdered<OMPDoacrossClause>(
+ CGF, CGM, C, emitUpdateLocation(CGF, C->getBeginLoc()),
+ getThreadID(CGF, C->getBeginLoc()));
+}
+
void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args) const {
@@ -12375,14 +12109,16 @@ void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate(
}
llvm::Function *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) {
llvm_unreachable("Not supported in SIMD-only mode");
}
llvm::Function *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) {
llvm_unreachable("Not supported in SIMD-only mode");
}
@@ -12671,6 +12407,11 @@ void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
llvm_unreachable("Not supported in SIMD-only mode");
}
+void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
+ const OMPDoacrossClause *C) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
const VarDecl *
CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
index e7c1a098c768..2ee2a39ba538 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -232,7 +232,7 @@ public:
/// as those marked as `omp declare target`.
class DisableAutoDeclareTargetRAII {
CodeGenModule &CGM;
- bool SavedShouldMarkAsGlobal;
+ bool SavedShouldMarkAsGlobal = false;
public:
DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
@@ -327,42 +327,6 @@ protected:
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
- /// Emits object of ident_t type with info for source location.
- /// \param Flags Flags for OpenMP location.
- /// \param EmitLoc emit source location with debug-info is off.
- ///
- llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
- unsigned Flags = 0, bool EmitLoc = false);
-
- /// Emit the number of teams for a target directive. Inspect the num_teams
- /// clause associated with a teams construct combined or closely nested
- /// with the target directive.
- ///
- /// Emit a team of size one for directives such as 'target parallel' that
- /// have no associated teams construct.
- ///
- /// Otherwise, return nullptr.
- const Expr *getNumTeamsExprForTargetDirective(CodeGenFunction &CGF,
- const OMPExecutableDirective &D,
- int32_t &DefaultVal);
- llvm::Value *emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
- const OMPExecutableDirective &D);
- /// Emit the number of threads for a target directive. Inspect the
- /// thread_limit clause associated with a teams construct combined or closely
- /// nested with the target directive.
- ///
- /// Emit the num_threads clause for directives such as 'target parallel' that
- /// have no associated teams construct.
- ///
- /// Otherwise, return nullptr.
- const Expr *
- getNumThreadsExprForTargetDirective(CodeGenFunction &CGF,
- const OMPExecutableDirective &D,
- int32_t &DefaultVal);
- llvm::Value *
- emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
- const OMPExecutableDirective &D);
-
/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
@@ -371,9 +335,11 @@ protected:
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
/// Get the function name of an outlined region.
- // The name can be customized depending on the target.
- //
- virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
+ std::string getOutlinedHelperName(StringRef Name) const;
+ std::string getOutlinedHelperName(CodeGenFunction &CGF) const;
+
+ /// Get the function name of a reduction function.
+ std::string getReductionFuncName(StringRef Name) const;
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
@@ -508,9 +474,6 @@ protected:
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
- /// Entity that registers the offloading constants that were emitted so
- /// far.
- llvm::OffloadEntriesInfoManager OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
@@ -552,10 +515,6 @@ protected:
/// Device routines are specific to the
bool HasEmittedDeclareTargetRegion = false;
- /// Loads all the offload entries information from the host IR
- /// metadata.
- void loadOffloadInfoMetadata();
-
/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
@@ -657,15 +616,6 @@ protected:
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
- /// Return the trip count of loops associated with constructs / 'target teams
- /// distribute' and 'teams distribute parallel for'. \param SizeEmitter Emits
- /// the int64 value for the number of iterations of the associated loop.
- llvm::Value *emitTargetNumIterationsCall(
- CodeGenFunction &CGF, const OMPExecutableDirective &D,
- llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
- const OMPLoopDirective &D)>
- SizeEmitter);
-
/// Emit update for lastprivate conditional data.
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
StringRef UniqueDeclName, LValue LVal,
@@ -692,8 +642,72 @@ public:
virtual ~CGOpenMPRuntime() {}
virtual void clear();
+ /// Emits object of ident_t type with info for source location.
+ /// \param Flags Flags for OpenMP location.
+ /// \param EmitLoc emit source location with debug-info is off.
+ ///
+ llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
+ unsigned Flags = 0, bool EmitLoc = false);
+
+ /// Emit the number of teams for a target directive. Inspect the num_teams
+ /// clause associated with a teams construct combined or closely nested
+ /// with the target directive.
+ ///
+ /// Emit a team of size one for directives such as 'target parallel' that
+ /// have no associated teams construct.
+ ///
+ /// Otherwise, return nullptr.
+ const Expr *getNumTeamsExprForTargetDirective(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ int32_t &DefaultVal);
+ llvm::Value *emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D);
+ /// Emit the number of threads for a target directive. Inspect the
+ /// thread_limit clause associated with a teams construct combined or closely
+ /// nested with the target directive.
+ ///
+ /// Emit the num_threads clause for directives such as 'target parallel' that
+ /// have no associated teams construct.
+ ///
+ /// Otherwise, return nullptr.
+ const Expr *
+ getNumThreadsExprForTargetDirective(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ int32_t &DefaultVal);
+ llvm::Value *
+ emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D);
+
+ /// Return the trip count of loops associated with constructs / 'target teams
+ /// distribute' and 'teams distribute parallel for'. \param SizeEmitter Emits
+ /// the int64 value for the number of iterations of the associated loop.
+ llvm::Value *emitTargetNumIterationsCall(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
+ const OMPLoopDirective &D)>
+ SizeEmitter);
+
/// Returns true if the current target is a GPU.
- virtual bool isTargetCodegen() const { return false; }
+ virtual bool isGPU() const { return false; }
+
+ /// Check if the variable length declaration is delayed:
+ virtual bool isDelayedVariableLengthDecl(CodeGenFunction &CGF,
+ const VarDecl *VD) const {
+ return false;
+ };
+
+ /// Get call to __kmpc_alloc_shared
+ virtual std::pair<llvm::Value *, llvm::Value *>
+ getKmpcAllocShared(CodeGenFunction &CGF, const VarDecl *VD) {
+ llvm_unreachable("not implemented");
+ }
+
+ /// Get call to __kmpc_free_shared
+ virtual void getKmpcFreeShared(
+ CodeGenFunction &CGF,
+ const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair) {
+ llvm_unreachable("not implemented");
+ }
/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
@@ -732,26 +746,30 @@ public:
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
+ /// \param CGF Reference to current CodeGenFunction.
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
+ /// \param CGF Reference to current CodeGenFunction.
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
@@ -1185,18 +1203,17 @@ public:
bool HasCancel = false);
/// Emits reduction function.
+ /// \param ReducerName Name of the function calling the reduction.
/// \param ArgsElemType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
- llvm::Function *emitReductionFunction(SourceLocation Loc,
- llvm::Type *ArgsElemType,
- ArrayRef<const Expr *> Privates,
- ArrayRef<const Expr *> LHSExprs,
- ArrayRef<const Expr *> RHSExprs,
- ArrayRef<const Expr *> ReductionOps);
+ llvm::Function *emitReductionFunction(
+ StringRef ReducerName, SourceLocation Loc, llvm::Type *ArgsElemType,
+ ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps);
/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
@@ -1441,9 +1458,9 @@ public:
bool SeparateBeginEndCalls)
: llvm::OpenMPIRBuilder::TargetDataInfo(RequiresDevicePointerInfo,
SeparateBeginEndCalls) {}
- /// Map between the a declaration of a capture and the corresponding base
- /// pointer address where the runtime returns the device pointers.
- llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;
+ /// Map between the a declaration of a capture and the corresponding new
+ /// llvm address where the runtime returns the device pointers.
+ llvm::DenseMap<const ValueDecl *, llvm::Value *> CaptureDeviceAddrMap;
};
/// Emit the target data mapping code associated with \a D.
@@ -1489,6 +1506,11 @@ public:
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C);
+ /// Emit code for doacross ordered directive with 'doacross' clause.
+ /// \param C 'doacross' clause with 'sink|source' dependence type.
+ virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
+ const OMPDoacrossClause *C);
+
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
@@ -1666,30 +1688,30 @@ public:
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
+ /// \param CGF Reference to current CodeGenFunction.
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Function *
- emitParallelOutlinedFunction(const OMPExecutableDirective &D,
- const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind,
- const RegionCodeGenTy &CodeGen) override;
+ llvm::Function *emitParallelOutlinedFunction(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
+ /// \param CGF Reference to current CodeGenFunction.
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Function *
- emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
- const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind,
- const RegionCodeGenTy &CodeGen) override;
+ llvm::Function *emitTeamsOutlinedFunction(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
@@ -2242,6 +2264,11 @@ public:
void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) override;
+ /// Emit code for doacross ordered directive with 'doacross' clause.
+ /// \param C 'doacross' clause with 'sink|source' dependence type.
+ void emitDoacrossOrdered(CodeGenFunction &CGF,
+ const OMPDoacrossClause *C) override;
+
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
@@ -2264,6 +2291,34 @@ public:
};
} // namespace CodeGen
+// Utility for openmp doacross clause kind
+namespace {
+template <typename T> class OMPDoacrossKind {
+public:
+ bool isSink(const T *) { return false; }
+ bool isSource(const T *) { return false; }
+};
+template <> class OMPDoacrossKind<OMPDependClause> {
+public:
+ bool isSink(const OMPDependClause *C) {
+ return C->getDependencyKind() == OMPC_DEPEND_sink;
+ }
+ bool isSource(const OMPDependClause *C) {
+ return C->getDependencyKind() == OMPC_DEPEND_source;
+ }
+};
+template <> class OMPDoacrossKind<OMPDoacrossClause> {
+public:
+ bool isSource(const OMPDoacrossClause *C) {
+ return C->getDependenceType() == OMPC_DOACROSS_source ||
+ C->getDependenceType() == OMPC_DOACROSS_source_omp_cur_iteration;
+ }
+ bool isSink(const OMPDoacrossClause *C) {
+ return C->getDependenceType() == OMPC_DOACROSS_sink ||
+ C->getDependenceType() == OMPC_DOACROSS_sink_omp_cur_iteration;
+ }
+};
+} // namespace
} // namespace clang
#endif
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index e8c5f04db49f..62aacb9e24d6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -190,7 +190,7 @@ static RecordDecl *buildRecordForGlobalizedVars(
IntegerLiteral::Create(C, Align,
C.getIntTypeForBitwidth(32, /*Signed=*/0),
SourceLocation()),
- {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
+ {}, AlignedAttr::GNU_aligned));
}
GlobalizedRD->addDecl(Field);
MappedDeclsFields.try_emplace(VD, Field);
@@ -205,6 +205,7 @@ class CheckVarsEscapingDeclContext final
CodeGenFunction &CGF;
llvm::SetVector<const ValueDecl *> EscapedDecls;
llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
+ llvm::SetVector<const ValueDecl *> DelayedVariableLengthDecls;
llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
RecordDecl *GlobalizedRD = nullptr;
llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
@@ -221,10 +222,12 @@ class CheckVarsEscapingDeclContext final
if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
return;
// Variables captured by value must be globalized.
+ bool IsCaptured = false;
if (auto *CSI = CGF.CapturedStmtInfo) {
if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
// Check if need to capture the variable that was already captured by
// value in the outer region.
+ IsCaptured = true;
if (!IsForCombinedParallelRegion) {
if (!FD->hasAttrs())
return;
@@ -251,9 +254,14 @@ class CheckVarsEscapingDeclContext final
VD->getType()->isReferenceType())
// Do not globalize variables with reference type.
return;
- if (VD->getType()->isVariablyModifiedType())
- EscapedVariableLengthDecls.insert(VD);
- else
+ if (VD->getType()->isVariablyModifiedType()) {
+ // If not captured at the target region level then mark the escaped
+ // variable as delayed.
+ if (IsCaptured)
+ EscapedVariableLengthDecls.insert(VD);
+ else
+ DelayedVariableLengthDecls.insert(VD);
+ } else
EscapedDecls.insert(VD);
}
@@ -485,10 +493,7 @@ public:
const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
assert(GlobalizedRD &&
"Record for globalized variables must be generated already.");
- auto I = MappedDeclsFields.find(VD);
- if (I == MappedDeclsFields.end())
- return nullptr;
- return I->getSecond();
+ return MappedDeclsFields.lookup(VD);
}
/// Returns the list of the escaped local variables/parameters.
@@ -507,6 +512,12 @@ public:
ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
return EscapedVariableLengthDecls.getArrayRef();
}
+
+ /// Returns the list of the delayed variables with the variably modified
+ /// types.
+ ArrayRef<const ValueDecl *> getDelayedVariableLengthDecls() const {
+ return DelayedVariableLengthDecls.getArrayRef();
+ }
};
} // anonymous namespace
@@ -528,6 +539,7 @@ static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
CGBuilderTy &Bld = CGF.Builder;
unsigned LaneIDBits =
llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
+ assert(LaneIDBits < 32 && "Invalid LaneIDBits size in NVPTX device.");
unsigned LaneIDMask = ~0u >> (32u - LaneIDBits);
auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
@@ -655,6 +667,8 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_target:
case OMPD_target_teams:
return hasNestedSPMDDirective(Ctx, D);
+ case OMPD_target_teams_loop:
+ case OMPD_target_parallel_loop:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
@@ -859,13 +873,12 @@ void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM) {
- llvm::OpenMPIRBuilderConfig Config(CGM.getLangOpts().OpenMPIsDevice, true,
- hasRequiresUnifiedSharedMemory(),
+ llvm::OpenMPIRBuilderConfig Config(CGM.getLangOpts().OpenMPIsTargetDevice,
+ isGPU(), hasRequiresUnifiedSharedMemory(),
CGM.getLangOpts().OpenMPOffloadMandatory);
OMPBuilder.setConfig(Config);
- OffloadEntriesInfoManager.setConfig(Config);
- if (!CGM.getLangOpts().OpenMPIsDevice)
+ if (!CGM.getLangOpts().OpenMPIsTargetDevice)
llvm_unreachable("OpenMP can only handle device code.");
llvm::OpenMPIRBuilder &OMPBuilder = getOMPBuilder();
@@ -906,14 +919,15 @@ void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
SourceLocation Loc) {}
llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) {
// Emit target region as a standalone region.
bool PrevIsInTTDRegion = IsInTTDRegion;
IsInTTDRegion = false;
auto *OutlinedFun =
cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
- D, ThreadIDVar, InnermostKind, CodeGen));
+ CGF, D, ThreadIDVar, InnermostKind, CodeGen));
IsInTTDRegion = PrevIsInTTDRegion;
if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD) {
llvm::Function *WrapperFun =
@@ -963,8 +977,9 @@ getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
}
llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) {
SourceLocation Loc = D.getBeginLoc();
const RecordDecl *GlobalizedRD = nullptr;
@@ -1025,7 +1040,7 @@ llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
} Action(Loc, GlobalizedRD, MappedDeclsFields);
CodeGen.setAction(Action);
llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
- D, ThreadIDVar, InnermostKind, CodeGen);
+ CGF, D, ThreadIDVar, InnermostKind, CodeGen);
return OutlinedFun;
}
@@ -1083,41 +1098,66 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
if (auto *DI = CGF.getDebugInfo())
VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
}
- for (const auto *VD : I->getSecond().EscapedVariableLengthDecls) {
- // Use actual memory size of the VLA object including the padding
- // for alignment purposes.
- llvm::Value *Size = CGF.getTypeSize(VD->getType());
- CharUnits Align = CGM.getContext().getDeclAlign(VD);
- Size = Bld.CreateNUWAdd(
- Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
- llvm::Value *AlignVal =
- llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
-
- Size = Bld.CreateUDiv(Size, AlignVal);
- Size = Bld.CreateNUWMul(Size, AlignVal);
- // Allocate space for this VLA object to be globalized.
- llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
- llvm::CallBase *VoidPtr =
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_alloc_shared),
- AllocArgs, VD->getName());
- VoidPtr->addRetAttr(
- llvm::Attribute::get(CGM.getLLVMContext(), llvm::Attribute::Alignment,
- CGM.getContext().getTargetInfo().getNewAlign()));
-
- I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(
- std::pair<llvm::Value *, llvm::Value *>(
- {VoidPtr, CGF.getTypeSize(VD->getType())}));
- LValue Base = CGF.MakeAddrLValue(VoidPtr, VD->getType(),
+ for (const auto *ValueD : I->getSecond().EscapedVariableLengthDecls) {
+ const auto *VD = cast<VarDecl>(ValueD);
+ std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
+ getKmpcAllocShared(CGF, VD);
+ I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(AddrSizePair);
+ LValue Base = CGF.MakeAddrLValue(AddrSizePair.first, VD->getType(),
CGM.getContext().getDeclAlign(VD),
AlignmentSource::Decl);
- I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
- Base.getAddress(CGF));
+ I->getSecond().MappedParams->setVarAddr(CGF, VD, Base.getAddress(CGF));
}
I->getSecond().MappedParams->apply(CGF);
}
+bool CGOpenMPRuntimeGPU::isDelayedVariableLengthDecl(CodeGenFunction &CGF,
+ const VarDecl *VD) const {
+ const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
+ if (I == FunctionGlobalizedDecls.end())
+ return false;
+
+ // Check variable declaration is delayed:
+ return llvm::is_contained(I->getSecond().DelayedVariableLengthDecls, VD);
+}
+
+std::pair<llvm::Value *, llvm::Value *>
+CGOpenMPRuntimeGPU::getKmpcAllocShared(CodeGenFunction &CGF,
+ const VarDecl *VD) {
+ CGBuilderTy &Bld = CGF.Builder;
+
+ // Compute size and alignment.
+ llvm::Value *Size = CGF.getTypeSize(VD->getType());
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ Size = Bld.CreateNUWAdd(
+ Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
+ llvm::Value *AlignVal =
+ llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
+ Size = Bld.CreateUDiv(Size, AlignVal);
+ Size = Bld.CreateNUWMul(Size, AlignVal);
+
+ // Allocate space for this VLA object to be globalized.
+ llvm::Value *AllocArgs[] = {Size};
+ llvm::CallBase *VoidPtr =
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_alloc_shared),
+ AllocArgs, VD->getName());
+ VoidPtr->addRetAttr(llvm::Attribute::get(
+ CGM.getLLVMContext(), llvm::Attribute::Alignment, Align.getQuantity()));
+
+ return std::make_pair(VoidPtr, Size);
+}
+
+void CGOpenMPRuntimeGPU::getKmpcFreeShared(
+ CodeGenFunction &CGF,
+ const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair) {
+ // Deallocate the memory for each globalized VLA object
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_free_shared),
+ {AddrSizePair.first, AddrSizePair.second});
+}
+
void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
bool WithSPMDCheck) {
if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
@@ -1126,8 +1166,9 @@ void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
if (I != FunctionGlobalizedDecls.end()) {
- // Deallocate the memory for each globalized VLA object
- for (auto AddrSizePair :
+ // Deallocate the memory for each globalized VLA object that was
+ // globalized in the prolog (i.e. emitGenericVarsProlog).
+ for (const auto &AddrSizePair :
llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_free_shared),
@@ -1555,10 +1596,9 @@ static void emitReductionListCopy(
case RemoteLaneToThread: {
// Step 1.1: Get the address for the src element in the Reduce list.
Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
- SrcElementAddr =
- CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
- SrcElementPtrAddr, PrivateLlvmPtrType),
- PrivatePtrType->castAs<PointerType>());
+ SrcElementAddr = CGF.EmitLoadOfPointer(
+ SrcElementPtrAddr.withElementType(PrivateLlvmPtrType),
+ PrivatePtrType->castAs<PointerType>());
// Step 1.2: Create a temporary to store the element in the destination
// Reduce list.
@@ -1572,27 +1612,24 @@ static void emitReductionListCopy(
case ThreadCopy: {
// Step 1.1: Get the address for the src element in the Reduce list.
Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
- SrcElementAddr =
- CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
- SrcElementPtrAddr, PrivateLlvmPtrType),
- PrivatePtrType->castAs<PointerType>());
+ SrcElementAddr = CGF.EmitLoadOfPointer(
+ SrcElementPtrAddr.withElementType(PrivateLlvmPtrType),
+ PrivatePtrType->castAs<PointerType>());
// Step 1.2: Get the address for dest element. The destination
// element has already been created on the thread's stack.
DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
- DestElementAddr =
- CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
- DestElementPtrAddr, PrivateLlvmPtrType),
- PrivatePtrType->castAs<PointerType>());
+ DestElementAddr = CGF.EmitLoadOfPointer(
+ DestElementPtrAddr.withElementType(PrivateLlvmPtrType),
+ PrivatePtrType->castAs<PointerType>());
break;
}
case ThreadToScratchpad: {
// Step 1.1: Get the address for the src element in the Reduce list.
Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
- SrcElementAddr =
- CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
- SrcElementPtrAddr, PrivateLlvmPtrType),
- PrivatePtrType->castAs<PointerType>());
+ SrcElementAddr = CGF.EmitLoadOfPointer(
+ SrcElementPtrAddr.withElementType(PrivateLlvmPtrType),
+ PrivatePtrType->castAs<PointerType>());
// Step 1.2: Get the address for dest element:
// address = base + index * ElementSizeInChars.
@@ -1634,10 +1671,10 @@ static void emitReductionListCopy(
// Regardless of src and dest of copy, we emit the load of src
// element as this is required in all directions
- SrcElementAddr = Bld.CreateElementBitCast(
- SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
- DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
- SrcElementAddr.getElementType());
+ SrcElementAddr = SrcElementAddr.withElementType(
+ CGF.ConvertTypeForMem(Private->getType()));
+ DestElementAddr =
+ DestElementAddr.withElementType(SrcElementAddr.getElementType());
// Now that all active lanes have read the element in the
// Reduce list, shuffle over the value from the remote lane.
@@ -1866,8 +1903,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
// elemptr = ((CopyType*)(elemptrptr)) + I
- Address ElemPtr(ElemPtrPtr, CGF.Int8Ty, Align);
- ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
+ Address ElemPtr(ElemPtrPtr, CopyType, Align);
if (NumIters > 1)
ElemPtr = Bld.CreateGEP(ElemPtr, Cnt);
@@ -1941,8 +1977,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
- Address TargetElemPtr(TargetElemPtrVal, CGF.Int8Ty, Align);
- TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
+ Address TargetElemPtr(TargetElemPtrVal, CopyType, Align);
if (NumIters > 1)
TargetElemPtr = Bld.CreateGEP(TargetElemPtr, Cnt);
@@ -2373,8 +2408,7 @@ static llvm::Value *emitListToGlobalReduceFunction(
Address GlobAddr = GlobLVal.getAddress(CGF);
llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
- llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
- CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
+ CGF.EmitStoreOfScalar(BufferPtr, Elem, /*Volatile=*/false, C.VoidPtrTy);
if ((*IPriv)->getType()->isVariablyModifiedType()) {
// Store array size.
++Idx;
@@ -2390,8 +2424,7 @@ static llvm::Value *emitListToGlobalReduceFunction(
}
// Call reduce_function(GlobalReduceList, ReduceList)
- llvm::Value *GlobalReduceList =
- CGF.EmitCastToVoidPtr(ReductionList.getPointer());
+ llvm::Value *GlobalReduceList = ReductionList.getPointer();
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
@@ -2583,8 +2616,7 @@ static llvm::Value *emitGlobalToListReduceFunction(
Address GlobAddr = GlobLVal.getAddress(CGF);
llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
- llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
- CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
+ CGF.EmitStoreOfScalar(BufferPtr, Elem, /*Volatile=*/false, C.VoidPtrTy);
if ((*IPriv)->getType()->isVariablyModifiedType()) {
// Store array size.
++Idx;
@@ -2600,8 +2632,7 @@ static llvm::Value *emitGlobalToListReduceFunction(
}
// Call reduce_function(ReduceList, GlobalReduceList)
- llvm::Value *GlobalReduceList =
- CGF.EmitCastToVoidPtr(ReductionList.getPointer());
+ llvm::Value *GlobalReduceList = ReductionList.getPointer();
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
@@ -2923,9 +2954,9 @@ void CGOpenMPRuntimeGPU::emitReduction(
llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
ReductionList.getPointer(), CGF.VoidPtrTy);
- llvm::Function *ReductionFn =
- emitReductionFunction(Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
- Privates, LHSExprs, RHSExprs, ReductionOps);
+ llvm::Function *ReductionFn = emitReductionFunction(
+ CGF.CurFn->getName(), Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
+ Privates, LHSExprs, RHSExprs, ReductionOps);
llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
@@ -3085,17 +3116,16 @@ CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
unsigned NativePointeeAddrSpace =
CGF.getTypes().getTargetAddressSpace(NativePointeeTy);
QualType TargetTy = TargetParam->getType();
- llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
- LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
+ llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(LocalAddr, /*Volatile=*/false,
+ TargetTy, SourceLocation());
// First cast to generic.
TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TargetAddr, llvm::PointerType::getWithSamePointeeType(
- cast<llvm::PointerType>(TargetAddr->getType()), /*AddrSpace=*/0));
+ TargetAddr,
+ llvm::PointerType::get(CGF.getLLVMContext(), /*AddrSpace=*/0));
// Cast from generic to native address space.
TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TargetAddr, llvm::PointerType::getWithSamePointeeType(
- cast<llvm::PointerType>(TargetAddr->getType()),
- NativePointeeAddrSpace));
+ TargetAddr,
+ llvm::PointerType::get(CGF.getLLVMContext(), NativePointeeAddrSpace));
Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
NativeParamType);
@@ -3120,8 +3150,8 @@ void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
continue;
}
llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- NativeArg, llvm::PointerType::getWithSamePointeeType(
- cast<llvm::PointerType>(NativeArg->getType()), /*AddrSpace*/ 0));
+ NativeArg,
+ llvm::PointerType::get(CGF.getLLVMContext(), /*AddrSpace*/ 0));
TargetArgs.emplace_back(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
}
@@ -3292,7 +3322,10 @@ void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
TeamAndReductions.second.clear();
ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
VarChecker.getEscapedVariableLengthDecls();
- if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
+ ArrayRef<const ValueDecl *> DelayedVariableLengthDecls =
+ VarChecker.getDelayedVariableLengthDecls();
+ if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty() &&
+ DelayedVariableLengthDecls.empty())
return;
auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
I->getSecond().MappedParams =
@@ -3302,6 +3335,8 @@ void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
VarChecker.getEscapedParameters().end());
I->getSecond().EscapedVariableLengthDecls.append(
EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
+ I->getSecond().DelayedVariableLengthDecls.append(
+ DelayedVariableLengthDecls.begin(), DelayedVariableLengthDecls.end());
DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
assert(VD->isCanonicalDecl() && "Expected canonical declaration");
@@ -3352,7 +3387,7 @@ Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), VarTy, /*isConstant=*/false,
- llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
+ llvm::GlobalValue::InternalLinkage, llvm::PoisonValue::get(VarTy),
VD->getName(),
/*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(AS));
@@ -3579,6 +3614,8 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
case CudaArch::GFX90a:
case CudaArch::GFX90c:
case CudaArch::GFX940:
+ case CudaArch::GFX941:
+ case CudaArch::GFX942:
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
@@ -3594,6 +3631,8 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
case CudaArch::GFX1101:
case CudaArch::GFX1102:
case CudaArch::GFX1103:
+ case CudaArch::GFX1150:
+ case CudaArch::GFX1151:
case CudaArch::Generic:
case CudaArch::UNUSED:
case CudaArch::UNKNOWN:
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
index 75d140205773..dddfe5a94dcc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
@@ -110,45 +110,7 @@ private:
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
- /// Emits code for parallel or serial call of the \a OutlinedFn with
- /// variables captured in a record which address is stored in \a
- /// CapturedStruct.
- /// This call is for the Non-SPMD Execution Mode.
- /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
- /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
- /// \param CapturedVars A pointer to the record with the references to
- /// variables used in \a OutlinedFn function.
- /// \param IfCond Condition in the associated 'if' clause, if it was
- /// specified, nullptr otherwise.
- void emitNonSPMDParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Value *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond);
-
- /// Emits code for parallel or serial call of the \a OutlinedFn with
- /// variables captured in a record which address is stored in \a
- /// CapturedStruct.
- /// This call is for a parallel directive within an SPMD target directive.
- /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
- /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
- /// \param CapturedVars A pointer to the record with the references to
- /// variables used in \a OutlinedFn function.
- /// \param IfCond Condition in the associated 'if' clause, if it was
- /// specified, nullptr otherwise.
- ///
- void emitSPMDParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond);
-
protected:
- /// Get the function name of an outlined region.
- // The name can be customized depending on the target.
- //
- StringRef getOutlinedHelperName() const override {
- return "__omp_outlined__";
- }
-
/// Check if the default location must be constant.
/// Constant for NVPTX for better optimization.
bool isDefaultLocationConstant() const override { return true; }
@@ -157,12 +119,25 @@ public:
explicit CGOpenMPRuntimeGPU(CodeGenModule &CGM);
void clear() override;
- bool isTargetCodegen() const override { return true; };
+ bool isGPU() const override { return true; };
/// Declare generalized virtual functions which need to be defined
/// by all specializations of OpenMPGPURuntime Targets like AMDGCN
/// and NVPTX.
+ /// Check if the variable length declaration is delayed:
+ bool isDelayedVariableLengthDecl(CodeGenFunction &CGF,
+ const VarDecl *VD) const override;
+
+ /// Get call to __kmpc_alloc_shared
+ std::pair<llvm::Value *, llvm::Value *>
+ getKmpcAllocShared(CodeGenFunction &CGF, const VarDecl *VD) override;
+
+ /// Get call to __kmpc_free_shared
+ void getKmpcFreeShared(
+ CodeGenFunction &CGF,
+ const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair) override;
+
/// Get the GPU warp size.
llvm::Value *getGPUWarpSize(CodeGenFunction &CGF);
@@ -197,31 +172,31 @@ public:
// directive.
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
+ /// \param CGF Reference to current CodeGenFunction.
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Function *
- emitParallelOutlinedFunction(const OMPExecutableDirective &D,
- const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind,
- const RegionCodeGenTy &CodeGen) override;
+ llvm::Function *emitParallelOutlinedFunction(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) override;
/// Emits inlined function for the specified OpenMP teams
// directive.
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
+ /// \param CGF Reference to current CodeGenFunction.
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Function *
- emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
- const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind,
- const RegionCodeGenTy &CodeGen) override;
+ llvm::Function *emitTeamsOutlinedFunction(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) override;
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
@@ -294,12 +269,6 @@ public:
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options) override;
- /// Returns specified OpenMP runtime function for the current OpenMP
- /// implementation. Specialized for the NVPTX device.
- /// \param Function OpenMP runtime function.
- /// \return Specified function.
- llvm::FunctionCallee createNVPTXRuntimeFunction(unsigned Function);
-
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
@@ -403,6 +372,7 @@ private:
DeclToAddrMapTy LocalVarData;
EscapedParamsTy EscapedParameters;
llvm::SmallVector<const ValueDecl*, 4> EscapedVariableLengthDecls;
+ llvm::SmallVector<const ValueDecl *, 4> DelayedVariableLengthDecls;
llvm::SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4>
EscapedVariableLengthDeclsAddrs;
std::unique_ptr<CodeGenFunction::OMPMapVars> MappedParams;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 596f0bd33204..888b7ddcccd3 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -182,7 +182,7 @@ struct CGRecordLowering {
llvm::Type *StorageType);
/// Lowers an ASTRecordLayout to a llvm type.
void lower(bool NonVirtualBaseType);
- void lowerUnion();
+ void lowerUnion(bool isNoUniqueAddress);
void accumulateFields();
void accumulateBitFields(RecordDecl::field_iterator Field,
RecordDecl::field_iterator FieldEnd);
@@ -280,7 +280,7 @@ void CGRecordLowering::lower(bool NVBaseType) {
// CodeGenTypes::ComputeRecordLayout.
CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
if (D->isUnion()) {
- lowerUnion();
+ lowerUnion(NVBaseType);
computeVolatileBitfields();
return;
}
@@ -308,8 +308,9 @@ void CGRecordLowering::lower(bool NVBaseType) {
computeVolatileBitfields();
}
-void CGRecordLowering::lowerUnion() {
- CharUnits LayoutSize = Layout.getSize();
+void CGRecordLowering::lowerUnion(bool isNoUniqueAddress) {
+ CharUnits LayoutSize =
+ isNoUniqueAddress ? Layout.getDataSize() : Layout.getSize();
llvm::Type *StorageType = nullptr;
bool SeenNamedMember = false;
// Iterate through the fields setting bitFieldInfo and the Fields array. Also
@@ -365,7 +366,12 @@ void CGRecordLowering::lowerUnion() {
FieldTypes.push_back(StorageType);
appendPaddingBytes(LayoutSize - getSize(StorageType));
// Set packed if we need it.
- if (LayoutSize % getAlignment(StorageType))
+ const auto StorageAlignment = getAlignment(StorageType);
+ assert((Layout.getSize() % StorageAlignment == 0 ||
+ Layout.getDataSize() % StorageAlignment) &&
+ "Union's standard layout and no_unique_address layout must agree on "
+ "packedness");
+ if (Layout.getDataSize() % StorageAlignment)
Packed = true;
}
@@ -379,9 +385,14 @@ void CGRecordLowering::accumulateFields() {
for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
accumulateBitFields(Start, Field);
} else if (!Field->isZeroSize(Context)) {
+ // Use base subobject layout for the potentially-overlapping field,
+ // as it is done in RecordLayoutBuilder
Members.push_back(MemberInfo(
bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
- getStorageType(*Field), *Field));
+ Field->isPotentiallyOverlapping()
+ ? getStorageType(Field->getType()->getAsCXXRecordDecl())
+ : getStorageType(*Field),
+ *Field));
++Field;
} else {
++Field;
@@ -882,7 +893,7 @@ CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
// If we're in C++, compute the base subobject type.
llvm::StructType *BaseTy = nullptr;
- if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
+ if (isa<CXXRecordDecl>(D)) {
BaseTy = Ty;
if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
index 248ffb544014..2184b8600d76 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
@@ -24,6 +24,8 @@
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
@@ -414,19 +416,22 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
break;
case Stmt::OMPTeamsGenericLoopDirectiveClass:
- llvm_unreachable("teams loop directive not supported yet.");
+ EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
break;
case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
- llvm_unreachable("target teams loop directive not supported yet.");
+ EmitOMPTargetTeamsGenericLoopDirective(
+ cast<OMPTargetTeamsGenericLoopDirective>(*S));
break;
case Stmt::OMPParallelGenericLoopDirectiveClass:
- llvm_unreachable("parallel loop directive not supported yet.");
+ EmitOMPParallelGenericLoopDirective(
+ cast<OMPParallelGenericLoopDirective>(*S));
break;
case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
- llvm_unreachable("target parallel loop directive not supported yet.");
+ EmitOMPTargetParallelGenericLoopDirective(
+ cast<OMPTargetParallelGenericLoopDirective>(*S));
break;
case Stmt::OMPParallelMaskedDirectiveClass:
- llvm_unreachable("parallel masked directive not supported yet.");
+ EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
break;
}
}
@@ -2191,9 +2196,9 @@ std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
Ty = llvm::IntegerType::get(getLLVMContext(), Size);
- return {Builder.CreateLoad(Builder.CreateElementBitCast(
- InputValue.getAddress(*this), Ty)),
- nullptr};
+ return {
+ Builder.CreateLoad(InputValue.getAddress(*this).withElementType(Ty)),
+ nullptr};
}
}
@@ -2327,6 +2332,92 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
}
}
+static void
+EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
+ const llvm::ArrayRef<llvm::Value *> RegResults,
+ const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
+ const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
+ const llvm::ArrayRef<LValue> ResultRegDests,
+ const llvm::ArrayRef<QualType> ResultRegQualTys,
+ const llvm::BitVector &ResultTypeRequiresCast,
+ const llvm::BitVector &ResultRegIsFlagReg) {
+ CGBuilderTy &Builder = CGF.Builder;
+ CodeGenModule &CGM = CGF.CGM;
+ llvm::LLVMContext &CTX = CGF.getLLVMContext();
+
+ assert(RegResults.size() == ResultRegTypes.size());
+ assert(RegResults.size() == ResultTruncRegTypes.size());
+ assert(RegResults.size() == ResultRegDests.size());
+ // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
+ // in which case its size may grow.
+ assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
+ assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
+
+ for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
+ llvm::Value *Tmp = RegResults[i];
+ llvm::Type *TruncTy = ResultTruncRegTypes[i];
+
+ if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
+ // Target must guarantee the Value `Tmp` here is lowered to a boolean
+ // value.
+ llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
+ llvm::Value *IsBooleanValue =
+ Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
+ llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
+ Builder.CreateCall(FnAssume, IsBooleanValue);
+ }
+
+ // If the result type of the LLVM IR asm doesn't match the result type of
+ // the expression, do the conversion.
+ if (ResultRegTypes[i] != TruncTy) {
+
+ // Truncate the integer result to the right size, note that TruncTy can be
+ // a pointer.
+ if (TruncTy->isFloatingPointTy())
+ Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
+ else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
+ uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
+ Tmp = Builder.CreateTrunc(
+ Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
+ Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
+ } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
+ uint64_t TmpSize =
+ CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
+ Tmp = Builder.CreatePtrToInt(
+ Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isIntegerTy()) {
+ Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isVectorTy()) {
+ Tmp = Builder.CreateBitCast(Tmp, TruncTy);
+ }
+ }
+
+ LValue Dest = ResultRegDests[i];
+ // ResultTypeRequiresCast elements correspond to the first
+ // ResultTypeRequiresCast.size() elements of RegResults.
+ if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
+ unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
+ Address A = Dest.getAddress(CGF).withElementType(ResultRegTypes[i]);
+ if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
+ Builder.CreateStore(Tmp, A);
+ continue;
+ }
+
+ QualType Ty =
+ CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
+ if (Ty.isNull()) {
+ const Expr *OutExpr = S.getOutputExpr(i);
+ CGM.getDiags().Report(OutExpr->getExprLoc(),
+ diag::err_store_value_to_reg);
+ return;
+ }
+ Dest = CGF.MakeAddrLValue(A, Ty);
+ }
+ CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
+ }
+}
+
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Pop all cleanup blocks at the end of the asm statement.
CodeGenFunction::RunCleanupsScope Cleanups(*this);
@@ -2487,8 +2578,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Otherwise there will be a mis-match if the matrix is also an
// input-argument which is represented as vector.
if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
- DestAddr = Builder.CreateElementBitCast(
- DestAddr, ConvertType(OutExpr->getType()));
+ DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
ArgTypes.push_back(DestAddr.getType());
ArgElemTypes.push_back(DestAddr.getElementType());
@@ -2627,7 +2717,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
SmallVector<llvm::BasicBlock *, 16> Transfer;
llvm::BasicBlock *Fallthrough = nullptr;
bool IsGCCAsmGoto = false;
- if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
+ if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
IsGCCAsmGoto = GS->isAsmGoto();
if (IsGCCAsmGoto) {
for (const auto *E : GS->labels()) {
@@ -2690,7 +2780,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
"unwind clobber can't be used with asm goto");
// Add machine specific clobbers
- std::string MachineClobbers = getTarget().getClobbers();
+ std::string_view MachineClobbers = getTarget().getClobbers();
if (!MachineClobbers.empty()) {
if (!Constraints.empty())
Constraints += ',';
@@ -2721,13 +2811,40 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
FTy, AsmString, Constraints, HasSideEffect,
/* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
std::vector<llvm::Value*> RegResults;
+ llvm::CallBrInst *CBR;
+ llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
+ CBRRegResults;
if (IsGCCAsmGoto) {
- llvm::CallBrInst *Result =
- Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
+ CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
EmitBlock(Fallthrough);
- UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
- ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
- ResultRegTypes, ArgElemTypes, *this, RegResults);
+ UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone,
+ InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
+ *this, RegResults);
+ // Because we are emitting code top to bottom, we don't have enough
+ // information at this point to know precisely whether we have a critical
+ // edge. If we have outputs, split all indirect destinations.
+ if (!RegResults.empty()) {
+ unsigned i = 0;
+ for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
+ llvm::Twine SynthName = Dest->getName() + ".split";
+ llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
+ llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
+ Builder.SetInsertPoint(SynthBB);
+
+ if (ResultRegTypes.size() == 1) {
+ CBRRegResults[SynthBB].push_back(CBR);
+ } else {
+ for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
+ llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
+ CBRRegResults[SynthBB].push_back(Tmp);
+ }
+ }
+
+ EmitBranch(Dest);
+ EmitBlock(SynthBB);
+ CBR->setIndirectDest(i++, SynthBB);
+ }
+ }
} else if (HasUnwindClobber) {
llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
@@ -2736,79 +2853,26 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
} else {
llvm::CallInst *Result =
Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
- UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
- ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
- ResultRegTypes, ArgElemTypes, *this, RegResults);
+ UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
+ InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
+ *this, RegResults);
}
- assert(RegResults.size() == ResultRegTypes.size());
- assert(RegResults.size() == ResultTruncRegTypes.size());
- assert(RegResults.size() == ResultRegDests.size());
- // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
- // in which case its size may grow.
- assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
- assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
- for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
- llvm::Value *Tmp = RegResults[i];
- llvm::Type *TruncTy = ResultTruncRegTypes[i];
-
- if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
- // Target must guarantee the Value `Tmp` here is lowered to a boolean
- // value.
- llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
- llvm::Value *IsBooleanValue =
- Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
- llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
- Builder.CreateCall(FnAssume, IsBooleanValue);
- }
-
- // If the result type of the LLVM IR asm doesn't match the result type of
- // the expression, do the conversion.
- if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
-
- // Truncate the integer result to the right size, note that TruncTy can be
- // a pointer.
- if (TruncTy->isFloatingPointTy())
- Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
- else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
- uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
- Tmp = Builder.CreateTrunc(Tmp,
- llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
- Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
- } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
- uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
- Tmp = Builder.CreatePtrToInt(Tmp,
- llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
- Tmp = Builder.CreateTrunc(Tmp, TruncTy);
- } else if (TruncTy->isIntegerTy()) {
- Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
- } else if (TruncTy->isVectorTy()) {
- Tmp = Builder.CreateBitCast(Tmp, TruncTy);
- }
- }
-
- LValue Dest = ResultRegDests[i];
- // ResultTypeRequiresCast elements correspond to the first
- // ResultTypeRequiresCast.size() elements of RegResults.
- if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
- unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
- Address A = Builder.CreateElementBitCast(Dest.getAddress(*this),
- ResultRegTypes[i]);
- if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) {
- Builder.CreateStore(Tmp, A);
- continue;
- }
-
- QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
- if (Ty.isNull()) {
- const Expr *OutExpr = S.getOutputExpr(i);
- CGM.getDiags().Report(OutExpr->getExprLoc(),
- diag::err_store_value_to_reg);
- return;
- }
- Dest = MakeAddrLValue(A, Ty);
+ EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
+ ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
+ ResultRegIsFlagReg);
+
+ // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
+ // different insertion point; one for each indirect destination and with
+ // CBRRegResults rather than RegResults.
+ if (IsGCCAsmGoto && !CBRRegResults.empty()) {
+ for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
+ llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
+ Builder.SetInsertPoint(Succ, --(Succ->end()));
+ EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
+ ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
+ ResultTypeRequiresCast, ResultRegIsFlagReg);
}
- EmitStoreThroughLValue(RValue::get(Tmp), Dest);
}
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 6bc30ad0302e..4910ff6865e4 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -704,7 +704,7 @@ void CodeGenFunction::EmitOMPAggregateAssign(
// Drill down to the base element type on both arrays.
const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
- SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
+ SrcAddr = SrcAddr.withElementType(DestAddr.getElementType());
llvm::Value *SrcBegin = SrcAddr.getPointer();
llvm::Value *DestBegin = DestAddr.getPointer();
@@ -802,7 +802,7 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
if (!HaveInsertPoint())
return false;
bool DeviceConstTarget =
- getLangOpts().OpenMPIsDevice &&
+ getLangOpts().OpenMPIsTargetDevice &&
isOpenMPTargetExecutionDirective(D.getDirectiveKind());
bool FirstprivateIsLastprivate = false;
llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
@@ -1266,10 +1266,9 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
// implicit variable.
PrivateScope.addPrivate(LHSVD,
RedCG.getSharedLValue(Count).getAddress(*this));
- PrivateScope.addPrivate(RHSVD, Builder.CreateElementBitCast(
- GetAddrOfLocalVar(PrivateVD),
- ConvertTypeForMem(RHSVD->getType()),
- "rhs.begin"));
+ PrivateScope.addPrivate(RHSVD,
+ GetAddrOfLocalVar(PrivateVD).withElementType(
+ ConvertTypeForMem(RHSVD->getType())));
} else {
QualType Type = PrivateVD->getType();
bool IsArray = getContext().getAsArrayType(Type) != nullptr;
@@ -1277,14 +1276,13 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
// Store the address of the original variable associated with the LHS
// implicit variable.
if (IsArray) {
- OriginalAddr = Builder.CreateElementBitCast(
- OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
+ OriginalAddr =
+ OriginalAddr.withElementType(ConvertTypeForMem(LHSVD->getType()));
}
PrivateScope.addPrivate(LHSVD, OriginalAddr);
PrivateScope.addPrivate(
- RHSVD, IsArray ? Builder.CreateElementBitCast(
- GetAddrOfLocalVar(PrivateVD),
- ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
+ RHSVD, IsArray ? GetAddrOfLocalVar(PrivateVD).withElementType(
+ ConvertTypeForMem(RHSVD->getType()))
: GetAddrOfLocalVar(PrivateVD));
}
++ILHS;
@@ -1547,7 +1545,8 @@ static void emitCommonOMPParallelDirective(
llvm::Value *NumThreads = nullptr;
llvm::Function *OutlinedFn =
CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
- S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
+ CGF, S, *CS->getCapturedDecl()->param_begin(), InnermostKind,
+ CodeGen);
if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
@@ -1821,6 +1820,11 @@ class OMPTransformDirectiveScopeRAII {
CodeGenFunction::CGCapturedStmtInfo *CGSI = nullptr;
CodeGenFunction::CGCapturedStmtRAII *CapInfoRAII = nullptr;
+ OMPTransformDirectiveScopeRAII(const OMPTransformDirectiveScopeRAII &) =
+ delete;
+ OMPTransformDirectiveScopeRAII &
+ operator=(const OMPTransformDirectiveScopeRAII &) = delete;
+
public:
OMPTransformDirectiveScopeRAII(CodeGenFunction &CGF, const Stmt *S) {
if (const auto *Dir = dyn_cast<OMPLoopBasedDirective>(S)) {
@@ -4489,6 +4493,33 @@ void CodeGenFunction::EmitOMPParallelMasterDirective(
checkForLastprivateConditionalUpdate(*this, S);
}
+void CodeGenFunction::EmitOMPParallelMaskedDirective(
+ const OMPParallelMaskedDirective &S) {
+ // Emit directive as a combined directive that consists of two implicit
+ // directives: 'parallel' with 'masked' directive.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ OMPPrivateScope PrivateScope(CGF);
+ emitOMPCopyinClause(CGF, S);
+ (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
+ CGF.EmitOMPPrivateClause(S, PrivateScope);
+ CGF.EmitOMPReductionClauseInit(S, PrivateScope);
+ (void)PrivateScope.Privatize();
+ emitMasked(CGF, S);
+ CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
+ };
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_masked, CodeGen,
+ emitEmptyBoundParameters);
+ emitPostUpdateForReductionClause(*this, S,
+ [](CodeGenFunction &) { return nullptr; });
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
+}
+
void CodeGenFunction::EmitOMPParallelSectionsDirective(
const OMPParallelSectionsDirective &S) {
// Emit directive as a combined directive that consists of two implicit
@@ -4826,6 +4857,8 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
// a pointer to this memory.
for (auto &Pair : UntiedLocalVars) {
QualType VDType = Pair.first->getType().getNonReferenceType();
+ if (Pair.first->getType()->isLValueReferenceType())
+ VDType = CGF.getContext().getPointerType(VDType);
if (isAllocatableDecl(Pair.first)) {
llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
Address Replacement(
@@ -5811,37 +5844,46 @@ static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
return Fn;
}
+template <typename T>
+static void emitRestoreIP(CodeGenFunction &CGF, const T *C,
+ llvm::OpenMPIRBuilder::InsertPointTy AllocaIP,
+ llvm::OpenMPIRBuilder &OMPBuilder) {
+
+ unsigned NumLoops = C->getNumLoops();
+ QualType Int64Ty = CGF.CGM.getContext().getIntTypeForBitwidth(
+ /*DestWidth=*/64, /*Signed=*/1);
+ llvm::SmallVector<llvm::Value *> StoreValues;
+ for (unsigned I = 0; I < NumLoops; I++) {
+ const Expr *CounterVal = C->getLoopData(I);
+ assert(CounterVal);
+ llvm::Value *StoreValue = CGF.EmitScalarConversion(
+ CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
+ CounterVal->getExprLoc());
+ StoreValues.emplace_back(StoreValue);
+ }
+ OMPDoacrossKind<T> ODK;
+ bool IsDependSource = ODK.isSource(C);
+ CGF.Builder.restoreIP(
+ OMPBuilder.createOrderedDepend(CGF.Builder, AllocaIP, NumLoops,
+ StoreValues, ".cnt.addr", IsDependSource));
+}
+
void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
if (CGM.getLangOpts().OpenMPIRBuilder) {
llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
- if (S.hasClausesOfKind<OMPDependClause>()) {
+ if (S.hasClausesOfKind<OMPDependClause>() ||
+ S.hasClausesOfKind<OMPDoacrossClause>()) {
// The ordered directive with depend clause.
- assert(!S.hasAssociatedStmt() &&
- "No associated statement must be in ordered depend construct.");
+ assert(!S.hasAssociatedStmt() && "No associated statement must be in "
+ "ordered depend|doacross construct.");
InsertPointTy AllocaIP(AllocaInsertPt->getParent(),
AllocaInsertPt->getIterator());
- for (const auto *DC : S.getClausesOfKind<OMPDependClause>()) {
- unsigned NumLoops = DC->getNumLoops();
- QualType Int64Ty = CGM.getContext().getIntTypeForBitwidth(
- /*DestWidth=*/64, /*Signed=*/1);
- llvm::SmallVector<llvm::Value *> StoreValues;
- for (unsigned I = 0; I < NumLoops; I++) {
- const Expr *CounterVal = DC->getLoopData(I);
- assert(CounterVal);
- llvm::Value *StoreValue = EmitScalarConversion(
- EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
- CounterVal->getExprLoc());
- StoreValues.emplace_back(StoreValue);
- }
- bool IsDependSource = false;
- if (DC->getDependencyKind() == OMPC_DEPEND_source)
- IsDependSource = true;
- Builder.restoreIP(OMPBuilder.createOrderedDepend(
- Builder, AllocaIP, NumLoops, StoreValues, ".cnt.addr",
- IsDependSource));
- }
+ for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
+ emitRestoreIP(*this, DC, AllocaIP, OMPBuilder);
+ for (const auto *DC : S.getClausesOfKind<OMPDoacrossClause>())
+ emitRestoreIP(*this, DC, AllocaIP, OMPBuilder);
} else {
// The ordered directive with threads or simd clause, or without clause.
// Without clause, it behaves as if the threads clause is specified.
@@ -5888,6 +5930,13 @@ void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
return;
}
+ if (S.hasClausesOfKind<OMPDoacrossClause>()) {
+ assert(!S.hasAssociatedStmt() &&
+ "No associated statement must be in ordered doacross construct.");
+ for (const auto *DC : S.getClausesOfKind<OMPDoacrossClause>())
+ CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
+ return;
+ }
const auto *C = S.getSingleClause<OMPSIMDClause>();
auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
PrePostActionTy &Action) {
@@ -6533,7 +6582,7 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
CodeGenModule &CGM = CGF.CGM;
// On device emit this construct as inlined code.
- if (CGM.getLangOpts().OpenMPIsDevice) {
+ if (CGM.getLangOpts().OpenMPIsTargetDevice) {
OMPLexicalScope Scope(CGF, S, OMPD_target);
CGM.getOpenMPRuntime().emitInlinedDirective(
CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
@@ -6656,7 +6705,8 @@ static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
llvm::Function *OutlinedFn =
CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
- S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
+ CGF, S, *CS->getCapturedDecl()->param_begin(), InnermostKind,
+ CodeGen);
const auto *NT = S.getSingleClause<OMPNumTeamsClause>();
const auto *TL = S.getSingleClause<OMPThreadLimitClause>();
@@ -6923,27 +6973,27 @@ void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
void CodeGenFunction::EmitOMPInteropDirective(const OMPInteropDirective &S) {
llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
llvm::Value *Device = nullptr;
+ llvm::Value *NumDependences = nullptr;
+ llvm::Value *DependenceList = nullptr;
+
if (const auto *C = S.getSingleClause<OMPDeviceClause>())
Device = EmitScalarExpr(C->getDevice());
- llvm::Value *NumDependences = nullptr;
- llvm::Value *DependenceAddress = nullptr;
- if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
- OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(),
- DC->getModifier());
- Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
- std::pair<llvm::Value *, Address> DependencePair =
- CGM.getOpenMPRuntime().emitDependClause(*this, Dependencies,
- DC->getBeginLoc());
- NumDependences = DependencePair.first;
- DependenceAddress = Builder.CreatePointerCast(
- DependencePair.second.getPointer(), CGM.Int8PtrTy);
- }
-
- assert(!(S.hasClausesOfKind<OMPNowaitClause>() &&
- !(S.getSingleClause<OMPInitClause>() ||
- S.getSingleClause<OMPDestroyClause>() ||
- S.getSingleClause<OMPUseClause>())) &&
+ // Build list and emit dependences
+ OMPTaskDataTy Data;
+ buildDependences(S, Data);
+ if (!Data.Dependences.empty()) {
+ Address DependenciesArray = Address::invalid();
+ std::tie(NumDependences, DependenciesArray) =
+ CGM.getOpenMPRuntime().emitDependClause(*this, Data.Dependences,
+ S.getBeginLoc());
+ DependenceList = DependenciesArray.getPointer();
+ }
+ Data.HasNowaitClause = S.hasClausesOfKind<OMPNowaitClause>();
+
+ assert(!(Data.HasNowaitClause && !(S.getSingleClause<OMPInitClause>() ||
+ S.getSingleClause<OMPDestroyClause>() ||
+ S.getSingleClause<OMPUseClause>())) &&
"OMPNowaitClause clause is used separately in OMPInteropDirective.");
if (const auto *C = S.getSingleClause<OMPInitClause>()) {
@@ -6957,20 +7007,20 @@ void CodeGenFunction::EmitOMPInteropDirective(const OMPInteropDirective &S) {
InteropType = llvm::omp::OMPInteropType::TargetSync;
}
OMPBuilder.createOMPInteropInit(Builder, InteropvarPtr, InteropType, Device,
- NumDependences, DependenceAddress,
- S.hasClausesOfKind<OMPNowaitClause>());
+ NumDependences, DependenceList,
+ Data.HasNowaitClause);
} else if (const auto *C = S.getSingleClause<OMPDestroyClause>()) {
llvm::Value *InteropvarPtr =
EmitLValue(C->getInteropVar()).getPointer(*this);
OMPBuilder.createOMPInteropDestroy(Builder, InteropvarPtr, Device,
- NumDependences, DependenceAddress,
- S.hasClausesOfKind<OMPNowaitClause>());
+ NumDependences, DependenceList,
+ Data.HasNowaitClause);
} else if (const auto *C = S.getSingleClause<OMPUseClause>()) {
llvm::Value *InteropvarPtr =
EmitLValue(C->getInteropVar()).getPointer(*this);
OMPBuilder.createOMPInteropUse(Builder, InteropvarPtr, Device,
- NumDependences, DependenceAddress,
- S.hasClausesOfKind<OMPNowaitClause>());
+ NumDependences, DependenceList,
+ Data.HasNowaitClause);
}
}
@@ -7129,14 +7179,13 @@ CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
void CodeGenFunction::EmitOMPUseDevicePtrClause(
const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
- const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
- auto OrigVarIt = C.varlist_begin();
- auto InitIt = C.inits().begin();
- for (const Expr *PvtVarIt : C.private_copies()) {
- const auto *OrigVD =
- cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
- const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
- const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
+ const llvm::DenseMap<const ValueDecl *, llvm::Value *>
+ CaptureDeviceAddrMap) {
+ llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
+ for (const Expr *OrigVarIt : C.varlists()) {
+ const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(OrigVarIt)->getDecl());
+ if (!Processed.insert(OrigVD).second)
+ continue;
// In order to identify the right initializer we need to match the
// declaration used by the mapping logic. In some cases we may get
@@ -7146,7 +7195,7 @@ void CodeGenFunction::EmitOMPUseDevicePtrClause(
// OMPCapturedExprDecl are used to privative fields of the current
// structure.
const auto *ME = cast<MemberExpr>(OED->getInit());
- assert(isa<CXXThisExpr>(ME->getBase()) &&
+ assert(isa<CXXThisExpr>(ME->getBase()->IgnoreImpCasts()) &&
"Base should be the current struct!");
MatchingVD = ME->getMemberDecl();
}
@@ -7157,32 +7206,16 @@ void CodeGenFunction::EmitOMPUseDevicePtrClause(
if (InitAddrIt == CaptureDeviceAddrMap.end())
continue;
- // Initialize the temporary initialization variable with the address
- // we get from the runtime library. We have to cast the source address
- // because it is always a void *. References are materialized in the
- // privatization scope, so the initialization here disregards the fact
- // the original variable is a reference.
llvm::Type *Ty = ConvertTypeForMem(OrigVD->getType().getNonReferenceType());
- Address InitAddr = Builder.CreateElementBitCast(InitAddrIt->second, Ty);
- setAddrOfLocalVar(InitVD, InitAddr);
-
- // Emit private declaration, it will be initialized by the value we
- // declaration we just added to the local declarations map.
- EmitDecl(*PvtVD);
-
- // The initialization variables reached its purpose in the emission
- // of the previous declaration, so we don't need it anymore.
- LocalDeclMap.erase(InitVD);
// Return the address of the private variable.
- bool IsRegistered =
- PrivateScope.addPrivate(OrigVD, GetAddrOfLocalVar(PvtVD));
+ bool IsRegistered = PrivateScope.addPrivate(
+ OrigVD,
+ Address(InitAddrIt->second, Ty,
+ getContext().getTypeAlignInChars(getContext().VoidPtrTy)));
assert(IsRegistered && "firstprivate var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
-
- ++OrigVarIt;
- ++InitIt;
}
}
@@ -7197,7 +7230,8 @@ static const VarDecl *getBaseDecl(const Expr *Ref) {
void CodeGenFunction::EmitOMPUseDeviceAddrClause(
const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
- const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
+ const llvm::DenseMap<const ValueDecl *, llvm::Value *>
+ CaptureDeviceAddrMap) {
llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
for (const Expr *Ref : C.varlists()) {
const VarDecl *OrigVD = getBaseDecl(Ref);
@@ -7222,16 +7256,20 @@ void CodeGenFunction::EmitOMPUseDeviceAddrClause(
if (InitAddrIt == CaptureDeviceAddrMap.end())
continue;
- Address PrivAddr = InitAddrIt->getSecond();
+ llvm::Type *Ty = ConvertTypeForMem(OrigVD->getType().getNonReferenceType());
+
+ Address PrivAddr =
+ Address(InitAddrIt->second, Ty,
+ getContext().getTypeAlignInChars(getContext().VoidPtrTy));
// For declrefs and variable length array need to load the pointer for
// correct mapping, since the pointer to the data was passed to the runtime.
if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
MatchingVD->getType()->isArrayType()) {
QualType PtrTy = getContext().getPointerType(
OrigVD->getType().getNonReferenceType());
- PrivAddr = EmitLoadOfPointer(
- Builder.CreateElementBitCast(PrivAddr, ConvertTypeForMem(PtrTy)),
- PtrTy->castAs<PointerType>());
+ PrivAddr =
+ EmitLoadOfPointer(PrivAddr.withElementType(ConvertTypeForMem(PtrTy)),
+ PtrTy->castAs<PointerType>());
}
(void)PrivateScope.addPrivate(OrigVD, PrivAddr);
@@ -7260,16 +7298,13 @@ void CodeGenFunction::EmitOMPTargetDataDirective(
};
DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
- auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
+ auto &&CodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &Action) {
auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
};
// Codegen that selects whether to generate the privatization code or not.
- auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
- &InnermostCodeGen](CodeGenFunction &CGF,
- PrePostActionTy &Action) {
+ auto &&PrivCodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &Action) {
RegionCodeGenTy RCG(InnermostCodeGen);
PrivatizeDevicePointers = false;
@@ -7289,7 +7324,28 @@ void CodeGenFunction::EmitOMPTargetDataDirective(
(void)PrivateScope.Privatize();
RCG(CGF);
} else {
- OMPLexicalScope Scope(CGF, S, OMPD_unknown);
+ // If we don't have target devices, don't bother emitting the data
+ // mapping code.
+ std::optional<OpenMPDirectiveKind> CaptureRegion;
+ if (CGM.getLangOpts().OMPTargetTriples.empty()) {
+ // Emit helper decls of the use_device_ptr/use_device_addr clauses.
+ for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
+ for (const Expr *E : C->varlists()) {
+ const Decl *D = cast<DeclRefExpr>(E)->getDecl();
+ if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
+ CGF.EmitVarDecl(*OED);
+ }
+ for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>())
+ for (const Expr *E : C->varlists()) {
+ const Decl *D = getBaseDecl(E);
+ if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
+ CGF.EmitVarDecl(*OED);
+ }
+ } else {
+ CaptureRegion = OMPD_unknown;
+ }
+
+ OMPLexicalScope Scope(CGF, S, CaptureRegion);
RCG(CGF);
}
};
@@ -7789,6 +7845,148 @@ void CodeGenFunction::EmitOMPGenericLoopDirective(
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_loop, CodeGen);
}
+void CodeGenFunction::EmitOMPParallelGenericLoopDirective(
+ const OMPLoopDirective &S) {
+ // Emit combined directive as if its consituent constructs are 'parallel'
+ // and 'for'.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ emitOMPCopyinClause(CGF, S);
+ (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
+ };
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
+ emitEmptyBoundParameters);
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
+}
+
+void CodeGenFunction::EmitOMPTeamsGenericLoopDirective(
+ const OMPTeamsGenericLoopDirective &S) {
+ // To be consistent with current behavior of 'target teams loop', emit
+ // 'teams loop' as if its constituent constructs are 'distribute,
+ // 'parallel, and 'for'.
+ auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
+ S.getDistInc());
+ };
+
+ // Emit teams region as a standalone region.
+ auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ OMPPrivateScope PrivateScope(CGF);
+ CGF.EmitOMPReductionClauseInit(S, PrivateScope);
+ (void)PrivateScope.Privatize();
+ CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
+ CodeGenDistribute);
+ CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
+ };
+ emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
+ emitPostUpdateForReductionClause(*this, S,
+ [](CodeGenFunction &) { return nullptr; });
+}
+
+static void
+emitTargetTeamsGenericLoopRegion(CodeGenFunction &CGF,
+ const OMPTargetTeamsGenericLoopDirective &S,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ // Emit 'teams loop' as if its constituent constructs are 'distribute,
+ // 'parallel, and 'for'.
+ auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
+ S.getDistInc());
+ };
+
+ // Emit teams region as a standalone region.
+ auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
+ CGF.EmitOMPReductionClauseInit(S, PrivateScope);
+ (void)PrivateScope.Privatize();
+ CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
+ CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
+ CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
+ };
+
+ emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for,
+ CodeGenTeams);
+ emitPostUpdateForReductionClause(CGF, S,
+ [](CodeGenFunction &) { return nullptr; });
+}
+
+/// Emit combined directive 'target teams loop' as if its constituent
+/// constructs are 'target', 'teams', 'distribute', 'parallel', and 'for'.
+void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDirective(
+ const OMPTargetTeamsGenericLoopDirective &S) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ emitTargetTeamsGenericLoopRegion(CGF, S, Action);
+ };
+ emitCommonOMPTargetDirective(*this, S, CodeGen);
+}
+
+void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetTeamsGenericLoopDirective &S) {
+ // Emit SPMD target parallel loop region as a standalone region.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ emitTargetTeamsGenericLoopRegion(CGF, S, Action);
+ };
+ llvm::Function *Fn;
+ llvm::Constant *Addr;
+ // Emit target region as a standalone region.
+ CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
+ S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
+ assert(Fn && Addr &&
+ "Target device function emission failed for 'target teams loop'.");
+}
+
+static void emitTargetParallelGenericLoopRegion(
+ CodeGenFunction &CGF, const OMPTargetParallelGenericLoopDirective &S,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ // Emit as 'parallel for'.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ CodeGenFunction::OMPCancelStackRAII CancelRegion(
+ CGF, OMPD_target_parallel_loop, /*hasCancel=*/false);
+ CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
+ emitDispatchForLoopBounds);
+ };
+ emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen,
+ emitEmptyBoundParameters);
+}
+
+void CodeGenFunction::EmitOMPTargetParallelGenericLoopDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetParallelGenericLoopDirective &S) {
+ // Emit target parallel loop region as a standalone region.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ emitTargetParallelGenericLoopRegion(CGF, S, Action);
+ };
+ llvm::Function *Fn;
+ llvm::Constant *Addr;
+ // Emit target region as a standalone region.
+ CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
+ S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
+ assert(Fn && Addr && "Target device function emission failed.");
+}
+
+/// Emit combined directive 'target parallel loop' as if its constituent
+/// constructs are 'target', 'parallel', and 'for'.
+void CodeGenFunction::EmitOMPTargetParallelGenericLoopDirective(
+ const OMPTargetParallelGenericLoopDirective &S) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ emitTargetParallelGenericLoopRegion(CGF, S, Action);
+ };
+ emitCommonOMPTargetDirective(*this, S, CodeGen);
+}
+
void CodeGenFunction::EmitSimpleOMPExecutableDirective(
const OMPExecutableDirective &D) {
if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp
index d0c8e351626b..22790147c6f5 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp
@@ -42,8 +42,8 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
llvm::GlobalVariable::LinkageTypes Linkage,
const CXXRecordDecl *RD) {
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/true);
- llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(CGM.Int8PtrTy, Builder.getVTTComponents().size());
+ llvm::ArrayType *ArrayType = llvm::ArrayType::get(
+ CGM.GlobalsInt8PtrTy, Builder.getVTTComponents().size());
SmallVector<llvm::GlobalVariable *, 8> VTables;
SmallVector<VTableAddressPointsMapTy, 8> VTableAddressPoints;
@@ -81,9 +81,6 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
VTable->getValueType(), VTable, Idxs, /*InBounds=*/true,
/*InRangeIndex=*/1);
- Init = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(Init,
- CGM.Int8PtrTy);
-
VTTComponents.push_back(Init);
}
@@ -112,9 +109,9 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
- llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(CGM.Int8PtrTy, Builder.getVTTComponents().size());
- llvm::Align Align = CGM.getDataLayout().getABITypeAlign(CGM.Int8PtrTy);
+ llvm::ArrayType *ArrayType = llvm::ArrayType::get(
+ CGM.GlobalsInt8PtrTy, Builder.getVTTComponents().size());
+ llvm::Align Align = CGM.getDataLayout().getABITypeAlign(CGM.GlobalsInt8PtrTy);
llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
Name, ArrayType, llvm::GlobalValue::ExternalLinkage, Align);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
index a0b5d9e4b096..91dd7a8e046b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
@@ -690,7 +690,7 @@ bool CodeGenVTables::useRelativeLayout() const {
llvm::Type *CodeGenModule::getVTableComponentType() const {
if (UseRelativeLayout(*this))
return Int32Ty;
- return Int8PtrTy;
+ return GlobalsInt8PtrTy;
}
llvm::Type *CodeGenVTables::getVTableComponentType() const {
@@ -702,7 +702,7 @@ static void AddPointerLayoutOffset(const CodeGenModule &CGM,
CharUnits offset) {
builder.add(llvm::ConstantExpr::getIntToPtr(
llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()),
- CGM.Int8PtrTy));
+ CGM.GlobalsInt8PtrTy));
}
static void AddRelativeLayoutOffset(const CodeGenModule &CGM,
@@ -739,7 +739,7 @@ void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
vtableHasLocalLinkage,
/*isCompleteDtor=*/false);
else
- return builder.add(llvm::ConstantExpr::getBitCast(rtti, CGM.Int8PtrTy));
+ return builder.add(rtti);
case VTableComponent::CK_FunctionPointer:
case VTableComponent::CK_CompleteDtorPointer:
@@ -758,7 +758,8 @@ void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
? MD->hasAttr<CUDADeviceAttr>()
: (MD->hasAttr<CUDAHostAttr>() || !MD->hasAttr<CUDADeviceAttr>());
if (!CanEmitMethod)
- return builder.add(llvm::ConstantExpr::getNullValue(CGM.Int8PtrTy));
+ return builder.add(
+ llvm::ConstantExpr::getNullValue(CGM.GlobalsInt8PtrTy));
// Method is acceptable, continue processing as usual.
}
@@ -771,20 +772,20 @@ void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
// with the local symbol. As a temporary solution, fill these components
// with zero. We shouldn't be calling these in the first place anyway.
if (useRelativeLayout())
- return llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
+ return llvm::ConstantPointerNull::get(CGM.GlobalsInt8PtrTy);
// For NVPTX devices in OpenMP emit special functon as null pointers,
// otherwise linking ends up with unresolved references.
- if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPIsDevice &&
+ if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPIsTargetDevice &&
CGM.getTriple().isNVPTX())
- return llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
+ return llvm::ConstantPointerNull::get(CGM.GlobalsInt8PtrTy);
llvm::FunctionType *fnTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
llvm::Constant *fn = cast<llvm::Constant>(
CGM.CreateRuntimeFunction(fnTy, name).getCallee());
if (auto f = dyn_cast<llvm::Function>(fn))
f->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- return llvm::ConstantExpr::getBitCast(fn, CGM.Int8PtrTy);
+ return fn;
};
llvm::Constant *fnPtr;
@@ -822,15 +823,26 @@ void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
return addRelativeComponent(
builder, fnPtr, vtableAddressPoint, vtableHasLocalLinkage,
component.getKind() == VTableComponent::CK_CompleteDtorPointer);
- } else
- return builder.add(llvm::ConstantExpr::getBitCast(fnPtr, CGM.Int8PtrTy));
+ } else {
+ // TODO: this icky and only exists due to functions being in the generic
+ // address space, rather than the global one, even though they are
+ // globals; fixing said issue might be intrusive, and will be done
+ // later.
+ unsigned FnAS = fnPtr->getType()->getPointerAddressSpace();
+ unsigned GVAS = CGM.GlobalsInt8PtrTy->getPointerAddressSpace();
+
+ if (FnAS != GVAS)
+ fnPtr =
+ llvm::ConstantExpr::getAddrSpaceCast(fnPtr, CGM.GlobalsInt8PtrTy);
+ return builder.add(fnPtr);
+ }
}
case VTableComponent::CK_UnusedFunctionPointer:
if (useRelativeLayout())
return builder.add(llvm::ConstantExpr::getNullValue(CGM.Int32Ty));
else
- return builder.addNullPointer(CGM.Int8PtrTy);
+ return builder.addNullPointer(CGM.GlobalsInt8PtrTy);
}
llvm_unreachable("Unexpected vtable component kind");
@@ -1045,19 +1057,20 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
switch (keyFunction->getTemplateSpecializationKind()) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
- assert((def || CodeGenOpts.OptimizationLevel > 0 ||
- CodeGenOpts.getDebugInfo() != codegenoptions::NoDebugInfo) &&
- "Shouldn't query vtable linkage without key function, "
- "optimizations, or debug info");
- if (!def && CodeGenOpts.OptimizationLevel > 0)
- return llvm::GlobalVariable::AvailableExternallyLinkage;
+ assert(
+ (def || CodeGenOpts.OptimizationLevel > 0 ||
+ CodeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo) &&
+ "Shouldn't query vtable linkage without key function, "
+ "optimizations, or debug info");
+ if (!def && CodeGenOpts.OptimizationLevel > 0)
+ return llvm::GlobalVariable::AvailableExternallyLinkage;
- if (keyFunction->isInlined())
- return !Context.getLangOpts().AppleKext ?
- llvm::GlobalVariable::LinkOnceODRLinkage :
- llvm::Function::InternalLinkage;
+ if (keyFunction->isInlined())
+ return !Context.getLangOpts().AppleKext
+ ? llvm::GlobalVariable::LinkOnceODRLinkage
+ : llvm::Function::InternalLinkage;
- return llvm::GlobalVariable::ExternalLinkage;
+ return llvm::GlobalVariable::ExternalLinkage;
case TSK_ImplicitInstantiation:
return !Context.getLangOpts().AppleKext ?
@@ -1171,9 +1184,16 @@ bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
if (!keyFunction)
return false;
+ const FunctionDecl *Def;
// Otherwise, if we don't have a definition of the key function, the
// vtable must be defined somewhere else.
- return !keyFunction->hasBody();
+ if (!keyFunction->hasBody(Def))
+ return true;
+
+ assert(Def && "The body of the key function is not assigned to Def?");
+ // If the non-inline key function comes from another module unit, the vtable
+ // must be defined there.
+ return Def->isInAnotherModuleUnit() && !Def->isInlineSpecified();
}
/// Given that we're currently at the end of the translation unit, and
@@ -1211,7 +1231,8 @@ void CodeGenModule::EmitDeferredVTables() {
}
bool CodeGenModule::AlwaysHasLTOVisibilityPublic(const CXXRecordDecl *RD) {
- if (RD->hasAttr<LTOVisibilityPublicAttr>() || RD->hasAttr<UuidAttr>())
+ if (RD->hasAttr<LTOVisibilityPublicAttr>() || RD->hasAttr<UuidAttr>() ||
+ RD->hasAttr<DLLExportAttr>() || RD->hasAttr<DLLImportAttr>())
return true;
if (!getCodeGenOpts().LTOVisibilityPublicStd)
@@ -1238,13 +1259,9 @@ bool CodeGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) {
if (!isExternallyVisible(LV.getLinkage()))
return true;
- if (getTriple().isOSBinFormatCOFF()) {
- if (RD->hasAttr<DLLExportAttr>() || RD->hasAttr<DLLImportAttr>())
- return false;
- } else {
- if (LV.getVisibility() != HiddenVisibility)
- return false;
- }
+ if (!getTriple().isOSBinFormatCOFF() &&
+ LV.getVisibility() != HiddenVisibility)
+ return false;
return !AlwaysHasLTOVisibilityPublic(RD);
}
@@ -1268,13 +1285,13 @@ llvm::GlobalObject::VCallVisibility CodeGenModule::GetVCallVisibilityLevel(
else
TypeVis = llvm::GlobalObject::VCallVisibilityPublic;
- for (auto B : RD->bases())
+ for (const auto &B : RD->bases())
if (B.getType()->getAsCXXRecordDecl()->isDynamicClass())
TypeVis = std::min(
TypeVis,
GetVCallVisibilityLevel(B.getType()->getAsCXXRecordDecl(), Visited));
- for (auto B : RD->vbases())
+ for (const auto &B : RD->vbases())
if (B.getType()->getAsCXXRecordDecl()->isDynamicClass())
TypeVis = std::min(
TypeVis,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h
index e7b59d94f257..9d4223547050 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h
@@ -75,16 +75,6 @@ class CodeGenVTables {
bool vtableHasLocalLinkage,
bool isCompleteDtor) const;
- /// Create a dso_local stub that will be used for a relative reference in the
- /// relative vtable layout. This stub will just be a tail call to the original
- /// function and propagate any function attributes from the original. If the
- /// original function is already dso_local, the original is returned instead
- /// and a stub is not created.
- llvm::Function *
- getOrCreateRelativeStub(llvm::Function *func,
- llvm::GlobalValue::LinkageTypes stubLinkage,
- bool isCompleteDtor) const;
-
bool useRelativeLayout() const;
llvm::Type *getVTableComponentType() const;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGValue.h b/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
index f01eece042f8..1e6f67250583 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
@@ -225,6 +225,9 @@ class LValue {
// this lvalue.
bool Nontemporal : 1;
+ // The pointer is known not to be null.
+ bool IsKnownNonNull : 1;
+
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
@@ -238,9 +241,7 @@ private:
if (isGlobalReg())
assert(ElementType == nullptr && "Global reg does not store elem type");
else
- assert(llvm::cast<llvm::PointerType>(V->getType())
- ->isOpaqueOrPointeeTypeMatches(ElementType) &&
- "Pointer element type mismatch");
+ assert(ElementType != nullptr && "Must have elem type");
this->Type = Type;
this->Quals = Quals;
@@ -333,24 +334,35 @@ public:
LValueBaseInfo getBaseInfo() const { return BaseInfo; }
void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; }
+ KnownNonNull_t isKnownNonNull() const {
+ return (KnownNonNull_t)IsKnownNonNull;
+ }
+ LValue setKnownNonNull() {
+ IsKnownNonNull = true;
+ return *this;
+ }
+
// simple lvalue
llvm::Value *getPointer(CodeGenFunction &CGF) const {
assert(isSimple());
return V;
}
Address getAddress(CodeGenFunction &CGF) const {
- return Address(getPointer(CGF), ElementType, getAlignment());
+ return Address(getPointer(CGF), ElementType, getAlignment(),
+ isKnownNonNull());
}
void setAddress(Address address) {
assert(isSimple());
V = address.getPointer();
ElementType = address.getElementType();
Alignment = address.getAlignment().getQuantity();
+ IsKnownNonNull = address.isKnownNonNull();
}
// vector elt lvalue
Address getVectorAddress() const {
- return Address(getVectorPointer(), ElementType, getAlignment());
+ return Address(getVectorPointer(), ElementType, getAlignment(),
+ (KnownNonNull_t)isKnownNonNull());
}
llvm::Value *getVectorPointer() const {
assert(isVectorElt());
@@ -362,7 +374,8 @@ public:
}
Address getMatrixAddress() const {
- return Address(getMatrixPointer(), ElementType, getAlignment());
+ return Address(getMatrixPointer(), ElementType, getAlignment(),
+ (KnownNonNull_t)isKnownNonNull());
}
llvm::Value *getMatrixPointer() const {
assert(isMatrixElt());
@@ -375,7 +388,8 @@ public:
// extended vector elements.
Address getExtVectorAddress() const {
- return Address(getExtVectorPointer(), ElementType, getAlignment());
+ return Address(getExtVectorPointer(), ElementType, getAlignment(),
+ (KnownNonNull_t)isKnownNonNull());
}
llvm::Value *getExtVectorPointer() const {
assert(isExtVectorElt());
@@ -388,7 +402,8 @@ public:
// bitfield lvalue
Address getBitFieldAddress() const {
- return Address(getBitFieldPointer(), ElementType, getAlignment());
+ return Address(getBitFieldPointer(), ElementType, getAlignment(),
+ (KnownNonNull_t)isKnownNonNull());
}
llvm::Value *getBitFieldPointer() const { assert(isBitField()); return V; }
const CGBitFieldInfo &getBitFieldInfo() const {
@@ -409,6 +424,7 @@ public:
assert(address.getPointer()->getType()->isPointerTy());
R.V = address.getPointer();
R.ElementType = address.getElementType();
+ R.IsKnownNonNull = address.isKnownNonNull();
R.Initialize(type, qs, address.getAlignment(), BaseInfo, TBAAInfo);
return R;
}
@@ -421,6 +437,7 @@ public:
R.V = vecAddress.getPointer();
R.ElementType = vecAddress.getElementType();
R.VectorIdx = Idx;
+ R.IsKnownNonNull = vecAddress.isKnownNonNull();
R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
BaseInfo, TBAAInfo);
return R;
@@ -434,6 +451,7 @@ public:
R.V = vecAddress.getPointer();
R.ElementType = vecAddress.getElementType();
R.VectorElts = Elts;
+ R.IsKnownNonNull = vecAddress.isKnownNonNull();
R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
BaseInfo, TBAAInfo);
return R;
@@ -453,6 +471,7 @@ public:
R.V = Addr.getPointer();
R.ElementType = Addr.getElementType();
R.BitFieldInfo = &Info;
+ R.IsKnownNonNull = Addr.isKnownNonNull();
R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo,
TBAAInfo);
return R;
@@ -464,6 +483,7 @@ public:
R.LVType = GlobalReg;
R.V = V;
R.ElementType = nullptr;
+ R.IsKnownNonNull = true;
R.Initialize(type, type.getQualifiers(), alignment,
LValueBaseInfo(AlignmentSource::Decl), TBAAAccessInfo());
return R;
@@ -477,6 +497,7 @@ public:
R.V = matAddress.getPointer();
R.ElementType = matAddress.getElementType();
R.VectorIdx = Idx;
+ R.IsKnownNonNull = matAddress.isKnownNonNull();
R.Initialize(type, type.getQualifiers(), matAddress.getAlignment(),
BaseInfo, TBAAInfo);
return R;
@@ -579,6 +600,8 @@ public:
Overlap_t mayOverlap,
IsZeroed_t isZeroed = IsNotZeroed,
IsSanitizerChecked_t isChecked = IsNotSanitizerChecked) {
+ if (addr.isValid())
+ addr.setKnownNonNull();
return AggValueSlot(addr, quals, isDestructed, needsGC, isZeroed, isAliased,
mayOverlap, isChecked);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
index 2b219267869e..a3b72381d73f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/CodeGen/CodeGenAction.h"
+#include "CGCall.h"
#include "CodeGenModule.h"
#include "CoverageMappingGen.h"
#include "MacroPPCallbacks.h"
@@ -86,7 +87,7 @@ namespace clang {
};
static void reportOptRecordError(Error E, DiagnosticsEngine &Diags,
- const CodeGenOptions CodeGenOpts) {
+ const CodeGenOptions &CodeGenOpts) {
handleAllErrors(
std::move(E),
[&](const LLVMRemarkSetupFileError &E) {
@@ -115,6 +116,7 @@ namespace clang {
const LangOptions &LangOpts;
std::unique_ptr<raw_pwrite_stream> AsmOutStream;
ASTContext *Context;
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS;
Timer LLVMIRGeneration;
unsigned LLVMIRGenerationRefCount;
@@ -147,7 +149,7 @@ namespace clang {
public:
BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
const HeaderSearchOptions &HeaderSearchOpts,
const PreprocessorOptions &PPOpts,
const CodeGenOptions &CodeGenOpts,
@@ -158,10 +160,10 @@ namespace clang {
CoverageSourceInfo *CoverageInfo = nullptr)
: Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
- AsmOutStream(std::move(OS)), Context(nullptr),
+ AsmOutStream(std::move(OS)), Context(nullptr), FS(VFS),
LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
LLVMIRGenerationRefCount(0),
- Gen(CreateLLVMCodeGen(Diags, InFile, std::move(FS), HeaderSearchOpts,
+ Gen(CreateLLVMCodeGen(Diags, InFile, std::move(VFS), HeaderSearchOpts,
PPOpts, CodeGenOpts, C, CoverageInfo)),
LinkModules(std::move(LinkModules)) {
TimerIsEnabled = CodeGenOpts.TimePasses;
@@ -173,7 +175,7 @@ namespace clang {
// to use the clang diagnostic handler for IR input files. It avoids
// initializing the OS field.
BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
const HeaderSearchOptions &HeaderSearchOpts,
const PreprocessorOptions &PPOpts,
const CodeGenOptions &CodeGenOpts,
@@ -183,10 +185,10 @@ namespace clang {
CoverageSourceInfo *CoverageInfo = nullptr)
: Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
- Context(nullptr),
+ Context(nullptr), FS(VFS),
LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
LLVMIRGenerationRefCount(0),
- Gen(CreateLLVMCodeGen(Diags, "", std::move(FS), HeaderSearchOpts,
+ Gen(CreateLLVMCodeGen(Diags, "", std::move(VFS), HeaderSearchOpts,
PPOpts, CodeGenOpts, C, CoverageInfo)),
LinkModules(std::move(LinkModules)), CurLinkModule(Module) {
TimerIsEnabled = CodeGenOpts.TimePasses;
@@ -261,15 +263,17 @@ namespace clang {
}
// Links each entry in LinkModules into our module. Returns true on error.
- bool LinkInModules() {
+ bool LinkInModules(llvm::Module *M) {
for (auto &LM : LinkModules) {
+ assert(LM.Module && "LinkModule does not actually have a module");
if (LM.PropagateAttrs)
for (Function &F : *LM.Module) {
// Skip intrinsics. Keep consistent with how intrinsics are created
// in LLVM IR.
if (F.isIntrinsic())
continue;
- Gen->CGM().addDefaultFunctionDefinitionAttributes(F);
+ CodeGen::mergeDefaultFunctionDefinitionAttributes(
+ F, CodeGenOpts, LangOpts, TargetOpts, LM.Internalize);
}
CurLinkModule = LM.Module.get();
@@ -277,20 +281,20 @@ namespace clang {
bool Err;
if (LM.Internalize) {
Err = Linker::linkModules(
- *getModule(), std::move(LM.Module), LM.LinkFlags,
+ *M, std::move(LM.Module), LM.LinkFlags,
[](llvm::Module &M, const llvm::StringSet<> &GVS) {
internalizeModule(M, [&GVS](const llvm::GlobalValue &GV) {
return !GV.hasName() || (GVS.count(GV.getName()) == 0);
});
});
} else {
- Err = Linker::linkModules(*getModule(), std::move(LM.Module),
- LM.LinkFlags);
+ Err = Linker::linkModules(*M, std::move(LM.Module), LM.LinkFlags);
}
if (Err)
return true;
}
+ LinkModules.clear();
return false; // success
}
@@ -353,7 +357,7 @@ namespace clang {
}
// Link each LinkModule into our module.
- if (LinkInModules())
+ if (LinkInModules(getModule()))
return;
for (auto &F : getModule()->functions()) {
@@ -381,7 +385,7 @@ namespace clang {
EmitBackendOutput(Diags, HeaderSearchOpts, CodeGenOpts, TargetOpts,
LangOpts, C.getTargetInfo().getDataLayoutString(),
- getModule(), Action, std::move(AsmOutStream));
+ getModule(), Action, FS, std::move(AsmOutStream));
Ctx.setDiagnosticHandler(std::move(OldDiagnosticHandler));
@@ -631,9 +635,8 @@ BackendConsumer::StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D) {
return false;
Diags.Report(*Loc, diag::warn_fe_frame_larger_than)
- << D.getStackSize()
- << D.getStackLimit()
- << llvm::demangle(D.getFunction().getName().str());
+ << D.getStackSize() << D.getStackLimit()
+ << llvm::demangle(D.getFunction().getName());
return true;
}
@@ -647,7 +650,7 @@ bool BackendConsumer::ResourceLimitDiagHandler(
Diags.Report(*Loc, DiagID)
<< D.getResourceName() << D.getResourceSize() << D.getResourceLimit()
- << llvm::demangle(D.getFunction().getName().str());
+ << llvm::demangle(D.getFunction().getName());
return true;
}
@@ -852,7 +855,7 @@ void BackendConsumer::DontCallDiagHandler(const DiagnosticInfoDontCall &D) {
Diags.Report(LocCookie, D.getSeverity() == DiagnosticSeverity::DS_Error
? diag::err_fe_backend_error_attr
: diag::warn_fe_backend_warning_attr)
- << llvm::demangle(D.getFunctionName().str()) << D.getNote();
+ << llvm::demangle(D.getFunctionName()) << D.getNote();
}
void BackendConsumer::MisExpectDiagHandler(
@@ -990,6 +993,36 @@ CodeGenAction::~CodeGenAction() {
delete VMContext;
}
+bool CodeGenAction::loadLinkModules(CompilerInstance &CI) {
+ if (!LinkModules.empty())
+ return false;
+
+ for (const CodeGenOptions::BitcodeFileToLink &F :
+ CI.getCodeGenOpts().LinkBitcodeFiles) {
+ auto BCBuf = CI.getFileManager().getBufferForFile(F.Filename);
+ if (!BCBuf) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << F.Filename << BCBuf.getError().message();
+ LinkModules.clear();
+ return true;
+ }
+
+ Expected<std::unique_ptr<llvm::Module>> ModuleOrErr =
+ getOwningLazyBitcodeModule(std::move(*BCBuf), *VMContext);
+ if (!ModuleOrErr) {
+ handleAllErrors(ModuleOrErr.takeError(), [&](ErrorInfoBase &EIB) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << F.Filename << EIB.message();
+ });
+ LinkModules.clear();
+ return true;
+ }
+ LinkModules.push_back({std::move(ModuleOrErr.get()), F.PropagateAttrs,
+ F.Internalize, F.LinkFlags});
+ }
+ return false;
+}
+
bool CodeGenAction::hasIRSupport() const { return true; }
void CodeGenAction::EndSourceFileAction() {
@@ -1044,33 +1077,9 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
if (BA != Backend_EmitNothing && !OS)
return nullptr;
- VMContext->setOpaquePointers(CI.getCodeGenOpts().OpaquePointers);
-
// Load bitcode modules to link with, if we need to.
- if (LinkModules.empty())
- for (const CodeGenOptions::BitcodeFileToLink &F :
- CI.getCodeGenOpts().LinkBitcodeFiles) {
- auto BCBuf = CI.getFileManager().getBufferForFile(F.Filename);
- if (!BCBuf) {
- CI.getDiagnostics().Report(diag::err_cannot_open_file)
- << F.Filename << BCBuf.getError().message();
- LinkModules.clear();
- return nullptr;
- }
-
- Expected<std::unique_ptr<llvm::Module>> ModuleOrErr =
- getOwningLazyBitcodeModule(std::move(*BCBuf), *VMContext);
- if (!ModuleOrErr) {
- handleAllErrors(ModuleOrErr.takeError(), [&](ErrorInfoBase &EIB) {
- CI.getDiagnostics().Report(diag::err_cannot_open_file)
- << F.Filename << EIB.message();
- });
- LinkModules.clear();
- return nullptr;
- }
- LinkModules.push_back({std::move(ModuleOrErr.get()), F.PropagateAttrs,
- F.Internalize, F.LinkFlags});
- }
+ if (loadLinkModules(CI))
+ return nullptr;
CoverageSourceInfo *CoverageInfo = nullptr;
// Add the preprocessor callback only when the coverage mapping is generated.
@@ -1103,7 +1112,14 @@ CodeGenAction::loadModule(MemoryBufferRef MBRef) {
CompilerInstance &CI = getCompilerInstance();
SourceManager &SM = CI.getSourceManager();
- VMContext->setOpaquePointers(CI.getCodeGenOpts().OpaquePointers);
+ auto DiagErrors = [&](Error E) -> std::unique_ptr<llvm::Module> {
+ unsigned DiagID =
+ CI.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error, "%0");
+ handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
+ CI.getDiagnostics().Report(DiagID) << EIB.message();
+ });
+ return {};
+ };
// For ThinLTO backend invocations, ensure that the context
// merges types based on ODR identifiers. We also need to read
@@ -1111,15 +1127,6 @@ CodeGenAction::loadModule(MemoryBufferRef MBRef) {
if (!CI.getCodeGenOpts().ThinLTOIndexFile.empty()) {
VMContext->enableDebugTypeODRUniquing();
- auto DiagErrors = [&](Error E) -> std::unique_ptr<llvm::Module> {
- unsigned DiagID =
- CI.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error, "%0");
- handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
- CI.getDiagnostics().Report(DiagID) << EIB.message();
- });
- return {};
- };
-
Expected<std::vector<BitcodeModule>> BMsOrErr = getBitcodeModuleList(MBRef);
if (!BMsOrErr)
return DiagErrors(BMsOrErr.takeError());
@@ -1140,10 +1147,39 @@ CodeGenAction::loadModule(MemoryBufferRef MBRef) {
return std::move(*MOrErr);
}
+ // Load bitcode modules to link with, if we need to.
+ if (loadLinkModules(CI))
+ return nullptr;
+
+ // Handle textual IR and bitcode file with one single module.
llvm::SMDiagnostic Err;
if (std::unique_ptr<llvm::Module> M = parseIR(MBRef, Err, *VMContext))
return M;
+ // If MBRef is a bitcode with multiple modules (e.g., -fsplit-lto-unit
+ // output), place the extra modules (actually only one, a regular LTO module)
+ // into LinkModules as if we are using -mlink-bitcode-file.
+ Expected<std::vector<BitcodeModule>> BMsOrErr = getBitcodeModuleList(MBRef);
+ if (BMsOrErr && BMsOrErr->size()) {
+ std::unique_ptr<llvm::Module> FirstM;
+ for (auto &BM : *BMsOrErr) {
+ Expected<std::unique_ptr<llvm::Module>> MOrErr =
+ BM.parseModule(*VMContext);
+ if (!MOrErr)
+ return DiagErrors(MOrErr.takeError());
+ if (FirstM)
+ LinkModules.push_back({std::move(*MOrErr), /*PropagateAttrs=*/false,
+ /*Internalize=*/false, /*LinkFlags=*/{}});
+ else
+ FirstM = std::move(*MOrErr);
+ }
+ if (FirstM)
+ return FirstM;
+ }
+ // If BMsOrErr fails, consume the error and use the error message from
+ // parseIR.
+ consumeError(BMsOrErr.takeError());
+
// Translate from the diagnostic info to the SourceManager location if
// available.
// TODO: Unify this with ConvertBackendLocation()
@@ -1219,6 +1255,11 @@ void CodeGenAction::ExecuteAction() {
CI.getCodeGenOpts(), CI.getTargetOpts(),
CI.getLangOpts(), TheModule.get(),
std::move(LinkModules), *VMContext, nullptr);
+
+ // Link in each pending link module.
+ if (Result.LinkInModules(&*TheModule))
+ return;
+
// PR44896: Force DiscardValueNames as false. DiscardValueNames cannot be
// true here because the valued names are needed for reading textual IR.
Ctx.setDiscardValueNames(false);
@@ -1238,10 +1279,10 @@ void CodeGenAction::ExecuteAction() {
std::unique_ptr<llvm::ToolOutputFile> OptRecordFile =
std::move(*OptRecordFileOrErr);
- EmitBackendOutput(Diagnostics, CI.getHeaderSearchOpts(), CodeGenOpts,
- TargetOpts, CI.getLangOpts(),
- CI.getTarget().getDataLayoutString(), TheModule.get(), BA,
- std::move(OS));
+ EmitBackendOutput(
+ Diagnostics, CI.getHeaderSearchOpts(), CodeGenOpts, TargetOpts,
+ CI.getLangOpts(), CI.getTarget().getDataLayoutString(), TheModule.get(),
+ BA, CI.getFileManager().getVirtualFileSystemPtr(), std::move(OS));
if (OptRecordFile)
OptRecordFile->keep();
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
index 8cbe2a540744..fab70b66d1d9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -44,6 +44,7 @@
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CRC.h"
+#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <optional>
@@ -567,18 +568,17 @@ bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
XRayInstrKind::Typed);
}
-llvm::Value *
-CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
- llvm::Value *EncodedAddr) {
- // Reconstruct the address of the global.
- auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
- auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
- auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
- auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
-
- // Load the original pointer through the global.
- return Builder.CreateLoad(Address(GOTAddr, Int8PtrTy, getPointerAlign()),
- "decoded_addr");
+llvm::ConstantInt *
+CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {
+ // Remove any (C++17) exception specifications, to allow calling e.g. a
+ // noexcept function through a non-noexcept pointer.
+ if (!isa<FunctionNoProtoType>(Ty))
+ Ty = getContext().getFunctionTypeWithExceptionSpec(Ty, EST_None);
+ std::string Mangled;
+ llvm::raw_string_ostream Out(Mangled);
+ CGM.getCXXABI().getMangleContext().mangleTypeName(Ty, Out, false);
+ return llvm::ConstantInt::get(
+ CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
}
void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
@@ -730,31 +730,38 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (D) {
const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
+ SanitizerMask no_sanitize_mask;
bool NoSanitizeCoverage = false;
for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
- // Apply the no_sanitize* attributes to SanOpts.
- SanitizerMask mask = Attr->getMask();
- SanOpts.Mask &= ~mask;
- if (mask & SanitizerKind::Address)
- SanOpts.set(SanitizerKind::KernelAddress, false);
- if (mask & SanitizerKind::KernelAddress)
- SanOpts.set(SanitizerKind::Address, false);
- if (mask & SanitizerKind::HWAddress)
- SanOpts.set(SanitizerKind::KernelHWAddress, false);
- if (mask & SanitizerKind::KernelHWAddress)
- SanOpts.set(SanitizerKind::HWAddress, false);
-
+ no_sanitize_mask |= Attr->getMask();
// SanitizeCoverage is not handled by SanOpts.
if (Attr->hasCoverage())
NoSanitizeCoverage = true;
}
+ // Apply the no_sanitize* attributes to SanOpts.
+ SanOpts.Mask &= ~no_sanitize_mask;
+ if (no_sanitize_mask & SanitizerKind::Address)
+ SanOpts.set(SanitizerKind::KernelAddress, false);
+ if (no_sanitize_mask & SanitizerKind::KernelAddress)
+ SanOpts.set(SanitizerKind::Address, false);
+ if (no_sanitize_mask & SanitizerKind::HWAddress)
+ SanOpts.set(SanitizerKind::KernelHWAddress, false);
+ if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
+ SanOpts.set(SanitizerKind::HWAddress, false);
+
if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);
if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);
+
+ // Some passes need the non-negated no_sanitize attribute. Pass them on.
+ if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) {
+ if (no_sanitize_mask & SanitizerKind::Thread)
+ Fn->addFnAttr("no_sanitize_thread");
+ }
}
if (ShouldSkipSanitizerInstrumentation()) {
@@ -939,21 +946,14 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// If we are checking function types, emit a function type signature as
// prologue data.
- if (FD && getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
+ if (FD && SanOpts.has(SanitizerKind::Function)) {
if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
- // Remove any (C++17) exception specifications, to allow calling e.g. a
- // noexcept function through a non-noexcept pointer.
- auto ProtoTy = getContext().getFunctionTypeWithExceptionSpec(
- FD->getType(), EST_None);
- llvm::Constant *FTRTTIConst =
- CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
- llvm::GlobalVariable *FTRTTIProxy =
- CGM.GetOrCreateRTTIProxyGlobalVariable(FTRTTIConst);
llvm::LLVMContext &Ctx = Fn->getContext();
llvm::MDBuilder MDB(Ctx);
- Fn->setMetadata(llvm::LLVMContext::MD_func_sanitize,
- MDB.createRTTIPointerPrologue(PrologueSig, FTRTTIProxy));
- CGM.addCompilerUsedGlobal(FTRTTIProxy);
+ Fn->setMetadata(
+ llvm::LLVMContext::MD_func_sanitize,
+ MDB.createRTTIPointerPrologue(
+ PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
}
}
@@ -1104,8 +1104,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
auto AI = CurFn->arg_begin();
if (CurFnInfo->getReturnInfo().isSRetAfterThis())
++AI;
- ReturnValue = Address(&*AI, ConvertType(RetTy),
- CurFnInfo->getReturnInfo().getIndirectAlign());
+ ReturnValue =
+ Address(&*AI, ConvertType(RetTy),
+ CurFnInfo->getReturnInfo().getIndirectAlign(), KnownNonNull);
if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
ReturnValuePointer =
CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
@@ -1125,8 +1126,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
- ReturnValue =
- Address(Addr, ConvertType(RetTy), CGM.getNaturalTypeAlignment(RetTy));
+ ReturnValue = Address(Addr, ConvertType(RetTy),
+ CGM.getNaturalTypeAlignment(RetTy), KnownNonNull);
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
@@ -1934,8 +1935,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
llvm::Value *baseSizeInChars
= llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
- Address begin =
- Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
+ Address begin = dest.withElementType(CGF.Int8Ty);
llvm::Value *end = Builder.CreateInBoundsGEP(
begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end");
@@ -1979,9 +1979,8 @@ CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
}
}
- // Cast the dest ptr to the appropriate i8 pointer type.
if (DestPtr.getElementType() != Int8Ty)
- DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
+ DestPtr = DestPtr.withElementType(Int8Ty);
// Get size and alignment info for this aggregate.
CharUnits size = getContext().getTypeSizeInChars(Ty);
@@ -2141,7 +2140,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
}
llvm::Type *baseType = ConvertType(eltType);
- addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
+ addr = addr.withElementType(baseType);
} else {
// Create the actual GEP.
addr = Address(Builder.CreateInBoundsGEP(
@@ -2498,7 +2497,7 @@ Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
auto *PTy = dyn_cast<llvm::PointerType>(VTy);
unsigned AS = PTy ? PTy->getAddressSpace() : 0;
llvm::PointerType *IntrinTy =
- llvm::PointerType::getWithSamePointeeType(CGM.Int8PtrTy, AS);
+ llvm::PointerType::get(CGM.getLLVMContext(), AS);
llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
{IntrinTy, CGM.ConstGlobalsPtrTy});
@@ -2533,7 +2532,7 @@ void CodeGenFunction::InsertHelper(llvm::Instruction *I,
llvm::BasicBlock::iterator InsertPt) const {
LoopStack.InsertHelper(I);
if (IsSanitizerScope)
- CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
+ I->setNoSanitizeMetadata();
}
void CGBuilderInserter::InsertHelper(
@@ -2611,6 +2610,16 @@ void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
}))
CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
<< FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
+ } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
+ llvm::StringMap<bool> CalleeFeatureMap;
+ CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
+
+ for (const auto &F : CalleeFeatureMap) {
+ if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) ||
+ !CallerFeatureMap.find(F.getKey())->getValue()))
+ CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
+ << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
+ }
}
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
index a535aa7c0410..409f48a04906 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
@@ -117,7 +117,7 @@ enum TypeEvaluationKind {
SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
- SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 1) \
+ SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0) \
@@ -318,10 +318,10 @@ public:
/// CurFuncDecl - Holds the Decl for the current outermost
/// non-closure context.
- const Decl *CurFuncDecl;
+ const Decl *CurFuncDecl = nullptr;
/// CurCodeDecl - This is the inner-most code context, which includes blocks.
- const Decl *CurCodeDecl;
- const CGFunctionInfo *CurFnInfo;
+ const Decl *CurCodeDecl = nullptr;
+ const CGFunctionInfo *CurFnInfo = nullptr;
QualType FnRetTy;
llvm::Function *CurFn = nullptr;
@@ -333,6 +333,7 @@ public:
// in this header.
struct CGCoroInfo {
std::unique_ptr<CGCoroData> Data;
+ bool InSuspendBlock = false;
CGCoroInfo();
~CGCoroInfo();
};
@@ -342,6 +343,10 @@ public:
return CurCoro.Data != nullptr;
}
+ bool inSuspendBlock() const {
+ return isCoroutine() && CurCoro.InSuspendBlock;
+ }
+
/// CurGD - The GlobalDecl for the current function being compiled.
GlobalDecl CurGD;
@@ -743,11 +748,11 @@ public:
/// An i1 variable indicating whether or not the @finally is
/// running for an exception.
- llvm::AllocaInst *ForEHVar;
+ llvm::AllocaInst *ForEHVar = nullptr;
/// An i8* variable into which the exception pointer to rethrow
/// has been saved.
- llvm::AllocaInst *SavedExnVar;
+ llvm::AllocaInst *SavedExnVar = nullptr;
public:
void enter(CodeGenFunction &CGF, const Stmt *Finally,
@@ -2060,6 +2065,8 @@ public:
llvm::Value *CompletePtr,
QualType ElementType);
void pushStackRestore(CleanupKind kind, Address SPMem);
+ void pushKmpcAllocFree(CleanupKind Kind,
+ std::pair<llvm::Value *, llvm::Value *> AddrSizePair);
void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
llvm::Function *generateDestroyHelper(Address addr, QualType type,
@@ -2363,10 +2370,9 @@ public:
/// XRay typed event handling calls.
bool AlwaysEmitXRayTypedEvents() const;
- /// Decode an address used in a function prologue, encoded by \c
- /// EncodeAddrForUseInPrologue.
- llvm::Value *DecodeAddrUsedInPrologue(llvm::Value *F,
- llvm::Value *EncodedAddr);
+ /// Return a type hash constant for a function instrumented by
+ /// -fsanitize=function.
+ llvm::ConstantInt *getUBSanFunctionTypeHash(QualType T) const;
/// EmitFunctionProlog - Emit the target specific LLVM code to load the
/// arguments for the given function. This is also responsible for naming the
@@ -2621,9 +2627,6 @@ public:
AggValueSlot::DoesNotOverlap);
}
- /// Emit a cast to void* in the appropriate address space.
- llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
-
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *EvaluateExprAsBool(const Expr *E);
@@ -3164,7 +3167,8 @@ public:
Address getIndirectAddress() const {
assert(isIndirect());
- return Address(Value, ElementType, CharUnits::fromQuantity(Alignment));
+ return Address(Value, ElementType, CharUnits::fromQuantity(Alignment),
+ KnownNonNull);
}
};
@@ -3399,10 +3403,12 @@ public:
OMPPrivateScope &PrivateScope);
void EmitOMPUseDevicePtrClause(
const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
- const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
+ const llvm::DenseMap<const ValueDecl *, llvm::Value *>
+ CaptureDeviceAddrMap);
void EmitOMPUseDeviceAddrClause(
const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
- const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
+ const llvm::DenseMap<const ValueDecl *, llvm::Value *>
+ CaptureDeviceAddrMap);
/// Emit code for copyin clause in \a D directive. The next code is
/// generated at the start of outlined functions for directives:
/// \code
@@ -3578,7 +3584,14 @@ public:
void EmitOMPTargetTeamsDistributeSimdDirective(
const OMPTargetTeamsDistributeSimdDirective &S);
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S);
+ void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S);
+ void EmitOMPTargetParallelGenericLoopDirective(
+ const OMPTargetParallelGenericLoopDirective &S);
+ void EmitOMPTargetTeamsGenericLoopDirective(
+ const OMPTargetTeamsGenericLoopDirective &S);
+ void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S);
void EmitOMPInteropDirective(const OMPInteropDirective &S);
+ void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S);
/// Emit device code for the target directive.
static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
@@ -3617,6 +3630,16 @@ public:
CodeGenModule &CGM, StringRef ParentName,
const OMPTargetTeamsDistributeParallelForSimdDirective &S);
+ /// Emit device code for the target teams loop directive.
+ static void EmitOMPTargetTeamsGenericLoopDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetTeamsGenericLoopDirective &S);
+
+ /// Emit device code for the target parallel loop directive.
+ static void EmitOMPTargetParallelGenericLoopDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetParallelGenericLoopDirective &S);
+
static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
CodeGenModule &CGM, StringRef ParentName,
const OMPTargetTeamsDistributeParallelForDirective &S);
@@ -3771,8 +3794,13 @@ public:
/// an LLVM type of the same size of the lvalue's type. If the lvalue has a
/// variable length type, this is not possible.
///
- LValue EmitLValue(const Expr *E);
+ LValue EmitLValue(const Expr *E,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
+private:
+ LValue EmitLValueHelper(const Expr *E, KnownNonNull_t IsKnownNonNull);
+
+public:
/// Same as EmitLValue but additionally we generate checking code to
/// guard against undefined behavior. This is only suitable when we know
/// that the address will be used to access the object.
@@ -4234,6 +4262,7 @@ public:
llvm::Value *EmitSVEMaskedStore(const CallExpr *,
SmallVectorImpl<llvm::Value *> &Ops,
unsigned BuiltinID);
+ llvm::Value *EmitTileslice(llvm::Value *Offset, llvm::Value *Base);
llvm::Value *EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
SmallVectorImpl<llvm::Value *> &Ops,
unsigned BuiltinID);
@@ -4248,6 +4277,20 @@ public:
unsigned IntID);
llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitSMELd1St1(SVETypeFlags TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSMEReadWrite(SVETypeFlags TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSMEZero(SVETypeFlags TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSMELdrStr(SVETypeFlags TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
llvm::Triple::ArchType Arch);
llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -4674,6 +4717,14 @@ public:
/// point operation, expressed as the maximum relative error in ulp.
void SetFPAccuracy(llvm::Value *Val, float Accuracy);
+ /// Set the minimum required accuracy of the given sqrt operation
+ /// based on CodeGenOpts.
+ void SetSqrtFPAccuracy(llvm::Value *Val);
+
+ /// Set the minimum required accuracy of the given sqrt operation based on
+ /// CodeGenOpts.
+ void SetDivFPAccuracy(llvm::Value *Val);
+
/// Set the codegen fast-math flags.
void SetFastMathFlags(FPOptions FPFeatures);
@@ -4783,9 +4834,10 @@ public:
/// into the address of a local variable. In such a case, it's quite
/// reasonable to just ignore the returned alignment when it isn't from an
/// explicit source.
- Address EmitPointerWithAlignment(const Expr *Addr,
- LValueBaseInfo *BaseInfo = nullptr,
- TBAAAccessInfo *TBAAInfo = nullptr);
+ Address
+ EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo = nullptr,
+ TBAAAccessInfo *TBAAInfo = nullptr,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
/// If \p E references a parameter with pass_object_size info or a constant
/// array size modifier, emit the object size divided by the size of \p EltTy.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
index 12d602fed693..07a9dec12f6f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
@@ -50,9 +50,9 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
+#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
@@ -67,8 +67,9 @@
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TimeProfiler.h"
-#include "llvm/Support/X86TargetParser.h"
#include "llvm/Support/xxhash.h"
+#include "llvm/TargetParser/Triple.h"
+#include "llvm/TargetParser/X86TargetParser.h"
#include <optional>
using namespace clang;
@@ -100,6 +101,228 @@ static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
llvm_unreachable("invalid C++ ABI kind");
}
+static std::unique_ptr<TargetCodeGenInfo>
+createTargetCodeGenInfo(CodeGenModule &CGM) {
+ const TargetInfo &Target = CGM.getTarget();
+ const llvm::Triple &Triple = Target.getTriple();
+ const CodeGenOptions &CodeGenOpts = CGM.getCodeGenOpts();
+
+ switch (Triple.getArch()) {
+ default:
+ return createDefaultTargetCodeGenInfo(CGM);
+
+ case llvm::Triple::le32:
+ return createPNaClTargetCodeGenInfo(CGM);
+ case llvm::Triple::m68k:
+ return createM68kTargetCodeGenInfo(CGM);
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ if (Triple.getOS() == llvm::Triple::NaCl)
+ return createPNaClTargetCodeGenInfo(CGM);
+ return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/true);
+
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/false);
+
+ case llvm::Triple::avr: {
+ // For passing parameters, R8~R25 are used on avr, and R18~R25 are used
+ // on avrtiny. For passing return value, R18~R25 are used on avr, and
+ // R22~R25 are used on avrtiny.
+ unsigned NPR = Target.getABI() == "avrtiny" ? 6 : 18;
+ unsigned NRR = Target.getABI() == "avrtiny" ? 4 : 8;
+ return createAVRTargetCodeGenInfo(CGM, NPR, NRR);
+ }
+
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_32:
+ case llvm::Triple::aarch64_be: {
+ AArch64ABIKind Kind = AArch64ABIKind::AAPCS;
+ if (Target.getABI() == "darwinpcs")
+ Kind = AArch64ABIKind::DarwinPCS;
+ else if (Triple.isOSWindows())
+ return createWindowsAArch64TargetCodeGenInfo(CGM, AArch64ABIKind::Win64);
+
+ return createAArch64TargetCodeGenInfo(CGM, Kind);
+ }
+
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64: {
+ WebAssemblyABIKind Kind = WebAssemblyABIKind::MVP;
+ if (Target.getABI() == "experimental-mv")
+ Kind = WebAssemblyABIKind::ExperimentalMV;
+ return createWebAssemblyTargetCodeGenInfo(CGM, Kind);
+ }
+
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb: {
+ if (Triple.getOS() == llvm::Triple::Win32)
+ return createWindowsARMTargetCodeGenInfo(CGM, ARMABIKind::AAPCS_VFP);
+
+ ARMABIKind Kind = ARMABIKind::AAPCS;
+ StringRef ABIStr = Target.getABI();
+ if (ABIStr == "apcs-gnu")
+ Kind = ARMABIKind::APCS;
+ else if (ABIStr == "aapcs16")
+ Kind = ARMABIKind::AAPCS16_VFP;
+ else if (CodeGenOpts.FloatABI == "hard" ||
+ (CodeGenOpts.FloatABI != "soft" &&
+ (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
+ Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
+ Triple.getEnvironment() == llvm::Triple::EABIHF)))
+ Kind = ARMABIKind::AAPCS_VFP;
+
+ return createARMTargetCodeGenInfo(CGM, Kind);
+ }
+
+ case llvm::Triple::ppc: {
+ if (Triple.isOSAIX())
+ return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/false);
+
+ bool IsSoftFloat =
+ CodeGenOpts.FloatABI == "soft" || Target.hasFeature("spe");
+ return createPPC32TargetCodeGenInfo(CGM, IsSoftFloat);
+ }
+ case llvm::Triple::ppcle: {
+ bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
+ return createPPC32TargetCodeGenInfo(CGM, IsSoftFloat);
+ }
+ case llvm::Triple::ppc64:
+ if (Triple.isOSAIX())
+ return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/true);
+
+ if (Triple.isOSBinFormatELF()) {
+ PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv1;
+ if (Target.getABI() == "elfv2")
+ Kind = PPC64_SVR4_ABIKind::ELFv2;
+ bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
+
+ return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, IsSoftFloat);
+ }
+ return createPPC64TargetCodeGenInfo(CGM);
+ case llvm::Triple::ppc64le: {
+ assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
+ PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv2;
+ if (Target.getABI() == "elfv1")
+ Kind = PPC64_SVR4_ABIKind::ELFv1;
+ bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
+
+ return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, IsSoftFloat);
+ }
+
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ return createNVPTXTargetCodeGenInfo(CGM);
+
+ case llvm::Triple::msp430:
+ return createMSP430TargetCodeGenInfo(CGM);
+
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64: {
+ StringRef ABIStr = Target.getABI();
+ unsigned XLen = Target.getPointerWidth(LangAS::Default);
+ unsigned ABIFLen = 0;
+ if (ABIStr.endswith("f"))
+ ABIFLen = 32;
+ else if (ABIStr.endswith("d"))
+ ABIFLen = 64;
+ return createRISCVTargetCodeGenInfo(CGM, XLen, ABIFLen);
+ }
+
+ case llvm::Triple::systemz: {
+ bool SoftFloat = CodeGenOpts.FloatABI == "soft";
+ bool HasVector = !SoftFloat && Target.getABI() == "vector";
+ return createSystemZTargetCodeGenInfo(CGM, HasVector, SoftFloat);
+ }
+
+ case llvm::Triple::tce:
+ case llvm::Triple::tcele:
+ return createTCETargetCodeGenInfo(CGM);
+
+ case llvm::Triple::x86: {
+ bool IsDarwinVectorABI = Triple.isOSDarwin();
+ bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
+
+ if (Triple.getOS() == llvm::Triple::Win32) {
+ return createWinX86_32TargetCodeGenInfo(
+ CGM, IsDarwinVectorABI, IsWin32FloatStructABI,
+ CodeGenOpts.NumRegisterParameters);
+ }
+ return createX86_32TargetCodeGenInfo(
+ CGM, IsDarwinVectorABI, IsWin32FloatStructABI,
+ CodeGenOpts.NumRegisterParameters, CodeGenOpts.FloatABI == "soft");
+ }
+
+ case llvm::Triple::x86_64: {
+ StringRef ABI = Target.getABI();
+ X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512
+ : ABI == "avx" ? X86AVXABILevel::AVX
+ : X86AVXABILevel::None);
+
+ switch (Triple.getOS()) {
+ case llvm::Triple::Win32:
+ return createWinX86_64TargetCodeGenInfo(CGM, AVXLevel);
+ default:
+ return createX86_64TargetCodeGenInfo(CGM, AVXLevel);
+ }
+ }
+ case llvm::Triple::hexagon:
+ return createHexagonTargetCodeGenInfo(CGM);
+ case llvm::Triple::lanai:
+ return createLanaiTargetCodeGenInfo(CGM);
+ case llvm::Triple::r600:
+ return createAMDGPUTargetCodeGenInfo(CGM);
+ case llvm::Triple::amdgcn:
+ return createAMDGPUTargetCodeGenInfo(CGM);
+ case llvm::Triple::sparc:
+ return createSparcV8TargetCodeGenInfo(CGM);
+ case llvm::Triple::sparcv9:
+ return createSparcV9TargetCodeGenInfo(CGM);
+ case llvm::Triple::xcore:
+ return createXCoreTargetCodeGenInfo(CGM);
+ case llvm::Triple::arc:
+ return createARCTargetCodeGenInfo(CGM);
+ case llvm::Triple::spir:
+ case llvm::Triple::spir64:
+ return createCommonSPIRTargetCodeGenInfo(CGM);
+ case llvm::Triple::spirv32:
+ case llvm::Triple::spirv64:
+ return createSPIRVTargetCodeGenInfo(CGM);
+ case llvm::Triple::ve:
+ return createVETargetCodeGenInfo(CGM);
+ case llvm::Triple::csky: {
+ bool IsSoftFloat = !Target.hasFeature("hard-float-abi");
+ bool hasFP64 =
+ Target.hasFeature("fpuv2_df") || Target.hasFeature("fpuv3_df");
+ return createCSKYTargetCodeGenInfo(CGM, IsSoftFloat ? 0
+ : hasFP64 ? 64
+ : 32);
+ }
+ case llvm::Triple::bpfeb:
+ case llvm::Triple::bpfel:
+ return createBPFTargetCodeGenInfo(CGM);
+ case llvm::Triple::loongarch32:
+ case llvm::Triple::loongarch64: {
+ StringRef ABIStr = Target.getABI();
+ unsigned ABIFRLen = 0;
+ if (ABIStr.endswith("f"))
+ ABIFRLen = 32;
+ else if (ABIStr.endswith("d"))
+ ABIFRLen = 64;
+ return createLoongArchTargetCodeGenInfo(
+ CGM, Target.getPointerWidth(LangAS::Default), ABIFRLen);
+ }
+ }
+}
+
+const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
+ if (!TheTargetCodeGenInfo)
+ TheTargetCodeGenInfo = createTargetCodeGenInfo(*this);
+ return *TheTargetCodeGenInfo;
+}
+
CodeGenModule::CodeGenModule(ASTContext &C,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
const HeaderSearchOptions &HSO,
@@ -107,11 +330,11 @@ CodeGenModule::CodeGenModule(ASTContext &C,
const CodeGenOptions &CGO, llvm::Module &M,
DiagnosticsEngine &diags,
CoverageSourceInfo *CoverageInfo)
- : Context(C), LangOpts(C.getLangOpts()), FS(std::move(FS)),
- HeaderSearchOpts(HSO), PreprocessorOpts(PPO), CodeGenOpts(CGO),
- TheModule(M), Diags(diags), Target(C.getTargetInfo()),
- ABI(createCXXABI(*this)), VMContext(M.getContext()), Types(*this),
- VTables(*this), SanitizerMD(new SanitizerMetadata(*this)) {
+ : Context(C), LangOpts(C.getLangOpts()), FS(FS), HeaderSearchOpts(HSO),
+ PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
+ Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
+ VMContext(M.getContext()), Types(*this), VTables(*this),
+ SanitizerMD(new SanitizerMetadata(*this)) {
// Initialize the type cache.
llvm::LLVMContext &LLVMContext = M.getContext();
@@ -174,8 +397,9 @@ CodeGenModule::CodeGenModule(ASTContext &C,
// If debug info or coverage generation is enabled, create the CGDebugInfo
// object.
- if (CodeGenOpts.getDebugInfo() != codegenoptions::NoDebugInfo ||
- CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)
+ if (CodeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo ||
+ CodeGenOpts.CoverageNotesFile.size() ||
+ CodeGenOpts.CoverageDataFile.size())
DebugInfo.reset(new CGDebugInfo(*this));
Block.GlobalUniqueCount = 0;
@@ -185,7 +409,8 @@ CodeGenModule::CodeGenModule(ASTContext &C,
if (CodeGenOpts.hasProfileClangUse()) {
auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
- CodeGenOpts.ProfileInstrumentUsePath, CodeGenOpts.ProfileRemappingFile);
+ CodeGenOpts.ProfileInstrumentUsePath, *FS,
+ CodeGenOpts.ProfileRemappingFile);
// We're checking for profile read errors in CompilerInvocation, so if
// there was an error it should've already been caught. If it hasn't been
// somehow, trip an assertion.
@@ -245,7 +470,7 @@ void CodeGenModule::createOpenMPRuntime() {
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
case llvm::Triple::amdgcn:
- assert(getLangOpts().OpenMPIsDevice &&
+ assert(getLangOpts().OpenMPIsTargetDevice &&
"OpenMP AMDGPU/NVPTX is only prepared to deal with device code.");
OpenMPRuntime.reset(new CGOpenMPRuntimeGPU(*this));
break;
@@ -272,7 +497,7 @@ void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
void CodeGenModule::applyReplacements() {
for (auto &I : Replacements) {
- StringRef MangledName = I.first();
+ StringRef MangledName = I.first;
llvm::Constant *Replacement = I.second;
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (!Entry)
@@ -337,10 +562,11 @@ static const llvm::GlobalValue *getAliasedGlobal(const llvm::GlobalValue *GV) {
return FinalGV;
}
-static bool checkAliasedGlobal(DiagnosticsEngine &Diags,
- SourceLocation Location, bool IsIFunc,
- const llvm::GlobalValue *Alias,
- const llvm::GlobalValue *&GV) {
+static bool checkAliasedGlobal(
+ DiagnosticsEngine &Diags, SourceLocation Location, bool IsIFunc,
+ const llvm::GlobalValue *Alias, const llvm::GlobalValue *&GV,
+ const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames,
+ SourceRange AliasRange) {
GV = getAliasedGlobal(Alias);
if (!GV) {
Diags.Report(Location, diag::err_cyclic_alias) << IsIFunc;
@@ -349,6 +575,22 @@ static bool checkAliasedGlobal(DiagnosticsEngine &Diags,
if (GV->isDeclaration()) {
Diags.Report(Location, diag::err_alias_to_undefined) << IsIFunc << IsIFunc;
+ Diags.Report(Location, diag::note_alias_requires_mangled_name)
+ << IsIFunc << IsIFunc;
+ // Provide a note if the given function is not found and exists as a
+ // mangled name.
+ for (const auto &[Decl, Name] : MangledDeclNames) {
+ if (const auto *ND = dyn_cast<NamedDecl>(Decl.getDecl())) {
+ if (ND->getName() == GV->getName()) {
+ Diags.Report(Location, diag::note_alias_mangled_name_alternative)
+ << Name
+ << FixItHint::CreateReplacement(
+ AliasRange,
+ (Twine(IsIFunc ? "ifunc" : "alias") + "(\"" + Name + "\")")
+ .str());
+ }
+ }
+ }
return false;
}
@@ -380,16 +622,19 @@ void CodeGenModule::checkAliases() {
for (const GlobalDecl &GD : Aliases) {
const auto *D = cast<ValueDecl>(GD.getDecl());
SourceLocation Location;
+ SourceRange Range;
bool IsIFunc = D->hasAttr<IFuncAttr>();
- if (const Attr *A = D->getDefiningAttr())
+ if (const Attr *A = D->getDefiningAttr()) {
Location = A->getLocation();
- else
+ Range = A->getRange();
+ } else
llvm_unreachable("Not an alias or ifunc?");
StringRef MangledName = getMangledName(GD);
llvm::GlobalValue *Alias = GetGlobalValue(MangledName);
const llvm::GlobalValue *GV = nullptr;
- if (!checkAliasedGlobal(Diags, Location, IsIFunc, Alias, GV)) {
+ if (!checkAliasedGlobal(Diags, Location, IsIFunc, Alias, GV,
+ MangledDeclNames, Range)) {
Error = true;
continue;
}
@@ -508,7 +753,7 @@ static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO,
}
void CodeGenModule::Release() {
- Module *Primary = getContext().getModuleForCodeGen();
+ Module *Primary = getContext().getCurrentNamedModule();
if (CXX20ModuleInits && Primary && !Primary->isHeaderLikeModule())
EmitModuleInitializers(Primary);
EmitDeferred();
@@ -527,6 +772,8 @@ void CodeGenModule::Release() {
GlobalTopLevelStmtBlockInFlight = {nullptr, nullptr};
}
+ // Module implementations are initialized the same way as a regular TU that
+ // imports one or more modules.
if (CXX20ModuleInits && Primary && Primary->isInterfaceOrPartition())
EmitCXXModuleInitFunc(Primary);
else
@@ -579,20 +826,6 @@ void CodeGenModule::Release() {
EmitMainVoidAlias();
if (getTriple().isAMDGPU()) {
- // Emit reference of __amdgpu_device_library_preserve_asan_functions to
- // preserve ASAN functions in bitcode libraries.
- if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
- auto *FT = llvm::FunctionType::get(VoidTy, {});
- auto *F = llvm::Function::Create(
- FT, llvm::GlobalValue::ExternalLinkage,
- "__amdgpu_device_library_preserve_asan_functions", &getModule());
- auto *Var = new llvm::GlobalVariable(
- getModule(), FT->getPointerTo(),
- /*isConstant=*/true, llvm::GlobalValue::WeakAnyLinkage, F,
- "__amdgpu_device_library_preserve_asan_functions_ptr", nullptr,
- llvm::GlobalVariable::NotThreadLocal);
- addCompilerUsedGlobal(Var);
- }
// Emit amdgpu_code_object_version module flag, which is code object version
// times 100.
if (getTarget().getTargetOpts().CodeObjectVersion !=
@@ -601,6 +834,17 @@ void CodeGenModule::Release() {
"amdgpu_code_object_version",
getTarget().getTargetOpts().CodeObjectVersion);
}
+
+ // Currently, "-mprintf-kind" option is only supported for HIP
+ if (LangOpts.HIP) {
+ auto *MDStr = llvm::MDString::get(
+ getLLVMContext(), (getTarget().getTargetOpts().AMDGPUPrintfKindVal ==
+ TargetOptions::AMDGPUPrintfKind::Hostcall)
+ ? "hostcall"
+ : "buffered");
+ getModule().addModuleFlag(llvm::Module::Error, "amdgpu_printf_kind",
+ MDStr);
+ }
}
// Emit a global array containing all external kernels or device variables
@@ -845,7 +1089,7 @@ void CodeGenModule::Release() {
// Indicate whether this Module was compiled with -fopenmp
if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
getModule().addModuleFlag(llvm::Module::Max, "openmp", LangOpts.OpenMP);
- if (getLangOpts().OpenMPIsDevice)
+ if (getLangOpts().OpenMPIsTargetDevice)
getModule().addModuleFlag(llvm::Module::Max, "openmp-device",
LangOpts.OpenMP);
@@ -898,6 +1142,12 @@ void CodeGenModule::Release() {
if (CodeGenOpts.NoPLT)
getModule().setRtLibUseGOT();
+ if (getTriple().isOSBinFormatELF() &&
+ CodeGenOpts.DirectAccessExternalData !=
+ getModule().getDirectAccessExternalData()) {
+ getModule().setDirectAccessExternalData(
+ CodeGenOpts.DirectAccessExternalData);
+ }
if (CodeGenOpts.UnwindTables)
getModule().setUwtable(llvm::UWTableKind(CodeGenOpts.UnwindTables));
@@ -918,7 +1168,8 @@ void CodeGenModule::Release() {
if (getCodeGenOpts().EmitDeclMetadata)
EmitDeclMetadata();
- if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
+ if (getCodeGenOpts().CoverageNotesFile.size() ||
+ getCodeGenOpts().CoverageDataFile.size())
EmitCoverageFile();
if (CGDebugInfo *DI = getModuleDebugInfo())
@@ -946,6 +1197,10 @@ void CodeGenModule::Release() {
if (getCodeGenOpts().SkipRaxSetup)
getModule().addModuleFlag(llvm::Module::Override, "SkipRaxSetup", 1);
+ if (getContext().getTargetInfo().getMaxTLSAlign())
+ getModule().addModuleFlag(llvm::Module::Error, "MaxTLSAlign",
+ getContext().getTargetInfo().getMaxTLSAlign());
+
getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);
EmitBackendOptionsMetadata(getCodeGenOpts());
@@ -977,9 +1232,9 @@ void CodeGenModule::EmitOpenCLMetadata() {
}
void CodeGenModule::EmitBackendOptionsMetadata(
- const CodeGenOptions CodeGenOpts) {
+ const CodeGenOptions &CodeGenOpts) {
if (getTriple().isRISCV()) {
- getModule().addModuleFlag(llvm::Module::Error, "SmallDataLimit",
+ getModule().addModuleFlag(llvm::Module::Min, "SmallDataLimit",
CodeGenOpts.SmallDataLimit);
}
}
@@ -1347,8 +1602,13 @@ static void AppendTargetVersionMangling(const CodeGenModule &CGM,
if (Attr->isDefaultVersion())
return;
Out << "._";
+ const TargetInfo &TI = CGM.getTarget();
llvm::SmallVector<StringRef, 8> Feats;
Attr->getFeatures(Feats);
+ llvm::stable_sort(Feats, [&TI](const StringRef FeatL, const StringRef FeatR) {
+ return TI.multiVersionSortPriority(FeatL) <
+ TI.multiVersionSortPriority(FeatR);
+ });
for (const auto &Feat : Feats) {
Out << 'M';
Out << Feat;
@@ -1400,13 +1660,19 @@ static void AppendTargetClonesMangling(const CodeGenModule &CGM,
const TargetClonesAttr *Attr,
unsigned VersionIndex,
raw_ostream &Out) {
- if (CGM.getTarget().getTriple().isAArch64()) {
+ const TargetInfo &TI = CGM.getTarget();
+ if (TI.getTriple().isAArch64()) {
StringRef FeatureStr = Attr->getFeatureStr(VersionIndex);
if (FeatureStr == "default")
return;
Out << "._";
SmallVector<StringRef, 8> Features;
FeatureStr.split(Features, "+");
+ llvm::stable_sort(Features,
+ [&TI](const StringRef FeatL, const StringRef FeatR) {
+ return TI.multiVersionSortPriority(FeatL) <
+ TI.multiVersionSortPriority(FeatR);
+ });
for (auto &Feat : Features) {
Out << 'M';
Out << Feat;
@@ -1726,7 +1992,11 @@ llvm::ConstantInt *CodeGenModule::CreateKCFITypeId(QualType T) {
std::string OutName;
llvm::raw_string_ostream Out(OutName);
- getCXXABI().getMangleContext().mangleTypeName(T, Out);
+ getCXXABI().getMangleContext().mangleTypeName(
+ T, Out, getCodeGenOpts().SanitizeCfiICallNormalizeIntegers);
+
+ if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers)
+ Out << ".normalized";
return llvm::ConstantInt::get(Int32Ty,
static_cast<uint32_t>(llvm::xxHash64(OutName)));
@@ -1981,22 +2251,6 @@ CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
return MostBases.takeVector();
}
-llvm::GlobalVariable *
-CodeGenModule::GetOrCreateRTTIProxyGlobalVariable(llvm::Constant *Addr) {
- auto It = RTTIProxyMap.find(Addr);
- if (It != RTTIProxyMap.end())
- return It->second;
-
- auto *FTRTTIProxy = new llvm::GlobalVariable(
- TheModule, Addr->getType(),
- /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, Addr,
- "__llvm_rtti_proxy");
- FTRTTIProxy->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
-
- RTTIProxyMap[Addr] = FTRTTIProxy;
- return FTRTTIProxy;
-}
-
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
llvm::Function *F) {
llvm::AttrBuilder B(F->getContext());
@@ -2132,8 +2386,8 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
// functions. If the current target's C++ ABI requires this and this is a
// member function, set its alignment accordingly.
if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
- if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
- F->setAlignment(llvm::Align(2));
+ if (F->getPointerAlignment(getDataLayout()) < 2 && isa<CXXMethodDecl>(D))
+ F->setAlignment(std::max(llvm::Align(2), F->getAlign().valueOrOne()));
}
// In the cross-dso CFI mode with canonical jump tables, we want !type
@@ -2162,15 +2416,6 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
}
}
-void CodeGenModule::setLLVMFunctionFEnvAttributes(const FunctionDecl *D,
- llvm::Function *F) {
- if (D->hasAttr<StrictFPAttr>()) {
- llvm::AttrBuilder FuncAttrs(F->getContext());
- FuncAttrs.addAttribute("strictfp");
- F->addFnAttrs(FuncAttrs);
- }
-}
-
void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
const Decl *D = GD.getDecl();
if (isa_and_nonnull<NamedDecl>(D))
@@ -2181,16 +2426,19 @@ void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
if (D && D->hasAttr<UsedAttr>())
addUsedOrCompilerUsedGlobal(GV);
- if (CodeGenOpts.KeepStaticConsts && D && isa<VarDecl>(D)) {
- const auto *VD = cast<VarDecl>(D);
- if (VD->getType().isConstQualified() &&
- VD->getStorageDuration() == SD_Static)
- addUsedOrCompilerUsedGlobal(GV);
- }
+ if (const auto *VD = dyn_cast_if_present<VarDecl>(D);
+ VD &&
+ ((CodeGenOpts.KeepPersistentStorageVariables &&
+ (VD->getStorageDuration() == SD_Static ||
+ VD->getStorageDuration() == SD_Thread)) ||
+ (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static &&
+ VD->getType().isConstQualified())))
+ addUsedOrCompilerUsedGlobal(GV);
}
bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
- llvm::AttrBuilder &Attrs) {
+ llvm::AttrBuilder &Attrs,
+ bool SetTargetFeatures) {
// Add target-cpu and target-features attributes to functions. If
// we have a decl for the function and it has a target attribute then
// parse that and add it to the feature set.
@@ -2233,8 +2481,7 @@ bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
if (SD) {
// Apply the given CPU name as the 'tune-cpu' so that the optimizer can
// favor this processor.
- TuneCPU = getTarget().getCPUSpecificTuneName(
- SD->getCPUName(GD.getMultiVersionIndex())->getName());
+ TuneCPU = SD->getCPUName(GD.getMultiVersionIndex())->getName();
}
} else {
// Otherwise just add the existing target cpu and target features to the
@@ -2250,7 +2497,10 @@ bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
Attrs.addAttribute("tune-cpu", TuneCPU);
AddedAttr = true;
}
- if (!Features.empty()) {
+ if (!Features.empty() && SetTargetFeatures) {
+ llvm::erase_if(Features, [&](const std::string& F) {
+ return getTarget().isReadOnlyFeature(F.substr(1));
+ });
llvm::sort(Features);
Attrs.addAttribute("target-features", llvm::join(Features, ","));
AddedAttr = true;
@@ -2353,9 +2603,6 @@ void CodeGenModule::CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
}
void CodeGenModule::setKCFIType(const FunctionDecl *FD, llvm::Function *F) {
- if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
- return;
-
llvm::LLVMContext &Ctx = F->getContext();
llvm::MDBuilder MDB(Ctx);
F->setMetadata(llvm::LLVMContext::MD_kcfi_type,
@@ -3067,12 +3314,14 @@ bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
if (LangOpts.EmitAllDecls)
return true;
- if (CodeGenOpts.KeepStaticConsts) {
- const auto *VD = dyn_cast<VarDecl>(Global);
- if (VD && VD->getType().isConstQualified() &&
- VD->getStorageDuration() == SD_Static)
- return true;
- }
+ const auto *VD = dyn_cast<VarDecl>(Global);
+ if (VD &&
+ ((CodeGenOpts.KeepPersistentStorageVariables &&
+ (VD->getStorageDuration() == SD_Static ||
+ VD->getStorageDuration() == SD_Thread)) ||
+ (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static &&
+ VD->getType().isConstQualified())))
+ return true;
return getContext().DeclMustBeEmitted(Global);
}
@@ -3115,7 +3364,7 @@ bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
// codegen for global variables, because they may be marked as threadprivate.
if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global) &&
- !isTypeConstant(Global->getType(), false) &&
+ !isTypeConstant(Global->getType(), false, false) &&
!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Global))
return false;
@@ -3223,9 +3472,13 @@ ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
return ConstantAddress::invalid();
}
- auto *GV = new llvm::GlobalVariable(
- getModule(), Init->getType(),
- /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
+ llvm::GlobalValue::LinkageTypes Linkage =
+ isExternallyVisible(TPO->getLinkageAndVisibility().getLinkage())
+ ? llvm::GlobalValue::LinkOnceODRLinkage
+ : llvm::GlobalValue::InternalLinkage;
+ auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(),
+ /*isConstant=*/true, Linkage, Init, Name);
+ setGVProperties(GV, TPO);
if (supportsCOMDAT())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
Emitter.finalize(GV);
@@ -3318,7 +3571,8 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
if (MustBeEmitted(Global))
EmitOMPDeclareReduction(DRD);
return;
- } else if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
+ }
+ if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
if (MustBeEmitted(Global))
EmitOMPDeclareMapper(DMD);
return;
@@ -4007,7 +4261,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
// the iFunc instead. Name Mangling will handle the rest of the changes.
if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D)) {
// For the device mark the function as one that should be emitted.
- if (getLangOpts().OpenMPIsDevice && OpenMPRuntime &&
+ if (getLangOpts().OpenMPIsTargetDevice && OpenMPRuntime &&
!OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() &&
!DontDefer && !IsForDefinition) {
if (const FunctionDecl *FDDef = FD->getDefinition()) {
@@ -4184,13 +4438,10 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
/// GetAddrOfFunction - Return the address of the given function. If Ty is
/// non-null, then this function will use the specified type if it has to
/// create it (this occurs when we see a definition of the function).
-llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
- llvm::Type *Ty,
- bool ForVTable,
- bool DontDefer,
- ForDefinition_t IsForDefinition) {
- assert(!cast<FunctionDecl>(GD.getDecl())->isConsteval() &&
- "consteval function should never be emitted");
+llvm::Constant *
+CodeGenModule::GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty, bool ForVTable,
+ bool DontDefer,
+ ForDefinition_t IsForDefinition) {
// If there was no specific requested type, just convert it now.
if (!Ty) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
@@ -4315,8 +4566,9 @@ CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
///
/// If ExcludeCtor is true, the duration when the object's constructor runs
/// will not be considered. The caller will need to verify that the object is
-/// not written to during its construction.
-bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
+/// not written to during its construction. ExcludeDtor works similarly.
+bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor,
+ bool ExcludeDtor) {
if (!Ty.isConstant(Context) && !Ty->isReferenceType())
return false;
@@ -4324,7 +4576,7 @@ bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
if (const CXXRecordDecl *Record
= Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
return ExcludeCtor && !Record->hasMutableFields() &&
- Record->hasTrivialDestructor();
+ (Record->hasTrivialDestructor() || ExcludeDtor);
}
return true;
@@ -4437,7 +4689,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
// FIXME: This code is overly simple and should be merged with other global
// handling.
- GV->setConstant(isTypeConstant(D->getType(), false));
+ GV->setConstant(isTypeConstant(D->getType(), false, false));
GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
@@ -4514,7 +4766,8 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
}
}
- if (GV->isDeclaration()) {
+ if (D &&
+ D->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly) {
getTargetCodeGenInfo().setTargetAttributes(D, GV, *this);
// External HIP managed variables needed to be recorded for transformation
// in both device and host compilations.
@@ -4687,16 +4940,17 @@ LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
return LangAS::sycl_global;
if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
- if (D && D->hasAttr<CUDAConstantAttr>())
- return LangAS::cuda_constant;
- else if (D && D->hasAttr<CUDASharedAttr>())
- return LangAS::cuda_shared;
- else if (D && D->hasAttr<CUDADeviceAttr>())
- return LangAS::cuda_device;
- else if (D && D->getType().isConstQualified())
- return LangAS::cuda_constant;
- else
- return LangAS::cuda_device;
+ if (D) {
+ if (D->hasAttr<CUDAConstantAttr>())
+ return LangAS::cuda_constant;
+ if (D->hasAttr<CUDASharedAttr>())
+ return LangAS::cuda_shared;
+ if (D->hasAttr<CUDADeviceAttr>())
+ return LangAS::cuda_device;
+ if (D->getType().isConstQualified())
+ return LangAS::cuda_constant;
+ }
+ return LangAS::cuda_device;
}
if (LangOpts.OpenMP) {
@@ -4807,6 +5061,10 @@ static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
llvm_unreachable("No such linkage");
}
+bool CodeGenModule::supportsCOMDAT() const {
+ return getTriple().supportsCOMDAT();
+}
+
void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
llvm::GlobalObject &GO) {
if (!shouldBeInCOMDAT(*this, D))
@@ -4825,7 +5083,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// If this is OpenMP device, check if it is legal to emit this global
// normally.
- if (LangOpts.OpenMPIsDevice && OpenMPRuntime &&
+ if (LangOpts.OpenMPIsTargetDevice && OpenMPRuntime &&
OpenMPRuntime->emitTargetGlobalVariable(D))
return;
@@ -4973,7 +5231,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// Is accessible from all the threads within the grid and from the host
// through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize()
// / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
- if (GV && LangOpts.CUDA) {
+ if (LangOpts.CUDA) {
if (LangOpts.CUDAIsDevice) {
if (Linkage != llvm::GlobalValue::InternalLinkage &&
(D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
@@ -4992,7 +5250,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// If it is safe to mark the global 'constant', do so now.
GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
- isTypeConstant(D->getType(), true));
+ isTypeConstant(D->getType(), true, true));
// If it is in a read-only section, mark it 'constant'.
if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
@@ -5396,9 +5654,6 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
maybeSetTrivialComdat(*D, *Fn);
- // Set CodeGen attributes that represent floating point environment.
- setLLVMFunctionFEnvAttributes(D, Fn);
-
CodeGenFunction(*this).GenerateCode(GD, Fn, FI);
setNonAliasAttributes(GD, Fn);
@@ -5845,6 +6100,7 @@ CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
// Resize the string to the right size, which is indicated by its type.
const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
+ assert(CAT && "String literal not of constant array type!");
Str.resize(CAT->getSize().getZExtValue());
return llvm::ConstantDataArray::getString(VMContext, Str, false);
}
@@ -6066,7 +6322,8 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
emitter.emplace(*this);
InitialValue = emitter->emitForInitializer(*Value, AddrSpace,
MaterializedType);
- Constant = isTypeConstant(MaterializedType, /*ExcludeCtor*/Value);
+ Constant = isTypeConstant(MaterializedType, /*ExcludeCtor*/ Value,
+ /*ExcludeDtor*/ false);
Type = InitialValue->getType();
} else {
// No initializer, the initialization will be provided when we
@@ -6228,6 +6485,10 @@ void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
}
void CodeGenModule::EmitTopLevelStmt(const TopLevelStmtDecl *D) {
+ // Device code should not be at top level.
+ if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
+ return;
+
std::unique_ptr<CodeGenFunction> &CurCGF =
GlobalTopLevelStmtBlockInFlight.first;
@@ -6283,9 +6544,8 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
return;
// Consteval function shouldn't be emitted.
- if (auto *FD = dyn_cast<FunctionDecl>(D))
- if (FD->isConsteval())
- return;
+ if (auto *FD = dyn_cast<FunctionDecl>(D); FD && FD->isImmediateFunction())
+ return;
switch (D->getKind()) {
case Decl::CXXConversion:
@@ -6459,7 +6719,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
break;
// File-scope asm is ignored during device-side OpenMP compilation.
- if (LangOpts.OpenMPIsDevice)
+ if (LangOpts.OpenMPIsTargetDevice)
break;
// File-scope asm is ignored during device-side SYCL compilation.
if (LangOpts.SYCLIsDevice)
@@ -6511,16 +6771,14 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
EmitTopLevelDecl(D);
// Visit the submodules of this module.
- for (clang::Module::submodule_iterator Sub = Mod->submodule_begin(),
- SubEnd = Mod->submodule_end();
- Sub != SubEnd; ++Sub) {
+ for (auto *Submodule : Mod->submodules()) {
// Skip explicit children; they need to be explicitly imported to emit
// the initializers.
- if ((*Sub)->IsExplicit)
+ if (Submodule->IsExplicit)
continue;
- if (Visited.insert(*Sub).second)
- Stack.push_back(*Sub);
+ if (Visited.insert(Submodule).second)
+ Stack.push_back(Submodule);
}
}
break;
@@ -6869,10 +7127,6 @@ void CodeGenModule::EmitCommandLineMetadata() {
}
void CodeGenModule::EmitCoverageFile() {
- if (getCodeGenOpts().CoverageDataFile.empty() &&
- getCodeGenOpts().CoverageNotesFile.empty())
- return;
-
llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu");
if (!CUNode)
return;
@@ -6895,10 +7149,8 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
// Return a bogus pointer if RTTI is disabled, unless it's for EH.
// FIXME: should we even be calling this method if RTTI is disabled
// and it's not for EH?
- if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice ||
- (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
- getTriple().isNVPTX()))
- return llvm::Constant::getNullValue(Int8PtrTy);
+ if (!shouldEmitRTTI(ForEH))
+ return llvm::Constant::getNullValue(GlobalsInt8PtrTy);
if (ForEH && Ty->isObjCObjectPointerType() &&
LangOpts.ObjCRuntime.isGNUFamily())
@@ -6942,7 +7194,12 @@ CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
if (isExternallyVisible(T->getLinkage())) {
std::string OutName;
llvm::raw_string_ostream Out(OutName);
- getCXXABI().getMangleContext().mangleTypeName(T, Out);
+ getCXXABI().getMangleContext().mangleTypeName(
+ T, Out, getCodeGenOpts().SanitizeCfiICallNormalizeIntegers);
+
+ if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers)
+ Out << ".normalized";
+
Out << Suffix;
InternalId = llvm::MDString::get(getLLVMContext(), Out.str());
@@ -7202,7 +7459,6 @@ void CodeGenModule::moveLazyEmissionStates(CodeGenModule *NewBuilder) {
"Newly created module should not have manglings");
NewBuilder->Manglings = std::move(Manglings);
- assert(WeakRefReferences.empty() && "Not all WeakRefRefs have been applied");
NewBuilder->WeakRefReferences = std::move(WeakRefReferences);
NewBuilder->TBAA = std::move(TBAA);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
index b3354657b237..05cb217e2bee 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
@@ -30,6 +30,7 @@
#include "clang/Basic/XRayLists.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
@@ -378,8 +379,7 @@ private:
/// multiversion function resolvers and ifuncs are defined and emitted.
std::vector<GlobalDecl> MultiVersionFuncs;
- typedef llvm::StringMap<llvm::TrackingVH<llvm::Constant> > ReplacementsTy;
- ReplacementsTy Replacements;
+ llvm::MapVector<StringRef, llvm::TrackingVH<llvm::Constant>> Replacements;
/// List of global values to be replaced with something else. Used when we
/// want to replace a GlobalValue but can't identify it by its mangled name
@@ -590,8 +590,6 @@ private:
MetadataTypeMap VirtualMetadataIdMap;
MetadataTypeMap GeneralizedMetadataIdMap;
- llvm::DenseMap<const llvm::Constant *, llvm::GlobalVariable *> RTTIProxyMap;
-
// Helps squashing blocks of TopLevelStmtDecl into a single llvm::Function
// when used with -fincremental-extensions.
std::pair<std::unique_ptr<CodeGenFunction>, const TopLevelStmtDecl *>
@@ -816,7 +814,7 @@ public:
return getTBAAAccessInfo(AccessType);
}
- bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor);
+ bool isTypeConstant(QualType QTy, bool ExcludeCtor, bool ExcludeDtor);
bool isPaddedAtomicType(QualType type);
bool isPaddedAtomicType(const AtomicType *type);
@@ -928,6 +926,13 @@ public:
// Return the function body address of the given function.
llvm::Constant *GetFunctionStart(const ValueDecl *Decl);
+ // Return whether RTTI information should be emitted for this target.
+ bool shouldEmitRTTI(bool ForEH = false) {
+ return (ForEH || getLangOpts().RTTI) && !getLangOpts().CUDAIsDevice &&
+ !(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
+ getTriple().isNVPTX());
+ }
+
/// Get the address of the RTTI descriptor for the given type.
llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
@@ -1272,6 +1277,8 @@ public:
/// function which relies on particular fast-math attributes for correctness.
/// It's up to you to ensure that this is safe.
void addDefaultFunctionDefinitionAttributes(llvm::Function &F);
+ void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F,
+ bool WillInternalize);
/// Like the overload taking a `Function &`, but intended specifically
/// for frontends that want to build on Clang's target-configuration logic.
@@ -1501,9 +1508,6 @@ public:
std::vector<const CXXRecordDecl *>
getMostBaseClasses(const CXXRecordDecl *RD);
- llvm::GlobalVariable *
- GetOrCreateRTTIProxyGlobalVariable(llvm::Constant *Addr);
-
/// Get the declaration of std::terminate for the platform.
llvm::FunctionCallee getTerminateFn();
@@ -1581,7 +1585,8 @@ private:
ForDefinition_t IsForDefinition = NotForDefinition);
bool GetCPUAndFeaturesAttributes(GlobalDecl GD,
- llvm::AttrBuilder &AttrBuilder);
+ llvm::AttrBuilder &AttrBuilder,
+ bool SetTargetFeatures = true);
void setNonAliasAttributes(GlobalDecl GD, llvm::GlobalObject *GO);
/// Set function attributes for a function declaration.
@@ -1711,7 +1716,7 @@ private:
/// Emit the module flag metadata used to pass options controlling the
/// the backend to LLVM.
- void EmitBackendOptionsMetadata(const CodeGenOptions CodeGenOpts);
+ void EmitBackendOptionsMetadata(const CodeGenOptions &CodeGenOpts);
/// Emits OpenCL specific Metadata e.g. OpenCL version.
void EmitOpenCLMetadata();
@@ -1734,6 +1739,12 @@ private:
/// function.
void SimplifyPersonality();
+ /// Helper function for getDefaultFunctionAttributes. Builds a set of function
+ /// attributes which can be simply added to a function.
+ void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone,
+ bool AttrOnCallSite,
+ llvm::AttrBuilder &FuncAttrs);
+
/// Helper function for ConstructAttributeList and
/// addDefaultFunctionDefinitionAttributes. Builds a set of function
/// attributes to add to a function with the given properties.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
index 15a3d74666ca..b80317529b72 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -1036,7 +1036,7 @@ void CodeGenPGO::loadRegionCounts(llvm::IndexedInstrProfReader *PGOReader,
llvm::Expected<llvm::InstrProfRecord> RecordExpected =
PGOReader->getInstrProfRecord(FuncName, FunctionHash);
if (auto E = RecordExpected.takeError()) {
- auto IPE = llvm::InstrProfError::take(std::move(E));
+ auto IPE = std::get<0>(llvm::InstrProfError::take(std::move(E)));
if (IPE == llvm::instrprof_error::unknown_function)
CGM.getPGOStats().addMissing(IsInMainFile);
else if (IPE == llvm::instrprof_error::hash_mismatch)
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h
index 66c93cba4bb0..392ec5a144fe 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h
@@ -114,7 +114,12 @@ public:
return 0;
if (!haveRegionCounts())
return 0;
- return RegionCounts[(*RegionCounterMap)[S]];
+ // With profiles from a differing version of clang we can have mismatched
+ // decl counts. Don't crash in such a case.
+ auto Index = (*RegionCounterMap)[S];
+ if (Index >= RegionCounts.size())
+ return 0;
+ return RegionCounts[Index];
}
};
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
index abbf71daf1d5..30021794a0bb 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -125,93 +125,9 @@ bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
return I != RecordDeclTypes.end() && !I->second->isOpaque();
}
-static bool
-isSafeToConvert(QualType T, CodeGenTypes &CGT,
- llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked);
-
-
-/// isSafeToConvert - Return true if it is safe to convert the specified record
-/// decl to IR and lay it out, false if doing so would cause us to get into a
-/// recursive compilation mess.
-static bool
-isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
- llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
- // If we have already checked this type (maybe the same type is used by-value
- // multiple times in multiple structure fields, don't check again.
- if (!AlreadyChecked.insert(RD).second)
- return true;
-
- const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();
-
- // If this type is already laid out, converting it is a noop.
- if (CGT.isRecordLayoutComplete(Key)) return true;
-
- // If this type is currently being laid out, we can't recursively compile it.
- if (CGT.isRecordBeingLaidOut(Key))
- return false;
-
- // If this type would require laying out bases that are currently being laid
- // out, don't do it. This includes virtual base classes which get laid out
- // when a class is translated, even though they aren't embedded by-value into
- // the class.
- if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
- for (const auto &I : CRD->bases())
- if (!isSafeToConvert(I.getType()->castAs<RecordType>()->getDecl(), CGT,
- AlreadyChecked))
- return false;
- }
-
- // If this type would require laying out members that are currently being laid
- // out, don't do it.
- for (const auto *I : RD->fields())
- if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
- return false;
-
- // If there are no problems, lets do it.
- return true;
-}
-
-/// isSafeToConvert - Return true if it is safe to convert this field type,
-/// which requires the structure elements contained by-value to all be
-/// recursively safe to convert.
-static bool
-isSafeToConvert(QualType T, CodeGenTypes &CGT,
- llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
- // Strip off atomic type sugar.
- if (const auto *AT = T->getAs<AtomicType>())
- T = AT->getValueType();
-
- // If this is a record, check it.
- if (const auto *RT = T->getAs<RecordType>())
- return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);
-
- // If this is an array, check the elements, which are embedded inline.
- if (const auto *AT = CGT.getContext().getAsArrayType(T))
- return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);
-
- // Otherwise, there is no concern about transforming this. We only care about
- // things that are contained by-value in a structure that can have another
- // structure as a member.
- return true;
-}
-
-
-/// isSafeToConvert - Return true if it is safe to convert the specified record
-/// decl to IR and lay it out, false if doing so would cause us to get into a
-/// recursive compilation mess.
-static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
- // If no structs are being laid out, we can certainly do this one.
- if (CGT.noRecordsBeingLaidOut()) return true;
-
- llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked;
- return isSafeToConvert(RD, CGT, AlreadyChecked);
-}
-
/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at this
-/// point. This boils down to being whether it is complete, as well as whether
-/// we've temporarily deferred expanding the type because we're in a recursive
-/// context.
+/// point. This boils down to being whether it is complete.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
// Some ABIs cannot have their member pointers represented in IR unless
// certain circumstances have been reached.
@@ -223,21 +139,7 @@ bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
if (!TT) return true;
// Incomplete types cannot be converted.
- if (TT->isIncompleteType())
- return false;
-
- // If this is an enum, then it is always safe to convert.
- const RecordType *RT = dyn_cast<RecordType>(TT);
- if (!RT) return true;
-
- // Otherwise, we have to be careful. If it is a struct that we're in the
- // process of expanding, then we can't convert the function type. That's ok
- // though because we must be in a pointer context under the struct, so we can
- // just convert it to a dummy type.
- //
- // We decide this by checking whether ConvertRecordDeclType returns us an
- // opaque type for a struct that we know is defined.
- return isSafeToConvert(RT->getDecl(), *this);
+ return !TT->isIncompleteType();
}
@@ -333,7 +235,6 @@ static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
assert(QFT.isCanonical());
- const Type *Ty = QFT.getTypePtr();
const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
// First, check whether we can build the full function type. If the
// function type depends on an incomplete type (e.g. a struct or enum), we
@@ -356,14 +257,6 @@ llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
return llvm::StructType::get(getLLVMContext());
}
- // While we're converting the parameter types for a function, we don't want
- // to recursively convert any pointed-to structs. Converting directly-used
- // structs is ok though.
- if (!RecordsBeingLaidOut.insert(Ty).second) {
- SkippedLayout = true;
- return llvm::StructType::get(getLLVMContext());
- }
-
// The function type can be built; call the appropriate routines to
// build it.
const CGFunctionInfo *FI;
@@ -389,11 +282,6 @@ llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
ResultType = GetFunctionType(*FI);
}
- RecordsBeingLaidOut.erase(Ty);
-
- if (RecordsBeingLaidOut.empty())
- while (!DeferredRecords.empty())
- ConvertRecordDeclType(DeferredRecords.pop_back_val());
return ResultType;
}
@@ -421,27 +309,16 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
if (const RecordType *RT = dyn_cast<RecordType>(Ty))
return ConvertRecordDeclType(RT->getDecl());
- // The LLVM type we return for a given Clang type may not always be the same,
- // most notably when dealing with recursive structs. We mark these potential
- // cases with ShouldUseCache below. Builtin types cannot be recursive.
- // TODO: when clang uses LLVM opaque pointers we won't be able to represent
- // recursive types with LLVM types, making this logic much simpler.
llvm::Type *CachedType = nullptr;
- bool ShouldUseCache =
- Ty->isBuiltinType() ||
- (noRecordsBeingLaidOut() && FunctionsBeingProcessed.empty());
- if (ShouldUseCache) {
- llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI =
- TypeCache.find(Ty);
- if (TCI != TypeCache.end())
- CachedType = TCI->second;
- // With expensive checks, check that the type we compute matches the
- // cached type.
+ auto TCI = TypeCache.find(Ty);
+ if (TCI != TypeCache.end())
+ CachedType = TCI->second;
+ // With expensive checks, check that the type we compute matches the
+ // cached type.
#ifndef EXPENSIVE_CHECKS
- if (CachedType)
- return CachedType;
+ if (CachedType)
+ return CachedType;
#endif
- }
// If we don't have it in the cache, convert it now.
llvm::Type *ResultType = nullptr;
@@ -596,6 +473,8 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case BuiltinType::SveInt64x4:
case BuiltinType::SveUint64x4:
case BuiltinType::SveBool:
+ case BuiltinType::SveBoolx2:
+ case BuiltinType::SveBoolx4:
case BuiltinType::SveFloat16:
case BuiltinType::SveFloat16x2:
case BuiltinType::SveFloat16x3:
@@ -618,6 +497,8 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
Info.EC.getKnownMinValue() *
Info.NumVectors);
}
+ case BuiltinType::SveCount:
+ return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id: \
ResultType = \
@@ -626,14 +507,31 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
- {
- ASTContext::BuiltinVectorTypeInfo Info =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
- return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
- Info.EC.getKnownMinValue() *
- Info.NumVectors);
- }
- case BuiltinType::Dependent:
+ {
+ ASTContext::BuiltinVectorTypeInfo Info =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
+ // Tuple types are expressed as aggregregate types of the same scalable
+ // vector type (e.g. vint32m1x2_t is two vint32m1_t, which is {<vscale x
+ // 2 x i32>, <vscale x 2 x i32>}).
+ if (Info.NumVectors != 1) {
+ llvm::Type *EltTy = llvm::ScalableVectorType::get(
+ ConvertType(Info.ElementType), Info.EC.getKnownMinValue());
+ llvm::SmallVector<llvm::Type *, 4> EltTys(Info.NumVectors, EltTy);
+ return llvm::StructType::get(getLLVMContext(), EltTys);
+ }
+ return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
+ Info.EC.getKnownMinValue() *
+ Info.NumVectors);
+ }
+#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
+ case BuiltinType::Id: { \
+ if (BuiltinType::Id == BuiltinType::WasmExternRef) \
+ ResultType = CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType(); \
+ else \
+ llvm_unreachable("Unexpected wasm reference builtin type!"); \
+ } break;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+ case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
case BuiltinType::Id:
@@ -654,19 +552,15 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case Type::RValueReference: {
const ReferenceType *RTy = cast<ReferenceType>(Ty);
QualType ETy = RTy->getPointeeType();
- llvm::Type *PointeeType = ConvertTypeForMem(ETy);
unsigned AS = getTargetAddressSpace(ETy);
- ResultType = llvm::PointerType::get(PointeeType, AS);
+ ResultType = llvm::PointerType::get(getLLVMContext(), AS);
break;
}
case Type::Pointer: {
const PointerType *PTy = cast<PointerType>(Ty);
QualType ETy = PTy->getPointeeType();
- llvm::Type *PointeeType = ConvertTypeForMem(ETy);
- if (PointeeType->isVoidTy())
- PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
unsigned AS = getTargetAddressSpace(ETy);
- ResultType = llvm::PointerType::get(PointeeType, AS);
+ ResultType = llvm::PointerType::get(getLLVMContext(), AS);
break;
}
@@ -743,15 +637,9 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
break;
}
- case Type::ObjCObjectPointer: {
- // Protocol qualifications do not influence the LLVM type, we just return a
- // pointer to the underlying interface type. We don't need to worry about
- // recursive conversion.
- llvm::Type *T =
- ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
- ResultType = T->getPointerTo();
+ case Type::ObjCObjectPointer:
+ ResultType = llvm::PointerType::getUnqual(getLLVMContext());
break;
- }
case Type::Enum: {
const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
@@ -765,18 +653,15 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
}
case Type::BlockPointer: {
- const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
- llvm::Type *PointeeType = CGM.getLangOpts().OpenCL
- ? CGM.getGenericBlockLiteralType()
- : ConvertTypeForMem(FTy);
// Block pointers lower to function type. For function type,
// getTargetAddressSpace() returns default address space for
// function pointer i.e. program address space. Therefore, for block
// pointers, it is important to pass the pointee AST address space when
// calling getTargetAddressSpace(), to ensure that we get the LLVM IR
// address space for data pointers and not function pointers.
+ const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
- ResultType = llvm::PointerType::get(PointeeType, AS);
+ ResultType = llvm::PointerType::get(getLLVMContext(), AS);
break;
}
@@ -827,8 +712,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
assert((!CachedType || CachedType == ResultType) &&
"Cached type doesn't match computed type");
- if (ShouldUseCache)
- TypeCache[Ty] = ResultType;
+ TypeCache[Ty] = ResultType;
return ResultType;
}
@@ -861,17 +745,6 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
return Ty;
- // If converting this type would cause us to infinitely loop, don't do it!
- if (!isSafeToConvert(RD, *this)) {
- DeferredRecords.push_back(RD);
- return Ty;
- }
-
- // Okay, this is a definition of a type. Compile the implementation now.
- bool InsertResult = RecordsBeingLaidOut.insert(Key).second;
- (void)InsertResult;
- assert(InsertResult && "Recursively compiling a struct?");
-
// Force conversion of non-virtual base classes recursively.
if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const auto &I : CRD->bases()) {
@@ -884,22 +757,12 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
CGRecordLayouts[Key] = std::move(Layout);
- // We're done laying out this struct.
- bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
- assert(EraseResult && "struct not in RecordsBeingLaidOut set?");
-
// If this struct blocked a FunctionType conversion, then recompute whatever
// was derived from that.
// FIXME: This is hugely overconservative.
if (SkippedLayout)
TypeCache.clear();
- // If we're done converting the outer-most record, then convert any deferred
- // structs as well.
- if (RecordsBeingLaidOut.empty())
- while (!DeferredRecords.empty())
- ConvertRecordDeclType(DeferredRecords.pop_back_val());
-
return Ty;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
index e76fda95513f..9088f77b95c3 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
@@ -78,20 +78,12 @@ class CodeGenTypes {
/// Hold memoized CGFunctionInfo results.
llvm::FoldingSet<CGFunctionInfo> FunctionInfos{FunctionInfosLog2InitSize};
- /// This set keeps track of records that we're currently converting
- /// to an IR type. For example, when converting:
- /// struct A { struct B { int x; } } when processing 'x', the 'A' and 'B'
- /// types will be in this set.
- llvm::SmallPtrSet<const Type*, 4> RecordsBeingLaidOut;
-
llvm::SmallPtrSet<const CGFunctionInfo*, 4> FunctionsBeingProcessed;
/// True if we didn't layout a function due to a being inside
/// a recursive struct conversion, set this to true.
bool SkippedLayout;
- SmallVector<const RecordDecl *, 8> DeferredRecords;
-
/// This map keeps cache of llvm::Types and maps clang::Type to
/// corresponding llvm::Type.
llvm::DenseMap<const Type *, llvm::Type *> TypeCache;
@@ -300,12 +292,6 @@ public: // These are internal details of CGT that shouldn't be used externally.
bool isZeroInitializable(const RecordDecl *RD);
bool isRecordLayoutComplete(const Type *Ty) const;
- bool noRecordsBeingLaidOut() const {
- return RecordsBeingLaidOut.empty();
- }
- bool isRecordBeingLaidOut(const Type *Ty) const {
- return RecordsBeingLaidOut.count(Ty);
- }
unsigned getTargetAddressSpace(QualType T) const;
};
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h b/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h
index 1a7a181ca7f0..a55da0dcad79 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h
@@ -42,7 +42,7 @@ private:
/// The AST address space where this (non-abstract) initializer is going.
/// Used for generating appropriate placeholders.
- LangAS DestAddressSpace;
+ LangAS DestAddressSpace = LangAS::Default;
llvm::SmallVector<std::pair<llvm::Constant *, llvm::GlobalVariable*>, 4>
PlaceholderAddresses;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
index 101cd6a67b49..bb4c6f5e0cde 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -37,6 +37,11 @@ static llvm::cl::opt<bool> EmptyLineCommentCoverage(
"disable it on test)"),
llvm::cl::init(true), llvm::cl::Hidden);
+static llvm::cl::opt<bool> SystemHeadersCoverage(
+ "system-headers-coverage",
+ llvm::cl::desc("Enable collecting coverage from system headers"),
+ llvm::cl::init(false), llvm::cl::Hidden);
+
using namespace clang;
using namespace CodeGen;
using namespace llvm::coverage;
@@ -301,8 +306,9 @@ public:
if (!Visited.insert(File).second)
continue;
- // Do not map FileID's associated with system headers.
- if (SM.isInSystemHeader(SM.getSpellingLoc(Loc)))
+ // Do not map FileID's associated with system headers unless collecting
+ // coverage from system headers is explicitly enabled.
+ if (!SystemHeadersCoverage && SM.isInSystemHeader(SM.getSpellingLoc(Loc)))
continue;
unsigned Depth = 0;
@@ -416,8 +422,10 @@ public:
SourceLocation LocStart = Region.getBeginLoc();
assert(SM.getFileID(LocStart).isValid() && "region in invalid file");
- // Ignore regions from system headers.
- if (SM.isInSystemHeader(SM.getSpellingLoc(LocStart)))
+ // Ignore regions from system headers unless collecting coverage from
+ // system headers is explicitly enabled.
+ if (!SystemHeadersCoverage &&
+ SM.isInSystemHeader(SM.getSpellingLoc(LocStart)))
continue;
auto CovFileID = getCoverageFileID(LocStart);
@@ -594,6 +602,19 @@ struct CounterCoverageMappingBuilder
MostRecentLocation = *StartLoc;
}
+ // If either of these locations is invalid, something elsewhere in the
+ // compiler has broken.
+ assert((!StartLoc || StartLoc->isValid()) && "Start location is not valid");
+ assert((!EndLoc || EndLoc->isValid()) && "End location is not valid");
+
+ // However, we can still recover without crashing.
+ // If either location is invalid, set it to std::nullopt to avoid
+ // letting users of RegionStack think that region has a valid start/end
+ // location.
+ if (StartLoc && StartLoc->isInvalid())
+ StartLoc = std::nullopt;
+ if (EndLoc && EndLoc->isInvalid())
+ EndLoc = std::nullopt;
RegionStack.emplace_back(Count, FalseCount, StartLoc, EndLoc);
return RegionStack.size() - 1;
@@ -616,7 +637,8 @@ struct CounterCoverageMappingBuilder
assert(RegionStack.size() >= ParentIndex && "parent not in stack");
while (RegionStack.size() > ParentIndex) {
SourceMappingRegion &Region = RegionStack.back();
- if (Region.hasStartLoc()) {
+ if (Region.hasStartLoc() &&
+ (Region.hasEndLoc() || RegionStack[ParentIndex].hasEndLoc())) {
SourceLocation StartLoc = Region.getBeginLoc();
SourceLocation EndLoc = Region.hasEndLoc()
? Region.getEndLoc()
@@ -683,7 +705,7 @@ struct CounterCoverageMappingBuilder
assert(SM.isWrittenInSameFile(Region.getBeginLoc(), EndLoc));
assert(SpellingRegion(SM, Region).isInSourceOrder());
SourceRegions.push_back(Region);
- }
+ }
RegionStack.pop_back();
}
}
@@ -1000,8 +1022,10 @@ struct CounterCoverageMappingBuilder
void VisitDecl(const Decl *D) {
Stmt *Body = D->getBody();
- // Do not propagate region counts into system headers.
- if (Body && SM.isInSystemHeader(SM.getSpellingLoc(getStart(Body))))
+ // Do not propagate region counts into system headers unless collecting
+ // coverage from system headers is explicitly enabled.
+ if (!SystemHeadersCoverage && Body &&
+ SM.isInSystemHeader(SM.getSpellingLoc(getStart(Body))))
return;
// Do not visit the artificial children nodes of defaulted methods. The
@@ -1456,6 +1480,7 @@ struct CounterCoverageMappingBuilder
Counter TrueCount = getRegionCounter(E);
propagateCounts(ParentCount, E->getCond());
+ Counter OutCount;
if (!isa<BinaryConditionalOperator>(E)) {
// The 'then' count applies to the area immediately after the condition.
@@ -1465,12 +1490,18 @@ struct CounterCoverageMappingBuilder
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), TrueCount);
extendRegion(E->getTrueExpr());
- propagateCounts(TrueCount, E->getTrueExpr());
+ OutCount = propagateCounts(TrueCount, E->getTrueExpr());
}
extendRegion(E->getFalseExpr());
- propagateCounts(subtractCounters(ParentCount, TrueCount),
- E->getFalseExpr());
+ OutCount = addCounters(
+ OutCount, propagateCounts(subtractCounters(ParentCount, TrueCount),
+ E->getFalseExpr()));
+
+ if (OutCount != ParentCount) {
+ pushRegion(OutCount);
+ GapRegionCounter = OutCount;
+ }
// Create Branch Region around condition.
createBranchRegion(E->getCond(), TrueCount,
@@ -1504,9 +1535,19 @@ struct CounterCoverageMappingBuilder
subtractCounters(RHSExecCnt, RHSTrueCnt));
}
+ // Determine whether the right side of OR operation need to be visited.
+ bool shouldVisitRHS(const Expr *LHS) {
+ bool LHSIsTrue = false;
+ bool LHSIsConst = false;
+ if (!LHS->isValueDependent())
+ LHSIsConst = LHS->EvaluateAsBooleanCondition(
+ LHSIsTrue, CVM.getCodeGenModule().getContext());
+ return !LHSIsConst || (LHSIsConst && !LHSIsTrue);
+ }
+
void VisitBinLOr(const BinaryOperator *E) {
extendRegion(E->getLHS());
- propagateCounts(getRegion().getCounter(), E->getLHS());
+ Counter OutCount = propagateCounts(getRegion().getCounter(), E->getLHS());
handleFileExit(getEnd(E->getLHS()));
// Counter tracks the right hand side of a logical or operator.
@@ -1519,6 +1560,10 @@ struct CounterCoverageMappingBuilder
// Extract the RHS's "False" Instance Counter.
Counter RHSFalseCnt = getRegionCounter(E->getRHS());
+ if (!shouldVisitRHS(E->getLHS())) {
+ GapRegionCounter = OutCount;
+ }
+
// Extract the Parent Region Counter.
Counter ParentCnt = getRegion().getCounter();
@@ -1535,6 +1580,15 @@ struct CounterCoverageMappingBuilder
// Lambdas are treated as their own functions for now, so we shouldn't
// propagate counts into them.
}
+
+ void VisitPseudoObjectExpr(const PseudoObjectExpr *POE) {
+ // Just visit syntatic expression as this is what users actually write.
+ VisitStmt(POE->getSyntacticForm());
+ }
+
+ void VisitOpaqueValueExpr(const OpaqueValueExpr* OVE) {
+ Visit(OVE->getSourceExpr());
+ }
};
} // end anonymous namespace
@@ -1580,9 +1634,7 @@ static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
CoverageMappingModuleGen::CoverageMappingModuleGen(
CodeGenModule &CGM, CoverageSourceInfo &SourceInfo)
- : CGM(CGM), SourceInfo(SourceInfo) {
- CoveragePrefixMap = CGM.getCodeGenOpts().CoveragePrefixMap;
-}
+ : CGM(CGM), SourceInfo(SourceInfo) {}
std::string CoverageMappingModuleGen::getCurrentDirname() {
if (!CGM.getCodeGenOpts().CoverageCompilationDir.empty())
@@ -1596,8 +1648,13 @@ std::string CoverageMappingModuleGen::getCurrentDirname() {
std::string CoverageMappingModuleGen::normalizeFilename(StringRef Filename) {
llvm::SmallString<256> Path(Filename);
llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
- for (const auto &Entry : CoveragePrefixMap) {
- if (llvm::sys::path::replace_path_prefix(Path, Entry.first, Entry.second))
+
+ /// Traverse coverage prefix map in reverse order because prefix replacements
+ /// are applied in reverse order starting from the last one when multiple
+ /// prefix replacement options are provided.
+ for (const auto &[From, To] :
+ llvm::reverse(CGM.getCodeGenOpts().CoveragePrefixMap)) {
+ if (llvm::sys::path::replace_path_prefix(Path, From, To))
break;
}
return Path.str().str();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
index f5282601b640..eca68d9abd79 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
@@ -107,7 +107,6 @@ class CoverageMappingModuleGen {
llvm::SmallDenseMap<const FileEntry *, unsigned, 8> FileEntries;
std::vector<llvm::Constant *> FunctionNames;
std::vector<FunctionInfo> FunctionRecords;
- std::map<std::string, std::string> CoveragePrefixMap;
std::string getCurrentDirname();
std::string normalizeFilename(StringRef Filename);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h b/contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h
index cd649cb11f9b..3c8a51590d1b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h
@@ -148,6 +148,12 @@ public:
public:
Cleanup(const Cleanup &) = default;
Cleanup(Cleanup &&) {}
+
+ // The copy and move assignment operator is defined as deleted pending
+ // further motivation.
+ Cleanup &operator=(const Cleanup &) = delete;
+ Cleanup &operator=(Cleanup &&) = delete;
+
Cleanup() = default;
virtual bool isRedundantBeforeReturn() { return false; }
@@ -272,6 +278,9 @@ public:
CGF(nullptr) {}
~EHScopeStack() { delete[] StartOfBuffer; }
+ EHScopeStack(const EHScopeStack &) = delete;
+ EHScopeStack &operator=(const EHScopeStack &) = delete;
+
/// Push a lazily-created cleanup on the stack.
template <class T, class... As> void pushCleanup(CleanupKind Kind, As... A) {
static_assert(alignof(T) <= ScopeStackAlignment,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 18403036e700..79a926cb9edd 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -36,6 +36,8 @@
#include "llvm/IR/Value.h"
#include "llvm/Support/ScopedPrinter.h"
+#include <optional>
+
using namespace clang;
using namespace CodeGen;
@@ -185,14 +187,58 @@ public:
bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) override;
- llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
+ /// Determine whether we know that all instances of type RecordTy will have
+ /// the same vtable pointer values, that is distinct from all other vtable
+ /// pointers. While this is required by the Itanium ABI, it doesn't happen in
+ /// practice in some cases due to language extensions.
+ bool hasUniqueVTablePointer(QualType RecordTy) {
+ const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl();
+
+ // Under -fapple-kext, multiple definitions of the same vtable may be
+ // emitted.
+ if (!CGM.getCodeGenOpts().AssumeUniqueVTables ||
+ getContext().getLangOpts().AppleKext)
+ return false;
+
+ // If the type_info* would be null, the vtable might be merged with that of
+ // another type.
+ if (!CGM.shouldEmitRTTI())
+ return false;
+
+ // If there's only one definition of the vtable in the program, it has a
+ // unique address.
+ if (!llvm::GlobalValue::isWeakForLinker(CGM.getVTableLinkage(RD)))
+ return true;
+
+ // Even if there are multiple definitions of the vtable, they are required
+ // by the ABI to use the same symbol name, so should be merged at load
+ // time. However, if the class has hidden visibility, there can be
+ // different versions of the class in different modules, and the ABI
+ // library might treat them as being the same.
+ if (CGM.GetLLVMVisibility(RD->getVisibility()) !=
+ llvm::GlobalValue::DefaultVisibility)
+ return false;
+
+ return true;
+ }
+
+ bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
+ return hasUniqueVTablePointer(DestRecordTy);
+ }
+
+ llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy,
llvm::BasicBlock *CastEnd) override;
- llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
- QualType SrcRecordTy,
- QualType DestTy) override;
+ llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
+ QualType SrcRecordTy, QualType DestTy,
+ QualType DestRecordTy,
+ llvm::BasicBlock *CastSuccess,
+ llvm::BasicBlock *CastFail) override;
+
+ llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy) override;
bool EmitBadCastCall(CodeGenFunction &CGF) override;
@@ -580,13 +626,10 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CGBuilderTy &Builder = CGF.Builder;
const FunctionProtoType *FPT =
- MPT->getPointeeType()->getAs<FunctionProtoType>();
+ MPT->getPointeeType()->castAs<FunctionProtoType>();
auto *RD =
cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
- CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
-
llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
@@ -628,7 +671,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CGF.EmitBlock(FnVirtual);
// Cast the adjusted this to a pointer to vtable pointer and load.
- llvm::Type *VTableTy = Builder.getInt8PtrTy();
+ llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy;
CharUnits VTablePtrAlign =
CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
CGF.getPointerAlign());
@@ -687,8 +730,6 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
{VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
- VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
- "memptr.virtualfn");
} else {
// When not doing VFE, emit a normal load, as it allows more
// optimisations than type.checked.load.
@@ -709,15 +750,12 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CGM.getIntrinsic(llvm::Intrinsic::load_relative,
{VTableOffset->getType()}),
{VTable, VTableOffset});
- VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
} else {
llvm::Value *VFPAddr =
CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
- VFPAddr = CGF.Builder.CreateBitCast(
- VFPAddr, FTy->getPointerTo()->getPointerTo());
VirtualFn = CGF.Builder.CreateAlignedLoad(
- FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
- "memptr.virtualfn");
+ llvm::PointerType::getUnqual(CGF.getLLVMContext()), VFPAddr,
+ CGF.getPointerAlign(), "memptr.virtualfn");
}
}
assert(VirtualFn && "Virtual fuction pointer not created!");
@@ -757,8 +795,9 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// In the non-virtual path, the function pointer is actually a
// function pointer.
CGF.EmitBlock(FnNonVirtual);
- llvm::Value *NonVirtualFn =
- Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
+ llvm::Value *NonVirtualFn = Builder.CreateIntToPtr(
+ FnAsInt, llvm::PointerType::getUnqual(CGF.getLLVMContext()),
+ "memptr.nonvirtualfn");
// Check the function pointer if CFI on member function pointers is enabled.
if (ShouldEmitCFICheck) {
@@ -799,7 +838,8 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// We're done.
CGF.EmitBlock(FnEnd);
- llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
+ llvm::PHINode *CalleePtr =
+ Builder.CreatePHI(llvm::PointerType::getUnqual(CGF.getLLVMContext()), 2);
CalleePtr->addIncoming(VirtualFn, FnVirtual);
CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
@@ -816,18 +856,9 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
CGBuilderTy &Builder = CGF.Builder;
- // Cast to char*.
- Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
-
// Apply the offset, which we assume is non-null.
- llvm::Value *Addr = Builder.CreateInBoundsGEP(
- Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");
-
- // Cast the address to the appropriate pointer type, adopting the
- // address space of the base pointer.
- llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
- ->getPointerTo(Base.getAddressSpace());
- return Builder.CreateBitCast(Addr, PType);
+ return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.getPointer(), MemPtr,
+ "memptr.offset");
}
/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
@@ -1212,13 +1243,14 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// Grab the vtable pointer as an intptr_t*.
auto *ClassDecl =
cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
- llvm::Value *VTable =
- CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
+ llvm::Value *VTable = CGF.GetVTablePtr(
+ Ptr, llvm::PointerType::getUnqual(CGF.getLLVMContext()), ClassDecl);
// Track back to entry -2 and pull out the offset there.
llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
- llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr, CGF.getPointerAlign());
+ llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,
+ CGF.getPointerAlign());
// Apply the offset.
llvm::Value *CompletePtr =
@@ -1268,7 +1300,7 @@ static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
// void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
// void (*dest) (void *));
- llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
+ llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy };
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
@@ -1417,8 +1449,8 @@ llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
llvm::Type *StdTypeInfoPtrTy) {
auto *ClassDecl =
cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
- llvm::Value *Value =
- CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
+ llvm::Value *Value = CGF.GetVTablePtr(
+ ThisPtr, llvm::PointerType::getUnqual(CGF.getLLVMContext()), ClassDecl);
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Load the type info.
@@ -1426,9 +1458,6 @@ llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
Value = CGF.Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
{Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
-
- // Setup to dereference again since this is a proxy we accessed.
- Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
} else {
// Load the type info.
Value =
@@ -1443,12 +1472,11 @@ bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
return SrcIsPtr;
}
-llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
+llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
llvm::Type *PtrDiffLTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
- llvm::Type *DestLTy = CGF.ConvertType(DestTy);
llvm::Value *SrcRTTI =
CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
@@ -1463,12 +1491,9 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
// Emit the call to __dynamic_cast.
- llvm::Value *Value = ThisAddr.getPointer();
- Value = CGF.EmitCastToVoidPtr(Value);
-
- llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
- Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
- Value = CGF.Builder.CreateBitCast(Value, DestLTy);
+ llvm::Value *Args[] = {ThisAddr.getPointer(), SrcRTTI, DestRTTI, OffsetHint};
+ llvm::Value *Value =
+ CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), Args);
/// C++ [expr.dynamic.cast]p9:
/// A failed cast to reference type throws std::bad_cast
@@ -1486,18 +1511,95 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
return Value;
}
-llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
+llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
+ CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
+ QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess,
+ llvm::BasicBlock *CastFail) {
+ ASTContext &Context = getContext();
+
+ // Find all the inheritance paths.
+ const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
+ const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ (void)DestDecl->isDerivedFrom(SrcDecl, Paths);
+
+ // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
+ // might appear.
+ std::optional<CharUnits> Offset;
+ for (const CXXBasePath &Path : Paths) {
+ // dynamic_cast only finds public inheritance paths.
+ if (Path.Access != AS_public)
+ continue;
+
+ CharUnits PathOffset;
+ for (const CXXBasePathElement &PathElement : Path) {
+ // Find the offset along this inheritance step.
+ const CXXRecordDecl *Base =
+ PathElement.Base->getType()->getAsCXXRecordDecl();
+ if (PathElement.Base->isVirtual()) {
+ // For a virtual base class, we know that the derived class is exactly
+ // DestDecl, so we can use the vbase offset from its layout.
+ const ASTRecordLayout &L = Context.getASTRecordLayout(DestDecl);
+ PathOffset = L.getVBaseClassOffset(Base);
+ } else {
+ const ASTRecordLayout &L =
+ Context.getASTRecordLayout(PathElement.Class);
+ PathOffset += L.getBaseClassOffset(Base);
+ }
+ }
+
+ if (!Offset)
+ Offset = PathOffset;
+ else if (Offset != PathOffset) {
+ // Base appears in at least two different places. Find the most-derived
+ // object and see if it's a DestDecl. Note that the most-derived object
+ // must be at least as aligned as this base class subobject, and must
+ // have a vptr at offset 0.
+ ThisAddr = Address(emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy),
+ CGF.VoidPtrTy, ThisAddr.getAlignment());
+ SrcDecl = DestDecl;
+ Offset = CharUnits::Zero();
+ break;
+ }
+ }
+
+ if (!Offset) {
+ // If there are no public inheritance paths, the cast always fails.
+ CGF.EmitBranch(CastFail);
+ return llvm::PoisonValue::get(CGF.VoidPtrTy);
+ }
+
+ // Compare the vptr against the expected vptr for the destination type at
+ // this offset. Note that we do not know what type ThisAddr points to in
+ // the case where the derived class multiply inherits from the base class
+ // so we can't use GetVTablePtr, so we load the vptr directly instead.
+ llvm::Instruction *VPtr = CGF.Builder.CreateLoad(
+ ThisAddr.withElementType(CGF.VoidPtrPtrTy), "vtable");
+ CGM.DecorateInstructionWithTBAA(
+ VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy));
+ llvm::Value *Success = CGF.Builder.CreateICmpEQ(
+ VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl));
+ llvm::Value *Result = ThisAddr.getPointer();
+ if (!Offset->isZero())
+ Result = CGF.Builder.CreateInBoundsGEP(
+ CGF.CharTy, Result,
+ {llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset->getQuantity())});
+ CGF.Builder.CreateCondBr(Success, CastSuccess, CastFail);
+ return Result;
+}
+
+llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
Address ThisAddr,
- QualType SrcRecordTy,
- QualType DestTy) {
- llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+ QualType SrcRecordTy) {
auto *ClassDecl =
cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
llvm::Value *OffsetToTop;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Get the vtable pointer.
- llvm::Value *VTable =
- CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);
+ llvm::Value *VTable = CGF.GetVTablePtr(
+ ThisAddr, llvm::PointerType::getUnqual(CGF.getLLVMContext()),
+ ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -1509,8 +1611,9 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
CGF.ConvertType(CGF.getContext().getPointerDiffType());
// Get the vtable pointer.
- llvm::Value *VTable =
- CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);
+ llvm::Value *VTable = CGF.GetVTablePtr(
+ ThisAddr, llvm::PointerType::getUnqual(CGF.getLLVMContext()),
+ ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -1519,10 +1622,8 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
}
// Finally, add the offset to the pointer.
- llvm::Value *Value = ThisAddr.getPointer();
- Value = CGF.EmitCastToVoidPtr(Value);
- Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
- return CGF.Builder.CreateBitCast(Value, DestLTy);
+ return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.getPointer(),
+ OffsetToTop);
}
bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
@@ -1549,14 +1650,10 @@ ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
llvm::Value *VBaseOffset;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
- VBaseOffsetPtr =
- CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
VBaseOffset = CGF.Builder.CreateAlignedLoad(
CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
"vbase.offset");
} else {
- VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
- CGM.PtrDiffTy->getPointerTo());
VBaseOffset = CGF.Builder.CreateAlignedLoad(
CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
}
@@ -1587,12 +1684,14 @@ ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
// All parameters are already in place except VTT, which goes after 'this'.
// These are Clang types, so we don't need to worry about sret yet.
- // Check if we need to add a VTT parameter (which has type void **).
+ // Check if we need to add a VTT parameter (which has type global void **).
if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
: GD.getDtorType() == Dtor_Base) &&
cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
+ LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
+ QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
ArgTys.insert(ArgTys.begin() + 1,
- Context.getPointerType(Context.VoidPtrTy));
+ Context.getPointerType(CanQualType::CreateUnsafe(Q)));
return AddedStructorArgCounts::prefix(1);
}
return AddedStructorArgCounts{};
@@ -1625,7 +1724,9 @@ void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
ASTContext &Context = getContext();
// FIXME: avoid the fake decl
- QualType T = Context.getPointerType(Context.VoidPtrTy);
+ LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
+ QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
+ QualType T = Context.getPointerType(Q);
auto *VTTDecl = ImplicitParamDecl::Create(
Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
T, ImplicitParamDecl::CXXVTT);
@@ -1667,10 +1768,14 @@ CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
if (!NeedsVTTParameter(GlobalDecl(D, Type)))
return AddedStructorArgs{};
- // Insert the implicit 'vtt' argument as the second argument.
+ // Insert the implicit 'vtt' argument as the second argument. Make sure to
+ // correctly reflect its address space, which can differ from generic on
+ // some targets.
llvm::Value *VTT =
CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
- QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
+ LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
+ QualType Q = getContext().getAddrSpaceQualType(getContext().VoidPtrTy, AS);
+ QualType VTTTy = getContext().getPointerType(Q);
return AddedStructorArgs::prefix({{VTT, VTTTy}});
}
@@ -1842,11 +1947,11 @@ llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
/// Load the VTT.
llvm::Value *VTT = CGF.LoadCXXVTT();
if (VirtualPointerIndex)
- VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
- CGF.VoidPtrTy, VTT, VirtualPointerIndex);
+ VTT = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.GlobalsVoidPtrTy, VTT,
+ VirtualPointerIndex);
// And load the address point from the VTT.
- return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
+ return CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, VTT,
CGF.getPointerAlign());
}
@@ -1874,12 +1979,13 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
CGM.getItaniumVTableContext().getVTableLayout(RD);
llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
- // Use pointer alignment for the vtable. Otherwise we would align them based
- // on the size of the initializer which doesn't make sense as only single
- // values are read.
+ // Use pointer to global alignment for the vtable. Otherwise we would align
+ // them based on the size of the initializer which doesn't make sense as only
+ // single values are read.
+ LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
? 32
- : CGM.getTarget().getPointerAlign(LangAS::Default);
+ : CGM.getTarget().getPointerAlign(AS);
VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
Name, VTableType, llvm::GlobalValue::ExternalLinkage,
@@ -1914,16 +2020,15 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
Address This,
llvm::Type *Ty,
SourceLocation Loc) {
- llvm::Type *TyPtr = Ty->getPointerTo();
+ llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
- llvm::Value *VTable = CGF.GetVTablePtr(
- This, TyPtr->getPointerTo(), MethodDecl->getParent());
+ llvm::Value *VTable = CGF.GetVTablePtr(This, PtrTy, MethodDecl->getParent());
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
llvm::Value *VFunc;
if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
VFunc = CGF.EmitVTableTypeCheckedLoad(
- MethodDecl->getParent(), VTable, TyPtr,
+ MethodDecl->getParent(), VTable, PtrTy,
VTableIndex *
CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
8);
@@ -1932,19 +2037,14 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
llvm::Value *VFuncLoad;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
- VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
- llvm::Value *Load = CGF.Builder.CreateCall(
+ VFuncLoad = CGF.Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
{VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
- VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
} else {
- VTable =
- CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
- TyPtr, VTable, VTableIndex, "vfn");
- VFuncLoad =
- CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
- CGF.getPointerAlign());
+ PtrTy, VTable, VTableIndex, "vfn");
+ VFuncLoad = CGF.Builder.CreateAlignedLoad(PtrTy, VTableSlotPtr,
+ CGF.getPointerAlign());
}
// Add !invariant.load md to virtual function load to indicate that
@@ -2067,7 +2167,7 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
if (!NonVirtualAdjustment && !VirtualAdjustment)
return InitialPtr.getPointer();
- Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
+ Address V = InitialPtr.withElementType(CGF.Int8Ty);
// In a base-to-derived cast, the non-virtual adjustment is applied first.
if (NonVirtualAdjustment && !IsReturnAdjustment) {
@@ -2078,7 +2178,7 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
// Perform the virtual adjustment if we have one.
llvm::Value *ResultPtr;
if (VirtualAdjustment) {
- Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
+ Address VTablePtrPtr = V.withElementType(CGF.Int8PtrTy);
llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
llvm::Value *Offset;
@@ -2086,8 +2186,6 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
CGF.Int8Ty, VTablePtr, VirtualAdjustment);
if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
// Load the adjustment offset from the vtable as a 32-bit int.
- OffsetPtr =
- CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
Offset =
CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
CharUnits::fromQuantity(4));
@@ -2095,9 +2193,6 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
- OffsetPtr =
- CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
-
// Load the adjustment offset from the vtable.
Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
CGF.getPointerAlign());
@@ -2180,8 +2275,7 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
// Write the number of elements into the appropriate slot.
- Address NumElementsPtr =
- CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
+ Address NumElementsPtr = CookiePtr.withElementType(CGF.SizeTy);
llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
// Handle the array cookie specially in ASan.
@@ -2189,7 +2283,7 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
(expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
// The store to the CookiePtr does not need to be instrumented.
- CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
+ SI->setNoSanitizeMetadata();
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
llvm::FunctionCallee F =
@@ -2213,7 +2307,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
unsigned AS = allocPtr.getAddressSpace();
- numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
+ numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
return CGF.Builder.CreateLoad(numElementsPtr);
// In asan mode emit a function call instead of a regular load and let the
@@ -2221,8 +2315,8 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
// cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
// We can't simply ignore this load using nosanitize metadata because
// the metadata may be lost.
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(
+ CGF.SizeTy, llvm::PointerType::getUnqual(CGF.getLLVMContext()), false);
llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
@@ -2252,7 +2346,7 @@ Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
Address cookie = newPtr;
// The first element is the element size.
- cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
+ cookie = cookie.withElementType(CGF.SizeTy);
llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
getContext().getTypeSizeInChars(elementType).getQuantity());
CGF.Builder.CreateStore(elementSize, cookie);
@@ -2275,7 +2369,7 @@ llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
Address numElementsPtr
= CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
- numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
+ numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
return CGF.Builder.CreateLoad(numElementsPtr);
}
@@ -2372,7 +2466,8 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlign(guardTy));
}
}
- llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
+ llvm::PointerType *guardPtrTy = llvm::PointerType::get(
+ CGF.CGM.getLLVMContext(),
CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
// Create the guard variable if we don't already have it (as we
@@ -2443,7 +2538,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
if (!threadsafe || MaxInlineWidthInBits) {
// Load the first byte of the guard variable.
llvm::LoadInst *LI =
- Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
+ Builder.CreateLoad(guardAddr.withElementType(CGM.Int8Ty));
// Itanium ABI:
// An implementation supporting thread-safety on multiprocessor
@@ -2524,7 +2619,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// variable before the object initialization begins so that references
// to the variable during initialization don't restart initialization.
Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
- Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
+ guardAddr.withElementType(CGM.Int8Ty));
}
// Emit the initializer and add a global destructor if appropriate.
@@ -2542,7 +2637,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// after the object initialization completes so that initialization is
// retried if initialization is interrupted by an exception.
Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
- Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
+ guardAddr.withElementType(CGM.Int8Ty));
}
CGF.EmitBlock(EndBlock);
@@ -2563,15 +2658,13 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
}
// We're assuming that the destructor function is something we can
- // reasonably call with the default CC. Go ahead and cast it to the
- // right prototype.
- llvm::Type *dtorTy =
- llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
+ // reasonably call with the default CC.
+ llvm::Type *dtorTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
// Preserve address space of addr.
auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
- auto AddrInt8PtrTy =
- AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
+ auto AddrPtrTy = AddrAS ? llvm::PointerType::get(CGF.getLLVMContext(), AddrAS)
+ : CGF.Int8PtrTy;
// Create a variable that binds the atexit to this shared object.
llvm::Constant *handle =
@@ -2580,7 +2673,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
// extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
- llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
+ llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()};
llvm::FunctionType *atexitTy =
llvm::FunctionType::get(CGF.IntTy, paramTys, false);
@@ -2596,10 +2689,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
// function.
addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
- llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
- cast<llvm::Constant>(dtor.getCallee()), dtorTy),
- llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
- handle};
+ llvm::Value *args[] = {dtor.getCallee(), addr, handle};
CGF.EmitNounwindRuntimeCall(atexit, args);
}
@@ -2631,7 +2721,6 @@ void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
// Get the destructor function type, void(*)(void).
llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
- llvm::Type *dtorTy = dtorFuncTy->getPointerTo();
// Destructor functions are run/unregistered in non-ascending
// order of their priorities.
@@ -2641,10 +2730,8 @@ void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
llvm::Function *Dtor = *itv;
// We're assuming that the destructor function is something we can
- // reasonably call with the correct CC. Go ahead and cast it to the
- // right prototype.
- llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
- llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
+ // reasonably call with the correct CC.
+ llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(Dtor);
llvm::Value *NeedsDestruct =
CGF.Builder.CreateIsNull(V, "needs_destruct");
@@ -2659,7 +2746,7 @@ void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
CGF.EmitBlock(DestructCallBlock);
// Emit the call to casted Dtor.
- llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
+ llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, Dtor);
// Make sure the call and the callee agree on calling convention.
CI->setCallingConv(Dtor->getCallingConv());
@@ -2699,15 +2786,9 @@ void CodeGenModule::registerGlobalDtorsWithAtExit() {
if (getCodeGenOpts().CXAAtExit) {
emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
} else {
- // Get the destructor function type, void(*)(void).
- llvm::Type *dtorTy =
- llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();
-
// We're assuming that the destructor function is something we can
- // reasonably call with the correct CC. Go ahead and cast it to the
- // right prototype.
- CGF.registerGlobalDtorWithAtExit(
- llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
+ // reasonably call with the correct CC.
+ CGF.registerGlobalDtorWithAtExit(Dtor);
}
}
@@ -3203,10 +3284,9 @@ ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
// Note for the future: If we would ever like to do deferred emission of
// RTTI, check if emitting vtables opportunistically need any adjustment.
- GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
- /*isConstant=*/true,
- llvm::GlobalValue::ExternalLinkage, nullptr,
- Name);
+ GV = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.GlobalsInt8PtrTy,
+ /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name);
const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
CGM.setGVProperties(GV, RD);
// Import the typeinfo symbol when all non-inline virtual methods are
@@ -3219,7 +3299,7 @@ ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
}
}
- return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+ return GV;
}
/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
@@ -3290,6 +3370,8 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
case BuiltinType::ShortAccum:
case BuiltinType::Accum:
case BuiltinType::LongAccum:
@@ -3595,7 +3677,8 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
if (CGM.getItaniumVTableContext().isRelativeLayout())
VTable = CGM.getModule().getNamedAlias(VTableName);
if (!VTable)
- VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
+ VTable =
+ CGM.getModule().getOrInsertGlobal(VTableName, CGM.GlobalsInt8PtrTy);
CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
@@ -3607,15 +3690,13 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
// The vtable address point is 8 bytes after its start:
// 4 for the offset to top + 4 for the relative offset to rtti.
llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
- VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
VTable =
llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
} else {
llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
- VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
- Two);
+ VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.GlobalsInt8PtrTy,
+ VTable, Two);
}
- VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
Fields.push_back(VTable);
}
@@ -3643,7 +3724,6 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
return llvm::GlobalValue::InternalLinkage;
case VisibleNoLinkage:
- case ModuleInternalLinkage:
case ModuleLinkage:
case ExternalLinkage:
// RTTI is not enabled, which means that this type info struct is going
@@ -3688,7 +3768,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
assert(!OldGV->hasAvailableExternallyLinkage() &&
"available_externally typeinfos not yet implemented");
- return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
+ return OldGV;
}
// Check if there is already an external RTTI descriptor for this type.
@@ -3748,9 +3828,9 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
TypeNameField =
- llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
+ llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.GlobalsInt8PtrTy);
} else {
- TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
+ TypeNameField = TypeName;
}
Fields.push_back(TypeNameField);
@@ -3880,7 +3960,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
GV->setComdat(M.getOrInsertComdat(GV->getName()));
CharUnits Align = CGM.getContext().toCharUnitsFromBits(
- CGM.getTarget().getPointerAlign(LangAS::Default));
+ CGM.getTarget().getPointerAlign(CGM.GetGlobalVarAddressSpace(nullptr)));
GV->setAlignment(Align.getAsAlign());
// The Itanium ABI specifies that type_info objects must be globally
@@ -3912,7 +3992,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
- return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+ return GV;
}
/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
@@ -4558,10 +4638,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
// Otherwise, it returns a pointer into the exception object.
- llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
- llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
-
- LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
+ LValue srcLV = CGF.MakeNaturalAlignAddrLValue(AdjustedExn, CatchType);
LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
switch (TEK) {
case TEK_Complex:
@@ -4583,7 +4660,8 @@ static void InitCatchParam(CodeGenFunction &CGF,
auto catchRD = CatchType->getAsCXXRecordDecl();
CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
- llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+ llvm::Type *PtrTy =
+ llvm::PointerType::getUnqual(CGF.getLLVMContext()); // addrspace 0 ok
// Check for a copy expression. If we don't have a copy expression,
// that means a trivial copy is okay.
@@ -4688,6 +4766,7 @@ static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
if (fn->empty()) {
CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, fn, /*IsThunk=*/false);
+ CGM.SetLLVMFunctionAttributesForDefinition(nullptr, fn);
fn->setDoesNotThrow();
fn->setDoesNotReturn();
@@ -4770,14 +4849,12 @@ void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
llvm::FunctionCallee Dtor,
llvm::Constant *Addr) {
if (D.getTLSKind() != VarDecl::TLS_None) {
- // atexit routine expects "int(*)(int,...)"
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.IntTy, CGM.IntTy, true);
- llvm::PointerType *FpTy = FTy->getPointerTo();
+ llvm::PointerType *PtrTy =
+ llvm::PointerType::getUnqual(CGF.getLLVMContext());
// extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
llvm::FunctionType *AtExitTy =
- llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, FpTy}, true);
+ llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, PtrTy}, true);
// Fetch the actual function.
llvm::FunctionCallee AtExit =
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 52d442cc587f..a692abaf3b75 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -13,6 +13,7 @@
//
//===----------------------------------------------------------------------===//
+#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGVTables.h"
@@ -152,14 +153,25 @@ public:
bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) override;
- llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
+ bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
+ // TODO: Add support for exact dynamic_casts.
+ return false;
+ }
+ llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy, QualType DestTy,
+ QualType DestRecordTy,
+ llvm::BasicBlock *CastSuccess,
+ llvm::BasicBlock *CastFail) override {
+ llvm_unreachable("unsupported");
+ }
+
+ llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy,
llvm::BasicBlock *CastEnd) override;
- llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
- QualType SrcRecordTy,
- QualType DestTy) override;
+ llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy) override;
bool EmitBadCastCall(CodeGenFunction &CGF) override;
bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override {
@@ -936,7 +948,7 @@ void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF,
std::tuple<Address, llvm::Value *, const CXXRecordDecl *>
MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy) {
- Value = CGF.Builder.CreateElementBitCast(Value, CGF.Int8Ty);
+ Value = Value.withElementType(CGF.Int8Ty);
const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
const ASTContext &Context = getContext();
@@ -1010,11 +1022,9 @@ bool MicrosoftCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
!getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr();
}
-llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
- CodeGenFunction &CGF, Address This, QualType SrcRecordTy,
- QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
- llvm::Type *DestLTy = CGF.ConvertType(DestTy);
-
+llvm::Value *MicrosoftCXXABI::emitDynamicCastCall(
+ CodeGenFunction &CGF, Address This, QualType SrcRecordTy, QualType DestTy,
+ QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
llvm::Value *SrcRTTI =
CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
llvm::Value *DestRTTI =
@@ -1040,14 +1050,12 @@ llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
llvm::Value *Args[] = {
ThisPtr, Offset, SrcRTTI, DestRTTI,
llvm::ConstantInt::get(CGF.Int32Ty, DestTy->isReferenceType())};
- ThisPtr = CGF.EmitRuntimeCallOrInvoke(Function, Args);
- return CGF.Builder.CreateBitCast(ThisPtr, DestLTy);
+ return CGF.EmitRuntimeCallOrInvoke(Function, Args);
}
-llvm::Value *
-MicrosoftCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
- QualType SrcRecordTy,
- QualType DestTy) {
+llvm::Value *MicrosoftCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
+ Address Value,
+ QualType SrcRecordTy) {
std::tie(Value, std::ignore, std::ignore) =
performBaseAdjustment(CGF, Value, SrcRecordTy);
@@ -1099,7 +1107,19 @@ bool MicrosoftCXXABI::hasMostDerivedReturn(GlobalDecl GD) const {
return isDeletingDtor(GD);
}
-static bool isTrivialForMSVC(const CXXRecordDecl *RD) {
+static bool isTrivialForMSVC(const CXXRecordDecl *RD, QualType Ty,
+ CodeGenModule &CGM) {
+ // On AArch64, HVAs that can be passed in registers can also be returned
+ // in registers. (Note this is using the MSVC definition of an HVA; see
+ // isPermittedToBeHomogeneousAggregate().)
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+ if (CGM.getTarget().getTriple().isAArch64() &&
+ CGM.getTypes().getABIInfo().isHomogeneousAggregate(Ty, Base, NumElts) &&
+ isa<VectorType>(Base)) {
+ return true;
+ }
+
// We use the C++14 definition of an aggregate, so we also
// check for:
// No private or protected non static data members.
@@ -1128,7 +1148,8 @@ bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
if (!RD)
return false;
- bool isTrivialForABI = RD->canPassInRegisters() && isTrivialForMSVC(RD);
+ bool isTrivialForABI = RD->canPassInRegisters() &&
+ isTrivialForMSVC(RD, FI.getReturnType(), CGM);
// MSVC always returns structs indirectly from C++ instance methods.
bool isIndirectReturn = !isTrivialForABI || FI.isInstanceMethod();
@@ -1279,7 +1300,7 @@ void MicrosoftCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
const CXXRecordDecl *RD) {
Address This = getThisAddress(CGF);
- This = CGF.Builder.CreateElementBitCast(This, CGM.Int8Ty, "this.int8");
+ This = This.withElementType(CGM.Int8Ty);
const ASTContext &Context = getContext();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
@@ -1296,8 +1317,7 @@ void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
Address VBPtr = CGF.Builder.CreateConstInBoundsByteGEP(This, Offs);
llvm::Value *GVPtr =
CGF.Builder.CreateConstInBoundsGEP2_32(GV->getValueType(), GV, 0, 0);
- VBPtr = CGF.Builder.CreateElementBitCast(VBPtr, GVPtr->getType(),
- "vbptr." + VBT->ObjectWithVPtr->getName());
+ VBPtr = VBPtr.withElementType(GVPtr->getType());
CGF.Builder.CreateStore(GVPtr, VBPtr);
}
}
@@ -1439,7 +1459,7 @@ Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
if (Adjustment.isZero())
return This;
- This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
+ This = This.withElementType(CGF.Int8Ty);
assert(Adjustment.isPositive());
return CGF.Builder.CreateConstByteGEP(This, Adjustment);
}
@@ -1470,7 +1490,7 @@ Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
Address Result = This;
if (ML.VBase) {
- Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
+ Result = Result.withElementType(CGF.Int8Ty);
const CXXRecordDecl *Derived = MD->getParent();
const CXXRecordDecl *VBase = ML.VBase;
@@ -1484,7 +1504,7 @@ Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
}
if (!StaticOffset.isZero()) {
assert(StaticOffset.isPositive());
- Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
+ Result = Result.withElementType(CGF.Int8Ty);
if (ML.VBase) {
// Non-virtual adjustment might result in a pointer outside the allocated
// object, e.g. if the final overrider class is laid out after the virtual
@@ -1569,11 +1589,8 @@ void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
// 1) getThisValue is currently protected
// 2) in theory, an ABI could implement 'this' returns some other way;
// HasThisReturn only specifies a contract, not the implementation
- if (HasThisReturn(CGF.CurGD))
+ if (HasThisReturn(CGF.CurGD) || hasMostDerivedReturn(CGF.CurGD))
CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
- else if (hasMostDerivedReturn(CGF.CurGD))
- CGF.Builder.CreateStore(CGF.EmitCastToVoidPtr(getThisValue(CGF)),
- CGF.ReturnValue);
if (isa<CXXConstructorDecl>(MD) && MD->getParent()->getNumVBases()) {
assert(getStructorImplicitParamDecl(CGF) &&
@@ -2204,7 +2221,7 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
if (TA.isEmpty())
return This.getPointer();
- This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
+ This = This.withElementType(CGF.Int8Ty);
llvm::Value *V;
if (TA.Virtual.isEmpty()) {
@@ -2215,7 +2232,7 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
Address VtorDispPtr =
CGF.Builder.CreateConstInBoundsByteGEP(This,
CharUnits::fromQuantity(TA.Virtual.Microsoft.VtordispOffset));
- VtorDispPtr = CGF.Builder.CreateElementBitCast(VtorDispPtr, CGF.Int32Ty);
+ VtorDispPtr = VtorDispPtr.withElementType(CGF.Int32Ty);
llvm::Value *VtorDisp = CGF.Builder.CreateLoad(VtorDispPtr, "vtordisp");
V = CGF.Builder.CreateGEP(This.getElementType(), This.getPointer(),
CGF.Builder.CreateNeg(VtorDisp));
@@ -2257,7 +2274,7 @@ MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
return Ret.getPointer();
auto OrigTy = Ret.getType();
- Ret = CGF.Builder.CreateElementBitCast(Ret, CGF.Int8Ty);
+ Ret = Ret.withElementType(CGF.Int8Ty);
llvm::Value *V = Ret.getPointer();
if (RA.Virtual.Microsoft.VBIndex) {
@@ -2301,8 +2318,7 @@ CharUnits MicrosoftCXXABI::getArrayCookieSizeImpl(QualType type) {
llvm::Value *MicrosoftCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
Address allocPtr,
CharUnits cookieSize) {
- Address numElementsPtr =
- CGF.Builder.CreateElementBitCast(allocPtr, CGF.SizeTy);
+ Address numElementsPtr = allocPtr.withElementType(CGF.SizeTy);
return CGF.Builder.CreateLoad(numElementsPtr);
}
@@ -2320,8 +2336,7 @@ Address MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
Address cookiePtr = newPtr;
// Write the number of elements into the appropriate slot.
- Address numElementsPtr
- = CGF.Builder.CreateElementBitCast(cookiePtr, CGF.SizeTy);
+ Address numElementsPtr = cookiePtr.withElementType(CGF.SizeTy);
CGF.Builder.CreateStore(numElements, numElementsPtr);
// Finally, compute a pointer to the actual data buffer by skipping
@@ -3121,12 +3136,10 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
llvm::Value **VBPtrOut) {
CGBuilderTy &Builder = CGF.Builder;
// Load the vbtable pointer from the vbptr in the instance.
- This = Builder.CreateElementBitCast(This, CGM.Int8Ty);
- llvm::Value *VBPtr = Builder.CreateInBoundsGEP(
- This.getElementType(), This.getPointer(), VBPtrOffset, "vbptr");
- if (VBPtrOut) *VBPtrOut = VBPtr;
- VBPtr = Builder.CreateBitCast(VBPtr,
- CGM.Int32Ty->getPointerTo(0)->getPointerTo(This.getAddressSpace()));
+ llvm::Value *VBPtr = Builder.CreateInBoundsGEP(CGM.Int8Ty, This.getPointer(),
+ VBPtrOffset, "vbptr");
+ if (VBPtrOut)
+ *VBPtrOut = VBPtr;
CharUnits VBPtrAlign;
if (auto CI = dyn_cast<llvm::ConstantInt>(VBPtrOffset)) {
@@ -3147,7 +3160,6 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
// Load an i32 offset from the vb-table.
llvm::Value *VBaseOffs =
Builder.CreateInBoundsGEP(CGM.Int32Ty, VBTable, VBTableIndex);
- VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
return Builder.CreateAlignedLoad(CGM.Int32Ty, VBaseOffs,
CharUnits::fromQuantity(4), "vbase_offs");
}
@@ -3158,7 +3170,7 @@ llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
CodeGenFunction &CGF, const Expr *E, const CXXRecordDecl *RD,
Address Base, llvm::Value *VBTableOffset, llvm::Value *VBPtrOffset) {
CGBuilderTy &Builder = CGF.Builder;
- Base = Builder.CreateElementBitCast(Base, CGM.Int8Ty);
+ Base = Base.withElementType(CGM.Int8Ty);
llvm::BasicBlock *OriginalBB = nullptr;
llvm::BasicBlock *SkipAdjustBB = nullptr;
llvm::BasicBlock *VBaseAdjustBB = nullptr;
@@ -3668,7 +3680,6 @@ static llvm::GlobalValue::LinkageTypes getLinkageForRTTI(QualType Ty) {
return llvm::GlobalValue::InternalLinkage;
case VisibleNoLinkage:
- case ModuleInternalLinkage:
case ModuleLinkage:
case ExternalLinkage:
return llvm::GlobalValue::LinkOnceODRLinkage;
@@ -3758,7 +3769,7 @@ llvm::GlobalVariable *MSRTTIBuilder::getClassHierarchyDescriptor() {
Classes.front().initialize(/*Parent=*/nullptr, /*Specifier=*/nullptr);
detectAmbiguousBases(Classes);
int Flags = 0;
- for (auto Class : Classes) {
+ for (const MSRTTIClass &Class : Classes) {
if (Class.RD->getNumBases() > 1)
Flags |= HasBranchingHierarchy;
// Note: cl.exe does not calculate "HasAmbiguousBases" correctly. We
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
index e3e953c34c59..3594f4c66e67 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -36,7 +36,7 @@ namespace {
IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS; // Only used for debug info.
const HeaderSearchOptions &HeaderSearchOpts; // Only used for debug info.
const PreprocessorOptions &PreprocessorOpts; // Only used for debug info.
- const CodeGenOptions CodeGenOpts; // Intentionally copied in.
+ const CodeGenOptions &CodeGenOpts;
unsigned HandlingTopLevelDecls;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index 677b66d3e1dc..114a9c1e2eac 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -158,7 +158,7 @@ public:
// When building a module MainFileName is the name of the modulemap file.
CodeGenOpts.MainFileName =
LangOpts.CurrentModule.empty() ? MainFileName : LangOpts.CurrentModule;
- CodeGenOpts.setDebugInfo(codegenoptions::FullDebugInfo);
+ CodeGenOpts.setDebugInfo(llvm::codegenoptions::FullDebugInfo);
CodeGenOpts.setDebuggerTuning(CI.getCodeGenOpts().getDebuggerTuning());
CodeGenOpts.DebugPrefixMap =
CI.getInvocation().getCodeGenOpts().DebugPrefixMap;
@@ -320,7 +320,7 @@ public:
clang::EmitBackendOutput(
Diags, HeaderSearchOpts, CodeGenOpts, TargetOpts, LangOpts,
Ctx.getTargetInfo().getDataLayoutString(), M.get(),
- BackendAction::Backend_EmitLL,
+ BackendAction::Backend_EmitLL, FS,
std::make_unique<llvm::raw_svector_ostream>(Buffer));
llvm::dbgs() << Buffer;
});
@@ -329,7 +329,7 @@ public:
clang::EmitBackendOutput(Diags, HeaderSearchOpts, CodeGenOpts, TargetOpts,
LangOpts,
Ctx.getTargetInfo().getDataLayoutString(), M.get(),
- BackendAction::Backend_EmitObj, std::move(OS));
+ BackendAction::Backend_EmitObj, FS, std::move(OS));
// Free the memory for the temporary buffer.
llvm::SmallVector<char, 0> Empty;
@@ -349,6 +349,11 @@ ObjectFilePCHContainerWriter::CreatePCHContainerGenerator(
CI, MainFileName, OutputFileName, std::move(OS), Buffer);
}
+ArrayRef<StringRef> ObjectFilePCHContainerReader::getFormats() const {
+ static StringRef Formats[] = {"obj", "raw"};
+ return Formats;
+}
+
StringRef
ObjectFilePCHContainerReader::ExtractPCH(llvm::MemoryBufferRef Buffer) const {
StringRef PCH;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp
index 554f1ea2a47d..53161c316c58 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp
@@ -101,8 +101,3 @@ void SanitizerMetadata::reportGlobal(llvm::GlobalVariable *GV, const VarDecl &D,
void SanitizerMetadata::disableSanitizerForGlobal(llvm::GlobalVariable *GV) {
reportGlobal(GV, SourceLocation(), "", QualType(), SanitizerKind::All);
}
-
-void SanitizerMetadata::disableSanitizerForInstruction(llvm::Instruction *I) {
- I->setMetadata(llvm::LLVMContext::MD_nosanitize,
- llvm::MDNode::get(CGM.getLLVMContext(), std::nullopt));
-}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
index f5dd0e503cc0..000f02cf8dcf 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
@@ -44,7 +44,6 @@ public:
SanitizerMask NoSanitizeAttrMask = {},
bool IsDynInit = false);
void disableSanitizerForGlobal(llvm::GlobalVariable *GV);
- void disableSanitizerForInstruction(llvm::Instruction *I);
};
} // end namespace CodeGen
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp b/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp
index 63d975193c02..055dd3704386 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp
@@ -728,7 +728,7 @@ void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
// The largest size that we're still considering making subvectors of.
// Always a power of 2.
- unsigned logCandidateNumElts = llvm::findLastSet(numElts, llvm::ZB_Undefined);
+ unsigned logCandidateNumElts = llvm::Log2_32(numElts);
unsigned candidateNumElts = 1U << logCandidateNumElts;
assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
index 9e2d7a85d100..3d79f92137ab 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
@@ -13,243 +13,18 @@
#include "TargetInfo.h"
#include "ABIInfo.h"
-#include "CGBlocks.h"
-#include "CGCXXABI.h"
-#include "CGValue.h"
+#include "ABIInfoImpl.h"
#include "CodeGenFunction.h"
-#include "clang/AST/Attr.h"
-#include "clang/AST/RecordLayout.h"
-#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
-#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/IntrinsicsNVPTX.h"
-#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
using namespace clang;
using namespace CodeGen;
-// Helper for coercing an aggregate argument or return value into an integer
-// array of the same size (including padding) and alignment. This alternate
-// coercion happens only for the RenderScript ABI and can be removed after
-// runtimes that rely on it are no longer supported.
-//
-// RenderScript assumes that the size of the argument / return value in the IR
-// is the same as the size of the corresponding qualified type. This helper
-// coerces the aggregate type into an array of the same size (including
-// padding). This coercion is used in lieu of expansion of struct members or
-// other canonical coercions that return a coerced-type of larger size.
-//
-// Ty - The argument / return value type
-// Context - The associated ASTContext
-// LLVMContext - The associated LLVMContext
-static ABIArgInfo coerceToIntArray(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &LLVMContext) {
- // Alignment and Size are measured in bits.
- const uint64_t Size = Context.getTypeSize(Ty);
- const uint64_t Alignment = Context.getTypeAlign(Ty);
- llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
- const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
- return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
-}
-
-static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
- llvm::Value *Array,
- llvm::Value *Value,
- unsigned FirstIndex,
- unsigned LastIndex) {
- // Alternatively, we could emit this as a loop in the source.
- for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
- llvm::Value *Cell =
- Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
- Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
- }
-}
-
-static bool isAggregateTypeForABI(QualType T) {
- return !CodeGenFunction::hasScalarEvaluationKind(T) ||
- T->isMemberFunctionPointerType();
-}
-
-ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
- bool Realign,
- llvm::Type *Padding) const {
- return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
- Realign, Padding);
-}
-
-ABIArgInfo
-ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
- return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
- /*ByVal*/ false, Realign);
-}
-
-Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- return Address::invalid();
-}
-
-static llvm::Type *getVAListElementType(CodeGenFunction &CGF) {
- return CGF.ConvertTypeForMem(
- CGF.getContext().getBuiltinVaListType()->getPointeeType());
-}
-
-bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
- if (getContext().isPromotableIntegerType(Ty))
- return true;
-
- if (const auto *EIT = Ty->getAs<BitIntType>())
- if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
- return true;
-
- return false;
-}
-
-ABIInfo::~ABIInfo() = default;
-
-SwiftABIInfo::~SwiftABIInfo() = default;
-
-/// Does the given lowering require more than the given number of
-/// registers when expanded?
-///
-/// This is intended to be the basis of a reasonable basic implementation
-/// of should{Pass,Return}IndirectlyForSwift.
-///
-/// For most targets, a limit of four total registers is reasonable; this
-/// limits the amount of code required in order to move around the value
-/// in case it wasn't produced immediately prior to the call by the caller
-/// (or wasn't produced in exactly the right registers) or isn't used
-/// immediately within the callee. But some targets may need to further
-/// limit the register count due to an inability to support that many
-/// return registers.
-static bool occupiesMoreThan(CodeGenTypes &cgt,
- ArrayRef<llvm::Type*> scalarTypes,
- unsigned maxAllRegisters) {
- unsigned intCount = 0, fpCount = 0;
- for (llvm::Type *type : scalarTypes) {
- if (type->isPointerTy()) {
- intCount++;
- } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
- auto ptrWidth = cgt.getTarget().getPointerWidth(LangAS::Default);
- intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
- } else {
- assert(type->isVectorTy() || type->isFloatingPointTy());
- fpCount++;
- }
- }
-
- return (intCount + fpCount > maxAllRegisters);
-}
-
-bool SwiftABIInfo::shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
- bool AsReturnValue) const {
- return occupiesMoreThan(CGT, ComponentTys, /*total=*/4);
-}
-
-bool SwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
- unsigned NumElts) const {
- // The default implementation of this assumes that the target guarantees
- // 128-bit SIMD support but nothing more.
- return (VectorSize.getQuantity() > 8 && VectorSize.getQuantity() <= 16);
-}
-
-static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
- CGCXXABI &CXXABI) {
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD) {
- if (!RT->getDecl()->canPassInRegisters())
- return CGCXXABI::RAA_Indirect;
- return CGCXXABI::RAA_Default;
- }
- return CXXABI.getRecordArgABI(RD);
-}
-
-static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
- CGCXXABI &CXXABI) {
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
- return CGCXXABI::RAA_Default;
- return getRecordArgABI(RT, CXXABI);
-}
-
-static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
- const ABIInfo &Info) {
- QualType Ty = FI.getReturnType();
-
- if (const auto *RT = Ty->getAs<RecordType>())
- if (!isa<CXXRecordDecl>(RT->getDecl()) &&
- !RT->getDecl()->canPassInRegisters()) {
- FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
- return true;
- }
-
- return CXXABI.classifyReturnType(FI);
-}
-
-/// Pass transparent unions as if they were the type of the first element. Sema
-/// should ensure that all elements of the union have the same "machine type".
-static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
- if (const RecordType *UT = Ty->getAsUnionType()) {
- const RecordDecl *UD = UT->getDecl();
- if (UD->hasAttr<TransparentUnionAttr>()) {
- assert(!UD->field_empty() && "sema created an empty transparent union");
- return UD->field_begin()->getType();
- }
- }
- return Ty;
-}
-
-CGCXXABI &ABIInfo::getCXXABI() const {
- return CGT.getCXXABI();
-}
-
-ASTContext &ABIInfo::getContext() const {
- return CGT.getContext();
-}
-
-llvm::LLVMContext &ABIInfo::getVMContext() const {
- return CGT.getLLVMContext();
-}
-
-const llvm::DataLayout &ABIInfo::getDataLayout() const {
- return CGT.getDataLayout();
-}
-
-const TargetInfo &ABIInfo::getTarget() const {
- return CGT.getTarget();
-}
-
-const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
- return CGT.getCodeGenOpts();
-}
-
-bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }
-
-bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
- return false;
-}
-
-bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
- uint64_t Members) const {
- return false;
-}
-
-bool ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
- // For compatibility with GCC, ignore empty bitfields in C++ mode.
- return getContext().getLangOpts().CPlusPlus;
-}
-
LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
raw_ostream &OS = llvm::errs();
OS << "(ABIArgInfo Kind=";
@@ -291,171 +66,6 @@ LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
OS << ")\n";
}
-// Dynamically round a pointer up to a multiple of the given alignment.
-static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
- llvm::Value *Ptr,
- CharUnits Align) {
- llvm::Value *PtrAsInt = Ptr;
- // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
- PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
- PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
- llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
- PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
- llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
- PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
- Ptr->getType(),
- Ptr->getName() + ".aligned");
- return PtrAsInt;
-}
-
-/// Emit va_arg for a platform using the common void* representation,
-/// where arguments are simply emitted in an array of slots on the stack.
-///
-/// This version implements the core direct-value passing rules.
-///
-/// \param SlotSize - The size and alignment of a stack slot.
-/// Each argument will be allocated to a multiple of this number of
-/// slots, and all the slots will be aligned to this value.
-/// \param AllowHigherAlign - The slot alignment is not a cap;
-/// an argument type with an alignment greater than the slot size
-/// will be emitted on a higher-alignment address, potentially
-/// leaving one or more empty slots behind as padding. If this
-/// is false, the returned address might be less-aligned than
-/// DirectAlign.
-/// \param ForceRightAdjust - Default is false. On big-endian platform and
-/// if the argument is smaller than a slot, set this flag will force
-/// right-adjust the argument in its slot irrespective of the type.
-static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
- Address VAListAddr,
- llvm::Type *DirectTy,
- CharUnits DirectSize,
- CharUnits DirectAlign,
- CharUnits SlotSize,
- bool AllowHigherAlign,
- bool ForceRightAdjust = false) {
- // Cast the element type to i8* if necessary. Some platforms define
- // va_list as a struct containing an i8* instead of just an i8*.
- if (VAListAddr.getElementType() != CGF.Int8PtrTy)
- VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
-
- llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");
-
- // If the CC aligns values higher than the slot size, do so if needed.
- Address Addr = Address::invalid();
- if (AllowHigherAlign && DirectAlign > SlotSize) {
- Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
- CGF.Int8Ty, DirectAlign);
- } else {
- Addr = Address(Ptr, CGF.Int8Ty, SlotSize);
- }
-
- // Advance the pointer past the argument, then store that back.
- CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
- Address NextPtr =
- CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
- CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
-
- // If the argument is smaller than a slot, and this is a big-endian
- // target, the argument will be right-adjusted in its slot.
- if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
- (!DirectTy->isStructTy() || ForceRightAdjust)) {
- Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
- }
-
- Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
- return Addr;
-}
-
-/// Emit va_arg for a platform using the common void* representation,
-/// where arguments are simply emitted in an array of slots on the stack.
-///
-/// \param IsIndirect - Values of this type are passed indirectly.
-/// \param ValueInfo - The size and alignment of this type, generally
-/// computed with getContext().getTypeInfoInChars(ValueTy).
-/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
-/// Each argument will be allocated to a multiple of this number of
-/// slots, and all the slots will be aligned to this value.
-/// \param AllowHigherAlign - The slot alignment is not a cap;
-/// an argument type with an alignment greater than the slot size
-/// will be emitted on a higher-alignment address, potentially
-/// leaving one or more empty slots behind as padding.
-/// \param ForceRightAdjust - Default is false. On big-endian platform and
-/// if the argument is smaller than a slot, set this flag will force
-/// right-adjust the argument in its slot irrespective of the type.
-static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType ValueTy, bool IsIndirect,
- TypeInfoChars ValueInfo,
- CharUnits SlotSizeAndAlign,
- bool AllowHigherAlign,
- bool ForceRightAdjust = false) {
- // The size and alignment of the value that was passed directly.
- CharUnits DirectSize, DirectAlign;
- if (IsIndirect) {
- DirectSize = CGF.getPointerSize();
- DirectAlign = CGF.getPointerAlign();
- } else {
- DirectSize = ValueInfo.Width;
- DirectAlign = ValueInfo.Align;
- }
-
- // Cast the address we've calculated to the right type.
- llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy), *ElementTy = DirectTy;
- if (IsIndirect)
- DirectTy = DirectTy->getPointerTo(0);
-
- Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, DirectSize,
- DirectAlign, SlotSizeAndAlign,
- AllowHigherAlign, ForceRightAdjust);
-
- if (IsIndirect) {
- Addr = Address(CGF.Builder.CreateLoad(Addr), ElementTy, ValueInfo.Align);
- }
-
- return Addr;
-}
-
-static Address complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty, CharUnits SlotSize,
- CharUnits EltSize, const ComplexType *CTy) {
- Address Addr =
- emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, SlotSize * 2,
- SlotSize, SlotSize, /*AllowHigher*/ true);
-
- Address RealAddr = Addr;
- Address ImagAddr = RealAddr;
- if (CGF.CGM.getDataLayout().isBigEndian()) {
- RealAddr =
- CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize - EltSize);
- ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
- 2 * SlotSize - EltSize);
- } else {
- ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
- }
-
- llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
- RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
- ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
- llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
- llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
-
- Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
- CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
- /*init*/ true);
- return Temp;
-}
-
-static Address emitMergePHI(CodeGenFunction &CGF,
- Address Addr1, llvm::BasicBlock *Block1,
- Address Addr2, llvm::BasicBlock *Block2,
- const llvm::Twine &Name = "") {
- assert(Addr1.getType() == Addr2.getType());
- llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
- PHI->addIncoming(Addr1.getPointer(), Block1);
- PHI->addIncoming(Addr2.getPointer(), Block2);
- CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
- return Address(PHI, Addr1.getElementType(), Align);
-}
-
TargetCodeGenInfo::TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info)
: Info(std::move(Info)) {}
@@ -467,7 +77,7 @@ unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
// Verified for:
// x86-64 FreeBSD, Linux, Darwin
// x86-32 FreeBSD, Linux, Darwin
- // PowerPC Linux, Darwin
+ // PowerPC Linux
// ARM Darwin (*not* EABI)
// AArch64 Linux
return 32;
@@ -548,2152 +158,9 @@ TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
}
-static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
-
-/// isEmptyField - Return true iff a the field is "empty", that is it
-/// is an unnamed bit-field or an (array of) empty record(s).
-static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
- bool AllowArrays) {
- if (FD->isUnnamedBitfield())
- return true;
-
- QualType FT = FD->getType();
-
- // Constant arrays of empty records count as empty, strip them off.
- // Constant arrays of zero length always count as empty.
- bool WasArray = false;
- if (AllowArrays)
- while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
- if (AT->getSize() == 0)
- return true;
- FT = AT->getElementType();
- // The [[no_unique_address]] special case below does not apply to
- // arrays of C++ empty records, so we need to remember this fact.
- WasArray = true;
- }
-
- const RecordType *RT = FT->getAs<RecordType>();
- if (!RT)
- return false;
-
- // C++ record fields are never empty, at least in the Itanium ABI.
- //
- // FIXME: We should use a predicate for whether this behavior is true in the
- // current ABI.
- //
- // The exception to the above rule are fields marked with the
- // [[no_unique_address]] attribute (since C++20). Those do count as empty
- // according to the Itanium ABI. The exception applies only to records,
- // not arrays of records, so we must also check whether we stripped off an
- // array type above.
- if (isa<CXXRecordDecl>(RT->getDecl()) &&
- (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
- return false;
-
- return isEmptyRecord(Context, FT, AllowArrays);
-}
-
-/// isEmptyRecord - Return true iff a structure contains only empty
-/// fields. Note that a structure with a flexible array member is not
-/// considered empty.
-static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
- return false;
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return false;
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- for (const auto &I : CXXRD->bases())
- if (!isEmptyRecord(Context, I.getType(), true))
- return false;
-
- for (const auto *I : RD->fields())
- if (!isEmptyField(Context, I, AllowArrays))
- return false;
- return true;
-}
-
-/// isSingleElementStruct - Determine if a structure is a "single
-/// element struct", i.e. it has exactly one non-empty field or
-/// exactly one field which is itself a single element
-/// struct. Structures with flexible array members are never
-/// considered single element structs.
-///
-/// \return The field declaration for the single non-empty field, if
-/// it exists.
-static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
- return nullptr;
-
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return nullptr;
-
- const Type *Found = nullptr;
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- for (const auto &I : CXXRD->bases()) {
- // Ignore empty records.
- if (isEmptyRecord(Context, I.getType(), true))
- continue;
-
- // If we already found an element then this isn't a single-element struct.
- if (Found)
- return nullptr;
-
- // If this is non-empty and not a single element struct, the composite
- // cannot be a single element struct.
- Found = isSingleElementStruct(I.getType(), Context);
- if (!Found)
- return nullptr;
- }
- }
-
- // Check for single element.
- for (const auto *FD : RD->fields()) {
- QualType FT = FD->getType();
-
- // Ignore empty fields.
- if (isEmptyField(Context, FD, true))
- continue;
-
- // If we already found an element then this isn't a single-element
- // struct.
- if (Found)
- return nullptr;
-
- // Treat single element arrays as the element.
- while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
- if (AT->getSize().getZExtValue() != 1)
- break;
- FT = AT->getElementType();
- }
-
- if (!isAggregateTypeForABI(FT)) {
- Found = FT.getTypePtr();
- } else {
- Found = isSingleElementStruct(FT, Context);
- if (!Found)
- return nullptr;
- }
- }
-
- // We don't consider a struct a single-element struct if it has
- // padding beyond the element type.
- if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
- return nullptr;
-
- return Found;
-}
-
-namespace {
-Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
- const ABIArgInfo &AI) {
- // This default implementation defers to the llvm backend's va_arg
- // instruction. It can handle only passing arguments directly
- // (typically only handled in the backend for primitive types), or
- // aggregates passed indirectly by pointer (NOTE: if the "byval"
- // flag has ABI impact in the callee, this implementation cannot
- // work.)
-
- // Only a few cases are covered here at the moment -- those needed
- // by the default abi.
- llvm::Value *Val;
-
- if (AI.isIndirect()) {
- assert(!AI.getPaddingType() &&
- "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
- assert(
- !AI.getIndirectRealign() &&
- "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
-
- auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
- CharUnits TyAlignForABI = TyInfo.Align;
-
- llvm::Type *ElementTy = CGF.ConvertTypeForMem(Ty);
- llvm::Type *BaseTy = llvm::PointerType::getUnqual(ElementTy);
- llvm::Value *Addr =
- CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
- return Address(Addr, ElementTy, TyAlignForABI);
- } else {
- assert((AI.isDirect() || AI.isExtend()) &&
- "Unexpected ArgInfo Kind in generic VAArg emitter!");
-
- assert(!AI.getInReg() &&
- "Unexpected InReg seen in arginfo in generic VAArg emitter!");
- assert(!AI.getPaddingType() &&
- "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
- assert(!AI.getDirectOffset() &&
- "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
- assert(!AI.getCoerceToType() &&
- "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
-
- Address Temp = CGF.CreateMemTemp(Ty, "varet");
- Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(),
- CGF.ConvertTypeForMem(Ty));
- CGF.Builder.CreateStore(Val, Temp);
- return Temp;
- }
-}
-
-/// DefaultABIInfo - The default implementation for ABI specific
-/// details. This implementation provides information which results in
-/// self-consistent and sensible LLVM IR generation, but does not
-/// conform to any particular ABI.
-class DefaultABIInfo : public ABIInfo {
-public:
- DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
-
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
- }
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override {
- return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
- }
-};
-
-class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
-};
-
-ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- if (isAggregateTypeForABI(Ty)) {
- // Records with non-trivial destructors/copy-constructors should not be
- // passed by value.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- return getNaturalAlignIndirect(Ty);
- }
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- ASTContext &Context = getContext();
- if (const auto *EIT = Ty->getAs<BitIntType>())
- if (EIT->getNumBits() >
- Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
- ? Context.Int128Ty
- : Context.LongLongTy))
- return getNaturalAlignIndirect(Ty);
-
- return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (isAggregateTypeForABI(RetTy))
- return getNaturalAlignIndirect(RetTy);
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- if (const auto *EIT = RetTy->getAs<BitIntType>())
- if (EIT->getNumBits() >
- getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
- ? getContext().Int128Ty
- : getContext().LongLongTy))
- return getNaturalAlignIndirect(RetTy);
-
- return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-//===----------------------------------------------------------------------===//
-// WebAssembly ABI Implementation
-//
-// This is a very simple ABI that relies a lot on DefaultABIInfo.
-//===----------------------------------------------------------------------===//
-
-class WebAssemblyABIInfo final : public ABIInfo {
-public:
- enum ABIKind {
- MVP = 0,
- ExperimentalMV = 1,
- };
-
-private:
- DefaultABIInfo defaultInfo;
- ABIKind Kind;
-
-public:
- explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
- : ABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}
-
-private:
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType Ty) const;
-
- // DefaultABIInfo's classifyReturnType and classifyArgumentType are
- // non-virtual, but computeInfo and EmitVAArg are virtual, so we
- // overload them.
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &Arg : FI.arguments())
- Arg.info = classifyArgumentType(Arg.type);
- }
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-};
-
-class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
-public:
- explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
- WebAssemblyABIInfo::ABIKind K)
- : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {
- SwiftInfo =
- std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
- }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
- if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- llvm::AttrBuilder B(GV->getContext());
- B.addAttribute("wasm-import-module", Attr->getImportModule());
- Fn->addFnAttrs(B);
- }
- if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- llvm::AttrBuilder B(GV->getContext());
- B.addAttribute("wasm-import-name", Attr->getImportName());
- Fn->addFnAttrs(B);
- }
- if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- llvm::AttrBuilder B(GV->getContext());
- B.addAttribute("wasm-export-name", Attr->getExportName());
- Fn->addFnAttrs(B);
- }
- }
-
- if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
- Fn->addFnAttr("no-prototype");
- }
- }
-};
-
-/// Classify argument of given type \p Ty.
-ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- if (isAggregateTypeForABI(Ty)) {
- // Records with non-trivial destructors/copy-constructors should not be
- // passed by value.
- if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
- // Lower single-element structs to just pass a regular value. TODO: We
- // could do reasonable-size multiple-element structs too, using getExpand(),
- // though watch out for things like bitfields.
- if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
- return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
- // For the experimental multivalue ABI, fully expand all other aggregates
- if (Kind == ABIKind::ExperimentalMV) {
- const RecordType *RT = Ty->getAs<RecordType>();
- assert(RT);
- bool HasBitField = false;
- for (auto *Field : RT->getDecl()->fields()) {
- if (Field->isBitField()) {
- HasBitField = true;
- break;
- }
- }
- if (!HasBitField)
- return ABIArgInfo::getExpand();
- }
- }
-
- // Otherwise just do the default thing.
- return defaultInfo.classifyArgumentType(Ty);
-}
-
-ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
- if (isAggregateTypeForABI(RetTy)) {
- // Records with non-trivial destructors/copy-constructors should not be
- // returned by value.
- if (!getRecordArgABI(RetTy, getCXXABI())) {
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), RetTy, true))
- return ABIArgInfo::getIgnore();
- // Lower single-element structs to just return a regular value. TODO: We
- // could do reasonable-size multiple-element structs too, using
- // ABIArgInfo::getDirect().
- if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
- return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
- // For the experimental multivalue ABI, return all other aggregates
- if (Kind == ABIKind::ExperimentalMV)
- return ABIArgInfo::getDirect();
- }
- }
-
- // Otherwise just do the default thing.
- return defaultInfo.classifyReturnType(RetTy);
-}
-
-Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- bool IsIndirect = isAggregateTypeForABI(Ty) &&
- !isEmptyRecord(getContext(), Ty, true) &&
- !isSingleElementStruct(Ty, getContext());
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
- getContext().getTypeInfoInChars(Ty),
- CharUnits::fromQuantity(4),
- /*AllowHigherAlign=*/true);
-}
-
-//===----------------------------------------------------------------------===//
-// le32/PNaCl bitcode ABI Implementation
-//
-// This is a simplified version of the x86_32 ABI. Arguments and return values
-// are always passed on the stack.
-//===----------------------------------------------------------------------===//
-
-class PNaClABIInfo : public ABIInfo {
- public:
- PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
-
- void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF,
- Address VAListAddr, QualType Ty) const override;
-};
-
-class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
- public:
- PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
-};
-
-void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
-
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
-}
-
-Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- // The PNaCL ABI is a bit odd, in that varargs don't use normal
- // function classification. Structs get passed directly for varargs
- // functions, through a rewriting transform in
- // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
- // this target to actually support a va_arg instructions with an
- // aggregate type, unlike other targets.
- return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
-}
-
-/// Classify argument of given type \p Ty.
-ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
- if (isAggregateTypeForABI(Ty)) {
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- return getNaturalAlignIndirect(Ty);
- } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
- // Treat an enum type as its underlying type.
- Ty = EnumTy->getDecl()->getIntegerType();
- } else if (Ty->isFloatingType()) {
- // Floating-point types don't go inreg.
- return ABIArgInfo::getDirect();
- } else if (const auto *EIT = Ty->getAs<BitIntType>()) {
- // Treat bit-precise integers as integers if <= 64, otherwise pass
- // indirectly.
- if (EIT->getNumBits() > 64)
- return getNaturalAlignIndirect(Ty);
- return ABIArgInfo::getDirect();
- }
-
- return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- // In the PNaCl ABI we always return records/structures on the stack.
- if (isAggregateTypeForABI(RetTy))
- return getNaturalAlignIndirect(RetTy);
-
- // Treat bit-precise integers as integers if <= 64, otherwise pass indirectly.
- if (const auto *EIT = RetTy->getAs<BitIntType>()) {
- if (EIT->getNumBits() > 64)
- return getNaturalAlignIndirect(RetTy);
- return ABIArgInfo::getDirect();
- }
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-/// IsX86_MMXType - Return true if this is an MMX type.
-bool IsX86_MMXType(llvm::Type *IRType) {
- // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
- return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
- cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
- IRType->getScalarSizeInBits() != 64;
-}
-
-static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- StringRef Constraint,
- llvm::Type* Ty) {
- bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
- .Cases("y", "&y", "^Ym", true)
- .Default(false);
- if (IsMMXCons && Ty->isVectorTy()) {
- if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedValue() !=
- 64) {
- // Invalid MMX constraint
- return nullptr;
- }
-
- return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
- }
-
- // No operation needed
- return Ty;
-}
-
-/// Returns true if this type can be passed in SSE registers with the
-/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
-static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
- if (BT->getKind() == BuiltinType::LongDouble) {
- if (&Context.getTargetInfo().getLongDoubleFormat() ==
- &llvm::APFloat::x87DoubleExtended())
- return false;
- }
- return true;
- }
- } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
- // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
- // registers specially.
- unsigned VecSize = Context.getTypeSize(VT);
- if (VecSize == 128 || VecSize == 256 || VecSize == 512)
- return true;
- }
- return false;
-}
-
-/// Returns true if this aggregate is small enough to be passed in SSE registers
-/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
-static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
- return NumMembers <= 4;
-}
-
-/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
-static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
- auto AI = ABIArgInfo::getDirect(T);
- AI.setInReg(true);
- AI.setCanBeFlattened(false);
- return AI;
-}
-
-//===----------------------------------------------------------------------===//
-// X86-32 ABI Implementation
-//===----------------------------------------------------------------------===//
-
-/// Similar to llvm::CCState, but for Clang.
-struct CCState {
- CCState(CGFunctionInfo &FI)
- : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}
-
- llvm::SmallBitVector IsPreassigned;
- unsigned CC = CallingConv::CC_C;
- unsigned FreeRegs = 0;
- unsigned FreeSSERegs = 0;
-};
-
-/// X86_32ABIInfo - The X86-32 ABI information.
-class X86_32ABIInfo : public ABIInfo {
- enum Class {
- Integer,
- Float
- };
-
- static const unsigned MinABIStackAlignInBytes = 4;
-
- bool IsDarwinVectorABI;
- bool IsRetSmallStructInRegABI;
- bool IsWin32StructABI;
- bool IsSoftFloatABI;
- bool IsMCUABI;
- bool IsLinuxABI;
- unsigned DefaultNumRegisterParameters;
-
- static bool isRegisterSize(unsigned Size) {
- return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
- }
-
- bool isHomogeneousAggregateBaseType(QualType Ty) const override {
- // FIXME: Assumes vectorcall is in use.
- return isX86VectorTypeForVectorCall(getContext(), Ty);
- }
-
- bool isHomogeneousAggregateSmallEnough(const Type *Ty,
- uint64_t NumMembers) const override {
- // FIXME: Assumes vectorcall is in use.
- return isX86VectorCallAggregateSmallEnough(NumMembers);
- }
-
- bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
-
- /// getIndirectResult - Give a source type \arg Ty, return a suitable result
- /// such that the argument will be passed in memory.
- ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
-
- ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
-
- /// Return the alignment to use for the given type on the stack.
- unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
-
- Class classify(QualType Ty) const;
- ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
-
- /// Updates the number of available free registers, returns
- /// true if any registers were allocated.
- bool updateFreeRegs(QualType Ty, CCState &State) const;
-
- bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
- bool &NeedsPadding) const;
- bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
-
- bool canExpandIndirectArgument(QualType Ty) const;
-
- /// Rewrite the function info so that all memory arguments use
- /// inalloca.
- void rewriteWithInAlloca(CGFunctionInfo &FI) const;
-
- void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
- CharUnits &StackOffset, ABIArgInfo &Info,
- QualType Type) const;
- void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;
-
-public:
-
- void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
- bool RetSmallStructInRegABI, bool Win32StructABI,
- unsigned NumRegisterParameters, bool SoftFloatABI)
- : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
- IsRetSmallStructInRegABI(RetSmallStructInRegABI),
- IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
- IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
- IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
- CGT.getTarget().getTriple().isOSCygMing()),
- DefaultNumRegisterParameters(NumRegisterParameters) {}
-};
-
-class X86_32SwiftABIInfo : public SwiftABIInfo {
-public:
- explicit X86_32SwiftABIInfo(CodeGenTypes &CGT)
- : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/false) {}
-
- bool shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
- bool AsReturnValue) const override {
- // LLVM's x86-32 lowering currently only assigns up to three
- // integer registers and three fp registers. Oddly, it'll use up to
- // four vector registers for vectors, but those can overlap with the
- // scalar registers.
- return occupiesMoreThan(CGT, ComponentTys, /*total=*/3);
- }
-};
-
-class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
- bool RetSmallStructInRegABI, bool Win32StructABI,
- unsigned NumRegisterParameters, bool SoftFloatABI)
- : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
- CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
- NumRegisterParameters, SoftFloatABI)) {
- SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
- }
-
- static bool isStructReturnInRegABI(
- const llvm::Triple &Triple, const CodeGenOptions &Opts);
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override;
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
- // Darwin uses different dwarf register numbers for EH.
- if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
- return 4;
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-
- llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- StringRef Constraint,
- llvm::Type* Ty) const override {
- return X86AdjustInlineAsmType(CGF, Constraint, Ty);
- }
-
- void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
- std::string &Constraints,
- std::vector<llvm::Type *> &ResultRegTypes,
- std::vector<llvm::Type *> &ResultTruncRegTypes,
- std::vector<LValue> &ResultRegDests,
- std::string &AsmString,
- unsigned NumOutputs) const override;
-
- llvm::Constant *
- getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
- unsigned Sig = (0xeb << 0) | // jmp rel8
- (0x06 << 8) | // .+0x08
- ('v' << 16) |
- ('2' << 24);
- return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
- }
-
- StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
- return "movl\t%ebp, %ebp"
- "\t\t// marker for objc_retainAutoreleaseReturnValue";
- }
-};
-
-}
-
-/// Rewrite input constraint references after adding some output constraints.
-/// In the case where there is one output and one input and we add one output,
-/// we need to replace all operand references greater than or equal to 1:
-/// mov $0, $1
-/// mov eax, $1
-/// The result will be:
-/// mov $0, $2
-/// mov eax, $2
-static void rewriteInputConstraintReferences(unsigned FirstIn,
- unsigned NumNewOuts,
- std::string &AsmString) {
- std::string Buf;
- llvm::raw_string_ostream OS(Buf);
- size_t Pos = 0;
- while (Pos < AsmString.size()) {
- size_t DollarStart = AsmString.find('$', Pos);
- if (DollarStart == std::string::npos)
- DollarStart = AsmString.size();
- size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
- if (DollarEnd == std::string::npos)
- DollarEnd = AsmString.size();
- OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
- Pos = DollarEnd;
- size_t NumDollars = DollarEnd - DollarStart;
- if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
- // We have an operand reference.
- size_t DigitStart = Pos;
- if (AsmString[DigitStart] == '{') {
- OS << '{';
- ++DigitStart;
- }
- size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
- if (DigitEnd == std::string::npos)
- DigitEnd = AsmString.size();
- StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
- unsigned OperandIndex;
- if (!OperandStr.getAsInteger(10, OperandIndex)) {
- if (OperandIndex >= FirstIn)
- OperandIndex += NumNewOuts;
- OS << OperandIndex;
- } else {
- OS << OperandStr;
- }
- Pos = DigitEnd;
- }
- }
- AsmString = std::move(OS.str());
-}
-
-/// Add output constraints for EAX:EDX because they are return registers.
-void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
- CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
- std::vector<llvm::Type *> &ResultRegTypes,
- std::vector<llvm::Type *> &ResultTruncRegTypes,
- std::vector<LValue> &ResultRegDests, std::string &AsmString,
- unsigned NumOutputs) const {
- uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
-
- // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
- // larger.
- if (!Constraints.empty())
- Constraints += ',';
- if (RetWidth <= 32) {
- Constraints += "={eax}";
- ResultRegTypes.push_back(CGF.Int32Ty);
- } else {
- // Use the 'A' constraint for EAX:EDX.
- Constraints += "=A";
- ResultRegTypes.push_back(CGF.Int64Ty);
- }
-
- // Truncate EAX or EAX:EDX to an integer of the appropriate size.
- llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
- ResultTruncRegTypes.push_back(CoerceTy);
-
- // Coerce the integer by bitcasting the return slot pointer.
- ReturnSlot.setAddress(
- CGF.Builder.CreateElementBitCast(ReturnSlot.getAddress(CGF), CoerceTy));
- ResultRegDests.push_back(ReturnSlot);
-
- rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
-}
-
-/// shouldReturnTypeInRegister - Determine if the given type should be
-/// returned in a register (for the Darwin and MCU ABI).
-bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
- ASTContext &Context) const {
- uint64_t Size = Context.getTypeSize(Ty);
-
- // For i386, type must be register sized.
- // For the MCU ABI, it only needs to be <= 8-byte
- if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
- return false;
-
- if (Ty->isVectorType()) {
- // 64- and 128- bit vectors inside structures are not returned in
- // registers.
- if (Size == 64 || Size == 128)
- return false;
-
- return true;
- }
-
- // If this is a builtin, pointer, enum, complex type, member pointer, or
- // member function pointer it is ok.
- if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
- Ty->isAnyComplexType() || Ty->isEnumeralType() ||
- Ty->isBlockPointerType() || Ty->isMemberPointerType())
- return true;
-
- // Arrays are treated like records.
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
- return shouldReturnTypeInRegister(AT->getElementType(), Context);
-
- // Otherwise, it must be a record type.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT) return false;
-
- // FIXME: Traverse bases here too.
-
- // Structure types are passed in register if all fields would be
- // passed in a register.
- for (const auto *FD : RT->getDecl()->fields()) {
- // Empty fields are ignored.
- if (isEmptyField(Context, FD, true))
- continue;
-
- // Check fields recursively.
- if (!shouldReturnTypeInRegister(FD->getType(), Context))
- return false;
- }
- return true;
-}
-
-static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
- // Treat complex types as the element type.
- if (const ComplexType *CTy = Ty->getAs<ComplexType>())
- Ty = CTy->getElementType();
-
- // Check for a type which we know has a simple scalar argument-passing
- // convention without any padding. (We're specifically looking for 32
- // and 64-bit integer and integer-equivalents, float, and double.)
- if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
- !Ty->isEnumeralType() && !Ty->isBlockPointerType())
- return false;
-
- uint64_t Size = Context.getTypeSize(Ty);
- return Size == 32 || Size == 64;
-}
-
-static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
- uint64_t &Size) {
- for (const auto *FD : RD->fields()) {
- // Scalar arguments on the stack get 4 byte alignment on x86. If the
- // argument is smaller than 32-bits, expanding the struct will create
- // alignment padding.
- if (!is32Or64BitBasicType(FD->getType(), Context))
- return false;
-
- // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
- // how to expand them yet, and the predicate for telling if a bitfield still
- // counts as "basic" is more complicated than what we were doing previously.
- if (FD->isBitField())
- return false;
-
- Size += Context.getTypeSize(FD->getType());
- }
- return true;
-}
-
-static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
- uint64_t &Size) {
- // Don't do this if there are any non-empty bases.
- for (const CXXBaseSpecifier &Base : RD->bases()) {
- if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
- Size))
- return false;
- }
- if (!addFieldSizes(Context, RD, Size))
- return false;
- return true;
-}
-
-/// Test whether an argument type which is to be passed indirectly (on the
-/// stack) would have the equivalent layout if it was expanded into separate
-/// arguments. If so, we prefer to do the latter to avoid inhibiting
-/// optimizations.
-bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
- // We can only expand structure types.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT)
- return false;
- const RecordDecl *RD = RT->getDecl();
- uint64_t Size = 0;
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- if (!IsWin32StructABI) {
- // On non-Windows, we have to conservatively match our old bitcode
- // prototypes in order to be ABI-compatible at the bitcode level.
- if (!CXXRD->isCLike())
- return false;
- } else {
- // Don't do this for dynamic classes.
- if (CXXRD->isDynamicClass())
- return false;
- }
- if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
- return false;
- } else {
- if (!addFieldSizes(getContext(), RD, Size))
- return false;
- }
-
- // We can do this if there was no alignment padding.
- return Size == getContext().getTypeSize(Ty);
-}
-
-ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
- // If the return value is indirect, then the hidden argument is consuming one
- // integer register.
- if (State.FreeRegs) {
- --State.FreeRegs;
- if (!IsMCUABI)
- return getNaturalAlignIndirectInReg(RetTy);
- }
- return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
-}
-
-ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
- CCState &State) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- const Type *Base = nullptr;
- uint64_t NumElts = 0;
- if ((State.CC == llvm::CallingConv::X86_VectorCall ||
- State.CC == llvm::CallingConv::X86_RegCall) &&
- isHomogeneousAggregate(RetTy, Base, NumElts)) {
- // The LLVM struct type for such an aggregate should lower properly.
- return ABIArgInfo::getDirect();
- }
-
- if (const VectorType *VT = RetTy->getAs<VectorType>()) {
- // On Darwin, some vectors are returned in registers.
- if (IsDarwinVectorABI) {
- uint64_t Size = getContext().getTypeSize(RetTy);
-
- // 128-bit vectors are a special case; they are returned in
- // registers and we need to make sure to pick a type the LLVM
- // backend will like.
- if (Size == 128)
- return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
- llvm::Type::getInt64Ty(getVMContext()), 2));
-
- // Always return in register if it fits in a general purpose
- // register, or if it is 64 bits and has a single element.
- if ((Size == 8 || Size == 16 || Size == 32) ||
- (Size == 64 && VT->getNumElements() == 1))
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
- Size));
-
- return getIndirectReturnResult(RetTy, State);
- }
-
- return ABIArgInfo::getDirect();
- }
-
- if (isAggregateTypeForABI(RetTy)) {
- if (const RecordType *RT = RetTy->getAs<RecordType>()) {
- // Structures with flexible arrays are always indirect.
- if (RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectReturnResult(RetTy, State);
- }
-
- // If specified, structs and unions are always indirect.
- if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
- return getIndirectReturnResult(RetTy, State);
-
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), RetTy, true))
- return ABIArgInfo::getIgnore();
-
- // Return complex of _Float16 as <2 x half> so the backend will use xmm0.
- if (const ComplexType *CT = RetTy->getAs<ComplexType>()) {
- QualType ET = getContext().getCanonicalType(CT->getElementType());
- if (ET->isFloat16Type())
- return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
- llvm::Type::getHalfTy(getVMContext()), 2));
- }
-
- // Small structures which are register sized are generally returned
- // in a register.
- if (shouldReturnTypeInRegister(RetTy, getContext())) {
- uint64_t Size = getContext().getTypeSize(RetTy);
-
- // As a special-case, if the struct is a "single-element" struct, and
- // the field is of type "float" or "double", return it in a
- // floating-point register. (MSVC does not apply this special case.)
- // We apply a similar transformation for pointer types to improve the
- // quality of the generated IR.
- if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
- if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
- || SeltTy->hasPointerRepresentation())
- return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
-
- // FIXME: We should be able to narrow this integer in cases with dead
- // padding.
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
- }
-
- return getIndirectReturnResult(RetTy, State);
- }
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- if (const auto *EIT = RetTy->getAs<BitIntType>())
- if (EIT->getNumBits() > 64)
- return getIndirectReturnResult(RetTy, State);
-
- return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-static bool isSIMDVectorType(ASTContext &Context, QualType Ty) {
- return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
-}
-
-static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT)
- return false;
- const RecordDecl *RD = RT->getDecl();
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- for (const auto &I : CXXRD->bases())
- if (!isRecordWithSIMDVectorType(Context, I.getType()))
- return false;
-
- for (const auto *i : RD->fields()) {
- QualType FT = i->getType();
-
- if (isSIMDVectorType(Context, FT))
- return true;
-
- if (isRecordWithSIMDVectorType(Context, FT))
- return true;
- }
-
- return false;
-}
-
-unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
- unsigned Align) const {
- // Otherwise, if the alignment is less than or equal to the minimum ABI
- // alignment, just use the default; the backend will handle this.
- if (Align <= MinABIStackAlignInBytes)
- return 0; // Use default alignment.
-
- if (IsLinuxABI) {
- // Exclude other System V OS (e.g Darwin, PS4 and FreeBSD) since we don't
- // want to spend any effort dealing with the ramifications of ABI breaks.
- //
- // If the vector type is __m128/__m256/__m512, return the default alignment.
- if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
- return Align;
- }
- // On non-Darwin, the stack type alignment is always 4.
- if (!IsDarwinVectorABI) {
- // Set explicit alignment, since we may need to realign the top.
- return MinABIStackAlignInBytes;
- }
-
- // Otherwise, if the type contains an SSE vector type, the alignment is 16.
- if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
- isRecordWithSIMDVectorType(getContext(), Ty)))
- return 16;
-
- return MinABIStackAlignInBytes;
-}
-
-ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
- CCState &State) const {
- if (!ByVal) {
- if (State.FreeRegs) {
- --State.FreeRegs; // Non-byval indirects just use one pointer.
- if (!IsMCUABI)
- return getNaturalAlignIndirectInReg(Ty);
- }
- return getNaturalAlignIndirect(Ty, false);
- }
-
- // Compute the byval alignment.
- unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
- unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
- if (StackAlign == 0)
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
-
- // If the stack alignment is less than the type alignment, realign the
- // argument.
- bool Realign = TypeAlign > StackAlign;
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
- /*ByVal=*/true, Realign);
-}
-
-X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
- const Type *T = isSingleElementStruct(Ty, getContext());
- if (!T)
- T = Ty.getTypePtr();
-
- if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
- BuiltinType::Kind K = BT->getKind();
- if (K == BuiltinType::Float || K == BuiltinType::Double)
- return Float;
- }
- return Integer;
-}
-
-bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
- if (!IsSoftFloatABI) {
- Class C = classify(Ty);
- if (C == Float)
- return false;
- }
-
- unsigned Size = getContext().getTypeSize(Ty);
- unsigned SizeInRegs = (Size + 31) / 32;
-
- if (SizeInRegs == 0)
- return false;
-
- if (!IsMCUABI) {
- if (SizeInRegs > State.FreeRegs) {
- State.FreeRegs = 0;
- return false;
- }
- } else {
- // The MCU psABI allows passing parameters in-reg even if there are
- // earlier parameters that are passed on the stack. Also,
- // it does not allow passing >8-byte structs in-register,
- // even if there are 3 free registers available.
- if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
- return false;
- }
-
- State.FreeRegs -= SizeInRegs;
- return true;
-}
-
-bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
- bool &InReg,
- bool &NeedsPadding) const {
- // On Windows, aggregates other than HFAs are never passed in registers, and
- // they do not consume register slots. Homogenous floating-point aggregates
- // (HFAs) have already been dealt with at this point.
- if (IsWin32StructABI && isAggregateTypeForABI(Ty))
- return false;
-
- NeedsPadding = false;
- InReg = !IsMCUABI;
-
- if (!updateFreeRegs(Ty, State))
- return false;
-
- if (IsMCUABI)
- return true;
-
- if (State.CC == llvm::CallingConv::X86_FastCall ||
- State.CC == llvm::CallingConv::X86_VectorCall ||
- State.CC == llvm::CallingConv::X86_RegCall) {
- if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
- NeedsPadding = true;
-
- return false;
- }
-
- return true;
-}
-
-bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
- bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
- (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
- Ty->isReferenceType());
-
- if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
- State.CC == llvm::CallingConv::X86_VectorCall))
- return false;
-
- if (!updateFreeRegs(Ty, State))
- return false;
-
- if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
- return false;
-
- // Return true to apply inreg to all legal parameters except for MCU targets.
- return !IsMCUABI;
-}
-
-void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
- // Vectorcall x86 works subtly different than in x64, so the format is
- // a bit different than the x64 version. First, all vector types (not HVAs)
- // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
- // This differs from the x64 implementation, where the first 6 by INDEX get
- // registers.
- // In the second pass over the arguments, HVAs are passed in the remaining
- // vector registers if possible, or indirectly by address. The address will be
- // passed in ECX/EDX if available. Any other arguments are passed according to
- // the usual fastcall rules.
- MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
- for (int I = 0, E = Args.size(); I < E; ++I) {
- const Type *Base = nullptr;
- uint64_t NumElts = 0;
- const QualType &Ty = Args[I].type;
- if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
- isHomogeneousAggregate(Ty, Base, NumElts)) {
- if (State.FreeSSERegs >= NumElts) {
- State.FreeSSERegs -= NumElts;
- Args[I].info = ABIArgInfo::getDirectInReg();
- State.IsPreassigned.set(I);
- }
- }
- }
-}
-
-ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
- CCState &State) const {
- // FIXME: Set alignment on indirect arguments.
- bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
- bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
- bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
-
- Ty = useFirstFieldIfTransparentUnion(Ty);
- TypeInfo TI = getContext().getTypeInfo(Ty);
-
- // Check with the C++ ABI first.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (RT) {
- CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
- if (RAA == CGCXXABI::RAA_Indirect) {
- return getIndirectResult(Ty, false, State);
- } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
- // The field index doesn't matter, we'll fix it up later.
- return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
- }
- }
-
- // Regcall uses the concept of a homogenous vector aggregate, similar
- // to other targets.
- const Type *Base = nullptr;
- uint64_t NumElts = 0;
- if ((IsRegCall || IsVectorCall) &&
- isHomogeneousAggregate(Ty, Base, NumElts)) {
- if (State.FreeSSERegs >= NumElts) {
- State.FreeSSERegs -= NumElts;
-
- // Vectorcall passes HVAs directly and does not flatten them, but regcall
- // does.
- if (IsVectorCall)
- return getDirectX86Hva();
-
- if (Ty->isBuiltinType() || Ty->isVectorType())
- return ABIArgInfo::getDirect();
- return ABIArgInfo::getExpand();
- }
- return getIndirectResult(Ty, /*ByVal=*/false, State);
- }
-
- if (isAggregateTypeForABI(Ty)) {
- // Structures with flexible arrays are always indirect.
- // FIXME: This should not be byval!
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectResult(Ty, true, State);
-
- // Ignore empty structs/unions on non-Windows.
- if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- llvm::LLVMContext &LLVMContext = getVMContext();
- llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
- bool NeedsPadding = false;
- bool InReg;
- if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
- unsigned SizeInRegs = (TI.Width + 31) / 32;
- SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
- llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
- if (InReg)
- return ABIArgInfo::getDirectInReg(Result);
- else
- return ABIArgInfo::getDirect(Result);
- }
- llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
-
- // Pass over-aligned aggregates on Windows indirectly. This behavior was
- // added in MSVC 2015.
- if (IsWin32StructABI && TI.isAlignRequired() && TI.Align > 32)
- return getIndirectResult(Ty, /*ByVal=*/false, State);
-
- // Expand small (<= 128-bit) record types when we know that the stack layout
- // of those arguments will match the struct. This is important because the
- // LLVM backend isn't smart enough to remove byval, which inhibits many
- // optimizations.
- // Don't do this for the MCU if there are still free integer registers
- // (see X86_64 ABI for full explanation).
- if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
- canExpandIndirectArgument(Ty))
- return ABIArgInfo::getExpandWithPadding(
- IsFastCall || IsVectorCall || IsRegCall, PaddingType);
-
- return getIndirectResult(Ty, true, State);
- }
-
- if (const VectorType *VT = Ty->getAs<VectorType>()) {
- // On Windows, vectors are passed directly if registers are available, or
- // indirectly if not. This avoids the need to align argument memory. Pass
- // user-defined vector types larger than 512 bits indirectly for simplicity.
- if (IsWin32StructABI) {
- if (TI.Width <= 512 && State.FreeSSERegs > 0) {
- --State.FreeSSERegs;
- return ABIArgInfo::getDirectInReg();
- }
- return getIndirectResult(Ty, /*ByVal=*/false, State);
- }
-
- // On Darwin, some vectors are passed in memory, we handle this by passing
- // it as an i8/i16/i32/i64.
- if (IsDarwinVectorABI) {
- if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
- (TI.Width == 64 && VT->getNumElements() == 1))
- return ABIArgInfo::getDirect(
- llvm::IntegerType::get(getVMContext(), TI.Width));
- }
-
- if (IsX86_MMXType(CGT.ConvertType(Ty)))
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
-
- return ABIArgInfo::getDirect();
- }
-
-
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- bool InReg = shouldPrimitiveUseInReg(Ty, State);
-
- if (isPromotableIntegerTypeForABI(Ty)) {
- if (InReg)
- return ABIArgInfo::getExtendInReg(Ty);
- return ABIArgInfo::getExtend(Ty);
- }
-
- if (const auto *EIT = Ty->getAs<BitIntType>()) {
- if (EIT->getNumBits() <= 64) {
- if (InReg)
- return ABIArgInfo::getDirectInReg();
- return ABIArgInfo::getDirect();
- }
- return getIndirectResult(Ty, /*ByVal=*/false, State);
- }
-
- if (InReg)
- return ABIArgInfo::getDirectInReg();
- return ABIArgInfo::getDirect();
-}
-
-void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
- CCState State(FI);
- if (IsMCUABI)
- State.FreeRegs = 3;
- else if (State.CC == llvm::CallingConv::X86_FastCall) {
- State.FreeRegs = 2;
- State.FreeSSERegs = 3;
- } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
- State.FreeRegs = 2;
- State.FreeSSERegs = 6;
- } else if (FI.getHasRegParm())
- State.FreeRegs = FI.getRegParm();
- else if (State.CC == llvm::CallingConv::X86_RegCall) {
- State.FreeRegs = 5;
- State.FreeSSERegs = 8;
- } else if (IsWin32StructABI) {
- // Since MSVC 2015, the first three SSE vectors have been passed in
- // registers. The rest are passed indirectly.
- State.FreeRegs = DefaultNumRegisterParameters;
- State.FreeSSERegs = 3;
- } else
- State.FreeRegs = DefaultNumRegisterParameters;
-
- if (!::classifyReturnType(getCXXABI(), FI, *this)) {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
- } else if (FI.getReturnInfo().isIndirect()) {
- // The C++ ABI is not aware of register usage, so we have to check if the
- // return value was sret and put it in a register ourselves if appropriate.
- if (State.FreeRegs) {
- --State.FreeRegs; // The sret parameter consumes a register.
- if (!IsMCUABI)
- FI.getReturnInfo().setInReg(true);
- }
- }
-
- // The chain argument effectively gives us another free register.
- if (FI.isChainCall())
- ++State.FreeRegs;
-
- // For vectorcall, do a first pass over the arguments, assigning FP and vector
- // arguments to XMM registers as available.
- if (State.CC == llvm::CallingConv::X86_VectorCall)
- runVectorCallFirstPass(FI, State);
-
- bool UsedInAlloca = false;
- MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
- for (int I = 0, E = Args.size(); I < E; ++I) {
- // Skip arguments that have already been assigned.
- if (State.IsPreassigned.test(I))
- continue;
-
- Args[I].info = classifyArgumentType(Args[I].type, State);
- UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
- }
-
- // If we needed to use inalloca for any argument, do a second pass and rewrite
- // all the memory arguments to use inalloca.
- if (UsedInAlloca)
- rewriteWithInAlloca(FI);
-}
-
-void
-X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
- CharUnits &StackOffset, ABIArgInfo &Info,
- QualType Type) const {
- // Arguments are always 4-byte-aligned.
- CharUnits WordSize = CharUnits::fromQuantity(4);
- assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
-
- // sret pointers and indirect things will require an extra pointer
- // indirection, unless they are byval. Most things are byval, and will not
- // require this indirection.
- bool IsIndirect = false;
- if (Info.isIndirect() && !Info.getIndirectByVal())
- IsIndirect = true;
- Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
- llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
- if (IsIndirect)
- LLTy = LLTy->getPointerTo(0);
- FrameFields.push_back(LLTy);
- StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
-
- // Insert padding bytes to respect alignment.
- CharUnits FieldEnd = StackOffset;
- StackOffset = FieldEnd.alignTo(WordSize);
- if (StackOffset != FieldEnd) {
- CharUnits NumBytes = StackOffset - FieldEnd;
- llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
- Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
- FrameFields.push_back(Ty);
- }
-}
-
-static bool isArgInAlloca(const ABIArgInfo &Info) {
- // Leave ignored and inreg arguments alone.
- switch (Info.getKind()) {
- case ABIArgInfo::InAlloca:
- return true;
- case ABIArgInfo::Ignore:
- case ABIArgInfo::IndirectAliased:
- return false;
- case ABIArgInfo::Indirect:
- case ABIArgInfo::Direct:
- case ABIArgInfo::Extend:
- return !Info.getInReg();
- case ABIArgInfo::Expand:
- case ABIArgInfo::CoerceAndExpand:
- // These are aggregate types which are never passed in registers when
- // inalloca is involved.
- return true;
- }
- llvm_unreachable("invalid enum");
-}
-
-void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
- assert(IsWin32StructABI && "inalloca only supported on win32");
-
- // Build a packed struct type for all of the arguments in memory.
- SmallVector<llvm::Type *, 6> FrameFields;
-
- // The stack alignment is always 4.
- CharUnits StackAlign = CharUnits::fromQuantity(4);
-
- CharUnits StackOffset;
- CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
-
- // Put 'this' into the struct before 'sret', if necessary.
- bool IsThisCall =
- FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
- ABIArgInfo &Ret = FI.getReturnInfo();
- if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
- isArgInAlloca(I->info)) {
- addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
- ++I;
- }
-
- // Put the sret parameter into the inalloca struct if it's in memory.
- if (Ret.isIndirect() && !Ret.getInReg()) {
- addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
- // On Windows, the hidden sret parameter is always returned in eax.
- Ret.setInAllocaSRet(IsWin32StructABI);
- }
-
- // Skip the 'this' parameter in ecx.
- if (IsThisCall)
- ++I;
-
- // Put arguments passed in memory into the struct.
- for (; I != E; ++I) {
- if (isArgInAlloca(I->info))
- addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
- }
-
- FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
- /*isPacked=*/true),
- StackAlign);
-}
-
-Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
- Address VAListAddr, QualType Ty) const {
-
- auto TypeInfo = getContext().getTypeInfoInChars(Ty);
-
- // x86-32 changes the alignment of certain arguments on the stack.
- //
- // Just messing with TypeInfo like this works because we never pass
- // anything indirectly.
- TypeInfo.Align = CharUnits::fromQuantity(
- getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
- TypeInfo, CharUnits::fromQuantity(4),
- /*AllowHigherAlign*/ true);
-}
-
-bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
- const llvm::Triple &Triple, const CodeGenOptions &Opts) {
- assert(Triple.getArch() == llvm::Triple::x86);
-
- switch (Opts.getStructReturnConvention()) {
- case CodeGenOptions::SRCK_Default:
- break;
- case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
- return false;
- case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
- return true;
- }
-
- if (Triple.isOSDarwin() || Triple.isOSIAMCU())
- return true;
-
- switch (Triple.getOS()) {
- case llvm::Triple::DragonFly:
- case llvm::Triple::FreeBSD:
- case llvm::Triple::OpenBSD:
- case llvm::Triple::Win32:
- return true;
- default:
- return false;
- }
-}
-
-static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) {
- if (!FD->hasAttr<AnyX86InterruptAttr>())
- return;
-
- llvm::Function *Fn = cast<llvm::Function>(GV);
- Fn->setCallingConv(llvm::CallingConv::X86_INTR);
- if (FD->getNumParams() == 0)
- return;
-
- auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType());
- llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());
- llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
- Fn->getContext(), ByValTy);
- Fn->addParamAttr(0, NewAttr);
-}
-
-void X86_32TargetCodeGenInfo::setTargetAttributes(
+void TargetCodeGenInfo::addStackProbeTargetAttributes(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
- if (GV->isDeclaration())
- return;
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- Fn->addFnAttr("stackrealign");
- }
-
- addX86InterruptAttrs(FD, GV, CGM);
- }
-}
-
-bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
- CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
-
- llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
-
- // 0-7 are the eight integer registers; the order is different
- // on Darwin (for EH), but the range is the same.
- // 8 is %eip.
- AssignToArrayRange(Builder, Address, Four8, 0, 8);
-
- if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
- // 12-16 are st(0..4). Not sure why we stop at 4.
- // These have size 16, which is sizeof(long double) on
- // platforms with 8-byte alignment for that type.
- llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
- AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
-
- } else {
- // 9 is %eflags, which doesn't get a size on Darwin for some
- // reason.
- Builder.CreateAlignedStore(
- Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
- CharUnits::One());
-
- // 11-16 are st(0..5). Not sure why we stop at 5.
- // These have size 12, which is sizeof(long double) on
- // platforms with 4-byte alignment for that type.
- llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
- AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
- }
-
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// X86-64 ABI Implementation
-//===----------------------------------------------------------------------===//
-
-
-namespace {
-/// The AVX ABI level for X86 targets.
-enum class X86AVXABILevel {
- None,
- AVX,
- AVX512
-};
-
-/// \p returns the size in bits of the largest (native) vector for \p AVXLevel.
-static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
- switch (AVXLevel) {
- case X86AVXABILevel::AVX512:
- return 512;
- case X86AVXABILevel::AVX:
- return 256;
- case X86AVXABILevel::None:
- return 128;
- }
- llvm_unreachable("Unknown AVXLevel");
-}
-
-/// X86_64ABIInfo - The X86_64 ABI information.
-class X86_64ABIInfo : public ABIInfo {
- enum Class {
- Integer = 0,
- SSE,
- SSEUp,
- X87,
- X87Up,
- ComplexX87,
- NoClass,
- Memory
- };
-
- /// merge - Implement the X86_64 ABI merging algorithm.
- ///
- /// Merge an accumulating classification \arg Accum with a field
- /// classification \arg Field.
- ///
- /// \param Accum - The accumulating classification. This should
- /// always be either NoClass or the result of a previous merge
- /// call. In addition, this should never be Memory (the caller
- /// should just return Memory for the aggregate).
- static Class merge(Class Accum, Class Field);
-
- /// postMerge - Implement the X86_64 ABI post merging algorithm.
- ///
- /// Post merger cleanup, reduces a malformed Hi and Lo pair to
- /// final MEMORY or SSE classes when necessary.
- ///
- /// \param AggregateSize - The size of the current aggregate in
- /// the classification process.
- ///
- /// \param Lo - The classification for the parts of the type
- /// residing in the low word of the containing object.
- ///
- /// \param Hi - The classification for the parts of the type
- /// residing in the higher words of the containing object.
- ///
- void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
-
- /// classify - Determine the x86_64 register classes in which the
- /// given type T should be passed.
- ///
- /// \param Lo - The classification for the parts of the type
- /// residing in the low word of the containing object.
- ///
- /// \param Hi - The classification for the parts of the type
- /// residing in the high word of the containing object.
- ///
- /// \param OffsetBase - The bit offset of this type in the
- /// containing object. Some parameters are classified different
- /// depending on whether they straddle an eightbyte boundary.
- ///
- /// \param isNamedArg - Whether the argument in question is a "named"
- /// argument, as used in AMD64-ABI 3.5.7.
- ///
- /// \param IsRegCall - Whether the calling conversion is regcall.
- ///
- /// If a word is unused its result will be NoClass; if a type should
- /// be passed in Memory then at least the classification of \arg Lo
- /// will be Memory.
- ///
- /// The \arg Lo class will be NoClass iff the argument is ignored.
- ///
- /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
- /// also be ComplexX87.
- void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
- bool isNamedArg, bool IsRegCall = false) const;
-
- llvm::Type *GetByteVectorType(QualType Ty) const;
- llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
- unsigned IROffset, QualType SourceTy,
- unsigned SourceOffset) const;
- llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
- unsigned IROffset, QualType SourceTy,
- unsigned SourceOffset) const;
-
- /// getIndirectResult - Give a source type \arg Ty, return a suitable result
- /// such that the argument will be returned in memory.
- ABIArgInfo getIndirectReturnResult(QualType Ty) const;
-
- /// getIndirectResult - Give a source type \arg Ty, return a suitable result
- /// such that the argument will be passed in memory.
- ///
- /// \param freeIntRegs - The number of free integer registers remaining
- /// available.
- ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
-
- ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
- unsigned &neededInt, unsigned &neededSSE,
- bool isNamedArg,
- bool IsRegCall = false) const;
-
- ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
- unsigned &NeededSSE,
- unsigned &MaxVectorWidth) const;
-
- ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
- unsigned &NeededSSE,
- unsigned &MaxVectorWidth) const;
-
- bool IsIllegalVectorType(QualType Ty) const;
-
- /// The 0.98 ABI revision clarified a lot of ambiguities,
- /// unfortunately in ways that were not always consistent with
- /// certain previous compilers. In particular, platforms which
- /// required strict binary compatibility with older versions of GCC
- /// may need to exempt themselves.
- bool honorsRevision0_98() const {
- return !getTarget().getTriple().isOSDarwin();
- }
-
- /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
- /// classify it as INTEGER (for compatibility with older clang compilers).
- bool classifyIntegerMMXAsSSE() const {
- // Clang <= 3.8 did not do this.
- if (getContext().getLangOpts().getClangABICompat() <=
- LangOptions::ClangABI::Ver3_8)
- return false;
-
- const llvm::Triple &Triple = getTarget().getTriple();
- if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
- return false;
- return true;
- }
-
- // GCC classifies vectors of __int128 as memory.
- bool passInt128VectorsInMem() const {
- // Clang <= 9.0 did not do this.
- if (getContext().getLangOpts().getClangABICompat() <=
- LangOptions::ClangABI::Ver9)
- return false;
-
- const llvm::Triple &T = getTarget().getTriple();
- return T.isOSLinux() || T.isOSNetBSD();
- }
-
- X86AVXABILevel AVXLevel;
- // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
- // 64-bit hardware.
- bool Has64BitPointers;
-
-public:
- X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
- : ABIInfo(CGT), AVXLevel(AVXLevel),
- Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}
-
- bool isPassedUsingAVXType(QualType type) const {
- unsigned neededInt, neededSSE;
- // The freeIntRegs argument doesn't matter here.
- ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
- /*isNamedArg*/true);
- if (info.isDirect()) {
- llvm::Type *ty = info.getCoerceToType();
- if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
- return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
- }
- return false;
- }
-
- void computeInfo(CGFunctionInfo &FI) const override;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
- Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- bool has64BitPointers() const {
- return Has64BitPointers;
- }
-};
-
-/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
-class WinX86_64ABIInfo : public ABIInfo {
-public:
- WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
- : ABIInfo(CGT), AVXLevel(AVXLevel),
- IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
-
- void computeInfo(CGFunctionInfo &FI) const override;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- bool isHomogeneousAggregateBaseType(QualType Ty) const override {
- // FIXME: Assumes vectorcall is in use.
- return isX86VectorTypeForVectorCall(getContext(), Ty);
- }
-
- bool isHomogeneousAggregateSmallEnough(const Type *Ty,
- uint64_t NumMembers) const override {
- // FIXME: Assumes vectorcall is in use.
- return isX86VectorCallAggregateSmallEnough(NumMembers);
- }
-
-private:
- ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
- bool IsVectorCall, bool IsRegCall) const;
- ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
- const ABIArgInfo &current) const;
-
- X86AVXABILevel AVXLevel;
-
- bool IsMingw64;
-};
-
-class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
- : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {
- SwiftInfo =
- std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
- }
-
- const X86_64ABIInfo &getABIInfo() const {
- return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
- }
-
- /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
- /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations.
- bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
- return 7;
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override {
- llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
-
- // 0-15 are the 16 integer registers.
- // 16 is %rip.
- AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
- return false;
- }
-
- llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- StringRef Constraint,
- llvm::Type* Ty) const override {
- return X86AdjustInlineAsmType(CGF, Constraint, Ty);
- }
-
- bool isNoProtoCallVariadic(const CallArgList &args,
- const FunctionNoProtoType *fnType) const override {
- // The default CC on x86-64 sets %al to the number of SSA
- // registers used, and GCC sets this when calling an unprototyped
- // function, so we override the default behavior. However, don't do
- // that when AVX types are involved: the ABI explicitly states it is
- // undefined, and it doesn't work in practice because of how the ABI
- // defines varargs anyway.
- if (fnType->getCallConv() == CC_C) {
- bool HasAVXType = false;
- for (CallArgList::const_iterator
- it = args.begin(), ie = args.end(); it != ie; ++it) {
- if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
- HasAVXType = true;
- break;
- }
- }
-
- if (!HasAVXType)
- return true;
- }
-
- return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
- }
-
- llvm::Constant *
- getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
- unsigned Sig = (0xeb << 0) | // jmp rel8
- (0x06 << 8) | // .+0x08
- ('v' << 16) |
- ('2' << 24);
- return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
- }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- if (GV->isDeclaration())
- return;
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- Fn->addFnAttr("stackrealign");
- }
-
- addX86InterruptAttrs(FD, GV, CGM);
- }
- }
-
- void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
- const FunctionDecl *Caller,
- const FunctionDecl *Callee,
- const CallArgList &Args) const override;
-};
-
-static void initFeatureMaps(const ASTContext &Ctx,
- llvm::StringMap<bool> &CallerMap,
- const FunctionDecl *Caller,
- llvm::StringMap<bool> &CalleeMap,
- const FunctionDecl *Callee) {
- if (CalleeMap.empty() && CallerMap.empty()) {
- // The caller is potentially nullptr in the case where the call isn't in a
- // function. In this case, the getFunctionFeatureMap ensures we just get
- // the TU level setting (since it cannot be modified by 'target'..
- Ctx.getFunctionFeatureMap(CallerMap, Caller);
- Ctx.getFunctionFeatureMap(CalleeMap, Callee);
- }
-}
-
-static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
- SourceLocation CallLoc,
- const llvm::StringMap<bool> &CallerMap,
- const llvm::StringMap<bool> &CalleeMap,
- QualType Ty, StringRef Feature,
- bool IsArgument) {
- bool CallerHasFeat = CallerMap.lookup(Feature);
- bool CalleeHasFeat = CalleeMap.lookup(Feature);
- if (!CallerHasFeat && !CalleeHasFeat)
- return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
- << IsArgument << Ty << Feature;
-
- // Mixing calling conventions here is very clearly an error.
- if (!CallerHasFeat || !CalleeHasFeat)
- return Diag.Report(CallLoc, diag::err_avx_calling_convention)
- << IsArgument << Ty << Feature;
-
- // Else, both caller and callee have the required feature, so there is no need
- // to diagnose.
- return false;
-}
-
-static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
- SourceLocation CallLoc,
- const llvm::StringMap<bool> &CallerMap,
- const llvm::StringMap<bool> &CalleeMap, QualType Ty,
- bool IsArgument) {
- uint64_t Size = Ctx.getTypeSize(Ty);
- if (Size > 256)
- return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
- "avx512f", IsArgument);
-
- if (Size > 128)
- return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
- IsArgument);
-
- return false;
-}
-
-void X86_64TargetCodeGenInfo::checkFunctionCallABI(
- CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
- const FunctionDecl *Callee, const CallArgList &Args) const {
- llvm::StringMap<bool> CallerMap;
- llvm::StringMap<bool> CalleeMap;
- unsigned ArgIndex = 0;
-
- // We need to loop through the actual call arguments rather than the
- // function's parameters, in case this variadic.
- for (const CallArg &Arg : Args) {
- // The "avx" feature changes how vectors >128 in size are passed. "avx512f"
- // additionally changes how vectors >256 in size are passed. Like GCC, we
- // warn when a function is called with an argument where this will change.
- // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
- // the caller and callee features are mismatched.
- // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can
- // change its ABI with attribute-target after this call.
- if (Arg.getType()->isVectorType() &&
- CGM.getContext().getTypeSize(Arg.getType()) > 128) {
- initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
- QualType Ty = Arg.getType();
- // The CallArg seems to have desugared the type already, so for clearer
- // diagnostics, replace it with the type in the FunctionDecl if possible.
- if (ArgIndex < Callee->getNumParams())
- Ty = Callee->getParamDecl(ArgIndex)->getType();
-
- if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
- CalleeMap, Ty, /*IsArgument*/ true))
- return;
- }
- ++ArgIndex;
- }
-
- // Check return always, as we don't have a good way of knowing in codegen
- // whether this value is used, tail-called, etc.
- if (Callee->getReturnType()->isVectorType() &&
- CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
- initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
- checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
- CalleeMap, Callee->getReturnType(),
- /*IsArgument*/ false);
- }
-}
-
-static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
- // If the argument does not end in .lib, automatically add the suffix.
- // If the argument contains a space, enclose it in quotes.
- // This matches the behavior of MSVC.
- bool Quote = Lib.contains(' ');
- std::string ArgStr = Quote ? "\"" : "";
- ArgStr += Lib;
- if (!Lib.endswith_insensitive(".lib") && !Lib.endswith_insensitive(".a"))
- ArgStr += ".lib";
- ArgStr += Quote ? "\"" : "";
- return ArgStr;
-}
-
-class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
-public:
- WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
- bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
- unsigned NumRegisterParameters)
- : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
- Win32StructABI, NumRegisterParameters, false) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override;
-
- void getDependentLibraryOption(llvm::StringRef Lib,
- llvm::SmallString<24> &Opt) const override {
- Opt = "/DEFAULTLIB:";
- Opt += qualifyWindowsLibrary(Lib);
- }
-
- void getDetectMismatchOption(llvm::StringRef Name,
- llvm::StringRef Value,
- llvm::SmallString<32> &Opt) const override {
- Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
- }
-};
-
-static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) {
if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
-
if (CGM.getCodeGenOpts().StackProbeSize != 4096)
Fn->addFnAttr("stack-probe-size",
llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
@@ -2702,9737 +169,13 @@ static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
}
}
-void WinX86_32TargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
- X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
- if (GV->isDeclaration())
- return;
- addStackProbeTargetAttributes(D, GV, CGM);
-}
-
-class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
- X86AVXABILevel AVXLevel)
- : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {
- SwiftInfo =
- std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
- }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override;
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
- return 7;
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override {
- llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
-
- // 0-15 are the 16 integer registers.
- // 16 is %rip.
- AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
- return false;
- }
-
- void getDependentLibraryOption(llvm::StringRef Lib,
- llvm::SmallString<24> &Opt) const override {
- Opt = "/DEFAULTLIB:";
- Opt += qualifyWindowsLibrary(Lib);
- }
-
- void getDetectMismatchOption(llvm::StringRef Name,
- llvm::StringRef Value,
- llvm::SmallString<32> &Opt) const override {
- Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
- }
-};
-
-void WinX86_64TargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
- TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
- if (GV->isDeclaration())
- return;
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- Fn->addFnAttr("stackrealign");
- }
-
- addX86InterruptAttrs(FD, GV, CGM);
- }
-
- addStackProbeTargetAttributes(D, GV, CGM);
-}
-}
-
-void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
- Class &Hi) const {
- // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
- //
- // (a) If one of the classes is Memory, the whole argument is passed in
- // memory.
- //
- // (b) If X87UP is not preceded by X87, the whole argument is passed in
- // memory.
- //
- // (c) If the size of the aggregate exceeds two eightbytes and the first
- // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
- // argument is passed in memory. NOTE: This is necessary to keep the
- // ABI working for processors that don't support the __m256 type.
- //
- // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
- //
- // Some of these are enforced by the merging logic. Others can arise
- // only with unions; for example:
- // union { _Complex double; unsigned; }
- //
- // Note that clauses (b) and (c) were added in 0.98.
- //
- if (Hi == Memory)
- Lo = Memory;
- if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
- Lo = Memory;
- if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
- Lo = Memory;
- if (Hi == SSEUp && Lo != SSE)
- Hi = SSE;
-}
-
-X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
- // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
- // classified recursively so that always two fields are
- // considered. The resulting class is calculated according to
- // the classes of the fields in the eightbyte:
- //
- // (a) If both classes are equal, this is the resulting class.
- //
- // (b) If one of the classes is NO_CLASS, the resulting class is
- // the other class.
- //
- // (c) If one of the classes is MEMORY, the result is the MEMORY
- // class.
- //
- // (d) If one of the classes is INTEGER, the result is the
- // INTEGER.
- //
- // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
- // MEMORY is used as class.
- //
- // (f) Otherwise class SSE is used.
-
- // Accum should never be memory (we should have returned) or
- // ComplexX87 (because this cannot be passed in a structure).
- assert((Accum != Memory && Accum != ComplexX87) &&
- "Invalid accumulated classification during merge.");
- if (Accum == Field || Field == NoClass)
- return Accum;
- if (Field == Memory)
- return Memory;
- if (Accum == NoClass)
- return Field;
- if (Accum == Integer || Field == Integer)
- return Integer;
- if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
- Accum == X87 || Accum == X87Up)
- return Memory;
- return SSE;
-}
-
-void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
- Class &Hi, bool isNamedArg, bool IsRegCall) const {
- // FIXME: This code can be simplified by introducing a simple value class for
- // Class pairs with appropriate constructor methods for the various
- // situations.
-
- // FIXME: Some of the split computations are wrong; unaligned vectors
- // shouldn't be passed in registers for example, so there is no chance they
- // can straddle an eightbyte. Verify & simplify.
-
- Lo = Hi = NoClass;
-
- Class &Current = OffsetBase < 64 ? Lo : Hi;
- Current = Memory;
-
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- BuiltinType::Kind k = BT->getKind();
-
- if (k == BuiltinType::Void) {
- Current = NoClass;
- } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
- Lo = Integer;
- Hi = Integer;
- } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
- Current = Integer;
- } else if (k == BuiltinType::Float || k == BuiltinType::Double ||
- k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
- Current = SSE;
- } else if (k == BuiltinType::LongDouble) {
- const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
- if (LDF == &llvm::APFloat::IEEEquad()) {
- Lo = SSE;
- Hi = SSEUp;
- } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
- Lo = X87;
- Hi = X87Up;
- } else if (LDF == &llvm::APFloat::IEEEdouble()) {
- Current = SSE;
- } else
- llvm_unreachable("unexpected long double representation!");
- }
- // FIXME: _Decimal32 and _Decimal64 are SSE.
- // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
- return;
- }
-
- if (const EnumType *ET = Ty->getAs<EnumType>()) {
- // Classify the underlying integer type.
- classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
- return;
- }
-
- if (Ty->hasPointerRepresentation()) {
- Current = Integer;
- return;
- }
-
- if (Ty->isMemberPointerType()) {
- if (Ty->isMemberFunctionPointerType()) {
- if (Has64BitPointers) {
- // If Has64BitPointers, this is an {i64, i64}, so classify both
- // Lo and Hi now.
- Lo = Hi = Integer;
- } else {
- // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
- // straddles an eightbyte boundary, Hi should be classified as well.
- uint64_t EB_FuncPtr = (OffsetBase) / 64;
- uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
- if (EB_FuncPtr != EB_ThisAdj) {
- Lo = Hi = Integer;
- } else {
- Current = Integer;
- }
- }
- } else {
- Current = Integer;
- }
- return;
- }
-
- if (const VectorType *VT = Ty->getAs<VectorType>()) {
- uint64_t Size = getContext().getTypeSize(VT);
- if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
- // gcc passes the following as integer:
- // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
- // 2 bytes - <2 x char>, <1 x short>
- // 1 byte - <1 x char>
- Current = Integer;
-
- // If this type crosses an eightbyte boundary, it should be
- // split.
- uint64_t EB_Lo = (OffsetBase) / 64;
- uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
- if (EB_Lo != EB_Hi)
- Hi = Lo;
- } else if (Size == 64) {
- QualType ElementType = VT->getElementType();
-
- // gcc passes <1 x double> in memory. :(
- if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
- return;
-
- // gcc passes <1 x long long> as SSE but clang used to unconditionally
- // pass them as integer. For platforms where clang is the de facto
- // platform compiler, we must continue to use integer.
- if (!classifyIntegerMMXAsSSE() &&
- (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
- ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
- ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
- ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
- Current = Integer;
- else
- Current = SSE;
-
- // If this type crosses an eightbyte boundary, it should be
- // split.
- if (OffsetBase && OffsetBase != 64)
- Hi = Lo;
- } else if (Size == 128 ||
- (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
- QualType ElementType = VT->getElementType();
-
- // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :(
- if (passInt128VectorsInMem() && Size != 128 &&
- (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
- ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
- return;
-
- // Arguments of 256-bits are split into four eightbyte chunks. The
- // least significant one belongs to class SSE and all the others to class
- // SSEUP. The original Lo and Hi design considers that types can't be
- // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
- // This design isn't correct for 256-bits, but since there're no cases
- // where the upper parts would need to be inspected, avoid adding
- // complexity and just consider Hi to match the 64-256 part.
- //
- // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
- // registers if they are "named", i.e. not part of the "..." of a
- // variadic function.
- //
- // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
- // split into eight eightbyte chunks, one SSE and seven SSEUP.
- Lo = SSE;
- Hi = SSEUp;
- }
- return;
- }
-
- if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
- QualType ET = getContext().getCanonicalType(CT->getElementType());
-
- uint64_t Size = getContext().getTypeSize(Ty);
- if (ET->isIntegralOrEnumerationType()) {
- if (Size <= 64)
- Current = Integer;
- else if (Size <= 128)
- Lo = Hi = Integer;
- } else if (ET->isFloat16Type() || ET == getContext().FloatTy ||
- ET->isBFloat16Type()) {
- Current = SSE;
- } else if (ET == getContext().DoubleTy) {
- Lo = Hi = SSE;
- } else if (ET == getContext().LongDoubleTy) {
- const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
- if (LDF == &llvm::APFloat::IEEEquad())
- Current = Memory;
- else if (LDF == &llvm::APFloat::x87DoubleExtended())
- Current = ComplexX87;
- else if (LDF == &llvm::APFloat::IEEEdouble())
- Lo = Hi = SSE;
- else
- llvm_unreachable("unexpected long double representation!");
- }
-
- // If this complex type crosses an eightbyte boundary then it
- // should be split.
- uint64_t EB_Real = (OffsetBase) / 64;
- uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
- if (Hi == NoClass && EB_Real != EB_Imag)
- Hi = Lo;
-
- return;
- }
-
- if (const auto *EITy = Ty->getAs<BitIntType>()) {
- if (EITy->getNumBits() <= 64)
- Current = Integer;
- else if (EITy->getNumBits() <= 128)
- Lo = Hi = Integer;
- // Larger values need to get passed in memory.
- return;
- }
-
- if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- // Arrays are treated like structures.
-
- uint64_t Size = getContext().getTypeSize(Ty);
-
- // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than eight eightbytes, ..., it has class MEMORY.
- // regcall ABI doesn't have limitation to an object. The only limitation
- // is the free registers, which will be checked in computeInfo.
- if (!IsRegCall && Size > 512)
- return;
-
- // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
- // fields, it has class MEMORY.
- //
- // Only need to check alignment of array base.
- if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
- return;
-
- // Otherwise implement simplified merge. We could be smarter about
- // this, but it isn't worth it and would be harder to verify.
- Current = NoClass;
- uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
- uint64_t ArraySize = AT->getSize().getZExtValue();
-
- // The only case a 256-bit wide vector could be used is when the array
- // contains a single 256-bit element. Since Lo and Hi logic isn't extended
- // to work for sizes wider than 128, early check and fallback to memory.
- //
- if (Size > 128 &&
- (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
- return;
-
- for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
- Class FieldLo, FieldHi;
- classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
- Lo = merge(Lo, FieldLo);
- Hi = merge(Hi, FieldHi);
- if (Lo == Memory || Hi == Memory)
- break;
- }
-
- postMerge(Size, Lo, Hi);
- assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
- return;
- }
-
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- uint64_t Size = getContext().getTypeSize(Ty);
-
- // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than eight eightbytes, ..., it has class MEMORY.
- if (Size > 512)
- return;
-
- // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
- // copy constructor or a non-trivial destructor, it is passed by invisible
- // reference.
- if (getRecordArgABI(RT, getCXXABI()))
- return;
-
- const RecordDecl *RD = RT->getDecl();
-
- // Assume variable sized types are passed in memory.
- if (RD->hasFlexibleArrayMember())
- return;
-
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
-
- // Reset Lo class, this will be recomputed.
- Current = NoClass;
-
- // If this is a C++ record, classify the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- for (const auto &I : CXXRD->bases()) {
- assert(!I.isVirtual() && !I.getType()->isDependentType() &&
- "Unexpected base class!");
- const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
-
- // Classify this field.
- //
- // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
- // single eightbyte, each is classified separately. Each eightbyte gets
- // initialized to class NO_CLASS.
- Class FieldLo, FieldHi;
- uint64_t Offset =
- OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
- classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
- Lo = merge(Lo, FieldLo);
- Hi = merge(Hi, FieldHi);
- if (Lo == Memory || Hi == Memory) {
- postMerge(Size, Lo, Hi);
- return;
- }
- }
- }
-
- // Classify the fields one at a time, merging the results.
- unsigned idx = 0;
- bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
- LangOptions::ClangABI::Ver11 ||
- getContext().getTargetInfo().getTriple().isPS();
- bool IsUnion = RT->isUnionType() && !UseClang11Compat;
-
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i, ++idx) {
- uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
- bool BitField = i->isBitField();
-
- // Ignore padding bit-fields.
- if (BitField && i->isUnnamedBitfield())
- continue;
-
- // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
- // eight eightbytes, or it contains unaligned fields, it has class MEMORY.
- //
- // The only case a 256-bit or a 512-bit wide vector could be used is when
- // the struct contains a single 256-bit or 512-bit element. Early check
- // and fallback to memory.
- //
- // FIXME: Extended the Lo and Hi logic properly to work for size wider
- // than 128.
- if (Size > 128 &&
- ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
- Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
- Lo = Memory;
- postMerge(Size, Lo, Hi);
- return;
- }
- // Note, skip this test for bit-fields, see below.
- if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
- Lo = Memory;
- postMerge(Size, Lo, Hi);
- return;
- }
-
- // Classify this field.
- //
- // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
- // exceeds a single eightbyte, each is classified
- // separately. Each eightbyte gets initialized to class
- // NO_CLASS.
- Class FieldLo, FieldHi;
-
- // Bit-fields require special handling, they do not force the
- // structure to be passed in memory even if unaligned, and
- // therefore they can straddle an eightbyte.
- if (BitField) {
- assert(!i->isUnnamedBitfield());
- uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
- uint64_t Size = i->getBitWidthValue(getContext());
-
- uint64_t EB_Lo = Offset / 64;
- uint64_t EB_Hi = (Offset + Size - 1) / 64;
-
- if (EB_Lo) {
- assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
- FieldLo = NoClass;
- FieldHi = Integer;
- } else {
- FieldLo = Integer;
- FieldHi = EB_Hi ? Integer : NoClass;
- }
- } else
- classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
- Lo = merge(Lo, FieldLo);
- Hi = merge(Hi, FieldHi);
- if (Lo == Memory || Hi == Memory)
- break;
- }
-
- postMerge(Size, Lo, Hi);
- }
-}
-
-ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
- // If this is a scalar LLVM value then assume LLVM will pass it in the right
- // place naturally.
- if (!isAggregateTypeForABI(Ty)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- if (Ty->isBitIntType())
- return getNaturalAlignIndirect(Ty);
-
- return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
- }
-
- return getNaturalAlignIndirect(Ty);
-}
-
-bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
- if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
- uint64_t Size = getContext().getTypeSize(VecTy);
- unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
- if (Size <= 64 || Size > LargestVector)
- return true;
- QualType EltTy = VecTy->getElementType();
- if (passInt128VectorsInMem() &&
- (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
- EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
- return true;
- }
-
- return false;
-}
-
-ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
- unsigned freeIntRegs) const {
- // If this is a scalar LLVM value then assume LLVM will pass it in the right
- // place naturally.
- //
- // This assumption is optimistic, as there could be free registers available
- // when we need to pass this argument in memory, and LLVM could try to pass
- // the argument in the free register. This does not seem to happen currently,
- // but this code would be much safer if we could mark the argument with
- // 'onstack'. See PR12193.
- if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
- !Ty->isBitIntType()) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
- }
-
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- // Compute the byval alignment. We specify the alignment of the byval in all
- // cases so that the mid-level optimizer knows the alignment of the byval.
- unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
-
- // Attempt to avoid passing indirect results using byval when possible. This
- // is important for good codegen.
- //
- // We do this by coercing the value into a scalar type which the backend can
- // handle naturally (i.e., without using byval).
- //
- // For simplicity, we currently only do this when we have exhausted all of the
- // free integer registers. Doing this when there are free integer registers
- // would require more care, as we would have to ensure that the coerced value
- // did not claim the unused register. That would require either reording the
- // arguments to the function (so that any subsequent inreg values came first),
- // or only doing this optimization when there were no following arguments that
- // might be inreg.
- //
- // We currently expect it to be rare (particularly in well written code) for
- // arguments to be passed on the stack when there are still free integer
- // registers available (this would typically imply large structs being passed
- // by value), so this seems like a fair tradeoff for now.
- //
- // We can revisit this if the backend grows support for 'onstack' parameter
- // attributes. See PR12193.
- if (freeIntRegs == 0) {
- uint64_t Size = getContext().getTypeSize(Ty);
-
- // If this type fits in an eightbyte, coerce it into the matching integral
- // type, which will end up on the stack (with alignment 8).
- if (Align == 8 && Size <= 64)
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
- Size));
- }
-
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
-}
-
-/// The ABI specifies that a value should be passed in a full vector XMM/YMM
-/// register. Pick an LLVM IR type that will be passed as a vector register.
-llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
- // Wrapper structs/arrays that only contain vectors are passed just like
- // vectors; strip them off if present.
- if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
- Ty = QualType(InnerTy, 0);
-
- llvm::Type *IRType = CGT.ConvertType(Ty);
- if (isa<llvm::VectorType>(IRType)) {
- // Don't pass vXi128 vectors in their native type, the backend can't
- // legalize them.
- if (passInt128VectorsInMem() &&
- cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
- // Use a vXi64 vector.
- uint64_t Size = getContext().getTypeSize(Ty);
- return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
- Size / 64);
- }
-
- return IRType;
- }
-
- if (IRType->getTypeID() == llvm::Type::FP128TyID)
- return IRType;
-
- // We couldn't find the preferred IR vector type for 'Ty'.
- uint64_t Size = getContext().getTypeSize(Ty);
- assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
-
-
- // Return a LLVM IR vector type based on the size of 'Ty'.
- return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
- Size / 64);
-}
-
-/// BitsContainNoUserData - Return true if the specified [start,end) bit range
-/// is known to either be off the end of the specified type or being in
-/// alignment padding. The user type specified is known to be at most 128 bits
-/// in size, and have passed through X86_64ABIInfo::classify with a successful
-/// classification that put one of the two halves in the INTEGER class.
-///
-/// It is conservatively correct to return false.
-static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
- unsigned EndBit, ASTContext &Context) {
- // If the bytes being queried are off the end of the type, there is no user
- // data hiding here. This handles analysis of builtins, vectors and other
- // types that don't contain interesting padding.
- unsigned TySize = (unsigned)Context.getTypeSize(Ty);
- if (TySize <= StartBit)
- return true;
-
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
- unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
- unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
-
- // Check each element to see if the element overlaps with the queried range.
- for (unsigned i = 0; i != NumElts; ++i) {
- // If the element is after the span we care about, then we're done..
- unsigned EltOffset = i*EltSize;
- if (EltOffset >= EndBit) break;
-
- unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
- if (!BitsContainNoUserData(AT->getElementType(), EltStart,
- EndBit-EltOffset, Context))
- return false;
- }
- // If it overlaps no elements, then it is safe to process as padding.
- return true;
- }
-
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- for (const auto &I : CXXRD->bases()) {
- assert(!I.isVirtual() && !I.getType()->isDependentType() &&
- "Unexpected base class!");
- const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
-
- // If the base is after the span we care about, ignore it.
- unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
- if (BaseOffset >= EndBit) continue;
-
- unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
- if (!BitsContainNoUserData(I.getType(), BaseStart,
- EndBit-BaseOffset, Context))
- return false;
- }
- }
-
- // Verify that no field has data that overlaps the region of interest. Yes
- // this could be sped up a lot by being smarter about queried fields,
- // however we're only looking at structs up to 16 bytes, so we don't care
- // much.
- unsigned idx = 0;
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i, ++idx) {
- unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
-
- // If we found a field after the region we care about, then we're done.
- if (FieldOffset >= EndBit) break;
-
- unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
- if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
- Context))
- return false;
- }
-
- // If nothing in this record overlapped the area of interest, then we're
- // clean.
- return true;
- }
-
- return false;
-}
-
-/// getFPTypeAtOffset - Return a floating point type at the specified offset.
-static llvm::Type *getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
- const llvm::DataLayout &TD) {
- if (IROffset == 0 && IRType->isFloatingPointTy())
- return IRType;
-
- // If this is a struct, recurse into the field at the specified offset.
- if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
- if (!STy->getNumContainedTypes())
- return nullptr;
-
- const llvm::StructLayout *SL = TD.getStructLayout(STy);
- unsigned Elt = SL->getElementContainingOffset(IROffset);
- IROffset -= SL->getElementOffset(Elt);
- return getFPTypeAtOffset(STy->getElementType(Elt), IROffset, TD);
- }
-
- // If this is an array, recurse into the field at the specified offset.
- if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
- llvm::Type *EltTy = ATy->getElementType();
- unsigned EltSize = TD.getTypeAllocSize(EltTy);
- IROffset -= IROffset / EltSize * EltSize;
- return getFPTypeAtOffset(EltTy, IROffset, TD);
- }
-
- return nullptr;
-}
-
-/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
-/// low 8 bytes of an XMM register, corresponding to the SSE class.
-llvm::Type *X86_64ABIInfo::
-GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
- QualType SourceTy, unsigned SourceOffset) const {
- const llvm::DataLayout &TD = getDataLayout();
- unsigned SourceSize =
- (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
- llvm::Type *T0 = getFPTypeAtOffset(IRType, IROffset, TD);
- if (!T0 || T0->isDoubleTy())
- return llvm::Type::getDoubleTy(getVMContext());
-
- // Get the adjacent FP type.
- llvm::Type *T1 = nullptr;
- unsigned T0Size = TD.getTypeAllocSize(T0);
- if (SourceSize > T0Size)
- T1 = getFPTypeAtOffset(IRType, IROffset + T0Size, TD);
- if (T1 == nullptr) {
- // Check if IRType is a half/bfloat + float. float type will be in IROffset+4 due
- // to its alignment.
- if (T0->is16bitFPTy() && SourceSize > 4)
- T1 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
- // If we can't get a second FP type, return a simple half or float.
- // avx512fp16-abi.c:pr51813_2 shows it works to return float for
- // {float, i8} too.
- if (T1 == nullptr)
- return T0;
- }
-
- if (T0->isFloatTy() && T1->isFloatTy())
- return llvm::FixedVectorType::get(T0, 2);
-
- if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
- llvm::Type *T2 = nullptr;
- if (SourceSize > 4)
- T2 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
- if (T2 == nullptr)
- return llvm::FixedVectorType::get(T0, 2);
- return llvm::FixedVectorType::get(T0, 4);
- }
-
- if (T0->is16bitFPTy() || T1->is16bitFPTy())
- return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);
-
- return llvm::Type::getDoubleTy(getVMContext());
-}
-
-
-/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
-/// an 8-byte GPR. This means that we either have a scalar or we are talking
-/// about the high or low part of an up-to-16-byte struct. This routine picks
-/// the best LLVM IR type to represent this, which may be i64 or may be anything
-/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
-/// etc).
-///
-/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
-/// the source type. IROffset is an offset in bytes into the LLVM IR type that
-/// the 8-byte value references. PrefType may be null.
-///
-/// SourceTy is the source-level type for the entire argument. SourceOffset is
-/// an offset into this that we're processing (which is always either 0 or 8).
-///
-llvm::Type *X86_64ABIInfo::
-GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
- QualType SourceTy, unsigned SourceOffset) const {
- // If we're dealing with an un-offset LLVM IR type, then it means that we're
- // returning an 8-byte unit starting with it. See if we can safely use it.
- if (IROffset == 0) {
- // Pointers and int64's always fill the 8-byte unit.
- if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
- IRType->isIntegerTy(64))
- return IRType;
-
- // If we have a 1/2/4-byte integer, we can use it only if the rest of the
- // goodness in the source type is just tail padding. This is allowed to
- // kick in for struct {double,int} on the int, but not on
- // struct{double,int,int} because we wouldn't return the second int. We
- // have to do this analysis on the source type because we can't depend on
- // unions being lowered a specific way etc.
- if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
- IRType->isIntegerTy(32) ||
- (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
- unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
- cast<llvm::IntegerType>(IRType)->getBitWidth();
-
- if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
- SourceOffset*8+64, getContext()))
- return IRType;
- }
- }
-
- if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
- // If this is a struct, recurse into the field at the specified offset.
- const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
- if (IROffset < SL->getSizeInBytes()) {
- unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
- IROffset -= SL->getElementOffset(FieldIdx);
-
- return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
- SourceTy, SourceOffset);
- }
- }
-
- if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
- llvm::Type *EltTy = ATy->getElementType();
- unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
- unsigned EltOffset = IROffset/EltSize*EltSize;
- return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
- SourceOffset);
- }
-
- // Okay, we don't have any better idea of what to pass, so we pass this in an
- // integer register that isn't too big to fit the rest of the struct.
- unsigned TySizeInBytes =
- (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
-
- assert(TySizeInBytes != SourceOffset && "Empty field?");
-
- // It is always safe to classify this as an integer type up to i64 that
- // isn't larger than the structure.
- return llvm::IntegerType::get(getVMContext(),
- std::min(TySizeInBytes-SourceOffset, 8U)*8);
-}
-
-
-/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
-/// be used as elements of a two register pair to pass or return, return a
-/// first class aggregate to represent them. For example, if the low part of
-/// a by-value argument should be passed as i32* and the high part as float,
-/// return {i32*, float}.
-static llvm::Type *
-GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
- const llvm::DataLayout &TD) {
- // In order to correctly satisfy the ABI, we need to the high part to start
- // at offset 8. If the high and low parts we inferred are both 4-byte types
- // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
- // the second element at offset 8. Check for this:
- unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
- llvm::Align HiAlign = TD.getABITypeAlign(Hi);
- unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
- assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
-
- // To handle this, we have to increase the size of the low part so that the
- // second element will start at an 8 byte offset. We can't increase the size
- // of the second element because it might make us access off the end of the
- // struct.
- if (HiStart != 8) {
- // There are usually two sorts of types the ABI generation code can produce
- // for the low part of a pair that aren't 8 bytes in size: half, float or
- // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
- // NaCl).
- // Promote these to a larger type.
- if (Lo->isHalfTy() || Lo->isFloatTy())
- Lo = llvm::Type::getDoubleTy(Lo->getContext());
- else {
- assert((Lo->isIntegerTy() || Lo->isPointerTy())
- && "Invalid/unknown lo type");
- Lo = llvm::Type::getInt64Ty(Lo->getContext());
- }
- }
-
- llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
-
- // Verify that the second element is at an 8-byte offset.
- assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
- "Invalid x86-64 argument pair!");
- return Result;
-}
-
-ABIArgInfo X86_64ABIInfo::
-classifyReturnType(QualType RetTy) const {
- // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
- // classification algorithm.
- X86_64ABIInfo::Class Lo, Hi;
- classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
-
- // Check some invariants.
- assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
-
- llvm::Type *ResType = nullptr;
- switch (Lo) {
- case NoClass:
- if (Hi == NoClass)
- return ABIArgInfo::getIgnore();
- // If the low part is just padding, it takes no register, leave ResType
- // null.
- assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
- "Unknown missing lo part");
- break;
-
- case SSEUp:
- case X87Up:
- llvm_unreachable("Invalid classification for lo word.");
-
- // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
- // hidden argument.
- case Memory:
- return getIndirectReturnResult(RetTy);
-
- // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
- // available register of the sequence %rax, %rdx is used.
- case Integer:
- ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
-
- // If we have a sign or zero extended integer, make sure to return Extend
- // so that the parameter gets the right LLVM IR attributes.
- if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- if (RetTy->isIntegralOrEnumerationType() &&
- isPromotableIntegerTypeForABI(RetTy))
- return ABIArgInfo::getExtend(RetTy);
- }
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
- // available SSE register of the sequence %xmm0, %xmm1 is used.
- case SSE:
- ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
- // returned on the X87 stack in %st0 as 80-bit x87 number.
- case X87:
- ResType = llvm::Type::getX86_FP80Ty(getVMContext());
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
- // part of the value is returned in %st0 and the imaginary part in
- // %st1.
- case ComplexX87:
- assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
- ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
- llvm::Type::getX86_FP80Ty(getVMContext()));
- break;
- }
-
- llvm::Type *HighPart = nullptr;
- switch (Hi) {
- // Memory was handled previously and X87 should
- // never occur as a hi class.
- case Memory:
- case X87:
- llvm_unreachable("Invalid classification for hi word.");
-
- case ComplexX87: // Previously handled.
- case NoClass:
- break;
-
- case Integer:
- HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
- if (Lo == NoClass) // Return HighPart at offset 8 in memory.
- return ABIArgInfo::getDirect(HighPart, 8);
- break;
- case SSE:
- HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
- if (Lo == NoClass) // Return HighPart at offset 8 in memory.
- return ABIArgInfo::getDirect(HighPart, 8);
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
- // is passed in the next available eightbyte chunk if the last used
- // vector register.
- //
- // SSEUP should always be preceded by SSE, just widen.
- case SSEUp:
- assert(Lo == SSE && "Unexpected SSEUp classification.");
- ResType = GetByteVectorType(RetTy);
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
- // returned together with the previous X87 value in %st0.
- case X87Up:
- // If X87Up is preceded by X87, we don't need to do
- // anything. However, in some cases with unions it may not be
- // preceded by X87. In such situations we follow gcc and pass the
- // extra bits in an SSE reg.
- if (Lo != X87) {
- HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
- if (Lo == NoClass) // Return HighPart at offset 8 in memory.
- return ABIArgInfo::getDirect(HighPart, 8);
- }
- break;
- }
-
- // If a high part was specified, merge it together with the low part. It is
- // known to pass in the high eightbyte of the result. We do this by forming a
- // first class struct aggregate with the high and low part: {low, high}
- if (HighPart)
- ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
-
- return ABIArgInfo::getDirect(ResType);
-}
-
-ABIArgInfo
-X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
- unsigned &neededInt, unsigned &neededSSE,
- bool isNamedArg, bool IsRegCall) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- X86_64ABIInfo::Class Lo, Hi;
- classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);
-
- // Check some invariants.
- // FIXME: Enforce these by construction.
- assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
-
- neededInt = 0;
- neededSSE = 0;
- llvm::Type *ResType = nullptr;
- switch (Lo) {
- case NoClass:
- if (Hi == NoClass)
- return ABIArgInfo::getIgnore();
- // If the low part is just padding, it takes no register, leave ResType
- // null.
- assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
- "Unknown missing lo part");
- break;
-
- // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
- // on the stack.
- case Memory:
-
- // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
- // COMPLEX_X87, it is passed in memory.
- case X87:
- case ComplexX87:
- if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
- ++neededInt;
- return getIndirectResult(Ty, freeIntRegs);
-
- case SSEUp:
- case X87Up:
- llvm_unreachable("Invalid classification for lo word.");
-
- // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
- // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
- // and %r9 is used.
- case Integer:
- ++neededInt;
-
- // Pick an 8-byte type based on the preferred type.
- ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
-
- // If we have a sign or zero extended integer, make sure to return Extend
- // so that the parameter gets the right LLVM IR attributes.
- if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- if (Ty->isIntegralOrEnumerationType() &&
- isPromotableIntegerTypeForABI(Ty))
- return ABIArgInfo::getExtend(Ty);
- }
-
- break;
-
- // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
- // available SSE register is used, the registers are taken in the
- // order from %xmm0 to %xmm7.
- case SSE: {
- llvm::Type *IRType = CGT.ConvertType(Ty);
- ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
- ++neededSSE;
- break;
- }
- }
-
- llvm::Type *HighPart = nullptr;
- switch (Hi) {
- // Memory was handled previously, ComplexX87 and X87 should
- // never occur as hi classes, and X87Up must be preceded by X87,
- // which is passed in memory.
- case Memory:
- case X87:
- case ComplexX87:
- llvm_unreachable("Invalid classification for hi word.");
-
- case NoClass: break;
-
- case Integer:
- ++neededInt;
- // Pick an 8-byte type based on the preferred type.
- HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
-
- if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
- return ABIArgInfo::getDirect(HighPart, 8);
- break;
-
- // X87Up generally doesn't occur here (long double is passed in
- // memory), except in situations involving unions.
- case X87Up:
- case SSE:
- HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
-
- if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
- return ABIArgInfo::getDirect(HighPart, 8);
-
- ++neededSSE;
- break;
-
- // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
- // eightbyte is passed in the upper half of the last used SSE
- // register. This only happens when 128-bit vectors are passed.
- case SSEUp:
- assert(Lo == SSE && "Unexpected SSEUp classification");
- ResType = GetByteVectorType(Ty);
- break;
- }
-
- // If a high part was specified, merge it together with the low part. It is
- // known to pass in the high eightbyte of the result. We do this by forming a
- // first class struct aggregate with the high and low part: {low, high}
- if (HighPart)
- ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
-
- return ABIArgInfo::getDirect(ResType);
-}
-
-ABIArgInfo
-X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
- unsigned &NeededSSE,
- unsigned &MaxVectorWidth) const {
- auto RT = Ty->getAs<RecordType>();
- assert(RT && "classifyRegCallStructType only valid with struct types");
-
- if (RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectReturnResult(Ty);
-
- // Sum up bases
- if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- if (CXXRD->isDynamicClass()) {
- NeededInt = NeededSSE = 0;
- return getIndirectReturnResult(Ty);
- }
-
- for (const auto &I : CXXRD->bases())
- if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
- MaxVectorWidth)
- .isIndirect()) {
- NeededInt = NeededSSE = 0;
- return getIndirectReturnResult(Ty);
- }
- }
-
- // Sum up members
- for (const auto *FD : RT->getDecl()->fields()) {
- QualType MTy = FD->getType();
- if (MTy->isRecordType() && !MTy->isUnionType()) {
- if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
- MaxVectorWidth)
- .isIndirect()) {
- NeededInt = NeededSSE = 0;
- return getIndirectReturnResult(Ty);
- }
- } else {
- unsigned LocalNeededInt, LocalNeededSSE;
- if (classifyArgumentType(MTy, UINT_MAX, LocalNeededInt, LocalNeededSSE,
- true, true)
- .isIndirect()) {
- NeededInt = NeededSSE = 0;
- return getIndirectReturnResult(Ty);
- }
- if (const auto *AT = getContext().getAsConstantArrayType(MTy))
- MTy = AT->getElementType();
- if (const auto *VT = MTy->getAs<VectorType>())
- if (getContext().getTypeSize(VT) > MaxVectorWidth)
- MaxVectorWidth = getContext().getTypeSize(VT);
- NeededInt += LocalNeededInt;
- NeededSSE += LocalNeededSSE;
- }
- }
-
- return ABIArgInfo::getDirect();
-}
-
-ABIArgInfo
-X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
- unsigned &NeededSSE,
- unsigned &MaxVectorWidth) const {
-
- NeededInt = 0;
- NeededSSE = 0;
- MaxVectorWidth = 0;
-
- return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
- MaxVectorWidth);
-}
-
-void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
-
- const unsigned CallingConv = FI.getCallingConvention();
- // It is possible to force Win64 calling convention on any x86_64 target by
- // using __attribute__((ms_abi)). In such case to correctly emit Win64
- // compatible code delegate this call to WinX86_64ABIInfo::computeInfo.
- if (CallingConv == llvm::CallingConv::Win64) {
- WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
- Win64ABIInfo.computeInfo(FI);
- return;
- }
-
- bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
-
- // Keep track of the number of assigned registers.
- unsigned FreeIntRegs = IsRegCall ? 11 : 6;
- unsigned FreeSSERegs = IsRegCall ? 16 : 8;
- unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;
-
- if (!::classifyReturnType(getCXXABI(), FI, *this)) {
- if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
- !FI.getReturnType()->getTypePtr()->isUnionType()) {
- FI.getReturnInfo() = classifyRegCallStructType(
- FI.getReturnType(), NeededInt, NeededSSE, MaxVectorWidth);
- if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
- FreeIntRegs -= NeededInt;
- FreeSSERegs -= NeededSSE;
- } else {
- FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
- }
- } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
- getContext().getCanonicalType(FI.getReturnType()
- ->getAs<ComplexType>()
- ->getElementType()) ==
- getContext().LongDoubleTy)
- // Complex Long Double Type is passed in Memory when Regcall
- // calling convention is used.
- FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
- else
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- }
-
- // If the return value is indirect, then the hidden argument is consuming one
- // integer register.
- if (FI.getReturnInfo().isIndirect())
- --FreeIntRegs;
- else if (NeededSSE && MaxVectorWidth > 0)
- FI.setMaxVectorWidth(MaxVectorWidth);
-
- // The chain argument effectively gives us another free register.
- if (FI.isChainCall())
- ++FreeIntRegs;
-
- unsigned NumRequiredArgs = FI.getNumRequiredArgs();
- // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
- // get assigned (in left-to-right order) for passing as follows...
- unsigned ArgNo = 0;
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it, ++ArgNo) {
- bool IsNamedArg = ArgNo < NumRequiredArgs;
-
- if (IsRegCall && it->type->isStructureOrClassType())
- it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
- MaxVectorWidth);
- else
- it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
- NeededSSE, IsNamedArg);
-
- // AMD64-ABI 3.2.3p3: If there are no registers available for any
- // eightbyte of an argument, the whole argument is passed on the
- // stack. If registers have already been assigned for some
- // eightbytes of such an argument, the assignments get reverted.
- if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
- FreeIntRegs -= NeededInt;
- FreeSSERegs -= NeededSSE;
- if (MaxVectorWidth > FI.getMaxVectorWidth())
- FI.setMaxVectorWidth(MaxVectorWidth);
- } else {
- it->info = getIndirectResult(it->type, FreeIntRegs);
- }
- }
-}
-
-static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
- Address VAListAddr, QualType Ty) {
- Address overflow_arg_area_p =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
- llvm::Value *overflow_arg_area =
- CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
-
- // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
- // byte boundary if alignment needed by type exceeds 8 byte boundary.
- // It isn't stated explicitly in the standard, but in practice we use
- // alignment greater than 16 where necessary.
- CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
- if (Align > CharUnits::fromQuantity(8)) {
- overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
- Align);
- }
-
- // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
- llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *Res =
- CGF.Builder.CreateBitCast(overflow_arg_area,
- llvm::PointerType::getUnqual(LTy));
-
- // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
- // l->overflow_arg_area + sizeof(type).
- // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
- // an 8 byte boundary.
-
- uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
- llvm::Value *Offset =
- llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
- overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
- Offset, "overflow_arg_area.next");
- CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
-
- // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
- return Address(Res, LTy, Align);
-}
-
-Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- // Assume that va_list type is correct; should be pointer to LLVM type:
- // struct {
- // i32 gp_offset;
- // i32 fp_offset;
- // i8* overflow_arg_area;
- // i8* reg_save_area;
- // };
- unsigned neededInt, neededSSE;
-
- Ty = getContext().getCanonicalType(Ty);
- ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
- /*isNamedArg*/false);
-
- // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
- // in the registers. If not go to step 7.
- if (!neededInt && !neededSSE)
- return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
-
- // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
- // general purpose registers needed to pass type and num_fp to hold
- // the number of floating point registers needed.
-
- // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
- // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
- // l->fp_offset > 304 - num_fp * 16 go to step 7.
- //
- // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
- // register save space).
-
- llvm::Value *InRegs = nullptr;
- Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
- llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
- if (neededInt) {
- gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
- gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
- InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
- InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
- }
-
- if (neededSSE) {
- fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
- fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
- llvm::Value *FitsInFP =
- llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
- FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
- InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
- }
-
- llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
- llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
- llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
- CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
-
- // Emit code to load the value if it was passed in registers.
-
- CGF.EmitBlock(InRegBlock);
-
- // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
- // an offset of l->gp_offset and/or l->fp_offset. This may require
- // copying to a temporary location in case the parameter is passed
- // in different register classes or requires an alignment greater
- // than 8 for general purpose registers and 16 for XMM registers.
- //
- // FIXME: This really results in shameful code when we end up needing to
- // collect arguments from different places; often what should result in a
- // simple assembling of a structure from scattered addresses has many more
- // loads than necessary. Can we clean this up?
- llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
- CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
-
- Address RegAddr = Address::invalid();
- if (neededInt && neededSSE) {
- // FIXME: Cleanup.
- assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
- llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
- Address Tmp = CGF.CreateMemTemp(Ty);
- Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
- assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
- llvm::Type *TyLo = ST->getElementType(0);
- llvm::Type *TyHi = ST->getElementType(1);
- assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
- "Unexpected ABI info for mixed regs");
- llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
- llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
- llvm::Value *GPAddr =
- CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
- llvm::Value *FPAddr =
- CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
- llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
- llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
-
- // Copy the first element.
- // FIXME: Our choice of alignment here and below is probably pessimistic.
- llvm::Value *V = CGF.Builder.CreateAlignedLoad(
- TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
- CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyLo)));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
-
- // Copy the second element.
- V = CGF.Builder.CreateAlignedLoad(
- TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
- CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyHi)));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
-
- RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
- } else if (neededInt) {
- RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset),
- CGF.Int8Ty, CharUnits::fromQuantity(8));
- RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
-
- // Copy to a temporary if necessary to ensure the appropriate alignment.
- auto TInfo = getContext().getTypeInfoInChars(Ty);
- uint64_t TySize = TInfo.Width.getQuantity();
- CharUnits TyAlign = TInfo.Align;
-
- // Copy into a temporary if the type is more aligned than the
- // register save area.
- if (TyAlign.getQuantity() > 8) {
- Address Tmp = CGF.CreateMemTemp(Ty);
- CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
- RegAddr = Tmp;
- }
-
- } else if (neededSSE == 1) {
- RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
- CGF.Int8Ty, CharUnits::fromQuantity(16));
- RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
- } else {
- assert(neededSSE == 2 && "Invalid number of needed registers!");
- // SSE registers are spaced 16 bytes apart in the register save
- // area, we need to collect the two eightbytes together.
- // The ABI isn't explicit about this, but it seems reasonable
- // to assume that the slots are 16-byte aligned, since the stack is
- // naturally 16-byte aligned and the prologue is expected to store
- // all the SSE registers to the RSA.
- Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea,
- fp_offset),
- CGF.Int8Ty, CharUnits::fromQuantity(16));
- Address RegAddrHi =
- CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
- CharUnits::fromQuantity(16));
- llvm::Type *ST = AI.canHaveCoerceToType()
- ? AI.getCoerceToType()
- : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
- llvm::Value *V;
- Address Tmp = CGF.CreateMemTemp(Ty);
- Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
- RegAddrLo, ST->getStructElementType(0)));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
- RegAddrHi, ST->getStructElementType(1)));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
-
- RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
- }
-
- // AMD64-ABI 3.5.7p5: Step 5. Set:
- // l->gp_offset = l->gp_offset + num_gp * 8
- // l->fp_offset = l->fp_offset + num_fp * 16.
- if (neededInt) {
- llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
- CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
- gp_offset_p);
- }
- if (neededSSE) {
- llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
- CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
- fp_offset_p);
- }
- CGF.EmitBranch(ContBlock);
-
- // Emit code to load the value if it was passed in memory.
-
- CGF.EmitBlock(InMemBlock);
- Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
-
- // Return the appropriate result.
-
- CGF.EmitBlock(ContBlock);
- Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
- "vaarg.addr");
- return ResAddr;
-}
-
-Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
- // not 1, 2, 4, or 8 bytes, must be passed by reference."
- uint64_t Width = getContext().getTypeSize(Ty);
- bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
- CGF.getContext().getTypeInfoInChars(Ty),
- CharUnits::fromQuantity(8),
- /*allowHigherAlign*/ false);
-}
-
-ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
- QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
- const Type *Base = nullptr;
- uint64_t NumElts = 0;
-
- if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
- isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
- FreeSSERegs -= NumElts;
- return getDirectX86Hva();
- }
- return current;
-}
-
-ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
- bool IsReturnType, bool IsVectorCall,
- bool IsRegCall) const {
-
- if (Ty->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- TypeInfo Info = getContext().getTypeInfo(Ty);
- uint64_t Width = Info.Width;
- CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
-
- const RecordType *RT = Ty->getAs<RecordType>();
- if (RT) {
- if (!IsReturnType) {
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- }
-
- if (RT->getDecl()->hasFlexibleArrayMember())
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-
- }
-
- const Type *Base = nullptr;
- uint64_t NumElts = 0;
- // vectorcall adds the concept of a homogenous vector aggregate, similar to
- // other targets.
- if ((IsVectorCall || IsRegCall) &&
- isHomogeneousAggregate(Ty, Base, NumElts)) {
- if (IsRegCall) {
- if (FreeSSERegs >= NumElts) {
- FreeSSERegs -= NumElts;
- if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
- return ABIArgInfo::getDirect();
- return ABIArgInfo::getExpand();
- }
- return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
- } else if (IsVectorCall) {
- if (FreeSSERegs >= NumElts &&
- (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
- FreeSSERegs -= NumElts;
- return ABIArgInfo::getDirect();
- } else if (IsReturnType) {
- return ABIArgInfo::getExpand();
- } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
- // HVAs are delayed and reclassified in the 2nd step.
- return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
- }
- }
- }
-
- if (Ty->isMemberPointerType()) {
- // If the member pointer is represented by an LLVM int or ptr, pass it
- // directly.
- llvm::Type *LLTy = CGT.ConvertType(Ty);
- if (LLTy->isPointerTy() || LLTy->isIntegerTy())
- return ABIArgInfo::getDirect();
- }
-
- if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
- // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
- // not 1, 2, 4, or 8 bytes, must be passed by reference."
- if (Width > 64 || !llvm::isPowerOf2_64(Width))
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-
- // Otherwise, coerce it to a small integer.
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
- }
-
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- switch (BT->getKind()) {
- case BuiltinType::Bool:
- // Bool type is always extended to the ABI, other builtin types are not
- // extended.
- return ABIArgInfo::getExtend(Ty);
-
- case BuiltinType::LongDouble:
- // Mingw64 GCC uses the old 80 bit extended precision floating point
- // unit. It passes them indirectly through memory.
- if (IsMingw64) {
- const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
- if (LDF == &llvm::APFloat::x87DoubleExtended())
- return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
- }
- break;
-
- case BuiltinType::Int128:
- case BuiltinType::UInt128:
- // If it's a parameter type, the normal ABI rule is that arguments larger
- // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
- // even though it isn't particularly efficient.
- if (!IsReturnType)
- return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
-
- // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
- // Clang matches them for compatibility.
- return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
- llvm::Type::getInt64Ty(getVMContext()), 2));
-
- default:
- break;
- }
- }
-
- if (Ty->isBitIntType()) {
- // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
- // not 1, 2, 4, or 8 bytes, must be passed by reference."
- // However, non-power-of-two bit-precise integers will be passed as 1, 2, 4,
- // or 8 bytes anyway as long is it fits in them, so we don't have to check
- // the power of 2.
- if (Width <= 64)
- return ABIArgInfo::getDirect();
- return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
- }
-
- return ABIArgInfo::getDirect();
-}
-
-void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
- const unsigned CC = FI.getCallingConvention();
- bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
- bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
-
- // If __attribute__((sysv_abi)) is in use, use the SysV argument
- // classification rules.
- if (CC == llvm::CallingConv::X86_64_SysV) {
- X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
- SysVABIInfo.computeInfo(FI);
- return;
- }
-
- unsigned FreeSSERegs = 0;
- if (IsVectorCall) {
- // We can use up to 4 SSE return registers with vectorcall.
- FreeSSERegs = 4;
- } else if (IsRegCall) {
- // RegCall gives us 16 SSE registers.
- FreeSSERegs = 16;
- }
-
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
- IsVectorCall, IsRegCall);
-
- if (IsVectorCall) {
- // We can use up to 6 SSE register parameters with vectorcall.
- FreeSSERegs = 6;
- } else if (IsRegCall) {
- // RegCall gives us 16 SSE registers, we can reuse the return registers.
- FreeSSERegs = 16;
- }
-
- unsigned ArgNum = 0;
- unsigned ZeroSSERegs = 0;
- for (auto &I : FI.arguments()) {
- // Vectorcall in x64 only permits the first 6 arguments to be passed as
- // XMM/YMM registers. After the sixth argument, pretend no vector
- // registers are left.
- unsigned *MaybeFreeSSERegs =
- (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
- I.info =
- classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
- ++ArgNum;
- }
-
- if (IsVectorCall) {
- // For vectorcall, assign aggregate HVAs to any free vector registers in a
- // second pass.
- for (auto &I : FI.arguments())
- I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
- }
-}
-
-Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
- // not 1, 2, 4, or 8 bytes, must be passed by reference."
- uint64_t Width = getContext().getTypeSize(Ty);
- bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
- CGF.getContext().getTypeInfoInChars(Ty),
- CharUnits::fromQuantity(8),
- /*allowHigherAlign*/ false);
-}
-
-static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address, bool Is64Bit,
- bool IsAIX) {
- // This is calculated from the LLVM and GCC tables and verified
- // against gcc output. AFAIK all PPC ABIs use the same encoding.
-
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
-
- llvm::IntegerType *i8 = CGF.Int8Ty;
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
- llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
- llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
-
- // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
- AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
-
- // 32-63: fp0-31, the 8-byte floating-point registers
- AssignToArrayRange(Builder, Address, Eight8, 32, 63);
-
- // 64-67 are various 4-byte or 8-byte special-purpose registers:
- // 64: mq
- // 65: lr
- // 66: ctr
- // 67: ap
- AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);
-
- // 68-76 are various 4-byte special-purpose registers:
- // 68-75 cr0-7
- // 76: xer
- AssignToArrayRange(Builder, Address, Four8, 68, 76);
-
- // 77-108: v0-31, the 16-byte vector registers
- AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
-
- // 109: vrsave
- // 110: vscr
- AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);
-
- // AIX does not utilize the rest of the registers.
- if (IsAIX)
- return false;
-
- // 111: spe_acc
- // 112: spefscr
- // 113: sfp
- AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);
-
- if (!Is64Bit)
- return false;
-
- // TODO: Need to verify if these registers are used on 64 bit AIX with Power8
- // or above CPU.
- // 64-bit only registers:
- // 114: tfhar
- // 115: tfiar
- // 116: texasr
- AssignToArrayRange(Builder, Address, Eight8, 114, 116);
-
- return false;
-}
-
-// AIX
-namespace {
-/// AIXABIInfo - The AIX XCOFF ABI information.
-class AIXABIInfo : public ABIInfo {
- const bool Is64Bit;
- const unsigned PtrByteSize;
- CharUnits getParamTypeAlignment(QualType Ty) const;
-
-public:
- AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
- : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}
-
- bool isPromotableTypeForABI(QualType Ty) const;
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType Ty) const;
-
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
-
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
- }
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-};
-
-class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
- const bool Is64Bit;
-
-public:
- AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
- : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
- Is64Bit(Is64Bit) {}
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- return 1; // r1 is the dedicated stack pointer
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-};
-} // namespace
-
-// Return true if the ABI requires Ty to be passed sign- or zero-
-// extended to 32/64 bits.
-bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // Promotable integer types are required to be promoted by the ABI.
- if (getContext().isPromotableIntegerType(Ty))
- return true;
-
- if (!Is64Bit)
- return false;
-
- // For 64 bit mode, in addition to the usual promotable integer types, we also
- // need to extend all 32-bit types, since the ABI requires promotion to 64
- // bits.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Int:
- case BuiltinType::UInt:
- return true;
- default:
- break;
- }
-
- return false;
-}
-
-ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isAnyComplexType())
- return ABIArgInfo::getDirect();
-
- if (RetTy->isVectorType())
- return ABIArgInfo::getDirect();
-
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (isAggregateTypeForABI(RetTy))
- return getNaturalAlignIndirect(RetTy);
-
- return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- if (Ty->isAnyComplexType())
- return ABIArgInfo::getDirect();
-
- if (Ty->isVectorType())
- return ABIArgInfo::getDirect();
-
- if (isAggregateTypeForABI(Ty)) {
- // Records with non-trivial destructors/copy-constructors should not be
- // passed by value.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- CharUnits CCAlign = getParamTypeAlignment(Ty);
- CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
-
- return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
- /*Realign*/ TyAlign > CCAlign);
- }
-
- return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
-}
-
-CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
- // Complex types are passed just like their elements.
- if (const ComplexType *CTy = Ty->getAs<ComplexType>())
- Ty = CTy->getElementType();
-
- if (Ty->isVectorType())
- return CharUnits::fromQuantity(16);
-
- // If the structure contains a vector type, the alignment is 16.
- if (isRecordWithSIMDVectorType(getContext(), Ty))
- return CharUnits::fromQuantity(16);
-
- return CharUnits::fromQuantity(PtrByteSize);
-}
-
-Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
-
- auto TypeInfo = getContext().getTypeInfoInChars(Ty);
- TypeInfo.Align = getParamTypeAlignment(Ty);
-
- CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
-
- // If we have a complex type and the base type is smaller than the register
- // size, the ABI calls for the real and imaginary parts to be right-adjusted
- // in separate words in 32bit mode or doublewords in 64bit mode. However,
- // Clang expects us to produce a pointer to a structure with the two parts
- // packed tightly. So generate loads of the real and imaginary parts relative
- // to the va_list pointer, and store them to a temporary structure. We do the
- // same as the PPC64ABI here.
- if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
- CharUnits EltSize = TypeInfo.Width / 2;
- if (EltSize < SlotSize)
- return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
- }
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
- SlotSize, /*AllowHigher*/ true);
-}
-
-bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
- CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
- return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
-}
-
-// PowerPC-32
-namespace {
-/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
-class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
- bool IsSoftFloatABI;
- bool IsRetSmallStructInRegABI;
-
- CharUnits getParamTypeAlignment(QualType Ty) const;
-
-public:
- PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
- bool RetSmallStructInRegABI)
- : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
- IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
-
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
- }
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-};
-
-class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
- bool RetSmallStructInRegABI)
- : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
- CGT, SoftFloatABI, RetSmallStructInRegABI)) {}
-
- static bool isStructReturnInRegABI(const llvm::Triple &Triple,
- const CodeGenOptions &Opts);
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- // This is recovered from gcc output.
- return 1; // r1 is the dedicated stack pointer
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-};
-}
-
-CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
- // Complex types are passed just like their elements.
- if (const ComplexType *CTy = Ty->getAs<ComplexType>())
- Ty = CTy->getElementType();
-
- if (Ty->isVectorType())
- return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
- : 4);
-
- // For single-element float/vector structs, we consider the whole type
- // to have the same alignment requirements as its single element.
- const Type *AlignTy = nullptr;
- if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
- const BuiltinType *BT = EltType->getAs<BuiltinType>();
- if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
- (BT && BT->isFloatingPoint()))
- AlignTy = EltType;
- }
-
- if (AlignTy)
- return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
- return CharUnits::fromQuantity(4);
-}
-
-ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
- uint64_t Size;
-
- // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
- if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
- (Size = getContext().getTypeSize(RetTy)) <= 64) {
- // System V ABI (1995), page 3-22, specified:
- // > A structure or union whose size is less than or equal to 8 bytes
- // > shall be returned in r3 and r4, as if it were first stored in the
- // > 8-byte aligned memory area and then the low addressed word were
- // > loaded into r3 and the high-addressed word into r4. Bits beyond
- // > the last member of the structure or union are not defined.
- //
- // GCC for big-endian PPC32 inserts the pad before the first member,
- // not "beyond the last member" of the struct. To stay compatible
- // with GCC, we coerce the struct to an integer of the same size.
- // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
- if (Size == 0)
- return ABIArgInfo::getIgnore();
- else {
- llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
- return ABIArgInfo::getDirect(CoerceTy);
- }
- }
-
- return DefaultABIInfo::classifyReturnType(RetTy);
-}
-
-// TODO: this implementation is now likely redundant with
-// DefaultABIInfo::EmitVAArg.
-Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
- QualType Ty) const {
- if (getTarget().getTriple().isOSDarwin()) {
- auto TI = getContext().getTypeInfoInChars(Ty);
- TI.Align = getParamTypeAlignment(Ty);
-
- CharUnits SlotSize = CharUnits::fromQuantity(4);
- return emitVoidPtrVAArg(CGF, VAList, Ty,
- classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
- /*AllowHigherAlign=*/true);
- }
-
- const unsigned OverflowLimit = 8;
- if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
- // TODO: Implement this. For now ignore.
- (void)CTy;
- return Address::invalid(); // FIXME?
- }
-
- // struct __va_list_tag {
- // unsigned char gpr;
- // unsigned char fpr;
- // unsigned short reserved;
- // void *overflow_arg_area;
- // void *reg_save_area;
- // };
-
- bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
- bool isInt = !Ty->isFloatingType();
- bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
-
- // All aggregates are passed indirectly? That doesn't seem consistent
- // with the argument-lowering code.
- bool isIndirect = isAggregateTypeForABI(Ty);
-
- CGBuilderTy &Builder = CGF.Builder;
-
- // The calling convention either uses 1-2 GPRs or 1 FPR.
- Address NumRegsAddr = Address::invalid();
- if (isInt || IsSoftFloatABI) {
- NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
- } else {
- NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
- }
-
- llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
-
- // "Align" the register count when TY is i64.
- if (isI64 || (isF64 && IsSoftFloatABI)) {
- NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
- NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
- }
-
- llvm::Value *CC =
- Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
-
- llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
- llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
- llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
-
- Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
-
- llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
- if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
-
- // Case 1: consume registers.
- Address RegAddr = Address::invalid();
- {
- CGF.EmitBlock(UsingRegs);
-
- Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
- RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), CGF.Int8Ty,
- CharUnits::fromQuantity(8));
- assert(RegAddr.getElementType() == CGF.Int8Ty);
-
- // Floating-point registers start after the general-purpose registers.
- if (!(isInt || IsSoftFloatABI)) {
- RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
- CharUnits::fromQuantity(32));
- }
-
- // Get the address of the saved value by scaling the number of
- // registers we've used by the number of
- CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
- llvm::Value *RegOffset =
- Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
- RegAddr = Address(
- Builder.CreateInBoundsGEP(CGF.Int8Ty, RegAddr.getPointer(), RegOffset),
- CGF.Int8Ty, RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
- RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
-
- // Increase the used-register count.
- NumRegs =
- Builder.CreateAdd(NumRegs,
- Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
- Builder.CreateStore(NumRegs, NumRegsAddr);
-
- CGF.EmitBranch(Cont);
- }
-
- // Case 2: consume space in the overflow area.
- Address MemAddr = Address::invalid();
- {
- CGF.EmitBlock(UsingOverflow);
-
- Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
-
- // Everything in the overflow area is rounded up to a size of at least 4.
- CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
-
- CharUnits Size;
- if (!isIndirect) {
- auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
- Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
- } else {
- Size = CGF.getPointerSize();
- }
-
- Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
- Address OverflowArea =
- Address(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), CGF.Int8Ty,
- OverflowAreaAlign);
- // Round up address of argument to alignment
- CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
- if (Align > OverflowAreaAlign) {
- llvm::Value *Ptr = OverflowArea.getPointer();
- OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
- OverflowArea.getElementType(), Align);
- }
-
- MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
-
- // Increase the overflow area.
- OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
- Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
- CGF.EmitBranch(Cont);
- }
-
- CGF.EmitBlock(Cont);
-
- // Merge the cases with a phi.
- Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
- "vaarg.addr");
-
- // Load the pointer if the argument was passed indirectly.
- if (isIndirect) {
- Result = Address(Builder.CreateLoad(Result, "aggr"), ElementTy,
- getContext().getTypeAlignInChars(Ty));
- }
-
- return Result;
-}
-
-bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
- const llvm::Triple &Triple, const CodeGenOptions &Opts) {
- assert(Triple.isPPC32());
-
- switch (Opts.getStructReturnConvention()) {
- case CodeGenOptions::SRCK_Default:
- break;
- case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
- return false;
- case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
- return true;
- }
-
- if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
- return true;
-
- return false;
-}
-
-bool
-PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
- return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
- /*IsAIX*/ false);
-}
-
-// PowerPC-64
-
-namespace {
-/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
-class PPC64_SVR4_ABIInfo : public ABIInfo {
-public:
- enum ABIKind {
- ELFv1 = 0,
- ELFv2
- };
-
-private:
- static const unsigned GPRBits = 64;
- ABIKind Kind;
- bool IsSoftFloatABI;
-
-public:
- PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind,
- bool SoftFloatABI)
- : ABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {}
-
- bool isPromotableTypeForABI(QualType Ty) const;
- CharUnits getParamTypeAlignment(QualType Ty) const;
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType Ty) const;
-
- bool isHomogeneousAggregateBaseType(QualType Ty) const override;
- bool isHomogeneousAggregateSmallEnough(const Type *Ty,
- uint64_t Members) const override;
-
- // TODO: We can add more logic to computeInfo to improve performance.
- // Example: For aggregate arguments that fit in a register, we could
- // use getDirectInReg (as is done below for structs containing a single
- // floating-point value) to avoid pushing them to memory on function
- // entry. This would require changing the logic in PPCISelLowering
- // when lowering the parameters in the caller and args in the callee.
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments()) {
- // We rely on the default argument classification for the most part.
- // One exception: An aggregate containing a single floating-point
- // or vector item must be passed in a register if one is available.
- const Type *T = isSingleElementStruct(I.type, getContext());
- if (T) {
- const BuiltinType *BT = T->getAs<BuiltinType>();
- if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
- (BT && BT->isFloatingPoint())) {
- QualType QT(T, 0);
- I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
- continue;
- }
- }
- I.info = classifyArgumentType(I.type);
- }
- }
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-};
-
-class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
-
-public:
- PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
- PPC64_SVR4_ABIInfo::ABIKind Kind,
- bool SoftFloatABI)
- : TargetCodeGenInfo(
- std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {
- SwiftInfo =
- std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
- }
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- // This is recovered from gcc output.
- return 1; // r1 is the dedicated stack pointer
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-};
-
-class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
-public:
- PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- // This is recovered from gcc output.
- return 1; // r1 is the dedicated stack pointer
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-};
-
-}
-
-// Return true if the ABI requires Ty to be passed sign- or zero-
-// extended to 64 bits.
-bool
-PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // Promotable integer types are required to be promoted by the ABI.
- if (isPromotableIntegerTypeForABI(Ty))
- return true;
-
- // In addition to the usual promotable integer types, we also need to
- // extend all 32-bit types, since the ABI requires promotion to 64 bits.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Int:
- case BuiltinType::UInt:
- return true;
- default:
- break;
- }
-
- if (const auto *EIT = Ty->getAs<BitIntType>())
- if (EIT->getNumBits() < 64)
- return true;
-
- return false;
-}
-
-/// isAlignedParamType - Determine whether a type requires 16-byte or
-/// higher alignment in the parameter area. Always returns at least 8.
-CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
- // Complex types are passed just like their elements.
- if (const ComplexType *CTy = Ty->getAs<ComplexType>())
- Ty = CTy->getElementType();
-
- auto FloatUsesVector = [this](QualType Ty){
- return Ty->isRealFloatingType() && &getContext().getFloatTypeSemantics(
- Ty) == &llvm::APFloat::IEEEquad();
- };
-
- // Only vector types of size 16 bytes need alignment (larger types are
- // passed via reference, smaller types are not aligned).
- if (Ty->isVectorType()) {
- return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
- } else if (FloatUsesVector(Ty)) {
- // According to ABI document section 'Optional Save Areas': If extended
- // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
- // format are supported, map them to a single quadword, quadword aligned.
- return CharUnits::fromQuantity(16);
- }
-
- // For single-element float/vector structs, we consider the whole type
- // to have the same alignment requirements as its single element.
- const Type *AlignAsType = nullptr;
- const Type *EltType = isSingleElementStruct(Ty, getContext());
- if (EltType) {
- const BuiltinType *BT = EltType->getAs<BuiltinType>();
- if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
- (BT && BT->isFloatingPoint()))
- AlignAsType = EltType;
- }
-
- // Likewise for ELFv2 homogeneous aggregates.
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (!AlignAsType && Kind == ELFv2 &&
- isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
- AlignAsType = Base;
-
- // With special case aggregates, only vector base types need alignment.
- if (AlignAsType) {
- bool UsesVector = AlignAsType->isVectorType() ||
- FloatUsesVector(QualType(AlignAsType, 0));
- return CharUnits::fromQuantity(UsesVector ? 16 : 8);
- }
-
- // Otherwise, we only need alignment for any aggregate type that
- // has an alignment requirement of >= 16 bytes.
- if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
- return CharUnits::fromQuantity(16);
- }
-
- return CharUnits::fromQuantity(8);
-}
-
-/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
-/// aggregate. Base is set to the base element type, and Members is set
-/// to the number of base elements.
-bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
- uint64_t &Members) const {
- if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- uint64_t NElements = AT->getSize().getZExtValue();
- if (NElements == 0)
- return false;
- if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
- return false;
- Members *= NElements;
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return false;
-
- Members = 0;
-
- // If this is a C++ record, check the properties of the record such as
- // bases and ABI specific restrictions
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD))
- return false;
-
- for (const auto &I : CXXRD->bases()) {
- // Ignore empty records.
- if (isEmptyRecord(getContext(), I.getType(), true))
- continue;
-
- uint64_t FldMembers;
- if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
- return false;
-
- Members += FldMembers;
- }
- }
-
- for (const auto *FD : RD->fields()) {
- // Ignore (non-zero arrays of) empty records.
- QualType FT = FD->getType();
- while (const ConstantArrayType *AT =
- getContext().getAsConstantArrayType(FT)) {
- if (AT->getSize().getZExtValue() == 0)
- return false;
- FT = AT->getElementType();
- }
- if (isEmptyRecord(getContext(), FT, true))
- continue;
-
- if (isZeroLengthBitfieldPermittedInHomogeneousAggregate() &&
- FD->isZeroLengthBitField(getContext()))
- continue;
-
- uint64_t FldMembers;
- if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
- return false;
-
- Members = (RD->isUnion() ?
- std::max(Members, FldMembers) : Members + FldMembers);
- }
-
- if (!Base)
- return false;
-
- // Ensure there is no padding.
- if (getContext().getTypeSize(Base) * Members !=
- getContext().getTypeSize(Ty))
- return false;
- } else {
- Members = 1;
- if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
- Members = 2;
- Ty = CT->getElementType();
- }
-
- // Most ABIs only support float, double, and some vector type widths.
- if (!isHomogeneousAggregateBaseType(Ty))
- return false;
-
- // The base type must be the same for all members. Types that
- // agree in both total size and mode (float vs. vector) are
- // treated as being equivalent here.
- const Type *TyPtr = Ty.getTypePtr();
- if (!Base) {
- Base = TyPtr;
- // If it's a non-power-of-2 vector, its size is already a power-of-2,
- // so make sure to widen it explicitly.
- if (const VectorType *VT = Base->getAs<VectorType>()) {
- QualType EltTy = VT->getElementType();
- unsigned NumElements =
- getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
- Base = getContext()
- .getVectorType(EltTy, NumElements, VT->getVectorKind())
- .getTypePtr();
- }
- }
-
- if (Base->isVectorType() != TyPtr->isVectorType() ||
- getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
- return false;
- }
- return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
-}
-
-bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
- // Homogeneous aggregates for ELFv2 must have base types of float,
- // double, long double, or 128-bit vectors.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- if (BT->getKind() == BuiltinType::Float ||
- BT->getKind() == BuiltinType::Double ||
- BT->getKind() == BuiltinType::LongDouble ||
- BT->getKind() == BuiltinType::Ibm128 ||
- (getContext().getTargetInfo().hasFloat128Type() &&
- (BT->getKind() == BuiltinType::Float128))) {
- if (IsSoftFloatABI)
- return false;
- return true;
- }
- }
- if (const VectorType *VT = Ty->getAs<VectorType>()) {
- if (getContext().getTypeSize(VT) == 128)
- return true;
- }
- return false;
-}
-
-bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
- const Type *Base, uint64_t Members) const {
- // Vector and fp128 types require one register, other floating point types
- // require one or two registers depending on their size.
- uint32_t NumRegs =
- ((getContext().getTargetInfo().hasFloat128Type() &&
- Base->isFloat128Type()) ||
- Base->isVectorType()) ? 1
- : (getContext().getTypeSize(Base) + 63) / 64;
-
- // Homogeneous Aggregates may occupy at most 8 registers.
- return Members * NumRegs <= 8;
-}
-
-ABIArgInfo
-PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- if (Ty->isAnyComplexType())
- return ABIArgInfo::getDirect();
-
- // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
- // or via reference (larger than 16 bytes).
- if (Ty->isVectorType()) {
- uint64_t Size = getContext().getTypeSize(Ty);
- if (Size > 128)
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
- else if (Size < 128) {
- llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
- return ABIArgInfo::getDirect(CoerceTy);
- }
- }
-
- if (const auto *EIT = Ty->getAs<BitIntType>())
- if (EIT->getNumBits() > 128)
- return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
-
- if (isAggregateTypeForABI(Ty)) {
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
- uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
-
- // ELFv2 homogeneous aggregates are passed as array types.
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (Kind == ELFv2 &&
- isHomogeneousAggregate(Ty, Base, Members)) {
- llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
- llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
- return ABIArgInfo::getDirect(CoerceTy);
- }
-
- // If an aggregate may end up fully in registers, we do not
- // use the ByVal method, but pass the aggregate as array.
- // This is usually beneficial since we avoid forcing the
- // back-end to store the argument to memory.
- uint64_t Bits = getContext().getTypeSize(Ty);
- if (Bits > 0 && Bits <= 8 * GPRBits) {
- llvm::Type *CoerceTy;
-
- // Types up to 8 bytes are passed as integer type (which will be
- // properly aligned in the argument save area doubleword).
- if (Bits <= GPRBits)
- CoerceTy =
- llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
- // Larger types are passed as arrays, with the base type selected
- // according to the required alignment in the save area.
- else {
- uint64_t RegBits = ABIAlign * 8;
- uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
- llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
- CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
- }
-
- return ABIArgInfo::getDirect(CoerceTy);
- }
-
- // All other aggregates are passed ByVal.
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
- /*ByVal=*/true,
- /*Realign=*/TyAlign > ABIAlign);
- }
-
- return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo
-PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (RetTy->isAnyComplexType())
- return ABIArgInfo::getDirect();
-
- // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
- // or via reference (larger than 16 bytes).
- if (RetTy->isVectorType()) {
- uint64_t Size = getContext().getTypeSize(RetTy);
- if (Size > 128)
- return getNaturalAlignIndirect(RetTy);
- else if (Size < 128) {
- llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
- return ABIArgInfo::getDirect(CoerceTy);
- }
- }
-
- if (const auto *EIT = RetTy->getAs<BitIntType>())
- if (EIT->getNumBits() > 128)
- return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
-
- if (isAggregateTypeForABI(RetTy)) {
- // ELFv2 homogeneous aggregates are returned as array types.
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (Kind == ELFv2 &&
- isHomogeneousAggregate(RetTy, Base, Members)) {
- llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
- llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
- return ABIArgInfo::getDirect(CoerceTy);
- }
-
- // ELFv2 small aggregates are returned in up to two registers.
- uint64_t Bits = getContext().getTypeSize(RetTy);
- if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
- if (Bits == 0)
- return ABIArgInfo::getIgnore();
-
- llvm::Type *CoerceTy;
- if (Bits > GPRBits) {
- CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
- CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
- } else
- CoerceTy =
- llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
- return ABIArgInfo::getDirect(CoerceTy);
- }
-
- // All other aggregates are returned indirectly.
- return getNaturalAlignIndirect(RetTy);
- }
-
- return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
-Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- auto TypeInfo = getContext().getTypeInfoInChars(Ty);
- TypeInfo.Align = getParamTypeAlignment(Ty);
-
- CharUnits SlotSize = CharUnits::fromQuantity(8);
-
- // If we have a complex type and the base type is smaller than 8 bytes,
- // the ABI calls for the real and imaginary parts to be right-adjusted
- // in separate doublewords. However, Clang expects us to produce a
- // pointer to a structure with the two parts packed tightly. So generate
- // loads of the real and imaginary parts relative to the va_list pointer,
- // and store them to a temporary structure.
- if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
- CharUnits EltSize = TypeInfo.Width / 2;
- if (EltSize < SlotSize)
- return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
- }
-
- // Otherwise, just use the general rule.
- //
- // The PPC64 ABI passes some arguments in integer registers, even to variadic
- // functions. To allow va_list to use the simple "void*" representation,
- // variadic calls allocate space in the argument area for the integer argument
- // registers, and variadic functions spill their integer argument registers to
- // this area in their prologues. When aggregates smaller than a register are
- // passed this way, they are passed in the least significant bits of the
- // register, which means that after spilling on big-endian targets they will
- // be right-aligned in their argument slot. This is uncommon; for a variety of
- // reasons, other big-endian targets don't end up right-aligning aggregate
- // types this way, and so right-alignment only applies to fundamental types.
- // So on PPC64, we must force the use of right-alignment even for aggregates.
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
- SlotSize, /*AllowHigher*/ true,
- /*ForceRightAdjust*/ true);
-}
-
-bool
-PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
- CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
- return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
- /*IsAIX*/ false);
-}
-
-bool
-PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
- return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
- /*IsAIX*/ false);
-}
-
-//===----------------------------------------------------------------------===//
-// AArch64 ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class AArch64ABIInfo : public ABIInfo {
-public:
- enum ABIKind {
- AAPCS = 0,
- DarwinPCS,
- Win64
- };
-
-private:
- ABIKind Kind;
-
-public:
- AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
-
-private:
- ABIKind getABIKind() const { return Kind; }
- bool isDarwinPCS() const { return Kind == DarwinPCS; }
-
- ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
- unsigned CallingConvention) const;
- ABIArgInfo coerceIllegalVector(QualType Ty) const;
- bool isHomogeneousAggregateBaseType(QualType Ty) const override;
- bool isHomogeneousAggregateSmallEnough(const Type *Ty,
- uint64_t Members) const override;
- bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;
-
- bool isIllegalVectorType(QualType Ty) const;
-
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!::classifyReturnType(getCXXABI(), FI, *this))
- FI.getReturnInfo() =
- classifyReturnType(FI.getReturnType(), FI.isVariadic());
-
- for (auto &it : FI.arguments())
- it.info = classifyArgumentType(it.type, FI.isVariadic(),
- FI.getCallingConvention());
- }
-
- Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-
- Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override {
- llvm::Type *BaseTy = CGF.ConvertType(Ty);
- if (isa<llvm::ScalableVectorType>(BaseTy))
- llvm::report_fatal_error("Passing SVE types to variadic functions is "
- "currently not supported");
-
- return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
- : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
- : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
- }
-
- Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- bool allowBFloatArgsAndRet() const override {
- return getTarget().hasBFloat16Type();
- }
-};
-
-class AArch64SwiftABIInfo : public SwiftABIInfo {
-public:
- explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
- : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}
-
- bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
- unsigned NumElts) const override;
-};
-
-class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
- : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
- SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
- }
-
- StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
- return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
- }
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- return 31;
- }
-
- bool doesReturnSlotInterfereWithArgs() const override { return false; }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD)
- return;
-
- const auto *TA = FD->getAttr<TargetAttr>();
- if (TA == nullptr)
- return;
-
- ParsedTargetAttr Attr =
- CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
- if (Attr.BranchProtection.empty())
- return;
-
- TargetInfo::BranchProtectionInfo BPI;
- StringRef Error;
- (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
- Attr.CPU, BPI, Error);
- assert(Error.empty());
-
- auto *Fn = cast<llvm::Function>(GV);
- static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
- Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
-
- if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
- Fn->addFnAttr("sign-return-address-key",
- BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
- ? "a_key"
- : "b_key");
- }
-
- Fn->addFnAttr("branch-target-enforcement",
- BPI.BranchTargetEnforcement ? "true" : "false");
- }
-
- bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
- llvm::Type *Ty) const override {
- if (CGF.getTarget().hasFeature("ls64")) {
- auto *ST = dyn_cast<llvm::StructType>(Ty);
- if (ST && ST->getNumElements() == 1) {
- auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
- if (AT && AT->getNumElements() == 8 &&
- AT->getElementType()->isIntegerTy(64))
- return true;
- }
- }
- return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
- }
-};
-
-class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
-public:
- WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
- : AArch64TargetCodeGenInfo(CGT, K) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override;
-
- void getDependentLibraryOption(llvm::StringRef Lib,
- llvm::SmallString<24> &Opt) const override {
- Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
- }
-
- void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
- llvm::SmallString<32> &Opt) const override {
- Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
- }
-};
-
-void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
- AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
- if (GV->isDeclaration())
- return;
- addStackProbeTargetAttributes(D, GV, CGM);
-}
-}
-
-ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
- assert(Ty->isVectorType() && "expected vector type!");
-
- const auto *VT = Ty->castAs<VectorType>();
- if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
- assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
- assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
- BuiltinType::UChar &&
- "unexpected builtin type for SVE predicate!");
- return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
- llvm::Type::getInt1Ty(getVMContext()), 16));
- }
-
- if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) {
- assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
-
- const auto *BT = VT->getElementType()->castAs<BuiltinType>();
- llvm::ScalableVectorType *ResType = nullptr;
- switch (BT->getKind()) {
- default:
- llvm_unreachable("unexpected builtin type for SVE vector!");
- case BuiltinType::SChar:
- case BuiltinType::UChar:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getInt8Ty(getVMContext()), 16);
- break;
- case BuiltinType::Short:
- case BuiltinType::UShort:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getInt16Ty(getVMContext()), 8);
- break;
- case BuiltinType::Int:
- case BuiltinType::UInt:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getInt32Ty(getVMContext()), 4);
- break;
- case BuiltinType::Long:
- case BuiltinType::ULong:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getInt64Ty(getVMContext()), 2);
- break;
- case BuiltinType::Half:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getHalfTy(getVMContext()), 8);
- break;
- case BuiltinType::Float:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getFloatTy(getVMContext()), 4);
- break;
- case BuiltinType::Double:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getDoubleTy(getVMContext()), 2);
- break;
- case BuiltinType::BFloat16:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getBFloatTy(getVMContext()), 8);
- break;
- }
- return ABIArgInfo::getDirect(ResType);
- }
-
- uint64_t Size = getContext().getTypeSize(Ty);
- // Android promotes <2 x i8> to i16, not i32
- if (isAndroid() && (Size <= 16)) {
- llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
- return ABIArgInfo::getDirect(ResType);
- }
- if (Size <= 32) {
- llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(ResType);
- }
- if (Size == 64) {
- auto *ResType =
- llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
- return ABIArgInfo::getDirect(ResType);
- }
- if (Size == 128) {
- auto *ResType =
- llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
- return ABIArgInfo::getDirect(ResType);
- }
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-}
-
-ABIArgInfo
-AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
- unsigned CallingConvention) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- // Handle illegal vector types here.
- if (isIllegalVectorType(Ty))
- return coerceIllegalVector(Ty);
-
- if (!isAggregateTypeForABI(Ty)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- if (const auto *EIT = Ty->getAs<BitIntType>())
- if (EIT->getNumBits() > 128)
- return getNaturalAlignIndirect(Ty);
-
- return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
- ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
- }
-
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are always indirect.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
- CGCXXABI::RAA_DirectInMemory);
- }
-
- // Empty records are always ignored on Darwin, but actually passed in C++ mode
- // elsewhere for GNU compatibility.
- uint64_t Size = getContext().getTypeSize(Ty);
- bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
- if (IsEmpty || Size == 0) {
- if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
- return ABIArgInfo::getIgnore();
-
- // GNU C mode. The only argument that gets ignored is an empty one with size
- // 0.
- if (IsEmpty && Size == 0)
- return ABIArgInfo::getIgnore();
- return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
- }
-
- // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
- const Type *Base = nullptr;
- uint64_t Members = 0;
- bool IsWin64 = Kind == Win64 || CallingConvention == llvm::CallingConv::Win64;
- bool IsWinVariadic = IsWin64 && IsVariadic;
- // In variadic functions on Windows, all composite types are treated alike,
- // no special handling of HFAs/HVAs.
- if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
- if (Kind != AArch64ABIInfo::AAPCS)
- return ABIArgInfo::getDirect(
- llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
-
- // For alignment adjusted HFAs, cap the argument alignment to 16, leave it
- // default otherwise.
- unsigned Align =
- getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
- unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
- Align = (Align > BaseAlign && Align >= 16) ? 16 : 0;
- return ABIArgInfo::getDirect(
- llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
- nullptr, true, Align);
- }
-
- // Aggregates <= 16 bytes are passed directly in registers or on the stack.
- if (Size <= 128) {
- // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
- // same size and alignment.
- if (getTarget().isRenderScriptTarget()) {
- return coerceToIntArray(Ty, getContext(), getVMContext());
- }
- unsigned Alignment;
- if (Kind == AArch64ABIInfo::AAPCS) {
- Alignment = getContext().getTypeUnadjustedAlign(Ty);
- Alignment = Alignment < 128 ? 64 : 128;
- } else {
- Alignment =
- std::max(getContext().getTypeAlign(Ty),
- (unsigned)getTarget().getPointerWidth(LangAS::Default));
- }
- Size = llvm::alignTo(Size, Alignment);
-
- // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
- // For aggregates with 16-byte alignment, we use i128.
- llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
- return ABIArgInfo::getDirect(
- Size == Alignment ? BaseTy
- : llvm::ArrayType::get(BaseTy, Size / Alignment));
- }
-
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-}
-
-ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
- bool IsVariadic) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (const auto *VT = RetTy->getAs<VectorType>()) {
- if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
- VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
- return coerceIllegalVector(RetTy);
- }
-
- // Large vector types should be returned via memory.
- if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
- return getNaturalAlignIndirect(RetTy);
-
- if (!isAggregateTypeForABI(RetTy)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- if (const auto *EIT = RetTy->getAs<BitIntType>())
- if (EIT->getNumBits() > 128)
- return getNaturalAlignIndirect(RetTy);
-
- return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
- ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
- }
-
- uint64_t Size = getContext().getTypeSize(RetTy);
- if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
- return ABIArgInfo::getIgnore();
-
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (isHomogeneousAggregate(RetTy, Base, Members) &&
- !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
- IsVariadic))
- // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
- return ABIArgInfo::getDirect();
-
- // Aggregates <= 16 bytes are returned directly in registers or on the stack.
- if (Size <= 128) {
- // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
- // same size and alignment.
- if (getTarget().isRenderScriptTarget()) {
- return coerceToIntArray(RetTy, getContext(), getVMContext());
- }
-
- if (Size <= 64 && getDataLayout().isLittleEndian()) {
- // Composite types are returned in lower bits of a 64-bit register for LE,
- // and in higher bits for BE. However, integer types are always returned
- // in lower bits for both LE and BE, and they are not rounded up to
- // 64-bits. We can skip rounding up of composite types for LE, but not for
- // BE, otherwise composite types will be indistinguishable from integer
- // types.
- return ABIArgInfo::getDirect(
- llvm::IntegerType::get(getVMContext(), Size));
- }
-
- unsigned Alignment = getContext().getTypeAlign(RetTy);
- Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
-
- // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
- // For aggregates with 16-byte alignment, we use i128.
- if (Alignment < 128 && Size == 128) {
- llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
- return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
- }
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
- }
-
- return getNaturalAlignIndirect(RetTy);
-}
-
-/// isIllegalVectorType - check whether the vector type is legal for AArch64.
-bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
- if (const VectorType *VT = Ty->getAs<VectorType>()) {
- // Check whether VT is a fixed-length SVE vector. These types are
- // represented as scalable vectors in function args/return and must be
- // coerced from fixed vectors.
- if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
- VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
- return true;
-
- // Check whether VT is legal.
- unsigned NumElements = VT->getNumElements();
- uint64_t Size = getContext().getTypeSize(VT);
- // NumElements should be power of 2.
- if (!llvm::isPowerOf2_32(NumElements))
- return true;
-
- // arm64_32 has to be compatible with the ARM logic here, which allows huge
- // vectors for some reason.
- llvm::Triple Triple = getTarget().getTriple();
- if (Triple.getArch() == llvm::Triple::aarch64_32 &&
- Triple.isOSBinFormatMachO())
- return Size <= 32;
-
- return Size != 64 && (Size != 128 || NumElements == 1);
- }
- return false;
-}
-
-bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
- llvm::Type *EltTy,
- unsigned NumElts) const {
- if (!llvm::isPowerOf2_32(NumElts))
- return false;
- if (VectorSize.getQuantity() != 8 &&
- (VectorSize.getQuantity() != 16 || NumElts == 1))
- return false;
- return true;
-}
-
-bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
- // Homogeneous aggregates for AAPCS64 must have base types of a floating
- // point type or a short-vector type. This is the same as the 32-bit ABI,
- // but with the difference that any floating-point type is allowed,
- // including __fp16.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- if (BT->isFloatingPoint())
- return true;
- } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
- unsigned VecSize = getContext().getTypeSize(VT);
- if (VecSize == 64 || VecSize == 128)
- return true;
- }
- return false;
-}
-
-bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
- uint64_t Members) const {
- return Members <= 4;
-}
-
-bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
- const {
- // AAPCS64 says that the rule for whether something is a homogeneous
- // aggregate is applied to the output of the data layout decision. So
- // anything that doesn't affect the data layout also does not affect
- // homogeneity. In particular, zero-length bitfields don't stop a struct
- // being homogeneous.
- return true;
-}
-
-Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
- CGF.CurFnInfo->getCallingConvention());
- // Empty records are ignored for parameter passing purposes.
- if (AI.isIgnore()) {
- uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
- CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
- VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
- auto *Load = CGF.Builder.CreateLoad(VAListAddr);
- Address Addr = Address(Load, CGF.Int8Ty, SlotSize);
- return CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
- }
-
- bool IsIndirect = AI.isIndirect();
-
- llvm::Type *BaseTy = CGF.ConvertType(Ty);
- if (IsIndirect)
- BaseTy = llvm::PointerType::getUnqual(BaseTy);
- else if (AI.getCoerceToType())
- BaseTy = AI.getCoerceToType();
-
- unsigned NumRegs = 1;
- if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
- BaseTy = ArrTy->getElementType();
- NumRegs = ArrTy->getNumElements();
- }
- bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
-
- // The AArch64 va_list type and handling is specified in the Procedure Call
- // Standard, section B.4:
- //
- // struct {
- // void *__stack;
- // void *__gr_top;
- // void *__vr_top;
- // int __gr_offs;
- // int __vr_offs;
- // };
-
- llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
- llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
- llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
- llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
-
- CharUnits TySize = getContext().getTypeSizeInChars(Ty);
- CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);
-
- Address reg_offs_p = Address::invalid();
- llvm::Value *reg_offs = nullptr;
- int reg_top_index;
- int RegSize = IsIndirect ? 8 : TySize.getQuantity();
- if (!IsFPR) {
- // 3 is the field number of __gr_offs
- reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
- reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
- reg_top_index = 1; // field number for __gr_top
- RegSize = llvm::alignTo(RegSize, 8);
- } else {
- // 4 is the field number of __vr_offs.
- reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
- reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
- reg_top_index = 2; // field number for __vr_top
- RegSize = 16 * NumRegs;
- }
-
- //=======================================
- // Find out where argument was passed
- //=======================================
-
- // If reg_offs >= 0 we're already using the stack for this type of
- // argument. We don't want to keep updating reg_offs (in case it overflows,
- // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
- // whatever they get).
- llvm::Value *UsingStack = nullptr;
- UsingStack = CGF.Builder.CreateICmpSGE(
- reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
-
- CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
-
- // Otherwise, at least some kind of argument could go in these registers, the
- // question is whether this particular type is too big.
- CGF.EmitBlock(MaybeRegBlock);
-
- // Integer arguments may need to correct register alignment (for example a
- // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
- // align __gr_offs to calculate the potential address.
- if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
- int Align = TyAlign.getQuantity();
-
- reg_offs = CGF.Builder.CreateAdd(
- reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
- "align_regoffs");
- reg_offs = CGF.Builder.CreateAnd(
- reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
- "aligned_regoffs");
- }
-
- // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
- // The fact that this is done unconditionally reflects the fact that
- // allocating an argument to the stack also uses up all the remaining
- // registers of the appropriate kind.
- llvm::Value *NewOffset = nullptr;
- NewOffset = CGF.Builder.CreateAdd(
- reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
- CGF.Builder.CreateStore(NewOffset, reg_offs_p);
-
- // Now we're in a position to decide whether this argument really was in
- // registers or not.
- llvm::Value *InRegs = nullptr;
- InRegs = CGF.Builder.CreateICmpSLE(
- NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
-
- CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
-
- //=======================================
- // Argument was in registers
- //=======================================
-
- // Now we emit the code for if the argument was originally passed in
- // registers. First start the appropriate block:
- CGF.EmitBlock(InRegBlock);
-
- llvm::Value *reg_top = nullptr;
- Address reg_top_p =
- CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
- reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
- Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
- CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
- Address RegAddr = Address::invalid();
- llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;
-
- if (IsIndirect) {
- // If it's been passed indirectly (actually a struct), whatever we find from
- // stored registers or on the stack will actually be a struct **.
- MemTy = llvm::PointerType::getUnqual(MemTy);
- }
-
- const Type *Base = nullptr;
- uint64_t NumMembers = 0;
- bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
- if (IsHFA && NumMembers > 1) {
- // Homogeneous aggregates passed in registers will have their elements split
- // and stored 16-bytes apart regardless of size (they're notionally in qN,
- // qN+1, ...). We reload and store into a temporary local variable
- // contiguously.
- assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
- auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
- llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
- llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
- Address Tmp = CGF.CreateTempAlloca(HFATy,
- std::max(TyAlign, BaseTyInfo.Align));
-
- // On big-endian platforms, the value will be right-aligned in its slot.
- int Offset = 0;
- if (CGF.CGM.getDataLayout().isBigEndian() &&
- BaseTyInfo.Width.getQuantity() < 16)
- Offset = 16 - BaseTyInfo.Width.getQuantity();
-
- for (unsigned i = 0; i < NumMembers; ++i) {
- CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
- Address LoadAddr =
- CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
- LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
-
- Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);
-
- llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
- CGF.Builder.CreateStore(Elem, StoreAddr);
- }
-
- RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
- } else {
- // Otherwise the object is contiguous in memory.
-
- // It might be right-aligned in its slot.
- CharUnits SlotSize = BaseAddr.getAlignment();
- if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
- (IsHFA || !isAggregateTypeForABI(Ty)) &&
- TySize < SlotSize) {
- CharUnits Offset = SlotSize - TySize;
- BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
- }
-
- RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
- }
-
- CGF.EmitBranch(ContBlock);
-
- //=======================================
- // Argument was on the stack
- //=======================================
- CGF.EmitBlock(OnStackBlock);
-
- Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
- llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
-
- // Again, stack arguments may need realignment. In this case both integer and
- // floating-point ones might be affected.
- if (!IsIndirect && TyAlign.getQuantity() > 8) {
- int Align = TyAlign.getQuantity();
-
- OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
-
- OnStackPtr = CGF.Builder.CreateAdd(
- OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
- "align_stack");
- OnStackPtr = CGF.Builder.CreateAnd(
- OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
- "align_stack");
-
- OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
- }
- Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
- std::max(CharUnits::fromQuantity(8), TyAlign));
-
- // All stack slots are multiples of 8 bytes.
- CharUnits StackSlotSize = CharUnits::fromQuantity(8);
- CharUnits StackSize;
- if (IsIndirect)
- StackSize = StackSlotSize;
- else
- StackSize = TySize.alignTo(StackSlotSize);
-
- llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
- llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
- CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");
-
- // Write the new value of __stack for the next call to va_arg
- CGF.Builder.CreateStore(NewStack, stack_p);
-
- if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
- TySize < StackSlotSize) {
- CharUnits Offset = StackSlotSize - TySize;
- OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
- }
-
- OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
-
- CGF.EmitBranch(ContBlock);
-
- //=======================================
- // Tidy up
- //=======================================
- CGF.EmitBlock(ContBlock);
-
- Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
- OnStackBlock, "vaargs.addr");
-
- if (IsIndirect)
- return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
- TyAlign);
-
- return ResAddr;
-}
-
-Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- // The backend's lowering doesn't support va_arg for aggregates or
- // illegal vector types. Lower VAArg here for these cases and use
- // the LLVM va_arg instruction for everything else.
- if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
- return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
-
- uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
- CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
-
- // Empty records are ignored for parameter passing purposes.
- if (isEmptyRecord(getContext(), Ty, true)) {
- Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"),
- getVAListElementType(CGF), SlotSize);
- Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
- return Addr;
- }
-
- // The size of the actual thing passed, which might end up just
- // being a pointer for indirect types.
- auto TyInfo = getContext().getTypeInfoInChars(Ty);
-
- // Arguments bigger than 16 bytes which aren't homogeneous
- // aggregates should be passed indirectly.
- bool IsIndirect = false;
- if (TyInfo.Width.getQuantity() > 16) {
- const Type *Base = nullptr;
- uint64_t Members = 0;
- IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
- }
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
- TyInfo, SlotSize, /*AllowHigherAlign*/ true);
-}
-
-Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- bool IsIndirect = false;
-
- // Composites larger than 16 bytes are passed by reference.
- if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
- IsIndirect = true;
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
- CGF.getContext().getTypeInfoInChars(Ty),
- CharUnits::fromQuantity(8),
- /*allowHigherAlign*/ false);
-}
-
-//===----------------------------------------------------------------------===//
-// ARM ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class ARMABIInfo : public ABIInfo {
-public:
- enum ABIKind {
- APCS = 0,
- AAPCS = 1,
- AAPCS_VFP = 2,
- AAPCS16_VFP = 3,
- };
-
-private:
- ABIKind Kind;
- bool IsFloatABISoftFP;
-
-public:
- ARMABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {
- setCCs();
- IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
- CGT.getCodeGenOpts().FloatABI == ""; // default
- }
-
- bool isEABI() const {
- switch (getTarget().getTriple().getEnvironment()) {
- case llvm::Triple::Android:
- case llvm::Triple::EABI:
- case llvm::Triple::EABIHF:
- case llvm::Triple::GNUEABI:
- case llvm::Triple::GNUEABIHF:
- case llvm::Triple::MuslEABI:
- case llvm::Triple::MuslEABIHF:
- return true;
- default:
- return false;
- }
- }
-
- bool isEABIHF() const {
- switch (getTarget().getTriple().getEnvironment()) {
- case llvm::Triple::EABIHF:
- case llvm::Triple::GNUEABIHF:
- case llvm::Triple::MuslEABIHF:
- return true;
- default:
- return false;
- }
- }
-
- ABIKind getABIKind() const { return Kind; }
-
- bool allowBFloatArgsAndRet() const override {
- return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
- }
-
-private:
- ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
- unsigned functionCallConv) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
- unsigned functionCallConv) const;
- ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
- uint64_t Members) const;
- ABIArgInfo coerceIllegalVector(QualType Ty) const;
- bool isIllegalVectorType(QualType Ty) const;
- bool containsAnyFP16Vectors(QualType Ty) const;
-
- bool isHomogeneousAggregateBaseType(QualType Ty) const override;
- bool isHomogeneousAggregateSmallEnough(const Type *Ty,
- uint64_t Members) const override;
- bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;
-
- bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;
-
- void computeInfo(CGFunctionInfo &FI) const override;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- llvm::CallingConv::ID getLLVMDefaultCC() const;
- llvm::CallingConv::ID getABIDefaultCC() const;
- void setCCs();
-};
-
-class ARMSwiftABIInfo : public SwiftABIInfo {
-public:
- explicit ARMSwiftABIInfo(CodeGenTypes &CGT)
- : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}
-
- bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
- unsigned NumElts) const override;
-};
-
-class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
- : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {
- SwiftInfo = std::make_unique<ARMSwiftABIInfo>(CGT);
- }
-
- const ARMABIInfo &getABIInfo() const {
- return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
- }
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- return 13;
- }
-
- StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
- return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override {
- llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
-
- // 0-15 are the 16 integer registers.
- AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
- return false;
- }
-
- unsigned getSizeOfUnwindException() const override {
- if (getABIInfo().isEABI()) return 88;
- return TargetCodeGenInfo::getSizeOfUnwindException();
- }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- if (GV->isDeclaration())
- return;
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD)
- return;
- auto *Fn = cast<llvm::Function>(GV);
-
- if (const auto *TA = FD->getAttr<TargetAttr>()) {
- ParsedTargetAttr Attr =
- CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
- if (!Attr.BranchProtection.empty()) {
- TargetInfo::BranchProtectionInfo BPI;
- StringRef DiagMsg;
- StringRef Arch =
- Attr.CPU.empty() ? CGM.getTarget().getTargetOpts().CPU : Attr.CPU;
- if (!CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
- Arch, BPI, DiagMsg)) {
- CGM.getDiags().Report(
- D->getLocation(),
- diag::warn_target_unsupported_branch_protection_attribute)
- << Arch;
- } else {
- static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
- assert(static_cast<unsigned>(BPI.SignReturnAddr) <= 2 &&
- "Unexpected SignReturnAddressScopeKind");
- Fn->addFnAttr(
- "sign-return-address",
- SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
-
- Fn->addFnAttr("branch-target-enforcement",
- BPI.BranchTargetEnforcement ? "true" : "false");
- }
- } else if (CGM.getLangOpts().BranchTargetEnforcement ||
- CGM.getLangOpts().hasSignReturnAddress()) {
- // If the Branch Protection attribute is missing, validate the target
- // Architecture attribute against Branch Protection command line
- // settings.
- if (!CGM.getTarget().isBranchProtectionSupportedArch(Attr.CPU))
- CGM.getDiags().Report(
- D->getLocation(),
- diag::warn_target_unsupported_branch_protection_attribute)
- << Attr.CPU;
- }
- }
-
- const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
- if (!Attr)
- return;
-
- const char *Kind;
- switch (Attr->getInterrupt()) {
- case ARMInterruptAttr::Generic: Kind = ""; break;
- case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
- case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
- case ARMInterruptAttr::SWI: Kind = "SWI"; break;
- case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
- case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
- }
-
- Fn->addFnAttr("interrupt", Kind);
-
- ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
- if (ABI == ARMABIInfo::APCS)
- return;
-
- // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
- // however this is not necessarily true on taking any interrupt. Instruct
- // the backend to perform a realignment as part of the function prologue.
- llvm::AttrBuilder B(Fn->getContext());
- B.addStackAlignmentAttr(8);
- Fn->addFnAttrs(B);
- }
-};
-
-class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
-public:
- WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
- : ARMTargetCodeGenInfo(CGT, K) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override;
-
- void getDependentLibraryOption(llvm::StringRef Lib,
- llvm::SmallString<24> &Opt) const override {
- Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
- }
-
- void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
- llvm::SmallString<32> &Opt) const override {
- Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
- }
-};
-
-void WindowsARMTargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
- ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
- if (GV->isDeclaration())
- return;
- addStackProbeTargetAttributes(D, GV, CGM);
-}
-}
-
-void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
- if (!::classifyReturnType(getCXXABI(), FI, *this))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
- FI.getCallingConvention());
-
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type, FI.isVariadic(),
- FI.getCallingConvention());
-
-
- // Always honor user-specified calling convention.
- if (FI.getCallingConvention() != llvm::CallingConv::C)
- return;
-
- llvm::CallingConv::ID cc = getRuntimeCC();
- if (cc != llvm::CallingConv::C)
- FI.setEffectiveCallingConvention(cc);
-}
-
-/// Return the default calling convention that LLVM will use.
-llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
- // The default calling convention that LLVM will infer.
- if (isEABIHF() || getTarget().getTriple().isWatchABI())
- return llvm::CallingConv::ARM_AAPCS_VFP;
- else if (isEABI())
- return llvm::CallingConv::ARM_AAPCS;
- else
- return llvm::CallingConv::ARM_APCS;
-}
-
-/// Return the calling convention that our ABI would like us to use
-/// as the C calling convention.
-llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
- switch (getABIKind()) {
- case APCS: return llvm::CallingConv::ARM_APCS;
- case AAPCS: return llvm::CallingConv::ARM_AAPCS;
- case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
- case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
- }
- llvm_unreachable("bad ABI kind");
-}
-
-void ARMABIInfo::setCCs() {
- assert(getRuntimeCC() == llvm::CallingConv::C);
-
- // Don't muddy up the IR with a ton of explicit annotations if
- // they'd just match what LLVM will infer from the triple.
- llvm::CallingConv::ID abiCC = getABIDefaultCC();
- if (abiCC != getLLVMDefaultCC())
- RuntimeCC = abiCC;
-}
-
-ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
- uint64_t Size = getContext().getTypeSize(Ty);
- if (Size <= 32) {
- llvm::Type *ResType =
- llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(ResType);
- }
- if (Size == 64 || Size == 128) {
- auto *ResType = llvm::FixedVectorType::get(
- llvm::Type::getInt32Ty(getVMContext()), Size / 32);
- return ABIArgInfo::getDirect(ResType);
- }
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-}
-
-ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
- const Type *Base,
- uint64_t Members) const {
- assert(Base && "Base class should be set for homogeneous aggregate");
- // Base can be a floating-point or a vector.
- if (const VectorType *VT = Base->getAs<VectorType>()) {
- // FP16 vectors should be converted to integer vectors
- if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
- uint64_t Size = getContext().getTypeSize(VT);
- auto *NewVecTy = llvm::FixedVectorType::get(
- llvm::Type::getInt32Ty(getVMContext()), Size / 32);
- llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
- return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
- }
- }
- unsigned Align = 0;
- if (getABIKind() == ARMABIInfo::AAPCS ||
- getABIKind() == ARMABIInfo::AAPCS_VFP) {
- // For alignment adjusted HFAs, cap the argument alignment to 8, leave it
- // default otherwise.
- Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
- unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
- Align = (Align > BaseAlign && Align >= 8) ? 8 : 0;
- }
- return ABIArgInfo::getDirect(nullptr, 0, nullptr, false, Align);
-}
-
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
- unsigned functionCallConv) const {
- // 6.1.2.1 The following argument types are VFP CPRCs:
- // A single-precision floating-point type (including promoted
- // half-precision types); A double-precision floating-point type;
- // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
- // with a Base Type of a single- or double-precision floating-point type,
- // 64-bit containerized vectors or 128-bit containerized vectors with one
- // to four Elements.
- // Variadic functions should always marshal to the base standard.
- bool IsAAPCS_VFP =
- !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
-
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- // Handle illegal vector types here.
- if (isIllegalVectorType(Ty))
- return coerceIllegalVector(Ty);
-
- if (!isAggregateTypeForABI(Ty)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
- Ty = EnumTy->getDecl()->getIntegerType();
- }
-
- if (const auto *EIT = Ty->getAs<BitIntType>())
- if (EIT->getNumBits() > 64)
- return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
-
- return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
- }
-
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- }
-
- // Ignore empty records.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- if (IsAAPCS_VFP) {
- // Homogeneous Aggregates need to be expanded when we can fit the aggregate
- // into VFP registers.
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (isHomogeneousAggregate(Ty, Base, Members))
- return classifyHomogeneousAggregate(Ty, Base, Members);
- } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
- // WatchOS does have homogeneous aggregates. Note that we intentionally use
- // this convention even for a variadic function: the backend will use GPRs
- // if needed.
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (isHomogeneousAggregate(Ty, Base, Members)) {
- assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
- llvm::Type *Ty =
- llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
- return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
- }
- }
-
- if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
- getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
- // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
- // bigger than 128-bits, they get placed in space allocated by the caller,
- // and a pointer is passed.
- return ABIArgInfo::getIndirect(
- CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
- }
-
- // Support byval for ARM.
- // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
- // most 8-byte. We realign the indirect argument if type alignment is bigger
- // than ABI alignment.
- uint64_t ABIAlign = 4;
- uint64_t TyAlign;
- if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
- getABIKind() == ARMABIInfo::AAPCS) {
- TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
- ABIAlign = std::clamp(TyAlign, (uint64_t)4, (uint64_t)8);
- } else {
- TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
- }
- if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
- assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
- /*ByVal=*/true,
- /*Realign=*/TyAlign > ABIAlign);
- }
-
- // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
- // same size and alignment.
- if (getTarget().isRenderScriptTarget()) {
- return coerceToIntArray(Ty, getContext(), getVMContext());
- }
-
- // Otherwise, pass by coercing to a structure of the appropriate size.
- llvm::Type* ElemTy;
- unsigned SizeRegs;
- // FIXME: Try to match the types of the arguments more accurately where
- // we can.
- if (TyAlign <= 4) {
- ElemTy = llvm::Type::getInt32Ty(getVMContext());
- SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
- } else {
- ElemTy = llvm::Type::getInt64Ty(getVMContext());
- SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
- }
-
- return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
-}
-
-static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
- llvm::LLVMContext &VMContext) {
- // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
- // is called integer-like if its size is less than or equal to one word, and
- // the offset of each of its addressable sub-fields is zero.
-
- uint64_t Size = Context.getTypeSize(Ty);
-
- // Check that the type fits in a word.
- if (Size > 32)
- return false;
-
- // FIXME: Handle vector types!
- if (Ty->isVectorType())
- return false;
-
- // Float types are never treated as "integer like".
- if (Ty->isRealFloatingType())
- return false;
-
- // If this is a builtin or pointer type then it is ok.
- if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
- return true;
-
- // Small complex integer types are "integer like".
- if (const ComplexType *CT = Ty->getAs<ComplexType>())
- return isIntegerLikeType(CT->getElementType(), Context, VMContext);
-
- // Single element and zero sized arrays should be allowed, by the definition
- // above, but they are not.
-
- // Otherwise, it must be a record type.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT) return false;
-
- // Ignore records with flexible arrays.
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return false;
-
- // Check that all sub-fields are at offset 0, and are themselves "integer
- // like".
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- bool HadField = false;
- unsigned idx = 0;
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i, ++idx) {
- const FieldDecl *FD = *i;
-
- // Bit-fields are not addressable, we only need to verify they are "integer
- // like". We still have to disallow a subsequent non-bitfield, for example:
- // struct { int : 0; int x }
- // is non-integer like according to gcc.
- if (FD->isBitField()) {
- if (!RD->isUnion())
- HadField = true;
-
- if (!isIntegerLikeType(FD->getType(), Context, VMContext))
- return false;
-
- continue;
- }
-
- // Check if this field is at offset 0.
- if (Layout.getFieldOffset(idx) != 0)
- return false;
-
- if (!isIntegerLikeType(FD->getType(), Context, VMContext))
- return false;
-
- // Only allow at most one field in a structure. This doesn't match the
- // wording above, but follows gcc in situations with a field following an
- // empty structure.
- if (!RD->isUnion()) {
- if (HadField)
- return false;
-
- HadField = true;
- }
- }
-
- return true;
-}
-
-ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
- unsigned functionCallConv) const {
-
- // Variadic functions should always marshal to the base standard.
- bool IsAAPCS_VFP =
- !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);
-
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (const VectorType *VT = RetTy->getAs<VectorType>()) {
- // Large vector types should be returned via memory.
- if (getContext().getTypeSize(RetTy) > 128)
- return getNaturalAlignIndirect(RetTy);
- // TODO: FP16/BF16 vectors should be converted to integer vectors
- // This check is similar to isIllegalVectorType - refactor?
- if ((!getTarget().hasLegalHalfType() &&
- (VT->getElementType()->isFloat16Type() ||
- VT->getElementType()->isHalfType())) ||
- (IsFloatABISoftFP &&
- VT->getElementType()->isBFloat16Type()))
- return coerceIllegalVector(RetTy);
- }
-
- if (!isAggregateTypeForABI(RetTy)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- if (const auto *EIT = RetTy->getAs<BitIntType>())
- if (EIT->getNumBits() > 64)
- return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
-
- return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect();
- }
-
- // Are we following APCS?
- if (getABIKind() == APCS) {
- if (isEmptyRecord(getContext(), RetTy, false))
- return ABIArgInfo::getIgnore();
-
- // Complex types are all returned as packed integers.
- //
- // FIXME: Consider using 2 x vector types if the back end handles them
- // correctly.
- if (RetTy->isAnyComplexType())
- return ABIArgInfo::getDirect(llvm::IntegerType::get(
- getVMContext(), getContext().getTypeSize(RetTy)));
-
- // Integer like structures are returned in r0.
- if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
- // Return in the smallest viable integer type.
- uint64_t Size = getContext().getTypeSize(RetTy);
- if (Size <= 8)
- return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
- if (Size <= 16)
- return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
- }
-
- // Otherwise return in memory.
- return getNaturalAlignIndirect(RetTy);
- }
-
- // Otherwise this is an AAPCS variant.
-
- if (isEmptyRecord(getContext(), RetTy, true))
- return ABIArgInfo::getIgnore();
-
- // Check for homogeneous aggregates with AAPCS-VFP.
- if (IsAAPCS_VFP) {
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (isHomogeneousAggregate(RetTy, Base, Members))
- return classifyHomogeneousAggregate(RetTy, Base, Members);
- }
-
- // Aggregates <= 4 bytes are returned in r0; other aggregates
- // are returned indirectly.
- uint64_t Size = getContext().getTypeSize(RetTy);
- if (Size <= 32) {
- // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
- // same size and alignment.
- if (getTarget().isRenderScriptTarget()) {
- return coerceToIntArray(RetTy, getContext(), getVMContext());
- }
- if (getDataLayout().isBigEndian())
- // Return in 32 bit integer integer type (as if loaded by LDR, AAPCS 5.4)
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
-
- // Return in the smallest viable integer type.
- if (Size <= 8)
- return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
- if (Size <= 16)
- return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
- } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
- llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
- llvm::Type *CoerceTy =
- llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
- return ABIArgInfo::getDirect(CoerceTy);
- }
-
- return getNaturalAlignIndirect(RetTy);
-}
-
-/// isIllegalVector - check whether Ty is an illegal vector type.
-bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
- if (const VectorType *VT = Ty->getAs<VectorType> ()) {
- // On targets that don't support half, fp16 or bfloat, they are expanded
- // into float, and we don't want the ABI to depend on whether or not they
- // are supported in hardware. Thus return false to coerce vectors of these
- // types into integer vectors.
- // We do not depend on hasLegalHalfType for bfloat as it is a
- // separate IR type.
- if ((!getTarget().hasLegalHalfType() &&
- (VT->getElementType()->isFloat16Type() ||
- VT->getElementType()->isHalfType())) ||
- (IsFloatABISoftFP &&
- VT->getElementType()->isBFloat16Type()))
- return true;
- if (isAndroid()) {
- // Android shipped using Clang 3.1, which supported a slightly different
- // vector ABI. The primary differences were that 3-element vector types
- // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
- // accepts that legacy behavior for Android only.
- // Check whether VT is legal.
- unsigned NumElements = VT->getNumElements();
- // NumElements should be power of 2 or equal to 3.
- if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
- return true;
- } else {
- // Check whether VT is legal.
- unsigned NumElements = VT->getNumElements();
- uint64_t Size = getContext().getTypeSize(VT);
- // NumElements should be power of 2.
- if (!llvm::isPowerOf2_32(NumElements))
- return true;
- // Size should be greater than 32 bits.
- return Size <= 32;
- }
- }
- return false;
-}
-
-/// Return true if a type contains any 16-bit floating point vectors
-bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
- if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- uint64_t NElements = AT->getSize().getZExtValue();
- if (NElements == 0)
- return false;
- return containsAnyFP16Vectors(AT->getElementType());
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
- return containsAnyFP16Vectors(B.getType());
- }))
- return true;
-
- if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
- return FD && containsAnyFP16Vectors(FD->getType());
- }))
- return true;
-
- return false;
- } else {
- if (const VectorType *VT = Ty->getAs<VectorType>())
- return (VT->getElementType()->isFloat16Type() ||
- VT->getElementType()->isBFloat16Type() ||
- VT->getElementType()->isHalfType());
- return false;
- }
-}
-
-bool ARMSwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
- unsigned NumElts) const {
- if (!llvm::isPowerOf2_32(NumElts))
- return false;
- unsigned size = CGT.getDataLayout().getTypeStoreSizeInBits(EltTy);
- if (size > 64)
- return false;
- if (VectorSize.getQuantity() != 8 &&
- (VectorSize.getQuantity() != 16 || NumElts == 1))
- return false;
- return true;
-}
-
-bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
- // Homogeneous aggregates for AAPCS-VFP must have base types of float,
- // double, or 64-bit or 128-bit vectors.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- if (BT->getKind() == BuiltinType::Float ||
- BT->getKind() == BuiltinType::Double ||
- BT->getKind() == BuiltinType::LongDouble)
- return true;
- } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
- unsigned VecSize = getContext().getTypeSize(VT);
- if (VecSize == 64 || VecSize == 128)
- return true;
- }
- return false;
-}
-
-bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
- uint64_t Members) const {
- return Members <= 4;
-}
-
-bool ARMABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
- // AAPCS32 says that the rule for whether something is a homogeneous
- // aggregate is applied to the output of the data layout decision. So
- // anything that doesn't affect the data layout also does not affect
- // homogeneity. In particular, zero-length bitfields don't stop a struct
- // being homogeneous.
- return true;
-}
-
-bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
- bool acceptHalf) const {
- // Give precedence to user-specified calling conventions.
- if (callConvention != llvm::CallingConv::C)
- return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
- else
- return (getABIKind() == AAPCS_VFP) ||
- (acceptHalf && (getABIKind() == AAPCS16_VFP));
-}
-
-Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- CharUnits SlotSize = CharUnits::fromQuantity(4);
-
- // Empty records are ignored for parameter passing purposes.
- if (isEmptyRecord(getContext(), Ty, true)) {
- VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
- auto *Load = CGF.Builder.CreateLoad(VAListAddr);
- Address Addr = Address(Load, CGF.Int8Ty, SlotSize);
- return CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
- }
-
- CharUnits TySize = getContext().getTypeSizeInChars(Ty);
- CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);
-
- // Use indirect if size of the illegal vector is bigger than 16 bytes.
- bool IsIndirect = false;
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
- IsIndirect = true;
-
- // ARMv7k passes structs bigger than 16 bytes indirectly, in space
- // allocated by the caller.
- } else if (TySize > CharUnits::fromQuantity(16) &&
- getABIKind() == ARMABIInfo::AAPCS16_VFP &&
- !isHomogeneousAggregate(Ty, Base, Members)) {
- IsIndirect = true;
-
- // Otherwise, bound the type's ABI alignment.
- // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
- // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
- // Our callers should be prepared to handle an under-aligned address.
- } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
- getABIKind() == ARMABIInfo::AAPCS) {
- TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
- TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
- } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
- // ARMv7k allows type alignment up to 16 bytes.
- TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
- TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
- } else {
- TyAlignForABI = CharUnits::fromQuantity(4);
- }
-
- TypeInfoChars TyInfo(TySize, TyAlignForABI, AlignRequirementKind::None);
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
- SlotSize, /*AllowHigherAlign*/ true);
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class NVPTXTargetCodeGenInfo;
-
-class NVPTXABIInfo : public ABIInfo {
- NVPTXTargetCodeGenInfo &CGInfo;
-
-public:
- NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info)
- : ABIInfo(CGT), CGInfo(Info) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType Ty) const;
-
- void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
- bool isUnsupportedType(QualType T) const;
- ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
-};
-
-class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const override;
- bool shouldEmitStaticExternCAliases() const override;
-
- llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
- // On the device side, surface reference is represented as an object handle
- // in 64-bit integer.
- return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
- }
-
- llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override {
- // On the device side, texture reference is represented as an object handle
- // in 64-bit integer.
- return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
- }
-
- bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst,
- LValue Src) const override {
- emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
- return true;
- }
-
- bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst,
- LValue Src) const override {
- emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
- return true;
- }
-
-private:
- // Adds a NamedMDNode with GV, Name, and Operand as operands, and adds the
- // resulting MDNode to the nvvm.annotations MDNode.
- static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
- int Operand);
-
- static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
- LValue Src) {
- llvm::Value *Handle = nullptr;
- llvm::Constant *C =
- llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
- // Lookup `addrspacecast` through the constant pointer if any.
- if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
- C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
- if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) {
- // Load the handle from the specific global variable using
- // `nvvm.texsurf.handle.internal` intrinsic.
- Handle = CGF.EmitRuntimeCall(
- CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
- {GV->getType()}),
- {GV}, "texsurf_handle");
- } else
- Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
- CGF.EmitStoreOfScalar(Handle, Dst);
- }
-};
-
-/// Checks if the type is unsupported directly by the current target.
-bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
- ASTContext &Context = getContext();
- if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
- return true;
- if (!Context.getTargetInfo().hasFloat128Type() &&
- (T->isFloat128Type() ||
- (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
- return true;
- if (const auto *EIT = T->getAs<BitIntType>())
- return EIT->getNumBits() >
- (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
- if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
- Context.getTypeSize(T) > 64U)
- return true;
- if (const auto *AT = T->getAsArrayTypeUnsafe())
- return isUnsupportedType(AT->getElementType());
- const auto *RT = T->getAs<RecordType>();
- if (!RT)
- return false;
- const RecordDecl *RD = RT->getDecl();
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- for (const CXXBaseSpecifier &I : CXXRD->bases())
- if (isUnsupportedType(I.getType()))
- return true;
-
- for (const FieldDecl *I : RD->fields())
- if (isUnsupportedType(I->getType()))
- return true;
- return false;
-}
-
-/// Coerce the given type into an array with maximum allowed size of elements.
-ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
- unsigned MaxSize) const {
- // Alignment and Size are measured in bits.
- const uint64_t Size = getContext().getTypeSize(Ty);
- const uint64_t Alignment = getContext().getTypeAlign(Ty);
- const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
- llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
- const uint64_t NumElements = (Size + Div - 1) / Div;
- return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
-}
-
-ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (getContext().getLangOpts().OpenMP &&
- getContext().getLangOpts().OpenMPIsDevice && isUnsupportedType(RetTy))
- return coerceToIntArrayWithLimit(RetTy, 64);
-
- // note: this is different from default ABI
- if (!RetTy->isScalarType())
- return ABIArgInfo::getDirect();
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // Return aggregates type as indirect by value
- if (isAggregateTypeForABI(Ty)) {
- // Under CUDA device compilation, tex/surf builtin types are replaced with
- // object types and passed directly.
- if (getContext().getLangOpts().CUDAIsDevice) {
- if (Ty->isCUDADeviceBuiltinSurfaceType())
- return ABIArgInfo::getDirect(
- CGInfo.getCUDADeviceBuiltinSurfaceDeviceType());
- if (Ty->isCUDADeviceBuiltinTextureType())
- return ABIArgInfo::getDirect(
- CGInfo.getCUDADeviceBuiltinTextureDeviceType());
- }
- return getNaturalAlignIndirect(Ty, /* byval */ true);
- }
-
- if (const auto *EIT = Ty->getAs<BitIntType>()) {
- if ((EIT->getNumBits() > 128) ||
- (!getContext().getTargetInfo().hasInt128Type() &&
- EIT->getNumBits() > 64))
- return getNaturalAlignIndirect(Ty, /* byval */ true);
- }
-
- return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
-}
-
-void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
-
- // Always honor user-specified calling convention.
- if (FI.getCallingConvention() != llvm::CallingConv::C)
- return;
-
- FI.setEffectiveCallingConvention(getRuntimeCC());
-}
-
-Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- llvm_unreachable("NVPTX does not support varargs");
-}
-
-void NVPTXTargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (GV->isDeclaration())
- return;
- const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
- if (VD) {
- if (M.getLangOpts().CUDA) {
- if (VD->getType()->isCUDADeviceBuiltinSurfaceType())
- addNVVMMetadata(GV, "surface", 1);
- else if (VD->getType()->isCUDADeviceBuiltinTextureType())
- addNVVMMetadata(GV, "texture", 1);
- return;
- }
- }
-
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD) return;
-
- llvm::Function *F = cast<llvm::Function>(GV);
-
- // Perform special handling in OpenCL mode
- if (M.getLangOpts().OpenCL) {
- // Use OpenCL function attributes to check for kernel functions
- // By default, all functions are device functions
- if (FD->hasAttr<OpenCLKernelAttr>()) {
- // OpenCL __kernel functions get kernel metadata
- // Create !{<func-ref>, metadata !"kernel", i32 1} node
- addNVVMMetadata(F, "kernel", 1);
- // And kernel functions are not subject to inlining
- F->addFnAttr(llvm::Attribute::NoInline);
- }
- }
-
- // Perform special handling in CUDA mode.
- if (M.getLangOpts().CUDA) {
- // CUDA __global__ functions get a kernel metadata entry. Since
- // __global__ functions cannot be called from the device, we do not
- // need to set the noinline attribute.
- if (FD->hasAttr<CUDAGlobalAttr>()) {
- // Create !{<func-ref>, metadata !"kernel", i32 1} node
- addNVVMMetadata(F, "kernel", 1);
- }
- if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
- // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
- llvm::APSInt MaxThreads(32);
- MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
- if (MaxThreads > 0)
- addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
-
- // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
- // not specified in __launch_bounds__ or if the user specified a 0 value,
- // we don't have to add a PTX directive.
- if (Attr->getMinBlocks()) {
- llvm::APSInt MinBlocks(32);
- MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
- if (MinBlocks > 0)
- // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
- addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
- }
- }
- }
-}
-
-void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
- StringRef Name, int Operand) {
- llvm::Module *M = GV->getParent();
- llvm::LLVMContext &Ctx = M->getContext();
-
- // Get "nvvm.annotations" metadata node
- llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
-
- llvm::Metadata *MDVals[] = {
- llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name),
- llvm::ConstantAsMetadata::get(
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
- // Append metadata to nvvm.annotations
- MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
-}
-
-bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
- return false;
-}
-}
-
-//===----------------------------------------------------------------------===//
-// SystemZ ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class SystemZABIInfo : public ABIInfo {
- bool HasVector;
- bool IsSoftFloatABI;
-
-public:
- SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF)
- : ABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {}
-
- bool isPromotableIntegerTypeForABI(QualType Ty) const;
- bool isCompoundType(QualType Ty) const;
- bool isVectorArgumentType(QualType Ty) const;
- bool isFPArgumentType(QualType Ty) const;
- QualType GetSingleElementType(QualType Ty) const;
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType ArgTy) const;
-
- void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-};
-
-class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
- ASTContext &Ctx;
-
- const SystemZABIInfo &getABIInfo() const {
- return static_cast<const SystemZABIInfo&>(TargetCodeGenInfo::getABIInfo());
- }
-
- // These are used for speeding up the search for a visible vector ABI.
- mutable bool HasVisibleVecABIFlag = false;
- mutable std::set<const Type *> SeenTypes;
-
- // Returns true (the first time) if Ty is, or is found to include, a vector
- // type that exposes the vector ABI. This is any vector >=16 bytes which
- // with vector support are aligned to only 8 bytes. When IsParam is true,
- // the type belongs to a value as passed between functions. If it is a
- // vector <=16 bytes it will be passed in a vector register (if supported).
- bool isVectorTypeBased(const Type *Ty, bool IsParam) const;
-
-public:
- SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
- : TargetCodeGenInfo(
- std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)),
- Ctx(CGT.getContext()) {
- SwiftInfo =
- std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
- }
-
- // The vector ABI is different when the vector facility is present and when
- // a module e.g. defines an externally visible vector variable, a flag
- // indicating a visible vector ABI is added. Eventually this will result in
- // a GNU attribute indicating the vector ABI of the module. Ty is the type
- // of a variable or function parameter that is globally visible.
- void handleExternallyVisibleObjABI(const Type *Ty, CodeGen::CodeGenModule &M,
- bool IsParam) const {
- if (!HasVisibleVecABIFlag && isVectorTypeBased(Ty, IsParam)) {
- M.getModule().addModuleFlag(llvm::Module::Warning,
- "s390x-visible-vector-ABI", 1);
- HasVisibleVecABIFlag = true;
- }
- }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const override {
- if (!D)
- return;
-
- // Check if the vector ABI becomes visible by an externally visible
- // variable or function.
- if (const auto *VD = dyn_cast<VarDecl>(D)) {
- if (VD->isExternallyVisible())
- handleExternallyVisibleObjABI(VD->getType().getTypePtr(), M,
- /*IsParam*/false);
- }
- else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- if (FD->isExternallyVisible())
- handleExternallyVisibleObjABI(FD->getType().getTypePtr(), M,
- /*IsParam*/false);
- }
- }
-
- llvm::Value *testFPKind(llvm::Value *V, unsigned BuiltinID,
- CGBuilderTy &Builder,
- CodeGenModule &CGM) const override {
- assert(V->getType()->isFloatingPointTy() && "V should have an FP type.");
- // Only use TDC in constrained FP mode.
- if (!Builder.getIsFPConstrained())
- return nullptr;
-
- llvm::Type *Ty = V->getType();
- if (Ty->isFloatTy() || Ty->isDoubleTy() || Ty->isFP128Ty()) {
- llvm::Module &M = CGM.getModule();
- auto &Ctx = M.getContext();
- llvm::Function *TDCFunc =
- llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::s390_tdc, Ty);
- unsigned TDCBits = 0;
- switch (BuiltinID) {
- case Builtin::BI__builtin_isnan:
- TDCBits = 0xf;
- break;
- case Builtin::BIfinite:
- case Builtin::BI__finite:
- case Builtin::BIfinitef:
- case Builtin::BI__finitef:
- case Builtin::BIfinitel:
- case Builtin::BI__finitel:
- case Builtin::BI__builtin_isfinite:
- TDCBits = 0xfc0;
- break;
- case Builtin::BI__builtin_isinf:
- TDCBits = 0x30;
- break;
- default:
- break;
- }
- if (TDCBits)
- return Builder.CreateCall(
- TDCFunc,
- {V, llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), TDCBits)});
- }
- return nullptr;
- }
-};
-}
-
-bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // Promotable integer types are required to be promoted by the ABI.
- if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
- return true;
-
- if (const auto *EIT = Ty->getAs<BitIntType>())
- if (EIT->getNumBits() < 64)
- return true;
-
- // 32-bit values must also be promoted.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Int:
- case BuiltinType::UInt:
- return true;
- default:
- return false;
- }
- return false;
-}
-
-bool SystemZABIInfo::isCompoundType(QualType Ty) const {
- return (Ty->isAnyComplexType() ||
- Ty->isVectorType() ||
- isAggregateTypeForABI(Ty));
-}
-
-bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
- return (HasVector &&
- Ty->isVectorType() &&
- getContext().getTypeSize(Ty) <= 128);
-}
-
-bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
- if (IsSoftFloatABI)
- return false;
-
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Float:
- case BuiltinType::Double:
- return true;
- default:
- return false;
- }
-
- return false;
-}
-
-QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
- const RecordType *RT = Ty->getAs<RecordType>();
-
- if (RT && RT->isStructureOrClassType()) {
- const RecordDecl *RD = RT->getDecl();
- QualType Found;
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- if (CXXRD->hasDefinition())
- for (const auto &I : CXXRD->bases()) {
- QualType Base = I.getType();
-
- // Empty bases don't affect things either way.
- if (isEmptyRecord(getContext(), Base, true))
- continue;
-
- if (!Found.isNull())
- return Ty;
- Found = GetSingleElementType(Base);
- }
-
- // Check the fields.
- for (const auto *FD : RD->fields()) {
- // Unlike isSingleElementStruct(), empty structure and array fields
- // do count. So do anonymous bitfields that aren't zero-sized.
-
- // Like isSingleElementStruct(), ignore C++20 empty data members.
- if (FD->hasAttr<NoUniqueAddressAttr>() &&
- isEmptyRecord(getContext(), FD->getType(), true))
- continue;
-
- // Unlike isSingleElementStruct(), arrays do not count.
- // Nested structures still do though.
- if (!Found.isNull())
- return Ty;
- Found = GetSingleElementType(FD->getType());
- }
-
- // Unlike isSingleElementStruct(), trailing padding is allowed.
- // An 8-byte aligned struct s { float f; } is passed as a double.
- if (!Found.isNull())
- return Found;
- }
-
- return Ty;
-}
-
-Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- // Assume that va_list type is correct; should be pointer to LLVM type:
- // struct {
- // i64 __gpr;
- // i64 __fpr;
- // i8 *__overflow_arg_area;
- // i8 *__reg_save_area;
- // };
-
- // Every non-vector argument occupies 8 bytes and is passed by preference
- // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
- // always passed on the stack.
- const SystemZTargetCodeGenInfo &SZCGI =
- static_cast<const SystemZTargetCodeGenInfo &>(
- CGT.getCGM().getTargetCodeGenInfo());
- Ty = getContext().getCanonicalType(Ty);
- auto TyInfo = getContext().getTypeInfoInChars(Ty);
- llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
- llvm::Type *DirectTy = ArgTy;
- ABIArgInfo AI = classifyArgumentType(Ty);
- bool IsIndirect = AI.isIndirect();
- bool InFPRs = false;
- bool IsVector = false;
- CharUnits UnpaddedSize;
- CharUnits DirectAlign;
- SZCGI.handleExternallyVisibleObjABI(Ty.getTypePtr(), CGT.getCGM(),
- /*IsParam*/true);
- if (IsIndirect) {
- DirectTy = llvm::PointerType::getUnqual(DirectTy);
- UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
- } else {
- if (AI.getCoerceToType())
- ArgTy = AI.getCoerceToType();
- InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy()));
- IsVector = ArgTy->isVectorTy();
- UnpaddedSize = TyInfo.Width;
- DirectAlign = TyInfo.Align;
- }
- CharUnits PaddedSize = CharUnits::fromQuantity(8);
- if (IsVector && UnpaddedSize > PaddedSize)
- PaddedSize = CharUnits::fromQuantity(16);
- assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
-
- CharUnits Padding = (PaddedSize - UnpaddedSize);
-
- llvm::Type *IndexTy = CGF.Int64Ty;
- llvm::Value *PaddedSizeV =
- llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
-
- if (IsVector) {
- // Work out the address of a vector argument on the stack.
- // Vector arguments are always passed in the high bits of a
- // single (8 byte) or double (16 byte) stack slot.
- Address OverflowArgAreaPtr =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
- Address OverflowArgArea =
- Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
- CGF.Int8Ty, TyInfo.Align);
- Address MemAddr =
- CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
-
- // Update overflow_arg_area_ptr pointer
- llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP(
- OverflowArgArea.getElementType(), OverflowArgArea.getPointer(),
- PaddedSizeV, "overflow_arg_area");
- CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
-
- return MemAddr;
- }
-
- assert(PaddedSize.getQuantity() == 8);
-
- unsigned MaxRegs, RegCountField, RegSaveIndex;
- CharUnits RegPadding;
- if (InFPRs) {
- MaxRegs = 4; // Maximum of 4 FPR arguments
- RegCountField = 1; // __fpr
- RegSaveIndex = 16; // save offset for f0
- RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
- } else {
- MaxRegs = 5; // Maximum of 5 GPR arguments
- RegCountField = 0; // __gpr
- RegSaveIndex = 2; // save offset for r2
- RegPadding = Padding; // values are passed in the low bits of a GPR
- }
-
- Address RegCountPtr =
- CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
- llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
- llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
- llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
- "fits_in_regs");
-
- llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
- llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
- llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
- CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
-
- // Emit code to load the value if it was passed in registers.
- CGF.EmitBlock(InRegBlock);
-
- // Work out the address of an argument register.
- llvm::Value *ScaledRegCount =
- CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
- llvm::Value *RegBase =
- llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
- + RegPadding.getQuantity());
- llvm::Value *RegOffset =
- CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
- Address RegSaveAreaPtr =
- CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
- llvm::Value *RegSaveArea =
- CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
- Address RawRegAddr(
- CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, RegOffset, "raw_reg_addr"),
- CGF.Int8Ty, PaddedSize);
- Address RegAddr =
- CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
-
- // Update the register count
- llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
- llvm::Value *NewRegCount =
- CGF.Builder.CreateAdd(RegCount, One, "reg_count");
- CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
- CGF.EmitBranch(ContBlock);
-
- // Emit code to load the value if it was passed in memory.
- CGF.EmitBlock(InMemBlock);
-
- // Work out the address of a stack argument.
- Address OverflowArgAreaPtr =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
- Address OverflowArgArea =
- Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
- CGF.Int8Ty, PaddedSize);
- Address RawMemAddr =
- CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
- Address MemAddr =
- CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
-
- // Update overflow_arg_area_ptr pointer
- llvm::Value *NewOverflowArgArea =
- CGF.Builder.CreateGEP(OverflowArgArea.getElementType(),
- OverflowArgArea.getPointer(), PaddedSizeV,
- "overflow_arg_area");
- CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
- CGF.EmitBranch(ContBlock);
-
- // Return the appropriate result.
- CGF.EmitBlock(ContBlock);
- Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
- "va_arg.addr");
-
- if (IsIndirect)
- ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), ArgTy,
- TyInfo.Align);
-
- return ResAddr;
-}
-
-ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
- if (isVectorArgumentType(RetTy))
- return ABIArgInfo::getDirect();
- if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
- return getNaturalAlignIndirect(RetTy);
- return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
- // Handle the generic C++ ABI.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- // Integers and enums are extended to full register width.
- if (isPromotableIntegerTypeForABI(Ty))
- return ABIArgInfo::getExtend(Ty);
-
- // Handle vector types and vector-like structure types. Note that
- // as opposed to float-like structure types, we do not allow any
- // padding for vector-like structures, so verify the sizes match.
- uint64_t Size = getContext().getTypeSize(Ty);
- QualType SingleElementTy = GetSingleElementType(Ty);
- if (isVectorArgumentType(SingleElementTy) &&
- getContext().getTypeSize(SingleElementTy) == Size)
- return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
-
- // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
- if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-
- // Handle small structures.
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- // Structures with flexible arrays have variable length, so really
- // fail the size test above.
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-
- // The structure is passed as an unextended integer, a float, or a double.
- llvm::Type *PassTy;
- if (isFPArgumentType(SingleElementTy)) {
- assert(Size == 32 || Size == 64);
- if (Size == 32)
- PassTy = llvm::Type::getFloatTy(getVMContext());
- else
- PassTy = llvm::Type::getDoubleTy(getVMContext());
- } else
- PassTy = llvm::IntegerType::get(getVMContext(), Size);
- return ABIArgInfo::getDirect(PassTy);
- }
-
- // Non-structure compounds are passed indirectly.
- if (isCompoundType(Ty))
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-
- return ABIArgInfo::getDirect(nullptr);
-}
-
-void SystemZABIInfo::computeInfo(CGFunctionInfo &FI) const {
- const SystemZTargetCodeGenInfo &SZCGI =
- static_cast<const SystemZTargetCodeGenInfo &>(
- CGT.getCGM().getTargetCodeGenInfo());
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- unsigned Idx = 0;
- for (auto &I : FI.arguments()) {
- I.info = classifyArgumentType(I.type);
- if (FI.isVariadic() && Idx++ >= FI.getNumRequiredArgs())
- // Check if a vararg vector argument is passed, in which case the
- // vector ABI becomes visible as the va_list could be passed on to
- // other functions.
- SZCGI.handleExternallyVisibleObjABI(I.type.getTypePtr(), CGT.getCGM(),
- /*IsParam*/true);
- }
-}
-
-bool SystemZTargetCodeGenInfo::isVectorTypeBased(const Type *Ty,
- bool IsParam) const {
- if (!SeenTypes.insert(Ty).second)
- return false;
-
- if (IsParam) {
- // A narrow (<16 bytes) vector will as a parameter also expose the ABI as
- // it will be passed in a vector register. A wide (>16 bytes) vector will
- // be passed via "hidden" pointer where any extra alignment is not
- // required (per GCC).
- const Type *SingleEltTy =
- getABIInfo().GetSingleElementType(QualType(Ty, 0)).getTypePtr();
- bool SingleVecEltStruct = SingleEltTy != Ty && SingleEltTy->isVectorType() &&
- Ctx.getTypeSize(SingleEltTy) == Ctx.getTypeSize(Ty);
- if (Ty->isVectorType() || SingleVecEltStruct)
- return Ctx.getTypeSize(Ty) / 8 <= 16;
- }
-
- // Assume pointers are dereferenced.
- while (Ty->isPointerType() || Ty->isArrayType())
- Ty = Ty->getPointeeOrArrayElementType();
-
- // Vectors >= 16 bytes expose the ABI through alignment requirements.
- if (Ty->isVectorType() && Ctx.getTypeSize(Ty) / 8 >= 16)
- return true;
-
- if (const auto *RecordTy = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RecordTy->getDecl();
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- if (CXXRD->hasDefinition())
- for (const auto &I : CXXRD->bases())
- if (isVectorTypeBased(I.getType().getTypePtr(), /*IsParam*/false))
- return true;
- for (const auto *FD : RD->fields())
- if (isVectorTypeBased(FD->getType().getTypePtr(), /*IsParam*/false))
- return true;
- }
-
- if (const auto *FT = Ty->getAs<FunctionType>())
- if (isVectorTypeBased(FT->getReturnType().getTypePtr(), /*IsParam*/true))
- return true;
- if (const FunctionProtoType *Proto = Ty->getAs<FunctionProtoType>())
- for (auto ParamType : Proto->getParamTypes())
- if (isVectorTypeBased(ParamType.getTypePtr(), /*IsParam*/true))
- return true;
-
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// MSP430 ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class MSP430ABIInfo : public DefaultABIInfo {
- static ABIArgInfo complexArgInfo() {
- ABIArgInfo Info = ABIArgInfo::getDirect();
- Info.setCanBeFlattened(false);
- return Info;
- }
-
-public:
- MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const {
- if (RetTy->isAnyComplexType())
- return complexArgInfo();
-
- return DefaultABIInfo::classifyReturnType(RetTy);
- }
-
- ABIArgInfo classifyArgumentType(QualType RetTy) const {
- if (RetTy->isAnyComplexType())
- return complexArgInfo();
-
- return DefaultABIInfo::classifyArgumentType(RetTy);
- }
-
- // Just copy the original implementations because
- // DefaultABIInfo::classify{Return,Argument}Type() are not virtual
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
- }
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override {
- return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
- }
-};
-
-class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {}
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const override;
-};
-
-}
-
-void MSP430TargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (GV->isDeclaration())
- return;
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
- if (!InterruptAttr)
- return;
-
- // Handle 'interrupt' attribute:
- llvm::Function *F = cast<llvm::Function>(GV);
-
- // Step 1: Set ISR calling convention.
- F->setCallingConv(llvm::CallingConv::MSP430_INTR);
-
- // Step 2: Add attributes goodness.
- F->addFnAttr(llvm::Attribute::NoInline);
- F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
- }
-}
-
-//===----------------------------------------------------------------------===//
-// MIPS ABI Implementation. This works for both little-endian and
-// big-endian variants.
-//===----------------------------------------------------------------------===//
-
-namespace {
-class MipsABIInfo : public ABIInfo {
- bool IsO32;
- const unsigned MinABIStackAlignInBytes, StackAlignInBytes;
- void CoerceToIntArgs(uint64_t TySize,
- SmallVectorImpl<llvm::Type *> &ArgList) const;
- llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
- llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
- llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
-public:
- MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
- ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
- StackAlignInBytes(IsO32 ? 8 : 16) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
- void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
- ABIArgInfo extendType(QualType Ty) const;
-};
-
-class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
- unsigned SizeOfUnwindException;
-public:
- MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
- : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)),
- SizeOfUnwindException(IsO32 ? 24 : 32) {}
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
- return 29;
- }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD) return;
- llvm::Function *Fn = cast<llvm::Function>(GV);
-
- if (FD->hasAttr<MipsLongCallAttr>())
- Fn->addFnAttr("long-call");
- else if (FD->hasAttr<MipsShortCallAttr>())
- Fn->addFnAttr("short-call");
-
- // Other attributes do not have a meaning for declarations.
- if (GV->isDeclaration())
- return;
-
- if (FD->hasAttr<Mips16Attr>()) {
- Fn->addFnAttr("mips16");
- }
- else if (FD->hasAttr<NoMips16Attr>()) {
- Fn->addFnAttr("nomips16");
- }
-
- if (FD->hasAttr<MicroMipsAttr>())
- Fn->addFnAttr("micromips");
- else if (FD->hasAttr<NoMicroMipsAttr>())
- Fn->addFnAttr("nomicromips");
-
- const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
- if (!Attr)
- return;
-
- const char *Kind;
- switch (Attr->getInterrupt()) {
- case MipsInterruptAttr::eic: Kind = "eic"; break;
- case MipsInterruptAttr::sw0: Kind = "sw0"; break;
- case MipsInterruptAttr::sw1: Kind = "sw1"; break;
- case MipsInterruptAttr::hw0: Kind = "hw0"; break;
- case MipsInterruptAttr::hw1: Kind = "hw1"; break;
- case MipsInterruptAttr::hw2: Kind = "hw2"; break;
- case MipsInterruptAttr::hw3: Kind = "hw3"; break;
- case MipsInterruptAttr::hw4: Kind = "hw4"; break;
- case MipsInterruptAttr::hw5: Kind = "hw5"; break;
- }
-
- Fn->addFnAttr("interrupt", Kind);
-
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-
- unsigned getSizeOfUnwindException() const override {
- return SizeOfUnwindException;
- }
-};
-}
-
-void MipsABIInfo::CoerceToIntArgs(
- uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
- llvm::IntegerType *IntTy =
- llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
-
- // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
- for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
- ArgList.push_back(IntTy);
-
- // If necessary, add one more integer type to ArgList.
- unsigned R = TySize % (MinABIStackAlignInBytes * 8);
-
- if (R)
- ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
-}
-
-// In N32/64, an aligned double precision floating point field is passed in
-// a register.
-llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
- SmallVector<llvm::Type*, 8> ArgList, IntArgList;
-
- if (IsO32) {
- CoerceToIntArgs(TySize, ArgList);
- return llvm::StructType::get(getVMContext(), ArgList);
- }
-
- if (Ty->isComplexType())
- return CGT.ConvertType(Ty);
-
- const RecordType *RT = Ty->getAs<RecordType>();
-
- // Unions/vectors are passed in integer registers.
- if (!RT || !RT->isStructureOrClassType()) {
- CoerceToIntArgs(TySize, ArgList);
- return llvm::StructType::get(getVMContext(), ArgList);
- }
-
- const RecordDecl *RD = RT->getDecl();
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
- assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
-
- uint64_t LastOffset = 0;
- unsigned idx = 0;
- llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
-
- // Iterate over fields in the struct/class and check if there are any aligned
- // double fields.
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i, ++idx) {
- const QualType Ty = i->getType();
- const BuiltinType *BT = Ty->getAs<BuiltinType>();
-
- if (!BT || BT->getKind() != BuiltinType::Double)
- continue;
-
- uint64_t Offset = Layout.getFieldOffset(idx);
- if (Offset % 64) // Ignore doubles that are not aligned.
- continue;
-
- // Add ((Offset - LastOffset) / 64) args of type i64.
- for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
- ArgList.push_back(I64);
-
- // Add double type.
- ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
- LastOffset = Offset + 64;
- }
-
- CoerceToIntArgs(TySize - LastOffset, IntArgList);
- ArgList.append(IntArgList.begin(), IntArgList.end());
-
- return llvm::StructType::get(getVMContext(), ArgList);
-}
-
-llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
- uint64_t Offset) const {
- if (OrigOffset + MinABIStackAlignInBytes > Offset)
- return nullptr;
-
- return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
-}
-
-ABIArgInfo
-MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- uint64_t OrigOffset = Offset;
- uint64_t TySize = getContext().getTypeSize(Ty);
- uint64_t Align = getContext().getTypeAlign(Ty) / 8;
-
- Align = std::clamp(Align, (uint64_t)MinABIStackAlignInBytes,
- (uint64_t)StackAlignInBytes);
- unsigned CurrOffset = llvm::alignTo(Offset, Align);
- Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
-
- if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
- // Ignore empty aggregates.
- if (TySize == 0)
- return ABIArgInfo::getIgnore();
-
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- Offset = OrigOffset + MinABIStackAlignInBytes;
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- }
-
- // If we have reached here, aggregates are passed directly by coercing to
- // another structure type. Padding is inserted if the offset of the
- // aggregate is unaligned.
- ABIArgInfo ArgInfo =
- ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
- getPaddingType(OrigOffset, CurrOffset));
- ArgInfo.setInReg(true);
- return ArgInfo;
- }
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // Make sure we pass indirectly things that are too large.
- if (const auto *EIT = Ty->getAs<BitIntType>())
- if (EIT->getNumBits() > 128 ||
- (EIT->getNumBits() > 64 &&
- !getContext().getTargetInfo().hasInt128Type()))
- return getNaturalAlignIndirect(Ty);
-
- // All integral types are promoted to the GPR width.
- if (Ty->isIntegralOrEnumerationType())
- return extendType(Ty);
-
- return ABIArgInfo::getDirect(
- nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
-}
-
-llvm::Type*
-MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
- const RecordType *RT = RetTy->getAs<RecordType>();
- SmallVector<llvm::Type*, 8> RTList;
-
- if (RT && RT->isStructureOrClassType()) {
- const RecordDecl *RD = RT->getDecl();
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
- unsigned FieldCnt = Layout.getFieldCount();
-
- // N32/64 returns struct/classes in floating point registers if the
- // following conditions are met:
- // 1. The size of the struct/class is no larger than 128-bit.
- // 2. The struct/class has one or two fields all of which are floating
- // point types.
- // 3. The offset of the first field is zero (this follows what gcc does).
- //
- // Any other composite results are returned in integer registers.
- //
- if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
- RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
- for (; b != e; ++b) {
- const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
-
- if (!BT || !BT->isFloatingPoint())
- break;
-
- RTList.push_back(CGT.ConvertType(b->getType()));
- }
-
- if (b == e)
- return llvm::StructType::get(getVMContext(), RTList,
- RD->hasAttr<PackedAttr>());
-
- RTList.clear();
- }
- }
-
- CoerceToIntArgs(Size, RTList);
- return llvm::StructType::get(getVMContext(), RTList);
-}
-
-ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
- uint64_t Size = getContext().getTypeSize(RetTy);
-
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- // O32 doesn't treat zero-sized structs differently from other structs.
- // However, N32/N64 ignores zero sized return values.
- if (!IsO32 && Size == 0)
- return ABIArgInfo::getIgnore();
-
- if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
- if (Size <= 128) {
- if (RetTy->isAnyComplexType())
- return ABIArgInfo::getDirect();
-
- // O32 returns integer vectors in registers and N32/N64 returns all small
- // aggregates in registers.
- if (!IsO32 ||
- (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
- ABIArgInfo ArgInfo =
- ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
- ArgInfo.setInReg(true);
- return ArgInfo;
- }
- }
-
- return getNaturalAlignIndirect(RetTy);
- }
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- // Make sure we pass indirectly things that are too large.
- if (const auto *EIT = RetTy->getAs<BitIntType>())
- if (EIT->getNumBits() > 128 ||
- (EIT->getNumBits() > 64 &&
- !getContext().getTargetInfo().hasInt128Type()))
- return getNaturalAlignIndirect(RetTy);
-
- if (isPromotableIntegerTypeForABI(RetTy))
- return ABIArgInfo::getExtend(RetTy);
-
- if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
- RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32)
- return ABIArgInfo::getSignExtend(RetTy);
-
- return ABIArgInfo::getDirect();
-}
-
-void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
- ABIArgInfo &RetInfo = FI.getReturnInfo();
- if (!getCXXABI().classifyReturnType(FI))
- RetInfo = classifyReturnType(FI.getReturnType());
-
- // Check if a pointer to an aggregate is passed as a hidden argument.
- uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
-
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type, Offset);
-}
-
-Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType OrigTy) const {
- QualType Ty = OrigTy;
-
- // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
- // Pointers are also promoted in the same way but this only matters for N32.
- unsigned SlotSizeInBits = IsO32 ? 32 : 64;
- unsigned PtrWidth = getTarget().getPointerWidth(LangAS::Default);
- bool DidPromote = false;
- if ((Ty->isIntegerType() &&
- getContext().getIntWidth(Ty) < SlotSizeInBits) ||
- (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
- DidPromote = true;
- Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
- Ty->isSignedIntegerType());
- }
-
- auto TyInfo = getContext().getTypeInfoInChars(Ty);
-
- // The alignment of things in the argument area is never larger than
- // StackAlignInBytes.
- TyInfo.Align =
- std::min(TyInfo.Align, CharUnits::fromQuantity(StackAlignInBytes));
-
- // MinABIStackAlignInBytes is the size of argument slots on the stack.
- CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
-
- Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
- TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
-
-
- // If there was a promotion, "unpromote" into a temporary.
- // TODO: can we just use a pointer into a subset of the original slot?
- if (DidPromote) {
- Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
- llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
-
- // Truncate down to the right width.
- llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
- : CGF.IntPtrTy);
- llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
- if (OrigTy->isPointerType())
- V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
-
- CGF.Builder.CreateStore(V, Temp);
- Addr = Temp;
- }
-
- return Addr;
-}
-
-ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
- int TySize = getContext().getTypeSize(Ty);
-
- // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
- if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
- return ABIArgInfo::getSignExtend(Ty);
-
- return ABIArgInfo::getExtend(Ty);
-}
-
-bool
-MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
- // This information comes from gcc's implementation, which seems to
- // as canonical as it gets.
-
- // Everything on MIPS is 4 bytes. Double-precision FP registers
- // are aliased to pairs of single-precision FP registers.
- llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
-
- // 0-31 are the general purpose registers, $0 - $31.
- // 32-63 are the floating-point registers, $f0 - $f31.
- // 64 and 65 are the multiply/divide registers, $hi and $lo.
- // 66 is the (notional, I think) register for signal-handler return.
- AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
-
- // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
- // They are one bit wide and ignored here.
-
- // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
- // (coprocessor 1 is the FP unit)
- // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
- // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
- // 176-181 are the DSP accumulator registers.
- AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// M68k ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class M68kTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- M68kTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const override;
-};
-
-} // namespace
-
-void M68kTargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if (const auto *attr = FD->getAttr<M68kInterruptAttr>()) {
- // Handle 'interrupt' attribute:
- llvm::Function *F = cast<llvm::Function>(GV);
-
- // Step 1: Set ISR calling convention.
- F->setCallingConv(llvm::CallingConv::M68k_INTR);
-
- // Step 2: Add attributes goodness.
- F->addFnAttr(llvm::Attribute::NoInline);
-
- // Step 3: Emit ISR vector alias.
- unsigned Num = attr->getNumber() / 2;
- llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
- "__isr_" + Twine(Num), F);
- }
- }
-}
-
-//===----------------------------------------------------------------------===//
-// AVR ABI Implementation. Documented at
-// https://gcc.gnu.org/wiki/avr-gcc#Calling_Convention
-// https://gcc.gnu.org/wiki/avr-gcc#Reduced_Tiny
-//===----------------------------------------------------------------------===//
-
-namespace {
-class AVRABIInfo : public DefaultABIInfo {
-private:
- // The total amount of registers can be used to pass parameters. It is 18 on
- // AVR, or 6 on AVRTiny.
- const unsigned ParamRegs;
- // The total amount of registers can be used to pass return value. It is 8 on
- // AVR, or 4 on AVRTiny.
- const unsigned RetRegs;
-
-public:
- AVRABIInfo(CodeGenTypes &CGT, unsigned NPR, unsigned NRR)
- : DefaultABIInfo(CGT), ParamRegs(NPR), RetRegs(NRR) {}
-
- ABIArgInfo classifyReturnType(QualType Ty, bool &LargeRet) const {
- // On AVR, a return struct with size less than or equals to 8 bytes is
- // returned directly via registers R18-R25. On AVRTiny, a return struct
- // with size less than or equals to 4 bytes is returned directly via
- // registers R22-R25.
- if (isAggregateTypeForABI(Ty) &&
- getContext().getTypeSize(Ty) <= RetRegs * 8)
- return ABIArgInfo::getDirect();
- // A return value (struct or scalar) with larger size is returned via a
- // stack slot, along with a pointer as the function's implicit argument.
- if (getContext().getTypeSize(Ty) > RetRegs * 8) {
- LargeRet = true;
- return getNaturalAlignIndirect(Ty);
- }
- // An i8 return value should not be extended to i16, since AVR has 8-bit
- // registers.
- if (Ty->isIntegralOrEnumerationType() && getContext().getTypeSize(Ty) <= 8)
- return ABIArgInfo::getDirect();
- // Otherwise we follow the default way which is compatible.
- return DefaultABIInfo::classifyReturnType(Ty);
- }
-
- ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegs) const {
- unsigned TySize = getContext().getTypeSize(Ty);
-
- // An int8 type argument always costs two registers like an int16.
- if (TySize == 8 && NumRegs >= 2) {
- NumRegs -= 2;
- return ABIArgInfo::getExtend(Ty);
- }
-
- // If the argument size is an odd number of bytes, round up the size
- // to the next even number.
- TySize = llvm::alignTo(TySize, 16);
-
- // Any type including an array/struct type can be passed in rgisters,
- // if there are enough registers left.
- if (TySize <= NumRegs * 8) {
- NumRegs -= TySize / 8;
- return ABIArgInfo::getDirect();
- }
-
- // An argument is passed either completely in registers or completely in
- // memory. Since there are not enough registers left, current argument
- // and all other unprocessed arguments should be passed in memory.
- // However we still need to return `ABIArgInfo::getDirect()` other than
- // `ABIInfo::getNaturalAlignIndirect(Ty)`, otherwise an extra stack slot
- // will be allocated, so the stack frame layout will be incompatible with
- // avr-gcc.
- NumRegs = 0;
- return ABIArgInfo::getDirect();
- }
-
- void computeInfo(CGFunctionInfo &FI) const override {
- // Decide the return type.
- bool LargeRet = false;
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), LargeRet);
-
- // Decide each argument type. The total number of registers can be used for
- // arguments depends on several factors:
- // 1. Arguments of varargs functions are passed on the stack. This applies
- // even to the named arguments. So no register can be used.
- // 2. Total 18 registers can be used on avr and 6 ones on avrtiny.
- // 3. If the return type is a struct with too large size, two registers
- // (out of 18/6) will be cost as an implicit pointer argument.
- unsigned NumRegs = ParamRegs;
- if (FI.isVariadic())
- NumRegs = 0;
- else if (LargeRet)
- NumRegs -= 2;
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type, NumRegs);
- }
-};
-
-class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- AVRTargetCodeGenInfo(CodeGenTypes &CGT, unsigned NPR, unsigned NRR)
- : TargetCodeGenInfo(std::make_unique<AVRABIInfo>(CGT, NPR, NRR)) {}
-
- LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
- const VarDecl *D) const override {
- // Check if global/static variable is defined in address space
- // 1~6 (__flash, __flash1, __flash2, __flash3, __flash4, __flash5)
- // but not constant.
- if (D) {
- LangAS AS = D->getType().getAddressSpace();
- if (isTargetAddressSpace(AS) && 1 <= toTargetAddressSpace(AS) &&
- toTargetAddressSpace(AS) <= 6 && !D->getType().isConstQualified())
- CGM.getDiags().Report(D->getLocation(),
- diag::err_verify_nonconst_addrspace)
- << "__flash*";
- }
- return TargetCodeGenInfo::getGlobalVarAddressSpace(CGM, D);
- }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- if (GV->isDeclaration())
- return;
- const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD) return;
- auto *Fn = cast<llvm::Function>(GV);
-
- if (FD->getAttr<AVRInterruptAttr>())
- Fn->addFnAttr("interrupt");
-
- if (FD->getAttr<AVRSignalAttr>())
- Fn->addFnAttr("signal");
- }
-};
-}
-
-//===----------------------------------------------------------------------===//
-// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
-// Currently subclassed only to implement custom OpenCL C function attribute
-// handling.
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
-public:
- TCETargetCodeGenInfo(CodeGenTypes &CGT)
- : DefaultTargetCodeGenInfo(CGT) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const override;
-};
-
-void TCETargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (GV->isDeclaration())
- return;
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD) return;
-
- llvm::Function *F = cast<llvm::Function>(GV);
-
- if (M.getLangOpts().OpenCL) {
- if (FD->hasAttr<OpenCLKernelAttr>()) {
- // OpenCL C Kernel functions are not subject to inlining
- F->addFnAttr(llvm::Attribute::NoInline);
- const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
- if (Attr) {
- // Convert the reqd_work_group_size() attributes to metadata.
- llvm::LLVMContext &Context = F->getContext();
- llvm::NamedMDNode *OpenCLMetadata =
- M.getModule().getOrInsertNamedMetadata(
- "opencl.kernel_wg_size_info");
-
- SmallVector<llvm::Metadata *, 5> Operands;
- Operands.push_back(llvm::ConstantAsMetadata::get(F));
-
- Operands.push_back(
- llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
- M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
- Operands.push_back(
- llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
- M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
- Operands.push_back(
- llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
- M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
-
- // Add a boolean constant operand for "required" (true) or "hint"
- // (false) for implementing the work_group_size_hint attr later.
- // Currently always true as the hint is not yet implemented.
- Operands.push_back(
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
- OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
- }
- }
- }
-}
-
-}
-
-//===----------------------------------------------------------------------===//
-// Hexagon ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class HexagonABIInfo : public DefaultABIInfo {
-public:
- HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
-
-private:
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;
-
- void computeInfo(CGFunctionInfo &FI) const override;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
- Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
- QualType Ty) const;
- Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
- QualType Ty) const;
- Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
- QualType Ty) const;
-};
-
-class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- return 29;
- }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &GCM) const override {
- if (GV->isDeclaration())
- return;
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD)
- return;
- }
-};
-
-} // namespace
-
-void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
- unsigned RegsLeft = 6;
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type, &RegsLeft);
-}
-
-static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
- assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
- " through registers");
-
- if (*RegsLeft == 0)
- return false;
-
- if (Size <= 32) {
- (*RegsLeft)--;
- return true;
- }
-
- if (2 <= (*RegsLeft & (~1U))) {
- *RegsLeft = (*RegsLeft & (~1U)) - 2;
- return true;
- }
-
- // Next available register was r5 but candidate was greater than 32-bits so it
- // has to go on the stack. However we still consume r5
- if (*RegsLeft == 1)
- *RegsLeft = 0;
-
- return false;
-}
-
-ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
- unsigned *RegsLeft) const {
- if (!isAggregateTypeForABI(Ty)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- uint64_t Size = getContext().getTypeSize(Ty);
- if (Size <= 64)
- HexagonAdjustRegsLeft(Size, RegsLeft);
-
- if (Size > 64 && Ty->isBitIntType())
- return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
-
- return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect();
- }
-
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- // Ignore empty records.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- uint64_t Size = getContext().getTypeSize(Ty);
- unsigned Align = getContext().getTypeAlign(Ty);
-
- if (Size > 64)
- return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
-
- if (HexagonAdjustRegsLeft(Size, RegsLeft))
- Align = Size <= 32 ? 32 : 64;
- if (Size <= Align) {
- // Pass in the smallest viable integer type.
- if (!llvm::isPowerOf2_64(Size))
- Size = llvm::NextPowerOf2(Size);
- return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
- }
- return DefaultABIInfo::classifyArgumentType(Ty);
-}
-
-ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- const TargetInfo &T = CGT.getTarget();
- uint64_t Size = getContext().getTypeSize(RetTy);
-
- if (RetTy->getAs<VectorType>()) {
- // HVX vectors are returned in vector registers or register pairs.
- if (T.hasFeature("hvx")) {
- assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"));
- uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
- if (Size == VecSize || Size == 2*VecSize)
- return ABIArgInfo::getDirectInReg();
- }
- // Large vector types should be returned via memory.
- if (Size > 64)
- return getNaturalAlignIndirect(RetTy);
- }
-
- if (!isAggregateTypeForABI(RetTy)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- if (Size > 64 && RetTy->isBitIntType())
- return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
-
- return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect();
- }
-
- if (isEmptyRecord(getContext(), RetTy, true))
- return ABIArgInfo::getIgnore();
-
- // Aggregates <= 8 bytes are returned in registers, other aggregates
- // are returned indirectly.
- if (Size <= 64) {
- // Return in the smallest viable integer type.
- if (!llvm::isPowerOf2_64(Size))
- Size = llvm::NextPowerOf2(Size);
- return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
- }
- return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
-}
-
-Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
- Address VAListAddr,
- QualType Ty) const {
- // Load the overflow area pointer.
- Address __overflow_area_pointer_p =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
- llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
- __overflow_area_pointer_p, "__overflow_area_pointer");
-
- uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
- if (Align > 4) {
- // Alignment should be a power of 2.
- assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!");
-
- // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
- llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
-
- // Add offset to the current pointer to access the argument.
- __overflow_area_pointer =
- CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer, Offset);
- llvm::Value *AsInt =
- CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
-
- // Create a mask which should be "AND"ed
- // with (overflow_arg_area + align - 1)
- llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
- __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
- CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
- "__overflow_area_pointer.align");
- }
-
- // Get the type of the argument from memory and bitcast
- // overflow area pointer to the argument type.
- llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
- Address AddrTyped = CGF.Builder.CreateElementBitCast(
- Address(__overflow_area_pointer, CGF.Int8Ty,
- CharUnits::fromQuantity(Align)),
- PTy);
-
- // Round up to the minimum stack alignment for varargs which is 4 bytes.
- uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
-
- __overflow_area_pointer = CGF.Builder.CreateGEP(
- CGF.Int8Ty, __overflow_area_pointer,
- llvm::ConstantInt::get(CGF.Int32Ty, Offset),
- "__overflow_area_pointer.next");
- CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);
-
- return AddrTyped;
-}
-
-Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
- Address VAListAddr,
- QualType Ty) const {
- // FIXME: Need to handle alignment
- llvm::Type *BP = CGF.Int8PtrTy;
- CGBuilderTy &Builder = CGF.Builder;
- Address VAListAddrAsBPP = Builder.CreateElementBitCast(VAListAddr, BP, "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- // Handle address alignment for type alignment > 32 bits
- uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
- if (TyAlign > 4) {
- assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
- llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
- AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
- AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
- Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
- }
- Address AddrTyped = Builder.CreateElementBitCast(
- Address(Addr, CGF.Int8Ty, CharUnits::fromQuantity(TyAlign)),
- CGF.ConvertType(Ty));
-
- uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
- llvm::Value *NextAddr = Builder.CreateGEP(
- CGF.Int8Ty, Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- return AddrTyped;
-}
-
-Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
- Address VAListAddr,
- QualType Ty) const {
- int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;
-
- if (ArgSize > 8)
- return EmitVAArgFromMemory(CGF, VAListAddr, Ty);
-
- // Here we have check if the argument is in register area or
- // in overflow area.
- // If the saved register area pointer + argsize rounded up to alignment >
- // saved register area end pointer, argument is in overflow area.
- unsigned RegsLeft = 6;
- Ty = CGF.getContext().getCanonicalType(Ty);
- (void)classifyArgumentType(Ty, &RegsLeft);
-
- llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
- llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
- llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
- llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
-
- // Get rounded size of the argument.GCC does not allow vararg of
- // size < 4 bytes. We follow the same logic here.
- ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
- int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
-
- // Argument may be in saved register area
- CGF.EmitBlock(MaybeRegBlock);
-
- // Load the current saved register area pointer.
- Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
- VAListAddr, 0, "__current_saved_reg_area_pointer_p");
- llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
- __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");
-
- // Load the saved register area end pointer.
- Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
- VAListAddr, 1, "__saved_reg_area_end_pointer_p");
- llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
- __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");
-
- // If the size of argument is > 4 bytes, check if the stack
- // location is aligned to 8 bytes
- if (ArgAlign > 4) {
-
- llvm::Value *__current_saved_reg_area_pointer_int =
- CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
- CGF.Int32Ty);
-
- __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
- __current_saved_reg_area_pointer_int,
- llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
- "align_current_saved_reg_area_pointer");
-
- __current_saved_reg_area_pointer_int =
- CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
- llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
- "align_current_saved_reg_area_pointer");
-
- __current_saved_reg_area_pointer =
- CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
- __current_saved_reg_area_pointer->getType(),
- "align_current_saved_reg_area_pointer");
- }
-
- llvm::Value *__new_saved_reg_area_pointer =
- CGF.Builder.CreateGEP(CGF.Int8Ty, __current_saved_reg_area_pointer,
- llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
- "__new_saved_reg_area_pointer");
-
- llvm::Value *UsingStack = nullptr;
- UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
- __saved_reg_area_end_pointer);
-
- CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);
-
- // Argument in saved register area
- // Implement the block where argument is in register saved area
- CGF.EmitBlock(InRegBlock);
-
- llvm::Type *PTy = CGF.ConvertType(Ty);
- llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
- __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));
-
- CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
- __current_saved_reg_area_pointer_p);
-
- CGF.EmitBranch(ContBlock);
-
- // Argument in overflow area
- // Implement the block where the argument is in overflow area.
- CGF.EmitBlock(OnStackBlock);
-
- // Load the overflow area pointer
- Address __overflow_area_pointer_p =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
- llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
- __overflow_area_pointer_p, "__overflow_area_pointer");
-
- // Align the overflow area pointer according to the alignment of the argument
- if (ArgAlign > 4) {
- llvm::Value *__overflow_area_pointer_int =
- CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
-
- __overflow_area_pointer_int =
- CGF.Builder.CreateAdd(__overflow_area_pointer_int,
- llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
- "align_overflow_area_pointer");
-
- __overflow_area_pointer_int =
- CGF.Builder.CreateAnd(__overflow_area_pointer_int,
- llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
- "align_overflow_area_pointer");
-
- __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
- __overflow_area_pointer_int, __overflow_area_pointer->getType(),
- "align_overflow_area_pointer");
- }
-
- // Get the pointer for next argument in overflow area and store it
- // to overflow area pointer.
- llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
- CGF.Int8Ty, __overflow_area_pointer,
- llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
- "__overflow_area_pointer.next");
-
- CGF.Builder.CreateStore(__new_overflow_area_pointer,
- __overflow_area_pointer_p);
-
- CGF.Builder.CreateStore(__new_overflow_area_pointer,
- __current_saved_reg_area_pointer_p);
-
- // Bitcast the overflow area pointer to the type of argument.
- llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
- __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));
-
- CGF.EmitBranch(ContBlock);
-
- // Get the correct pointer to load the variable argument
- // Implement the ContBlock
- CGF.EmitBlock(ContBlock);
-
- llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
- llvm::Type *MemPTy = llvm::PointerType::getUnqual(MemTy);
- llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
- ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
- ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);
-
- return Address(ArgAddr, MemTy, CharUnits::fromQuantity(ArgAlign));
-}
-
-Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
-
- if (getTarget().getTriple().isMusl())
- return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);
-
- return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
-}
-
-//===----------------------------------------------------------------------===//
-// Lanai ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-class LanaiABIInfo : public DefaultABIInfo {
-public:
- LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
-
- bool shouldUseInReg(QualType Ty, CCState &State) const;
-
- void computeInfo(CGFunctionInfo &FI) const override {
- CCState State(FI);
- // Lanai uses 4 registers to pass arguments unless the function has the
- // regparm attribute set.
- if (FI.getHasRegParm()) {
- State.FreeRegs = FI.getRegParm();
- } else {
- State.FreeRegs = 4;
- }
-
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type, State);
- }
-
- ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
-};
-} // end anonymous namespace
-
-bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
- unsigned Size = getContext().getTypeSize(Ty);
- unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
-
- if (SizeInRegs == 0)
- return false;
-
- if (SizeInRegs > State.FreeRegs) {
- State.FreeRegs = 0;
- return false;
- }
-
- State.FreeRegs -= SizeInRegs;
-
- return true;
-}
-
-ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
- CCState &State) const {
- if (!ByVal) {
- if (State.FreeRegs) {
- --State.FreeRegs; // Non-byval indirects just use one pointer.
- return getNaturalAlignIndirectInReg(Ty);
- }
- return getNaturalAlignIndirect(Ty, false);
- }
-
- // Compute the byval alignment.
- const unsigned MinABIStackAlignInBytes = 4;
- unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
- /*Realign=*/TypeAlign >
- MinABIStackAlignInBytes);
-}
-
-ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
- CCState &State) const {
- // Check with the C++ ABI first.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (RT) {
- CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
- if (RAA == CGCXXABI::RAA_Indirect) {
- return getIndirectResult(Ty, /*ByVal=*/false, State);
- } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
- return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
- }
- }
-
- if (isAggregateTypeForABI(Ty)) {
- // Structures with flexible arrays are always indirect.
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectResult(Ty, /*ByVal=*/true, State);
-
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- llvm::LLVMContext &LLVMContext = getVMContext();
- unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
- if (SizeInRegs <= State.FreeRegs) {
- llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
- SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
- llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
- State.FreeRegs -= SizeInRegs;
- return ABIArgInfo::getDirectInReg(Result);
- } else {
- State.FreeRegs = 0;
- }
- return getIndirectResult(Ty, true, State);
- }
-
- // Treat an enum type as its underlying type.
- if (const auto *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- bool InReg = shouldUseInReg(Ty, State);
-
- // Don't pass >64 bit integers in registers.
- if (const auto *EIT = Ty->getAs<BitIntType>())
- if (EIT->getNumBits() > 64)
- return getIndirectResult(Ty, /*ByVal=*/true, State);
-
- if (isPromotableIntegerTypeForABI(Ty)) {
- if (InReg)
- return ABIArgInfo::getDirectInReg();
- return ABIArgInfo::getExtend(Ty);
- }
- if (InReg)
- return ABIArgInfo::getDirectInReg();
- return ABIArgInfo::getDirect();
-}
-
-namespace {
-class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {}
-};
-}
-
-//===----------------------------------------------------------------------===//
-// AMDGPU ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class AMDGPUABIInfo final : public DefaultABIInfo {
-private:
- static const unsigned MaxNumRegsForArgsRet = 16;
-
- unsigned numRegsForType(QualType Ty) const;
-
- bool isHomogeneousAggregateBaseType(QualType Ty) const override;
- bool isHomogeneousAggregateSmallEnough(const Type *Base,
- uint64_t Members) const override;
-
- // Coerce HIP scalar pointer arguments from generic pointers to global ones.
- llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
- unsigned ToAS) const {
- // Single value types.
- auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
- if (PtrTy && PtrTy->getAddressSpace() == FromAS)
- return llvm::PointerType::getWithSamePointeeType(PtrTy, ToAS);
- return Ty;
- }
-
-public:
- explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
- DefaultABIInfo(CGT) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
- ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;
-
- void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-};
-
-bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
- return true;
-}
-
-bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
- const Type *Base, uint64_t Members) const {
- uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;
-
- // Homogeneous Aggregates may occupy at most 16 registers.
- return Members * NumRegs <= MaxNumRegsForArgsRet;
-}
-
-/// Estimate number of registers the type will use when passed in registers.
-unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
- unsigned NumRegs = 0;
-
- if (const VectorType *VT = Ty->getAs<VectorType>()) {
- // Compute from the number of elements. The reported size is based on the
- // in-memory size, which includes the padding 4th element for 3-vectors.
- QualType EltTy = VT->getElementType();
- unsigned EltSize = getContext().getTypeSize(EltTy);
-
- // 16-bit element vectors should be passed as packed.
- if (EltSize == 16)
- return (VT->getNumElements() + 1) / 2;
-
- unsigned EltNumRegs = (EltSize + 31) / 32;
- return EltNumRegs * VT->getNumElements();
- }
-
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- assert(!RD->hasFlexibleArrayMember());
-
- for (const FieldDecl *Field : RD->fields()) {
- QualType FieldTy = Field->getType();
- NumRegs += numRegsForType(FieldTy);
- }
-
- return NumRegs;
- }
-
- return (getContext().getTypeSize(Ty) + 31) / 32;
-}
-
-void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
- llvm::CallingConv::ID CC = FI.getCallingConvention();
-
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
-
- unsigned NumRegsLeft = MaxNumRegsForArgsRet;
- for (auto &Arg : FI.arguments()) {
- if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
- Arg.info = classifyKernelArgumentType(Arg.type);
- } else {
- Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
- }
- }
-}
-
-Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- llvm_unreachable("AMDGPU does not support varargs");
-}
-
-ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
- if (isAggregateTypeForABI(RetTy)) {
- // Records with non-trivial destructors/copy-constructors should not be
- // returned by value.
- if (!getRecordArgABI(RetTy, getCXXABI())) {
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), RetTy, true))
- return ABIArgInfo::getIgnore();
-
- // Lower single-element structs to just return a regular value.
- if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
- return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
-
- if (const RecordType *RT = RetTy->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return DefaultABIInfo::classifyReturnType(RetTy);
- }
-
- // Pack aggregates <= 4 bytes into single VGPR or pair.
- uint64_t Size = getContext().getTypeSize(RetTy);
- if (Size <= 16)
- return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
-
- if (Size <= 32)
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
-
- if (Size <= 64) {
- llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
- }
-
- if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
- return ABIArgInfo::getDirect();
- }
- }
-
- // Otherwise just do the default thing.
- return DefaultABIInfo::classifyReturnType(RetTy);
-}
-
-/// For kernels all parameters are really passed in a special buffer. It doesn't
-/// make sense to pass anything byval, so everything must be direct.
-ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- // TODO: Can we omit empty structs?
-
- if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
- Ty = QualType(SeltTy, 0);
-
- llvm::Type *OrigLTy = CGT.ConvertType(Ty);
- llvm::Type *LTy = OrigLTy;
- if (getContext().getLangOpts().HIP) {
- LTy = coerceKernelArgumentType(
- OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
- /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
- }
-
- // FIXME: Should also use this for OpenCL, but it requires addressing the
- // problem of kernels being called.
- //
- // FIXME: This doesn't apply the optimization of coercing pointers in structs
- // to global address space when using byref. This would require implementing a
- // new kind of coercion of the in-memory type when for indirect arguments.
- if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
- isAggregateTypeForABI(Ty)) {
- return ABIArgInfo::getIndirectAliased(
- getContext().getTypeAlignInChars(Ty),
- getContext().getTargetAddressSpace(LangAS::opencl_constant),
- false /*Realign*/, nullptr /*Padding*/);
- }
-
- // If we set CanBeFlattened to true, CodeGen will expand the struct to its
- // individual elements, which confuses the Clover OpenCL backend; therefore we
- // have to set it to false here. Other args of getDirect() are just defaults.
- return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
-}
-
-ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
- unsigned &NumRegsLeft) const {
- assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
-
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- if (isAggregateTypeForABI(Ty)) {
- // Records with non-trivial destructors/copy-constructors should not be
- // passed by value.
- if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- // Lower single-element structs to just pass a regular value. TODO: We
- // could do reasonable-size multiple-element structs too, using getExpand(),
- // though watch out for things like bitfields.
- if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
- return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
-
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return DefaultABIInfo::classifyArgumentType(Ty);
- }
-
- // Pack aggregates <= 8 bytes into single VGPR or pair.
- uint64_t Size = getContext().getTypeSize(Ty);
- if (Size <= 64) {
- unsigned NumRegs = (Size + 31) / 32;
- NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
-
- if (Size <= 16)
- return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
-
- if (Size <= 32)
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
-
- // XXX: Should this be i64 instead, and should the limit increase?
- llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
- }
-
- if (NumRegsLeft > 0) {
- unsigned NumRegs = numRegsForType(Ty);
- if (NumRegsLeft >= NumRegs) {
- NumRegsLeft -= NumRegs;
- return ABIArgInfo::getDirect();
- }
- }
- }
-
- // Otherwise just do the default thing.
- ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
- if (!ArgInfo.isIndirect()) {
- unsigned NumRegs = numRegsForType(Ty);
- NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
- }
-
- return ArgInfo;
-}
-
-class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}
-
- void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
- CodeGenModule &CGM) const;
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const override;
- unsigned getOpenCLKernelCallingConv() const override;
-
- llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
- llvm::PointerType *T, QualType QT) const override;
-
- LangAS getASTAllocaAddressSpace() const override {
- return getLangASFromTargetAS(
- getABIInfo().getDataLayout().getAllocaAddrSpace());
- }
- LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
- const VarDecl *D) const override;
- llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
- SyncScope Scope,
- llvm::AtomicOrdering Ordering,
- llvm::LLVMContext &Ctx) const override;
- llvm::Function *
- createEnqueuedBlockKernel(CodeGenFunction &CGF,
- llvm::Function *BlockInvokeFunc,
- llvm::Type *BlockTy) const override;
- bool shouldEmitStaticExternCAliases() const override;
- void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
-};
-}
-
-static bool requiresAMDGPUProtectedVisibility(const Decl *D,
- llvm::GlobalValue *GV) {
- if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
- return false;
-
- return D->hasAttr<OpenCLKernelAttr>() ||
- (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
- (isa<VarDecl>(D) &&
- (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
- cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
- cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType()));
-}
-
-void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
- const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const {
- const auto *ReqdWGS =
- M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
- const bool IsOpenCLKernel =
- M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>();
- const bool IsHIPKernel = M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>();
-
- const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
- if (ReqdWGS || FlatWGS) {
- unsigned Min = 0;
- unsigned Max = 0;
- if (FlatWGS) {
- Min = FlatWGS->getMin()
- ->EvaluateKnownConstInt(M.getContext())
- .getExtValue();
- Max = FlatWGS->getMax()
- ->EvaluateKnownConstInt(M.getContext())
- .getExtValue();
- }
- if (ReqdWGS && Min == 0 && Max == 0)
- Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
-
- if (Min != 0) {
- assert(Min <= Max && "Min must be less than or equal Max");
-
- std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
- F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
- } else
- assert(Max == 0 && "Max must be zero");
- } else if (IsOpenCLKernel || IsHIPKernel) {
- // By default, restrict the maximum size to a value specified by
- // --gpu-max-threads-per-block=n or its default value for HIP.
- const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
- const unsigned DefaultMaxWorkGroupSize =
- IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
- : M.getLangOpts().GPUMaxThreadsPerBlock;
- std::string AttrVal =
- std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
- F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
- }
-
- if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
- unsigned Min =
- Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue();
- unsigned Max = Attr->getMax() ? Attr->getMax()
- ->EvaluateKnownConstInt(M.getContext())
- .getExtValue()
- : 0;
-
- if (Min != 0) {
- assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");
-
- std::string AttrVal = llvm::utostr(Min);
- if (Max != 0)
- AttrVal = AttrVal + "," + llvm::utostr(Max);
- F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
- } else
- assert(Max == 0 && "Max must be zero");
- }
-
- if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
- unsigned NumSGPR = Attr->getNumSGPR();
-
- if (NumSGPR != 0)
- F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
- }
-
- if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
- uint32_t NumVGPR = Attr->getNumVGPR();
-
- if (NumVGPR != 0)
- F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
- }
-}
-
-void AMDGPUTargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (requiresAMDGPUProtectedVisibility(D, GV)) {
- GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
- GV->setDSOLocal(true);
- }
-
- if (GV->isDeclaration())
- return;
-
- llvm::Function *F = dyn_cast<llvm::Function>(GV);
- if (!F)
- return;
-
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (FD)
- setFunctionDeclAttributes(FD, F, M);
-
- const bool IsHIPKernel =
- M.getLangOpts().HIP && FD && FD->hasAttr<CUDAGlobalAttr>();
- const bool IsOpenMPkernel =
- M.getLangOpts().OpenMPIsDevice &&
- (F->getCallingConv() == llvm::CallingConv::AMDGPU_KERNEL);
-
- // TODO: This should be moved to language specific attributes instead.
- if (IsHIPKernel || IsOpenMPkernel)
- F->addFnAttr("uniform-work-group-size", "true");
-
- if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics())
- F->addFnAttr("amdgpu-unsafe-fp-atomics", "true");
-
- if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
- F->addFnAttr("amdgpu-ieee", "false");
-}
-
-unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
- return llvm::CallingConv::AMDGPU_KERNEL;
-}
-
-// Currently LLVM assumes null pointers always have value 0,
-// which results in incorrectly transformed IR. Therefore, instead of
-// emitting null pointers in private and local address spaces, a null
-// pointer in generic address space is emitted which is casted to a
-// pointer in local or private address space.
-llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
- const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
- QualType QT) const {
- if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
- return llvm::ConstantPointerNull::get(PT);
-
- auto &Ctx = CGM.getContext();
- auto NPT = llvm::PointerType::getWithSamePointeeType(
- PT, Ctx.getTargetAddressSpace(LangAS::opencl_generic));
- return llvm::ConstantExpr::getAddrSpaceCast(
- llvm::ConstantPointerNull::get(NPT), PT);
-}
-
-LangAS
-AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
- const VarDecl *D) const {
- assert(!CGM.getLangOpts().OpenCL &&
- !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
- "Address space agnostic languages only");
- LangAS DefaultGlobalAS = getLangASFromTargetAS(
- CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
- if (!D)
- return DefaultGlobalAS;
-
- LangAS AddrSpace = D->getType().getAddressSpace();
- assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace));
- if (AddrSpace != LangAS::Default)
- return AddrSpace;
-
- // Only promote to address space 4 if VarDecl has constant initialization.
- if (CGM.isTypeConstant(D->getType(), false) &&
- D->hasConstantInitialization()) {
- if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
- return *ConstAS;
- }
- return DefaultGlobalAS;
-}
-
-llvm::SyncScope::ID
-AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
- SyncScope Scope,
- llvm::AtomicOrdering Ordering,
- llvm::LLVMContext &Ctx) const {
- std::string Name;
- switch (Scope) {
- case SyncScope::HIPSingleThread:
- Name = "singlethread";
- break;
- case SyncScope::HIPWavefront:
- case SyncScope::OpenCLSubGroup:
- Name = "wavefront";
- break;
- case SyncScope::HIPWorkgroup:
- case SyncScope::OpenCLWorkGroup:
- Name = "workgroup";
- break;
- case SyncScope::HIPAgent:
- case SyncScope::OpenCLDevice:
- Name = "agent";
- break;
- case SyncScope::HIPSystem:
- case SyncScope::OpenCLAllSVMDevices:
- Name = "";
- break;
- }
-
- if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
- if (!Name.empty())
- Name = Twine(Twine(Name) + Twine("-")).str();
-
- Name = Twine(Twine(Name) + Twine("one-as")).str();
- }
-
- return Ctx.getOrInsertSyncScopeID(Name);
-}
-
-bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
- return false;
-}
-
-void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
- const FunctionType *&FT) const {
- FT = getABIInfo().getContext().adjustFunctionType(
- FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
-}
-
-//===----------------------------------------------------------------------===//
-// SPARC v8 ABI Implementation.
-// Based on the SPARC Compliance Definition version 2.4.1.
-//
-// Ensures that complex values are passed in registers.
-//
-namespace {
-class SparcV8ABIInfo : public DefaultABIInfo {
-public:
- SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
-
-private:
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- void computeInfo(CGFunctionInfo &FI) const override;
-};
-} // end anonymous namespace
-
-
-ABIArgInfo
-SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
- if (Ty->isAnyComplexType()) {
- return ABIArgInfo::getDirect();
- }
- else {
- return DefaultABIInfo::classifyReturnType(Ty);
- }
-}
-
-void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
-
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &Arg : FI.arguments())
- Arg.info = classifyArgumentType(Arg.type);
-}
-
-namespace {
-class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {}
-
- llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override {
- int Offset;
- if (isAggregateTypeForABI(CGF.CurFnInfo->getReturnType()))
- Offset = 12;
- else
- Offset = 8;
- return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
- llvm::ConstantInt::get(CGF.Int32Ty, Offset));
- }
-
- llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override {
- int Offset;
- if (isAggregateTypeForABI(CGF.CurFnInfo->getReturnType()))
- Offset = -12;
- else
- Offset = -8;
- return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
- llvm::ConstantInt::get(CGF.Int32Ty, Offset));
- }
-};
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
-// SPARC v9 ABI Implementation.
-// Based on the SPARC Compliance Definition version 2.4.1.
-//
-// Function arguments a mapped to a nominal "parameter array" and promoted to
-// registers depending on their type. Each argument occupies 8 or 16 bytes in
-// the array, structs larger than 16 bytes are passed indirectly.
-//
-// One case requires special care:
-//
-// struct mixed {
-// int i;
-// float f;
-// };
-//
-// When a struct mixed is passed by value, it only occupies 8 bytes in the
-// parameter array, but the int is passed in an integer register, and the float
-// is passed in a floating point register. This is represented as two arguments
-// with the LLVM IR inreg attribute:
-//
-// declare void f(i32 inreg %i, float inreg %f)
-//
-// The code generator will only allocate 4 bytes from the parameter array for
-// the inreg arguments. All other arguments are allocated a multiple of 8
-// bytes.
-//
-namespace {
-class SparcV9ABIInfo : public ABIInfo {
-public:
- SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
-
-private:
- ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
- void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- // Coercion type builder for structs passed in registers. The coercion type
- // serves two purposes:
- //
- // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
- // in registers.
- // 2. Expose aligned floating point elements as first-level elements, so the
- // code generator knows to pass them in floating point registers.
- //
- // We also compute the InReg flag which indicates that the struct contains
- // aligned 32-bit floats.
- //
- struct CoerceBuilder {
- llvm::LLVMContext &Context;
- const llvm::DataLayout &DL;
- SmallVector<llvm::Type*, 8> Elems;
- uint64_t Size;
- bool InReg;
-
- CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
- : Context(c), DL(dl), Size(0), InReg(false) {}
-
- // Pad Elems with integers until Size is ToSize.
- void pad(uint64_t ToSize) {
- assert(ToSize >= Size && "Cannot remove elements");
- if (ToSize == Size)
- return;
-
- // Finish the current 64-bit word.
- uint64_t Aligned = llvm::alignTo(Size, 64);
- if (Aligned > Size && Aligned <= ToSize) {
- Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
- Size = Aligned;
- }
-
- // Add whole 64-bit words.
- while (Size + 64 <= ToSize) {
- Elems.push_back(llvm::Type::getInt64Ty(Context));
- Size += 64;
- }
-
- // Final in-word padding.
- if (Size < ToSize) {
- Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
- Size = ToSize;
- }
- }
-
- // Add a floating point element at Offset.
- void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
- // Unaligned floats are treated as integers.
- if (Offset % Bits)
- return;
- // The InReg flag is only required if there are any floats < 64 bits.
- if (Bits < 64)
- InReg = true;
- pad(Offset);
- Elems.push_back(Ty);
- Size = Offset + Bits;
- }
-
- // Add a struct type to the coercion type, starting at Offset (in bits).
- void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
- const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
- for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
- llvm::Type *ElemTy = StrTy->getElementType(i);
- uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
- switch (ElemTy->getTypeID()) {
- case llvm::Type::StructTyID:
- addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
- break;
- case llvm::Type::FloatTyID:
- addFloat(ElemOffset, ElemTy, 32);
- break;
- case llvm::Type::DoubleTyID:
- addFloat(ElemOffset, ElemTy, 64);
- break;
- case llvm::Type::FP128TyID:
- addFloat(ElemOffset, ElemTy, 128);
- break;
- case llvm::Type::PointerTyID:
- if (ElemOffset % 64 == 0) {
- pad(ElemOffset);
- Elems.push_back(ElemTy);
- Size += 64;
- }
- break;
- default:
- break;
- }
- }
- }
-
- // Check if Ty is a usable substitute for the coercion type.
- bool isUsableType(llvm::StructType *Ty) const {
- return llvm::ArrayRef(Elems) == Ty->elements();
- }
-
- // Get the coercion type as a literal struct type.
- llvm::Type *getType() const {
- if (Elems.size() == 1)
- return Elems.front();
- else
- return llvm::StructType::get(Context, Elems);
- }
- };
-};
-} // end anonymous namespace
-
-ABIArgInfo
-SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
- if (Ty->isVoidType())
- return ABIArgInfo::getIgnore();
-
- uint64_t Size = getContext().getTypeSize(Ty);
-
- // Anything too big to fit in registers is passed with an explicit indirect
- // pointer / sret pointer.
- if (Size > SizeLimit)
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // Integer types smaller than a register are extended.
- if (Size < 64 && Ty->isIntegerType())
- return ABIArgInfo::getExtend(Ty);
-
- if (const auto *EIT = Ty->getAs<BitIntType>())
- if (EIT->getNumBits() < 64)
- return ABIArgInfo::getExtend(Ty);
-
- // Other non-aggregates go in registers.
- if (!isAggregateTypeForABI(Ty))
- return ABIArgInfo::getDirect();
-
- // If a C++ object has either a non-trivial copy constructor or a non-trivial
- // destructor, it is passed with an explicit indirect pointer / sret pointer.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- // This is a small aggregate type that should be passed in registers.
- // Build a coercion type from the LLVM struct type.
- llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
- if (!StrTy)
- return ABIArgInfo::getDirect();
-
- CoerceBuilder CB(getVMContext(), getDataLayout());
- CB.addStruct(0, StrTy);
- CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
-
- // Try to use the original type for coercion.
- llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
-
- if (CB.InReg)
- return ABIArgInfo::getDirectInReg(CoerceTy);
- else
- return ABIArgInfo::getDirect(CoerceTy);
-}
-
-Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- ABIArgInfo AI = classifyType(Ty, 16 * 8);
- llvm::Type *ArgTy = CGT.ConvertType(Ty);
- if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
- AI.setCoerceToType(ArgTy);
-
- CharUnits SlotSize = CharUnits::fromQuantity(8);
-
- CGBuilderTy &Builder = CGF.Builder;
- Address Addr = Address(Builder.CreateLoad(VAListAddr, "ap.cur"),
- getVAListElementType(CGF), SlotSize);
- llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
-
- auto TypeInfo = getContext().getTypeInfoInChars(Ty);
-
- Address ArgAddr = Address::invalid();
- CharUnits Stride;
- switch (AI.getKind()) {
- case ABIArgInfo::Expand:
- case ABIArgInfo::CoerceAndExpand:
- case ABIArgInfo::InAlloca:
- llvm_unreachable("Unsupported ABI kind for va_arg");
-
- case ABIArgInfo::Extend: {
- Stride = SlotSize;
- CharUnits Offset = SlotSize - TypeInfo.Width;
- ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
- break;
- }
-
- case ABIArgInfo::Direct: {
- auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
- Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
- ArgAddr = Addr;
- break;
- }
-
- case ABIArgInfo::Indirect:
- case ABIArgInfo::IndirectAliased:
- Stride = SlotSize;
- ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
- ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), ArgTy,
- TypeInfo.Align);
- break;
-
- case ABIArgInfo::Ignore:
- return Address(llvm::UndefValue::get(ArgPtrTy), ArgTy, TypeInfo.Align);
- }
-
- // Update VAList.
- Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
- Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
-
- return Builder.CreateElementBitCast(ArgAddr, ArgTy, "arg.addr");
-}
-
-void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
- for (auto &I : FI.arguments())
- I.info = classifyType(I.type, 16 * 8);
-}
-
-namespace {
-class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {}
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- return 14;
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-
- llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override {
- return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
- llvm::ConstantInt::get(CGF.Int32Ty, 8));
- }
-
- llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override {
- return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
- llvm::ConstantInt::get(CGF.Int32Ty, -8));
- }
-};
-} // end anonymous namespace
-
-bool
-SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
- // This is calculated from the LLVM and GCC tables and verified
- // against gcc output. AFAIK all ABIs use the same encoding.
-
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
-
- llvm::IntegerType *i8 = CGF.Int8Ty;
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
- llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
-
- // 0-31: the 8-byte general-purpose registers
- AssignToArrayRange(Builder, Address, Eight8, 0, 31);
-
- // 32-63: f0-31, the 4-byte floating-point registers
- AssignToArrayRange(Builder, Address, Four8, 32, 63);
-
- // Y = 64
- // PSR = 65
- // WIM = 66
- // TBR = 67
- // PC = 68
- // NPC = 69
- // FSR = 70
- // CSR = 71
- AssignToArrayRange(Builder, Address, Eight8, 64, 71);
-
- // 72-87: d0-15, the 8-byte floating-point registers
- AssignToArrayRange(Builder, Address, Eight8, 72, 87);
-
- return false;
-}
-
-// ARC ABI implementation.
-namespace {
-
-class ARCABIInfo : public DefaultABIInfo {
-public:
- using DefaultABIInfo::DefaultABIInfo;
-
-private:
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
- if (!State.FreeRegs)
- return;
- if (Info.isIndirect() && Info.getInReg())
- State.FreeRegs--;
- else if (Info.isDirect() && Info.getInReg()) {
- unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
- if (sz < State.FreeRegs)
- State.FreeRegs -= sz;
- else
- State.FreeRegs = 0;
- }
- }
-
- void computeInfo(CGFunctionInfo &FI) const override {
- CCState State(FI);
- // ARC uses 8 registers to pass arguments.
- State.FreeRegs = 8;
-
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- updateState(FI.getReturnInfo(), FI.getReturnType(), State);
- for (auto &I : FI.arguments()) {
- I.info = classifyArgumentType(I.type, State.FreeRegs);
- updateState(I.info, I.type, State);
- }
- }
-
- ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
- ABIArgInfo getIndirectByValue(QualType Ty) const;
- ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
- ABIArgInfo classifyReturnType(QualType RetTy) const;
-};
-
-class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- ARCTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {}
-};
-
-
-ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
- return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
- getNaturalAlignIndirect(Ty, false);
-}
-
-ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
- // Compute the byval alignment.
- const unsigned MinABIStackAlignInBytes = 4;
- unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
- TypeAlign > MinABIStackAlignInBytes);
-}
-
-Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
- getContext().getTypeInfoInChars(Ty),
- CharUnits::fromQuantity(4), true);
-}
-
-ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
- uint8_t FreeRegs) const {
- // Handle the generic C++ ABI.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (RT) {
- CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
- if (RAA == CGCXXABI::RAA_Indirect)
- return getIndirectByRef(Ty, FreeRegs > 0);
-
- if (RAA == CGCXXABI::RAA_DirectInMemory)
- return getIndirectByValue(Ty);
- }
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;
-
- if (isAggregateTypeForABI(Ty)) {
- // Structures with flexible arrays are always indirect.
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectByValue(Ty);
-
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- llvm::LLVMContext &LLVMContext = getVMContext();
-
- llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
- SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
- llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
-
- return FreeRegs >= SizeInRegs ?
- ABIArgInfo::getDirectInReg(Result) :
- ABIArgInfo::getDirect(Result, 0, nullptr, false);
- }
-
- if (const auto *EIT = Ty->getAs<BitIntType>())
- if (EIT->getNumBits() > 64)
- return getIndirectByValue(Ty);
-
- return isPromotableIntegerTypeForABI(Ty)
- ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty)
- : ABIArgInfo::getExtend(Ty))
- : (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg()
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isAnyComplexType())
- return ABIArgInfo::getDirectInReg();
-
- // Arguments of size > 4 registers are indirect.
- auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
- if (RetSize > 4)
- return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);
-
- return DefaultABIInfo::classifyReturnType(RetTy);
-}
-
-} // End anonymous namespace.
-
-//===----------------------------------------------------------------------===//
-// XCore ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-/// A SmallStringEnc instance is used to build up the TypeString by passing
-/// it by reference between functions that append to it.
-typedef llvm::SmallString<128> SmallStringEnc;
-
-/// TypeStringCache caches the meta encodings of Types.
-///
-/// The reason for caching TypeStrings is two fold:
-/// 1. To cache a type's encoding for later uses;
-/// 2. As a means to break recursive member type inclusion.
-///
-/// A cache Entry can have a Status of:
-/// NonRecursive: The type encoding is not recursive;
-/// Recursive: The type encoding is recursive;
-/// Incomplete: An incomplete TypeString;
-/// IncompleteUsed: An incomplete TypeString that has been used in a
-/// Recursive type encoding.
-///
-/// A NonRecursive entry will have all of its sub-members expanded as fully
-/// as possible. Whilst it may contain types which are recursive, the type
-/// itself is not recursive and thus its encoding may be safely used whenever
-/// the type is encountered.
-///
-/// A Recursive entry will have all of its sub-members expanded as fully as
-/// possible. The type itself is recursive and it may contain other types which
-/// are recursive. The Recursive encoding must not be used during the expansion
-/// of a recursive type's recursive branch. For simplicity the code uses
-/// IncompleteCount to reject all usage of Recursive encodings for member types.
-///
-/// An Incomplete entry is always a RecordType and only encodes its
-/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
-/// are placed into the cache during type expansion as a means to identify and
-/// handle recursive inclusion of types as sub-members. If there is recursion
-/// the entry becomes IncompleteUsed.
-///
-/// During the expansion of a RecordType's members:
-///
-/// If the cache contains a NonRecursive encoding for the member type, the
-/// cached encoding is used;
-///
-/// If the cache contains a Recursive encoding for the member type, the
-/// cached encoding is 'Swapped' out, as it may be incorrect, and...
-///
-/// If the member is a RecordType, an Incomplete encoding is placed into the
-/// cache to break potential recursive inclusion of itself as a sub-member;
-///
-/// Once a member RecordType has been expanded, its temporary incomplete
-/// entry is removed from the cache. If a Recursive encoding was swapped out
-/// it is swapped back in;
-///
-/// If an incomplete entry is used to expand a sub-member, the incomplete
-/// entry is marked as IncompleteUsed. The cache keeps count of how many
-/// IncompleteUsed entries it currently contains in IncompleteUsedCount;
-///
-/// If a member's encoding is found to be a NonRecursive or Recursive viz:
-/// IncompleteUsedCount==0, the member's encoding is added to the cache.
-/// Else the member is part of a recursive type and thus the recursion has
-/// been exited too soon for the encoding to be correct for the member.
-///
-class TypeStringCache {
- enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
- struct Entry {
- std::string Str; // The encoded TypeString for the type.
- enum Status State; // Information about the encoding in 'Str'.
- std::string Swapped; // A temporary place holder for a Recursive encoding
- // during the expansion of RecordType's members.
- };
- std::map<const IdentifierInfo *, struct Entry> Map;
- unsigned IncompleteCount; // Number of Incomplete entries in the Map.
- unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
-public:
- TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
- void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
- bool removeIncomplete(const IdentifierInfo *ID);
- void addIfComplete(const IdentifierInfo *ID, StringRef Str,
- bool IsRecursive);
- StringRef lookupStr(const IdentifierInfo *ID);
-};
-
-/// TypeString encodings for enum & union fields must be order.
-/// FieldEncoding is a helper for this ordering process.
-class FieldEncoding {
- bool HasName;
- std::string Enc;
-public:
- FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
- StringRef str() { return Enc; }
- bool operator<(const FieldEncoding &rhs) const {
- if (HasName != rhs.HasName) return HasName;
- return Enc < rhs.Enc;
- }
-};
-
-class XCoreABIInfo : public DefaultABIInfo {
-public:
- XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-};
-
-class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
- mutable TypeStringCache TSC;
- void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
- const CodeGen::CodeGenModule &M) const;
-
-public:
- XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {}
- void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
- const llvm::MapVector<GlobalDecl, StringRef>
- &MangledDeclNames) const override;
-};
-
-} // End anonymous namespace.
-
-// TODO: this implementation is likely now redundant with the default
-// EmitVAArg.
-Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- CGBuilderTy &Builder = CGF.Builder;
-
- // Get the VAList.
- CharUnits SlotSize = CharUnits::fromQuantity(4);
- Address AP = Address(Builder.CreateLoad(VAListAddr),
- getVAListElementType(CGF), SlotSize);
-
- // Handle the argument.
- ABIArgInfo AI = classifyArgumentType(Ty);
- CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
- llvm::Type *ArgTy = CGT.ConvertType(Ty);
- if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
- AI.setCoerceToType(ArgTy);
- llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
-
- Address Val = Address::invalid();
- CharUnits ArgSize = CharUnits::Zero();
- switch (AI.getKind()) {
- case ABIArgInfo::Expand:
- case ABIArgInfo::CoerceAndExpand:
- case ABIArgInfo::InAlloca:
- llvm_unreachable("Unsupported ABI kind for va_arg");
- case ABIArgInfo::Ignore:
- Val = Address(llvm::UndefValue::get(ArgPtrTy), ArgTy, TypeAlign);
- ArgSize = CharUnits::Zero();
- break;
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct:
- Val = Builder.CreateElementBitCast(AP, ArgTy);
- ArgSize = CharUnits::fromQuantity(
- getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
- ArgSize = ArgSize.alignTo(SlotSize);
- break;
- case ABIArgInfo::Indirect:
- case ABIArgInfo::IndirectAliased:
- Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
- Val = Address(Builder.CreateLoad(Val), ArgTy, TypeAlign);
- ArgSize = SlotSize;
- break;
- }
-
- // Increment the VAList.
- if (!ArgSize.isZero()) {
- Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
- Builder.CreateStore(APN.getPointer(), VAListAddr);
- }
-
- return Val;
-}
-
-/// During the expansion of a RecordType, an incomplete TypeString is placed
-/// into the cache as a means to identify and break recursion.
-/// If there is a Recursive encoding in the cache, it is swapped out and will
-/// be reinserted by removeIncomplete().
-/// All other types of encoding should have been used rather than arriving here.
-void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
- std::string StubEnc) {
- if (!ID)
- return;
- Entry &E = Map[ID];
- assert( (E.Str.empty() || E.State == Recursive) &&
- "Incorrectly use of addIncomplete");
- assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
- E.Swapped.swap(E.Str); // swap out the Recursive
- E.Str.swap(StubEnc);
- E.State = Incomplete;
- ++IncompleteCount;
-}
-
-/// Once the RecordType has been expanded, the temporary incomplete TypeString
-/// must be removed from the cache.
-/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
-/// Returns true if the RecordType was defined recursively.
-bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
- if (!ID)
- return false;
- auto I = Map.find(ID);
- assert(I != Map.end() && "Entry not present");
- Entry &E = I->second;
- assert( (E.State == Incomplete ||
- E.State == IncompleteUsed) &&
- "Entry must be an incomplete type");
- bool IsRecursive = false;
- if (E.State == IncompleteUsed) {
- // We made use of our Incomplete encoding, thus we are recursive.
- IsRecursive = true;
- --IncompleteUsedCount;
- }
- if (E.Swapped.empty())
- Map.erase(I);
- else {
- // Swap the Recursive back.
- E.Swapped.swap(E.Str);
- E.Swapped.clear();
- E.State = Recursive;
- }
- --IncompleteCount;
- return IsRecursive;
-}
-
-/// Add the encoded TypeString to the cache only if it is NonRecursive or
-/// Recursive (viz: all sub-members were expanded as fully as possible).
-void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
- bool IsRecursive) {
- if (!ID || IncompleteUsedCount)
- return; // No key or it is an incomplete sub-type so don't add.
- Entry &E = Map[ID];
- if (IsRecursive && !E.Str.empty()) {
- assert(E.State==Recursive && E.Str.size() == Str.size() &&
- "This is not the same Recursive entry");
- // The parent container was not recursive after all, so we could have used
- // this Recursive sub-member entry after all, but we assumed the worse when
- // we started viz: IncompleteCount!=0.
- return;
- }
- assert(E.Str.empty() && "Entry already present");
- E.Str = Str.str();
- E.State = IsRecursive? Recursive : NonRecursive;
-}
-
-/// Return a cached TypeString encoding for the ID. If there isn't one, or we
-/// are recursively expanding a type (IncompleteCount != 0) and the cached
-/// encoding is Recursive, return an empty StringRef.
-StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
- if (!ID)
- return StringRef(); // We have no key.
- auto I = Map.find(ID);
- if (I == Map.end())
- return StringRef(); // We have no encoding.
- Entry &E = I->second;
- if (E.State == Recursive && IncompleteCount)
- return StringRef(); // We don't use Recursive encodings for member types.
-
- if (E.State == Incomplete) {
- // The incomplete type is being used to break out of recursion.
- E.State = IncompleteUsed;
- ++IncompleteUsedCount;
- }
- return E.Str;
-}
-
-/// The XCore ABI includes a type information section that communicates symbol
-/// type information to the linker. The linker uses this information to verify
-/// safety/correctness of things such as array bound and pointers et al.
-/// The ABI only requires C (and XC) language modules to emit TypeStrings.
-/// This type information (TypeString) is emitted into meta data for all global
-/// symbols: definitions, declarations, functions & variables.
-///
-/// The TypeString carries type, qualifier, name, size & value details.
-/// Please see 'Tools Development Guide' section 2.16.2 for format details:
-/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
-/// The output is tested by test/CodeGen/xcore-stringtype.c.
-///
-static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC);
-
-/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
-void XCoreTargetCodeGenInfo::emitTargetMD(
- const Decl *D, llvm::GlobalValue *GV,
- const CodeGen::CodeGenModule &CGM) const {
- SmallStringEnc Enc;
- if (getTypeString(Enc, D, CGM, TSC)) {
- llvm::LLVMContext &Ctx = CGM.getModule().getContext();
- llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
- llvm::MDString::get(Ctx, Enc.str())};
- llvm::NamedMDNode *MD =
- CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
- MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
- }
-}
-
-void XCoreTargetCodeGenInfo::emitTargetMetadata(
- CodeGen::CodeGenModule &CGM,
- const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
- // Warning, new MangledDeclNames may be appended within this loop.
- // We rely on MapVector insertions adding new elements to the end
- // of the container.
- for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
- auto Val = *(MangledDeclNames.begin() + I);
- llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second);
- if (GV) {
- const Decl *D = Val.first.getDecl()->getMostRecentDecl();
- emitTargetMD(D, GV, CGM);
- }
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Base ABI and target codegen info implementation common between SPIR and
-// SPIR-V.
-//===----------------------------------------------------------------------===//
-
-namespace {
-class CommonSPIRABIInfo : public DefaultABIInfo {
-public:
- CommonSPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }
-
-private:
- void setCCs();
-};
-
-class SPIRVABIInfo : public CommonSPIRABIInfo {
-public:
- SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
- void computeInfo(CGFunctionInfo &FI) const override;
-
-private:
- ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
-};
-} // end anonymous namespace
-namespace {
-class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
- CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo)
- : TargetCodeGenInfo(std::move(ABIInfo)) {}
-
- LangAS getASTAllocaAddressSpace() const override {
- return getLangASFromTargetAS(
- getABIInfo().getDataLayout().getAllocaAddrSpace());
- }
-
- unsigned getOpenCLKernelCallingConv() const override;
-};
-class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
-public:
- SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {}
- void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
-};
-} // End anonymous namespace.
-
-void CommonSPIRABIInfo::setCCs() {
- assert(getRuntimeCC() == llvm::CallingConv::C);
- RuntimeCC = llvm::CallingConv::SPIR_FUNC;
-}
-
-ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
- if (getContext().getLangOpts().CUDAIsDevice) {
- // Coerce pointer arguments with default address space to CrossWorkGroup
- // pointers for HIPSPV/CUDASPV. When the language mode is HIP/CUDA, the
- // SPIRTargetInfo maps cuda_device to SPIR-V's CrossWorkGroup address space.
- llvm::Type *LTy = CGT.ConvertType(Ty);
- auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
- auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device);
- auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
- if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
- LTy = llvm::PointerType::getWithSamePointeeType(PtrTy, GlobalAS);
- return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
- }
-
- // Force copying aggregate type in kernel arguments by value when
- // compiling CUDA targeting SPIR-V. This is required for the object
- // copied to be valid on the device.
- // This behavior follows the CUDA spec
- // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#global-function-argument-processing,
- // and matches the NVPTX implementation.
- if (isAggregateTypeForABI(Ty))
- return getNaturalAlignIndirect(Ty, /* byval */ true);
- }
- return classifyArgumentType(Ty);
-}
-
-void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
- // The logic is same as in DefaultABIInfo with an exception on the kernel
- // arguments handling.
- llvm::CallingConv::ID CC = FI.getCallingConvention();
-
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
-
- for (auto &I : FI.arguments()) {
- if (CC == llvm::CallingConv::SPIR_KERNEL) {
- I.info = classifyKernelArgumentType(I.type);
- } else {
- I.info = classifyArgumentType(I.type);
- }
- }
-}
-
-namespace clang {
-namespace CodeGen {
-void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
- if (CGM.getTarget().getTriple().isSPIRV())
- SPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
- else
- CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
-}
-}
-}
-
-unsigned CommonSPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
- return llvm::CallingConv::SPIR_KERNEL;
-}
-
-void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
- const FunctionType *&FT) const {
- // Convert HIP kernels to SPIR-V kernels.
- if (getABIInfo().getContext().getLangOpts().HIP) {
- FT = getABIInfo().getContext().adjustFunctionType(
- FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
- return;
- }
-}
-
-static bool appendType(SmallStringEnc &Enc, QualType QType,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC);
-
-/// Helper function for appendRecordType().
-/// Builds a SmallVector containing the encoded field types in declaration
-/// order.
-static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
- const RecordDecl *RD,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC) {
- for (const auto *Field : RD->fields()) {
- SmallStringEnc Enc;
- Enc += "m(";
- Enc += Field->getName();
- Enc += "){";
- if (Field->isBitField()) {
- Enc += "b(";
- llvm::raw_svector_ostream OS(Enc);
- OS << Field->getBitWidthValue(CGM.getContext());
- Enc += ':';
- }
- if (!appendType(Enc, Field->getType(), CGM, TSC))
- return false;
- if (Field->isBitField())
- Enc += ')';
- Enc += '}';
- FE.emplace_back(!Field->getName().empty(), Enc);
- }
- return true;
-}
-
-/// Appends structure and union types to Enc and adds encoding to cache.
-/// Recursively calls appendType (via extractFieldType) for each field.
-/// Union types have their fields ordered according to the ABI.
-static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC, const IdentifierInfo *ID) {
- // Append the cached TypeString if we have one.
- StringRef TypeString = TSC.lookupStr(ID);
- if (!TypeString.empty()) {
- Enc += TypeString;
- return true;
- }
-
- // Start to emit an incomplete TypeString.
- size_t Start = Enc.size();
- Enc += (RT->isUnionType()? 'u' : 's');
- Enc += '(';
- if (ID)
- Enc += ID->getName();
- Enc += "){";
-
- // We collect all encoded fields and order as necessary.
- bool IsRecursive = false;
- const RecordDecl *RD = RT->getDecl()->getDefinition();
- if (RD && !RD->field_empty()) {
- // An incomplete TypeString stub is placed in the cache for this RecordType
- // so that recursive calls to this RecordType will use it whilst building a
- // complete TypeString for this RecordType.
- SmallVector<FieldEncoding, 16> FE;
- std::string StubEnc(Enc.substr(Start).str());
- StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
- TSC.addIncomplete(ID, std::move(StubEnc));
- if (!extractFieldType(FE, RD, CGM, TSC)) {
- (void) TSC.removeIncomplete(ID);
- return false;
- }
- IsRecursive = TSC.removeIncomplete(ID);
- // The ABI requires unions to be sorted but not structures.
- // See FieldEncoding::operator< for sort algorithm.
- if (RT->isUnionType())
- llvm::sort(FE);
- // We can now complete the TypeString.
- unsigned E = FE.size();
- for (unsigned I = 0; I != E; ++I) {
- if (I)
- Enc += ',';
- Enc += FE[I].str();
- }
- }
- Enc += '}';
- TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
- return true;
-}
-
-/// Appends enum types to Enc and adds the encoding to the cache.
-static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
- TypeStringCache &TSC,
- const IdentifierInfo *ID) {
- // Append the cached TypeString if we have one.
- StringRef TypeString = TSC.lookupStr(ID);
- if (!TypeString.empty()) {
- Enc += TypeString;
- return true;
- }
-
- size_t Start = Enc.size();
- Enc += "e(";
- if (ID)
- Enc += ID->getName();
- Enc += "){";
-
- // We collect all encoded enumerations and order them alphanumerically.
- if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
- SmallVector<FieldEncoding, 16> FE;
- for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
- ++I) {
- SmallStringEnc EnumEnc;
- EnumEnc += "m(";
- EnumEnc += I->getName();
- EnumEnc += "){";
- I->getInitVal().toString(EnumEnc);
- EnumEnc += '}';
- FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
- }
- llvm::sort(FE);
- unsigned E = FE.size();
- for (unsigned I = 0; I != E; ++I) {
- if (I)
- Enc += ',';
- Enc += FE[I].str();
- }
- }
- Enc += '}';
- TSC.addIfComplete(ID, Enc.substr(Start), false);
- return true;
-}
-
-/// Appends type's qualifier to Enc.
-/// This is done prior to appending the type's encoding.
-static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
- // Qualifiers are emitted in alphabetical order.
- static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
- int Lookup = 0;
- if (QT.isConstQualified())
- Lookup += 1<<0;
- if (QT.isRestrictQualified())
- Lookup += 1<<1;
- if (QT.isVolatileQualified())
- Lookup += 1<<2;
- Enc += Table[Lookup];
-}
-
-/// Appends built-in types to Enc.
-static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
- const char *EncType;
- switch (BT->getKind()) {
- case BuiltinType::Void:
- EncType = "0";
- break;
- case BuiltinType::Bool:
- EncType = "b";
- break;
- case BuiltinType::Char_U:
- EncType = "uc";
- break;
- case BuiltinType::UChar:
- EncType = "uc";
- break;
- case BuiltinType::SChar:
- EncType = "sc";
- break;
- case BuiltinType::UShort:
- EncType = "us";
- break;
- case BuiltinType::Short:
- EncType = "ss";
- break;
- case BuiltinType::UInt:
- EncType = "ui";
- break;
- case BuiltinType::Int:
- EncType = "si";
- break;
- case BuiltinType::ULong:
- EncType = "ul";
- break;
- case BuiltinType::Long:
- EncType = "sl";
- break;
- case BuiltinType::ULongLong:
- EncType = "ull";
- break;
- case BuiltinType::LongLong:
- EncType = "sll";
- break;
- case BuiltinType::Float:
- EncType = "ft";
- break;
- case BuiltinType::Double:
- EncType = "d";
- break;
- case BuiltinType::LongDouble:
- EncType = "ld";
- break;
- default:
- return false;
- }
- Enc += EncType;
- return true;
-}
-
-/// Appends a pointer encoding to Enc before calling appendType for the pointee.
-static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC) {
- Enc += "p(";
- if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
- return false;
- Enc += ')';
- return true;
-}
-
-/// Appends array encoding to Enc before calling appendType for the element.
-static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
- const ArrayType *AT,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC, StringRef NoSizeEnc) {
- if (AT->getSizeModifier() != ArrayType::Normal)
- return false;
- Enc += "a(";
- if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
- CAT->getSize().toStringUnsigned(Enc);
- else
- Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
- Enc += ':';
- // The Qualifiers should be attached to the type rather than the array.
- appendQualifier(Enc, QT);
- if (!appendType(Enc, AT->getElementType(), CGM, TSC))
- return false;
- Enc += ')';
- return true;
-}
-
-/// Appends a function encoding to Enc, calling appendType for the return type
-/// and the arguments.
-static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC) {
- Enc += "f{";
- if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
- return false;
- Enc += "}(";
- if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
- // N.B. we are only interested in the adjusted param types.
- auto I = FPT->param_type_begin();
- auto E = FPT->param_type_end();
- if (I != E) {
- do {
- if (!appendType(Enc, *I, CGM, TSC))
- return false;
- ++I;
- if (I != E)
- Enc += ',';
- } while (I != E);
- if (FPT->isVariadic())
- Enc += ",va";
- } else {
- if (FPT->isVariadic())
- Enc += "va";
- else
- Enc += '0';
- }
- }
- Enc += ')';
- return true;
-}
-
-/// Handles the type's qualifier before dispatching a call to handle specific
-/// type encodings.
-static bool appendType(SmallStringEnc &Enc, QualType QType,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC) {
-
- QualType QT = QType.getCanonicalType();
-
- if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
- // The Qualifiers should be attached to the type rather than the array.
- // Thus we don't call appendQualifier() here.
- return appendArrayType(Enc, QT, AT, CGM, TSC, "");
-
- appendQualifier(Enc, QT);
-
- if (const BuiltinType *BT = QT->getAs<BuiltinType>())
- return appendBuiltinType(Enc, BT);
-
- if (const PointerType *PT = QT->getAs<PointerType>())
- return appendPointerType(Enc, PT, CGM, TSC);
-
- if (const EnumType *ET = QT->getAs<EnumType>())
- return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
-
- if (const RecordType *RT = QT->getAsStructureType())
- return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
-
- if (const RecordType *RT = QT->getAsUnionType())
- return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
-
- if (const FunctionType *FT = QT->getAs<FunctionType>())
- return appendFunctionType(Enc, FT, CGM, TSC);
-
- return false;
-}
-
-static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC) {
- if (!D)
- return false;
-
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- if (FD->getLanguageLinkage() != CLanguageLinkage)
- return false;
- return appendType(Enc, FD->getType(), CGM, TSC);
- }
-
- if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
- if (VD->getLanguageLinkage() != CLanguageLinkage)
- return false;
- QualType QT = VD->getType().getCanonicalType();
- if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
- // Global ArrayTypes are given a size of '*' if the size is unknown.
- // The Qualifiers should be attached to the type rather than the array.
- // Thus we don't call appendQualifier() here.
- return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
- }
- return appendType(Enc, QT, CGM, TSC);
- }
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// RISCV ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-class RISCVABIInfo : public DefaultABIInfo {
-private:
- // Size of the integer ('x') registers in bits.
- unsigned XLen;
- // Size of the floating point ('f') registers in bits. Note that the target
- // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
- // with soft float ABI has FLen==0).
- unsigned FLen;
- static const int NumArgGPRs = 8;
- static const int NumArgFPRs = 8;
- bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
- llvm::Type *&Field1Ty,
- CharUnits &Field1Off,
- llvm::Type *&Field2Ty,
- CharUnits &Field2Off) const;
-
-public:
- RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
- : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}
-
- // DefaultABIInfo's classifyReturnType and classifyArgumentType are
- // non-virtual, but computeInfo is virtual, so we overload it.
- void computeInfo(CGFunctionInfo &FI) const override;
-
- ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
- int &ArgFPRsLeft) const;
- ABIArgInfo classifyReturnType(QualType RetTy) const;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- ABIArgInfo extendType(QualType Ty) const;
-
- bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
- CharUnits &Field1Off, llvm::Type *&Field2Ty,
- CharUnits &Field2Off, int &NeededArgGPRs,
- int &NeededArgFPRs) const;
- ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
- CharUnits Field1Off,
- llvm::Type *Field2Ty,
- CharUnits Field2Off) const;
-};
-} // end anonymous namespace
-
-void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
- QualType RetTy = FI.getReturnType();
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(RetTy);
-
- // IsRetIndirect is true if classifyArgumentType indicated the value should
- // be passed indirect, or if the type size is a scalar greater than 2*XLen
- // and not a complex type with elements <= FLen. e.g. fp128 is passed direct
- // in LLVM IR, relying on the backend lowering code to rewrite the argument
- // list and pass indirectly on RV32.
- bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
- if (!IsRetIndirect && RetTy->isScalarType() &&
- getContext().getTypeSize(RetTy) > (2 * XLen)) {
- if (RetTy->isComplexType() && FLen) {
- QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
- IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
- } else {
- // This is a normal scalar > 2*XLen, such as fp128 on RV32.
- IsRetIndirect = true;
- }
- }
-
- int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
- int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
- int NumFixedArgs = FI.getNumRequiredArgs();
-
- int ArgNum = 0;
- for (auto &ArgInfo : FI.arguments()) {
- bool IsFixed = ArgNum < NumFixedArgs;
- ArgInfo.info =
- classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
- ArgNum++;
- }
-}
-
-// Returns true if the struct is a potential candidate for the floating point
-// calling convention. If this function returns true, the caller is
-// responsible for checking that if there is only a single field then that
-// field is a float.
-bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
- llvm::Type *&Field1Ty,
- CharUnits &Field1Off,
- llvm::Type *&Field2Ty,
- CharUnits &Field2Off) const {
- bool IsInt = Ty->isIntegralOrEnumerationType();
- bool IsFloat = Ty->isRealFloatingType();
-
- if (IsInt || IsFloat) {
- uint64_t Size = getContext().getTypeSize(Ty);
- if (IsInt && Size > XLen)
- return false;
- // Can't be eligible if larger than the FP registers. Half precision isn't
- // currently supported on RISC-V and the ABI hasn't been confirmed, so
- // default to the integer ABI in that case.
- if (IsFloat && (Size > FLen || Size < 32))
- return false;
- // Can't be eligible if an integer type was already found (int+int pairs
- // are not eligible).
- if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
- return false;
- if (!Field1Ty) {
- Field1Ty = CGT.ConvertType(Ty);
- Field1Off = CurOff;
- return true;
- }
- if (!Field2Ty) {
- Field2Ty = CGT.ConvertType(Ty);
- Field2Off = CurOff;
- return true;
- }
- return false;
- }
-
- if (auto CTy = Ty->getAs<ComplexType>()) {
- if (Field1Ty)
- return false;
- QualType EltTy = CTy->getElementType();
- if (getContext().getTypeSize(EltTy) > FLen)
- return false;
- Field1Ty = CGT.ConvertType(EltTy);
- Field1Off = CurOff;
- Field2Ty = Field1Ty;
- Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
- return true;
- }
-
- if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
- uint64_t ArraySize = ATy->getSize().getZExtValue();
- QualType EltTy = ATy->getElementType();
- CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
- for (uint64_t i = 0; i < ArraySize; ++i) {
- bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
- Field1Off, Field2Ty, Field2Off);
- if (!Ret)
- return false;
- CurOff += EltSize;
- }
- return true;
- }
-
- if (const auto *RTy = Ty->getAs<RecordType>()) {
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are not eligible for the FP calling convention.
- if (getRecordArgABI(Ty, CGT.getCXXABI()))
- return false;
- if (isEmptyRecord(getContext(), Ty, true))
- return true;
- const RecordDecl *RD = RTy->getDecl();
- // Unions aren't eligible unless they're empty (which is caught above).
- if (RD->isUnion())
- return false;
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- for (const CXXBaseSpecifier &B : CXXRD->bases()) {
- const auto *BDecl =
- cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
- CharUnits BaseOff = Layout.getBaseClassOffset(BDecl);
- bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
- Field1Ty, Field1Off, Field2Ty,
- Field2Off);
- if (!Ret)
- return false;
- }
- }
- int ZeroWidthBitFieldCount = 0;
- for (const FieldDecl *FD : RD->fields()) {
- uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
- QualType QTy = FD->getType();
- if (FD->isBitField()) {
- unsigned BitWidth = FD->getBitWidthValue(getContext());
- // Allow a bitfield with a type greater than XLen as long as the
- // bitwidth is XLen or less.
- if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
- QTy = getContext().getIntTypeForBitwidth(XLen, false);
- if (BitWidth == 0) {
- ZeroWidthBitFieldCount++;
- continue;
- }
- }
-
- bool Ret = detectFPCCEligibleStructHelper(
- QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
- Field1Ty, Field1Off, Field2Ty, Field2Off);
- if (!Ret)
- return false;
-
- // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
- // or int+fp structs, but are ignored for a struct with an fp field and
- // any number of zero-width bitfields.
- if (Field2Ty && ZeroWidthBitFieldCount > 0)
- return false;
- }
- return Field1Ty != nullptr;
- }
-
- return false;
-}
-
-// Determine if a struct is eligible for passing according to the floating
-// point calling convention (i.e., when flattened it contains a single fp
-// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
-// NeededArgGPRs are incremented appropriately.
-bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
- CharUnits &Field1Off,
- llvm::Type *&Field2Ty,
- CharUnits &Field2Off,
- int &NeededArgGPRs,
- int &NeededArgFPRs) const {
- Field1Ty = nullptr;
- Field2Ty = nullptr;
- NeededArgGPRs = 0;
- NeededArgFPRs = 0;
- bool IsCandidate = detectFPCCEligibleStructHelper(
- Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
- // Not really a candidate if we have a single int but no float.
- if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
- return false;
- if (!IsCandidate)
- return false;
- if (Field1Ty && Field1Ty->isFloatingPointTy())
- NeededArgFPRs++;
- else if (Field1Ty)
- NeededArgGPRs++;
- if (Field2Ty && Field2Ty->isFloatingPointTy())
- NeededArgFPRs++;
- else if (Field2Ty)
- NeededArgGPRs++;
- return true;
-}
-
-// Call getCoerceAndExpand for the two-element flattened struct described by
-// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
-// appropriate coerceToType and unpaddedCoerceToType.
-ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
- llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
- CharUnits Field2Off) const {
- SmallVector<llvm::Type *, 3> CoerceElts;
- SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
- if (!Field1Off.isZero())
- CoerceElts.push_back(llvm::ArrayType::get(
- llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));
-
- CoerceElts.push_back(Field1Ty);
- UnpaddedCoerceElts.push_back(Field1Ty);
-
- if (!Field2Ty) {
- return ABIArgInfo::getCoerceAndExpand(
- llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
- UnpaddedCoerceElts[0]);
- }
-
- CharUnits Field2Align =
- CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
- CharUnits Field1End = Field1Off +
- CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
- CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);
-
- CharUnits Padding = CharUnits::Zero();
- if (Field2Off > Field2OffNoPadNoPack)
- Padding = Field2Off - Field2OffNoPadNoPack;
- else if (Field2Off != Field2Align && Field2Off > Field1End)
- Padding = Field2Off - Field1End;
-
- bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
-
- if (!Padding.isZero())
- CoerceElts.push_back(llvm::ArrayType::get(
- llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));
-
- CoerceElts.push_back(Field2Ty);
- UnpaddedCoerceElts.push_back(Field2Ty);
-
- auto CoerceToType =
- llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
- auto UnpaddedCoerceToType =
- llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);
-
- return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
-}
-
-ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
- int &ArgGPRsLeft,
- int &ArgFPRsLeft) const {
- assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are always passed indirectly.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- if (ArgGPRsLeft)
- ArgGPRsLeft -= 1;
- return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
- CGCXXABI::RAA_DirectInMemory);
- }
-
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- uint64_t Size = getContext().getTypeSize(Ty);
-
- // Pass floating point values via FPRs if possible.
- if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
- FLen >= Size && ArgFPRsLeft) {
- ArgFPRsLeft--;
- return ABIArgInfo::getDirect();
- }
-
- // Complex types for the hard float ABI must be passed direct rather than
- // using CoerceAndExpand.
- if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
- QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
- if (getContext().getTypeSize(EltTy) <= FLen) {
- ArgFPRsLeft -= 2;
- return ABIArgInfo::getDirect();
- }
- }
-
- if (IsFixed && FLen && Ty->isStructureOrClassType()) {
- llvm::Type *Field1Ty = nullptr;
- llvm::Type *Field2Ty = nullptr;
- CharUnits Field1Off = CharUnits::Zero();
- CharUnits Field2Off = CharUnits::Zero();
- int NeededArgGPRs = 0;
- int NeededArgFPRs = 0;
- bool IsCandidate =
- detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
- NeededArgGPRs, NeededArgFPRs);
- if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
- NeededArgFPRs <= ArgFPRsLeft) {
- ArgGPRsLeft -= NeededArgGPRs;
- ArgFPRsLeft -= NeededArgFPRs;
- return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
- Field2Off);
- }
- }
-
- uint64_t NeededAlign = getContext().getTypeAlign(Ty);
- // Determine the number of GPRs needed to pass the current argument
- // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
- // register pairs, so may consume 3 registers.
- int NeededArgGPRs = 1;
- if (!IsFixed && NeededAlign == 2 * XLen)
- NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
- else if (Size > XLen && Size <= 2 * XLen)
- NeededArgGPRs = 2;
-
- if (NeededArgGPRs > ArgGPRsLeft) {
- NeededArgGPRs = ArgGPRsLeft;
- }
-
- ArgGPRsLeft -= NeededArgGPRs;
-
- if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // All integral types are promoted to XLen width
- if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
- return extendType(Ty);
- }
-
- if (const auto *EIT = Ty->getAs<BitIntType>()) {
- if (EIT->getNumBits() < XLen)
- return extendType(Ty);
- if (EIT->getNumBits() > 128 ||
- (!getContext().getTargetInfo().hasInt128Type() &&
- EIT->getNumBits() > 64))
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
- }
-
- return ABIArgInfo::getDirect();
- }
-
- // Aggregates which are <= 2*XLen will be passed in registers if possible,
- // so coerce to integers.
- if (Size <= 2 * XLen) {
- unsigned Alignment = getContext().getTypeAlign(Ty);
-
- // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
- // required, and a 2-element XLen array if only XLen alignment is required.
- if (Size <= XLen) {
- return ABIArgInfo::getDirect(
- llvm::IntegerType::get(getVMContext(), XLen));
- } else if (Alignment == 2 * XLen) {
- return ABIArgInfo::getDirect(
- llvm::IntegerType::get(getVMContext(), 2 * XLen));
- } else {
- return ABIArgInfo::getDirect(llvm::ArrayType::get(
- llvm::IntegerType::get(getVMContext(), XLen), 2));
- }
- }
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-}
-
-ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- int ArgGPRsLeft = 2;
- int ArgFPRsLeft = FLen ? 2 : 0;
-
- // The rules for return and argument types are the same, so defer to
- // classifyArgumentType.
- return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
- ArgFPRsLeft);
-}
-
-Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
-
- // Empty records are ignored for parameter passing purposes.
- if (isEmptyRecord(getContext(), Ty, true)) {
- Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr),
- getVAListElementType(CGF), SlotSize);
- Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
- return Addr;
- }
-
- auto TInfo = getContext().getTypeInfoInChars(Ty);
-
- // Arguments bigger than 2*Xlen bytes are passed indirectly.
- bool IsIndirect = TInfo.Width > 2 * SlotSize;
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
- SlotSize, /*AllowHigherAlign=*/true);
-}
-
-ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
- int TySize = getContext().getTypeSize(Ty);
- // RV64 ABI requires unsigned 32 bit integers to be sign extended.
- if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
- return ABIArgInfo::getSignExtend(Ty);
- return ABIArgInfo::getExtend(Ty);
-}
-
-namespace {
-class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
- unsigned FLen)
- : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD) return;
-
- const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
- if (!Attr)
- return;
-
- const char *Kind;
- switch (Attr->getInterrupt()) {
- case RISCVInterruptAttr::user: Kind = "user"; break;
- case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
- case RISCVInterruptAttr::machine: Kind = "machine"; break;
- }
-
- auto *Fn = cast<llvm::Function>(GV);
-
- Fn->addFnAttr("interrupt", Kind);
- }
-};
-} // namespace
-
-//===----------------------------------------------------------------------===//
-// VE ABI Implementation.
-//
-namespace {
-class VEABIInfo : public DefaultABIInfo {
-public:
- VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
-
-private:
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
- void computeInfo(CGFunctionInfo &FI) const override;
-};
-} // end anonymous namespace
-
-ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
- if (Ty->isAnyComplexType())
- return ABIArgInfo::getDirect();
- uint64_t Size = getContext().getTypeSize(Ty);
- if (Size < 64 && Ty->isIntegerType())
- return ABIArgInfo::getExtend(Ty);
- return DefaultABIInfo::classifyReturnType(Ty);
-}
-
-ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const {
- if (Ty->isAnyComplexType())
- return ABIArgInfo::getDirect();
- uint64_t Size = getContext().getTypeSize(Ty);
- if (Size < 64 && Ty->isIntegerType())
- return ABIArgInfo::getExtend(Ty);
- return DefaultABIInfo::classifyArgumentType(Ty);
-}
-
-void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &Arg : FI.arguments())
- Arg.info = classifyArgumentType(Arg.type);
-}
-
-namespace {
-class VETargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- VETargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {}
- // VE ABI requires the arguments of variadic and prototype-less functions
- // are passed in both registers and memory.
- bool isNoProtoCallVariadic(const CallArgList &args,
- const FunctionNoProtoType *fnType) const override {
- return true;
- }
-};
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
-// CSKY ABI Implementation
-//===----------------------------------------------------------------------===//
-namespace {
-class CSKYABIInfo : public DefaultABIInfo {
- static const int NumArgGPRs = 4;
- static const int NumArgFPRs = 4;
-
- static const unsigned XLen = 32;
- unsigned FLen;
-
-public:
- CSKYABIInfo(CodeGen::CodeGenTypes &CGT, unsigned FLen)
- : DefaultABIInfo(CGT), FLen(FLen) {}
-
- void computeInfo(CGFunctionInfo &FI) const override;
- ABIArgInfo classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
- int &ArgFPRsLeft,
- bool isReturnType = false) const;
- ABIArgInfo classifyReturnType(QualType RetTy) const;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-};
-
-} // end anonymous namespace
-
-void CSKYABIInfo::computeInfo(CGFunctionInfo &FI) const {
- QualType RetTy = FI.getReturnType();
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(RetTy);
-
- bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
-
- // We must track the number of GPRs used in order to conform to the CSKY
- // ABI, as integer scalars passed in registers should have signext/zeroext
- // when promoted.
- int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
- int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
-
- for (auto &ArgInfo : FI.arguments()) {
- ArgInfo.info = classifyArgumentType(ArgInfo.type, ArgGPRsLeft, ArgFPRsLeft);
- }
-}
-
-Address CSKYABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
-
- // Empty records are ignored for parameter passing purposes.
- if (isEmptyRecord(getContext(), Ty, true)) {
- Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr),
- getVAListElementType(CGF), SlotSize);
- Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
- return Addr;
- }
-
- auto TInfo = getContext().getTypeInfoInChars(Ty);
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, TInfo, SlotSize,
- /*AllowHigherAlign=*/true);
-}
-
-ABIArgInfo CSKYABIInfo::classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
- int &ArgFPRsLeft,
- bool isReturnType) const {
- assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are always passed indirectly.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- if (ArgGPRsLeft)
- ArgGPRsLeft -= 1;
- return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
- CGCXXABI::RAA_DirectInMemory);
- }
-
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- if (!Ty->getAsUnionType())
- if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
- return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
-
- uint64_t Size = getContext().getTypeSize(Ty);
- // Pass floating point values via FPRs if possible.
- if (Ty->isFloatingType() && !Ty->isComplexType() && FLen >= Size &&
- ArgFPRsLeft) {
- ArgFPRsLeft--;
- return ABIArgInfo::getDirect();
- }
-
- // Complex types for the hard float ABI must be passed direct rather than
- // using CoerceAndExpand.
- if (Ty->isComplexType() && FLen && !isReturnType) {
- QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
- if (getContext().getTypeSize(EltTy) <= FLen) {
- ArgFPRsLeft -= 2;
- return ABIArgInfo::getDirect();
- }
- }
-
- if (!isAggregateTypeForABI(Ty)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // All integral types are promoted to XLen width, unless passed on the
- // stack.
- if (Size < XLen && Ty->isIntegralOrEnumerationType())
- return ABIArgInfo::getExtend(Ty);
-
- if (const auto *EIT = Ty->getAs<BitIntType>()) {
- if (EIT->getNumBits() < XLen)
- return ABIArgInfo::getExtend(Ty);
- }
-
- return ABIArgInfo::getDirect();
- }
-
- // For argument type, the first 4*XLen parts of aggregate will be passed
- // in registers, and the rest will be passed in stack.
- // So we can coerce to integers directly and let backend handle it correctly.
- // For return type, aggregate which <= 2*XLen will be returned in registers.
- // Otherwise, aggregate will be returned indirectly.
- if (!isReturnType || (isReturnType && Size <= 2 * XLen)) {
- if (Size <= XLen) {
- return ABIArgInfo::getDirect(
- llvm::IntegerType::get(getVMContext(), XLen));
- } else {
- return ABIArgInfo::getDirect(llvm::ArrayType::get(
- llvm::IntegerType::get(getVMContext(), XLen), (Size + 31) / XLen));
- }
- }
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-}
-
-ABIArgInfo CSKYABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- int ArgGPRsLeft = 2;
- int ArgFPRsLeft = FLen ? 1 : 0;
-
- // The rules for return and argument types are the same, so defer to
- // classifyArgumentType.
- return classifyArgumentType(RetTy, ArgGPRsLeft, ArgFPRsLeft, true);
-}
-
-namespace {
-class CSKYTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- CSKYTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned FLen)
- : TargetCodeGenInfo(std::make_unique<CSKYABIInfo>(CGT, FLen)) {}
-};
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
-// BPF ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class BPFABIInfo : public DefaultABIInfo {
-public:
- BPFABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
-
- ABIArgInfo classifyArgumentType(QualType Ty) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- if (isAggregateTypeForABI(Ty)) {
- uint64_t Bits = getContext().getTypeSize(Ty);
- if (Bits == 0)
- return ABIArgInfo::getIgnore();
-
- // If the aggregate needs 1 or 2 registers, do not use reference.
- if (Bits <= 128) {
- llvm::Type *CoerceTy;
- if (Bits <= 64) {
- CoerceTy =
- llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
- } else {
- llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), 64);
- CoerceTy = llvm::ArrayType::get(RegTy, 2);
- }
- return ABIArgInfo::getDirect(CoerceTy);
- } else {
- return getNaturalAlignIndirect(Ty);
- }
- }
-
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- ASTContext &Context = getContext();
- if (const auto *EIT = Ty->getAs<BitIntType>())
- if (EIT->getNumBits() > Context.getTypeSize(Context.Int128Ty))
- return getNaturalAlignIndirect(Ty);
-
- return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
- }
-
- ABIArgInfo classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (isAggregateTypeForABI(RetTy))
- return getNaturalAlignIndirect(RetTy);
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- ASTContext &Context = getContext();
- if (const auto *EIT = RetTy->getAs<BitIntType>())
- if (EIT->getNumBits() > Context.getTypeSize(Context.Int128Ty))
- return getNaturalAlignIndirect(RetTy);
-
- // Caller will do necessary sign/zero extension.
- return ABIArgInfo::getDirect();
- }
-
- void computeInfo(CGFunctionInfo &FI) const override {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
- }
-
-};
-
-class BPFTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- BPFTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<BPFABIInfo>(CGT)) {}
-
- const BPFABIInfo &getABIInfo() const {
- return static_cast<const BPFABIInfo&>(TargetCodeGenInfo::getABIInfo());
- }
-};
-
-}
-
-// LoongArch ABI Implementation. Documented at
-// https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html
-//
-//===----------------------------------------------------------------------===//
-
-namespace {
-class LoongArchABIInfo : public DefaultABIInfo {
-private:
- // Size of the integer ('r') registers in bits.
- unsigned GRLen;
- // Size of the floating point ('f') registers in bits.
- unsigned FRLen;
- // Number of general-purpose argument registers.
- static const int NumGARs = 8;
- // Number of floating-point argument registers.
- static const int NumFARs = 8;
- bool detectFARsEligibleStructHelper(QualType Ty, CharUnits CurOff,
- llvm::Type *&Field1Ty,
- CharUnits &Field1Off,
- llvm::Type *&Field2Ty,
- CharUnits &Field2Off) const;
-
-public:
- LoongArchABIInfo(CodeGen::CodeGenTypes &CGT, unsigned GRLen, unsigned FRLen)
- : DefaultABIInfo(CGT), GRLen(GRLen), FRLen(FRLen) {}
-
- void computeInfo(CGFunctionInfo &FI) const override;
-
- ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &GARsLeft,
- int &FARsLeft) const;
- ABIArgInfo classifyReturnType(QualType RetTy) const;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- ABIArgInfo extendType(QualType Ty) const;
-
- bool detectFARsEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
- CharUnits &Field1Off, llvm::Type *&Field2Ty,
- CharUnits &Field2Off, int &NeededArgGPRs,
- int &NeededArgFPRs) const;
- ABIArgInfo coerceAndExpandFARsEligibleStruct(llvm::Type *Field1Ty,
- CharUnits Field1Off,
- llvm::Type *Field2Ty,
- CharUnits Field2Off) const;
-};
-} // end anonymous namespace
-
-void LoongArchABIInfo::computeInfo(CGFunctionInfo &FI) const {
- QualType RetTy = FI.getReturnType();
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(RetTy);
-
- // IsRetIndirect is true if classifyArgumentType indicated the value should
- // be passed indirect, or if the type size is a scalar greater than 2*GRLen
- // and not a complex type with elements <= FRLen. e.g. fp128 is passed direct
- // in LLVM IR, relying on the backend lowering code to rewrite the argument
- // list and pass indirectly on LA32.
- bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
- if (!IsRetIndirect && RetTy->isScalarType() &&
- getContext().getTypeSize(RetTy) > (2 * GRLen)) {
- if (RetTy->isComplexType() && FRLen) {
- QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
- IsRetIndirect = getContext().getTypeSize(EltTy) > FRLen;
- } else {
- // This is a normal scalar > 2*GRLen, such as fp128 on LA32.
- IsRetIndirect = true;
- }
- }
-
- // We must track the number of GARs and FARs used in order to conform to the
- // LoongArch ABI. As GAR usage is different for variadic arguments, we must
- // also track whether we are examining a vararg or not.
- int GARsLeft = IsRetIndirect ? NumGARs - 1 : NumGARs;
- int FARsLeft = FRLen ? NumFARs : 0;
- int NumFixedArgs = FI.getNumRequiredArgs();
-
- int ArgNum = 0;
- for (auto &ArgInfo : FI.arguments()) {
- ArgInfo.info = classifyArgumentType(
- ArgInfo.type, /*IsFixed=*/ArgNum < NumFixedArgs, GARsLeft, FARsLeft);
- ArgNum++;
- }
-}
-
-// Returns true if the struct is a potential candidate to be passed in FARs (and
-// GARs). If this function returns true, the caller is responsible for checking
-// that if there is only a single field then that field is a float.
-bool LoongArchABIInfo::detectFARsEligibleStructHelper(
- QualType Ty, CharUnits CurOff, llvm::Type *&Field1Ty, CharUnits &Field1Off,
- llvm::Type *&Field2Ty, CharUnits &Field2Off) const {
- bool IsInt = Ty->isIntegralOrEnumerationType();
- bool IsFloat = Ty->isRealFloatingType();
-
- if (IsInt || IsFloat) {
- uint64_t Size = getContext().getTypeSize(Ty);
- if (IsInt && Size > GRLen)
- return false;
- // Can't be eligible if larger than the FP registers. Half precision isn't
- // currently supported on LoongArch and the ABI hasn't been confirmed, so
- // default to the integer ABI in that case.
- if (IsFloat && (Size > FRLen || Size < 32))
- return false;
- // Can't be eligible if an integer type was already found (int+int pairs
- // are not eligible).
- if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
- return false;
- if (!Field1Ty) {
- Field1Ty = CGT.ConvertType(Ty);
- Field1Off = CurOff;
- return true;
- }
- if (!Field2Ty) {
- Field2Ty = CGT.ConvertType(Ty);
- Field2Off = CurOff;
- return true;
- }
- return false;
- }
-
- if (auto CTy = Ty->getAs<ComplexType>()) {
- if (Field1Ty)
- return false;
- QualType EltTy = CTy->getElementType();
- if (getContext().getTypeSize(EltTy) > FRLen)
- return false;
- Field1Ty = CGT.ConvertType(EltTy);
- Field1Off = CurOff;
- Field2Ty = Field1Ty;
- Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
- return true;
- }
-
- if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
- uint64_t ArraySize = ATy->getSize().getZExtValue();
- QualType EltTy = ATy->getElementType();
- CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
- for (uint64_t i = 0; i < ArraySize; ++i) {
- if (!detectFARsEligibleStructHelper(EltTy, CurOff, Field1Ty, Field1Off,
- Field2Ty, Field2Off))
- return false;
- CurOff += EltSize;
- }
- return true;
- }
-
- if (const auto *RTy = Ty->getAs<RecordType>()) {
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are not eligible for the FP calling convention.
- if (getRecordArgABI(Ty, CGT.getCXXABI()))
- return false;
- if (isEmptyRecord(getContext(), Ty, true))
- return true;
- const RecordDecl *RD = RTy->getDecl();
- // Unions aren't eligible unless they're empty (which is caught above).
- if (RD->isUnion())
- return false;
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- for (const CXXBaseSpecifier &B : CXXRD->bases()) {
- const auto *BDecl =
- cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
- if (!detectFARsEligibleStructHelper(
- B.getType(), CurOff + Layout.getBaseClassOffset(BDecl),
- Field1Ty, Field1Off, Field2Ty, Field2Off))
- return false;
- }
- }
- for (const FieldDecl *FD : RD->fields()) {
- QualType QTy = FD->getType();
- if (FD->isBitField()) {
- unsigned BitWidth = FD->getBitWidthValue(getContext());
- // Zero-width bitfields are ignored.
- if (BitWidth == 0)
- continue;
- // Allow a bitfield with a type greater than GRLen as long as the
- // bitwidth is GRLen or less.
- if (getContext().getTypeSize(QTy) > GRLen && BitWidth <= GRLen) {
- QTy = getContext().getIntTypeForBitwidth(GRLen, false);
- }
- }
-
- if (!detectFARsEligibleStructHelper(
- QTy,
- CurOff + getContext().toCharUnitsFromBits(
- Layout.getFieldOffset(FD->getFieldIndex())),
- Field1Ty, Field1Off, Field2Ty, Field2Off))
- return false;
- }
- return Field1Ty != nullptr;
- }
-
- return false;
-}
-
-// Determine if a struct is eligible to be passed in FARs (and GARs) (i.e., when
-// flattened it contains a single fp value, fp+fp, or int+fp of appropriate
-// size). If so, NeededFARs and NeededGARs are incremented appropriately.
-bool LoongArchABIInfo::detectFARsEligibleStruct(
- QualType Ty, llvm::Type *&Field1Ty, CharUnits &Field1Off,
- llvm::Type *&Field2Ty, CharUnits &Field2Off, int &NeededGARs,
- int &NeededFARs) const {
- Field1Ty = nullptr;
- Field2Ty = nullptr;
- NeededGARs = 0;
- NeededFARs = 0;
- if (!detectFARsEligibleStructHelper(Ty, CharUnits::Zero(), Field1Ty,
- Field1Off, Field2Ty, Field2Off))
- return false;
- // Not really a candidate if we have a single int but no float.
- if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
- return false;
- if (Field1Ty && Field1Ty->isFloatingPointTy())
- NeededFARs++;
- else if (Field1Ty)
- NeededGARs++;
- if (Field2Ty && Field2Ty->isFloatingPointTy())
- NeededFARs++;
- else if (Field2Ty)
- NeededGARs++;
- return true;
-}
-
-// Call getCoerceAndExpand for the two-element flattened struct described by
-// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
-// appropriate coerceToType and unpaddedCoerceToType.
-ABIArgInfo LoongArchABIInfo::coerceAndExpandFARsEligibleStruct(
- llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
- CharUnits Field2Off) const {
- SmallVector<llvm::Type *, 3> CoerceElts;
- SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
- if (!Field1Off.isZero())
- CoerceElts.push_back(llvm::ArrayType::get(
- llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));
-
- CoerceElts.push_back(Field1Ty);
- UnpaddedCoerceElts.push_back(Field1Ty);
-
- if (!Field2Ty) {
- return ABIArgInfo::getCoerceAndExpand(
- llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
- UnpaddedCoerceElts[0]);
- }
-
- CharUnits Field2Align =
- CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
- CharUnits Field1End =
- Field1Off +
- CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
- CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);
-
- CharUnits Padding = CharUnits::Zero();
- if (Field2Off > Field2OffNoPadNoPack)
- Padding = Field2Off - Field2OffNoPadNoPack;
- else if (Field2Off != Field2Align && Field2Off > Field1End)
- Padding = Field2Off - Field1End;
-
- bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
-
- if (!Padding.isZero())
- CoerceElts.push_back(llvm::ArrayType::get(
- llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));
-
- CoerceElts.push_back(Field2Ty);
- UnpaddedCoerceElts.push_back(Field2Ty);
-
- return ABIArgInfo::getCoerceAndExpand(
- llvm::StructType::get(getVMContext(), CoerceElts, IsPacked),
- llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked));
-}
-
-ABIArgInfo LoongArchABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
- int &GARsLeft,
- int &FARsLeft) const {
- assert(GARsLeft <= NumGARs && "GAR tracking underflow");
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are always passed indirectly.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- if (GARsLeft)
- GARsLeft -= 1;
- return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
- CGCXXABI::RAA_DirectInMemory);
- }
-
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- uint64_t Size = getContext().getTypeSize(Ty);
-
- // Pass floating point values via FARs if possible.
- if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
- FRLen >= Size && FARsLeft) {
- FARsLeft--;
- return ABIArgInfo::getDirect();
- }
-
- // Complex types for the *f or *d ABI must be passed directly rather than
- // using CoerceAndExpand.
- if (IsFixed && Ty->isComplexType() && FRLen && FARsLeft >= 2) {
- QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
- if (getContext().getTypeSize(EltTy) <= FRLen) {
- FARsLeft -= 2;
- return ABIArgInfo::getDirect();
- }
- }
-
- if (IsFixed && FRLen && Ty->isStructureOrClassType()) {
- llvm::Type *Field1Ty = nullptr;
- llvm::Type *Field2Ty = nullptr;
- CharUnits Field1Off = CharUnits::Zero();
- CharUnits Field2Off = CharUnits::Zero();
- int NeededGARs = 0;
- int NeededFARs = 0;
- bool IsCandidate = detectFARsEligibleStruct(
- Ty, Field1Ty, Field1Off, Field2Ty, Field2Off, NeededGARs, NeededFARs);
- if (IsCandidate && NeededGARs <= GARsLeft && NeededFARs <= FARsLeft) {
- GARsLeft -= NeededGARs;
- FARsLeft -= NeededFARs;
- return coerceAndExpandFARsEligibleStruct(Field1Ty, Field1Off, Field2Ty,
- Field2Off);
- }
- }
-
- uint64_t NeededAlign = getContext().getTypeAlign(Ty);
- // Determine the number of GARs needed to pass the current argument
- // according to the ABI. 2*GRLen-aligned varargs are passed in "aligned"
- // register pairs, so may consume 3 registers.
- int NeededGARs = 1;
- if (!IsFixed && NeededAlign == 2 * GRLen)
- NeededGARs = 2 + (GARsLeft % 2);
- else if (Size > GRLen && Size <= 2 * GRLen)
- NeededGARs = 2;
-
- if (NeededGARs > GARsLeft)
- NeededGARs = GARsLeft;
-
- GARsLeft -= NeededGARs;
-
- if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // All integral types are promoted to GRLen width.
- if (Size < GRLen && Ty->isIntegralOrEnumerationType())
- return extendType(Ty);
-
- if (const auto *EIT = Ty->getAs<BitIntType>()) {
- if (EIT->getNumBits() < GRLen)
- return extendType(Ty);
- if (EIT->getNumBits() > 128 ||
- (!getContext().getTargetInfo().hasInt128Type() &&
- EIT->getNumBits() > 64))
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
- }
-
- return ABIArgInfo::getDirect();
- }
-
- // Aggregates which are <= 2*GRLen will be passed in registers if possible,
- // so coerce to integers.
- if (Size <= 2 * GRLen) {
- // Use a single GRLen int if possible, 2*GRLen if 2*GRLen alignment is
- // required, and a 2-element GRLen array if only GRLen alignment is
- // required.
- if (Size <= GRLen) {
- return ABIArgInfo::getDirect(
- llvm::IntegerType::get(getVMContext(), GRLen));
- }
- if (getContext().getTypeAlign(Ty) == 2 * GRLen) {
- return ABIArgInfo::getDirect(
- llvm::IntegerType::get(getVMContext(), 2 * GRLen));
- }
- return ABIArgInfo::getDirect(
- llvm::ArrayType::get(llvm::IntegerType::get(getVMContext(), GRLen), 2));
- }
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-}
-
-ABIArgInfo LoongArchABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
- // The rules for return and argument types are the same, so defer to
- // classifyArgumentType.
- int GARsLeft = 2;
- int FARsLeft = FRLen ? 2 : 0;
- return classifyArgumentType(RetTy, /*IsFixed=*/true, GARsLeft, FARsLeft);
-}
-
-Address LoongArchABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- CharUnits SlotSize = CharUnits::fromQuantity(GRLen / 8);
-
- // Empty records are ignored for parameter passing purposes.
- if (isEmptyRecord(getContext(), Ty, true)) {
- Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr),
- getVAListElementType(CGF), SlotSize);
- Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
- return Addr;
- }
-
- auto TInfo = getContext().getTypeInfoInChars(Ty);
-
- // Arguments bigger than 2*GRLen bytes are passed indirectly.
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty,
- /*IsIndirect=*/TInfo.Width > 2 * SlotSize, TInfo,
- SlotSize,
- /*AllowHigherAlign=*/true);
-}
-
-ABIArgInfo LoongArchABIInfo::extendType(QualType Ty) const {
- int TySize = getContext().getTypeSize(Ty);
- // LA64 ABI requires unsigned 32 bit integers to be sign extended.
- if (GRLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
- return ABIArgInfo::getSignExtend(Ty);
- return ABIArgInfo::getExtend(Ty);
-}
-
-namespace {
-class LoongArchTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- LoongArchTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned GRLen,
- unsigned FRLen)
- : TargetCodeGenInfo(
- std::make_unique<LoongArchABIInfo>(CGT, GRLen, FRLen)) {}
-};
-} // namespace
-
-//===----------------------------------------------------------------------===//
-// Driver code
-//===----------------------------------------------------------------------===//
-
-bool CodeGenModule::supportsCOMDAT() const {
- return getTriple().supportsCOMDAT();
-}
-
-const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
- if (TheTargetCodeGenInfo)
- return *TheTargetCodeGenInfo;
-
- // Helper to set the unique_ptr while still keeping the return value.
- auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
- this->TheTargetCodeGenInfo.reset(P);
- return *P;
- };
-
- const llvm::Triple &Triple = getTarget().getTriple();
- switch (Triple.getArch()) {
- default:
- return SetCGInfo(new DefaultTargetCodeGenInfo(Types));
-
- case llvm::Triple::le32:
- return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
- case llvm::Triple::m68k:
- return SetCGInfo(new M68kTargetCodeGenInfo(Types));
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- if (Triple.getOS() == llvm::Triple::NaCl)
- return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
- return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));
-
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));
-
- case llvm::Triple::avr: {
- // For passing parameters, R8~R25 are used on avr, and R18~R25 are used
- // on avrtiny. For passing return value, R18~R25 are used on avr, and
- // R22~R25 are used on avrtiny.
- unsigned NPR = getTarget().getABI() == "avrtiny" ? 6 : 18;
- unsigned NRR = getTarget().getABI() == "avrtiny" ? 4 : 8;
- return SetCGInfo(new AVRTargetCodeGenInfo(Types, NPR, NRR));
- }
-
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_32:
- case llvm::Triple::aarch64_be: {
- AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
- if (getTarget().getABI() == "darwinpcs")
- Kind = AArch64ABIInfo::DarwinPCS;
- else if (Triple.isOSWindows())
- return SetCGInfo(
- new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));
-
- return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
- }
-
- case llvm::Triple::wasm32:
- case llvm::Triple::wasm64: {
- WebAssemblyABIInfo::ABIKind Kind = WebAssemblyABIInfo::MVP;
- if (getTarget().getABI() == "experimental-mv")
- Kind = WebAssemblyABIInfo::ExperimentalMV;
- return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types, Kind));
- }
-
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb: {
- if (Triple.getOS() == llvm::Triple::Win32) {
- return SetCGInfo(
- new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
- }
-
- ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
- StringRef ABIStr = getTarget().getABI();
- if (ABIStr == "apcs-gnu")
- Kind = ARMABIInfo::APCS;
- else if (ABIStr == "aapcs16")
- Kind = ARMABIInfo::AAPCS16_VFP;
- else if (CodeGenOpts.FloatABI == "hard" ||
- (CodeGenOpts.FloatABI != "soft" &&
- (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
- Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
- Triple.getEnvironment() == llvm::Triple::EABIHF)))
- Kind = ARMABIInfo::AAPCS_VFP;
-
- return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
- }
-
- case llvm::Triple::ppc: {
- if (Triple.isOSAIX())
- return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ false));
-
- bool IsSoftFloat =
- CodeGenOpts.FloatABI == "soft" || getTarget().hasFeature("spe");
- bool RetSmallStructInRegABI =
- PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
- return SetCGInfo(
- new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
- }
- case llvm::Triple::ppcle: {
- bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
- bool RetSmallStructInRegABI =
- PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
- return SetCGInfo(
- new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
- }
- case llvm::Triple::ppc64:
- if (Triple.isOSAIX())
- return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ true));
-
- if (Triple.isOSBinFormatELF()) {
- PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
- if (getTarget().getABI() == "elfv2")
- Kind = PPC64_SVR4_ABIInfo::ELFv2;
- bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
-
- return SetCGInfo(
- new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat));
- }
- return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
- case llvm::Triple::ppc64le: {
- assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
- PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
- if (getTarget().getABI() == "elfv1")
- Kind = PPC64_SVR4_ABIInfo::ELFv1;
- bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
-
- return SetCGInfo(
- new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat));
- }
-
- case llvm::Triple::nvptx:
- case llvm::Triple::nvptx64:
- return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));
-
- case llvm::Triple::msp430:
- return SetCGInfo(new MSP430TargetCodeGenInfo(Types));
-
- case llvm::Triple::riscv32:
- case llvm::Triple::riscv64: {
- StringRef ABIStr = getTarget().getABI();
- unsigned XLen = getTarget().getPointerWidth(LangAS::Default);
- unsigned ABIFLen = 0;
- if (ABIStr.endswith("f"))
- ABIFLen = 32;
- else if (ABIStr.endswith("d"))
- ABIFLen = 64;
- return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
- }
-
- case llvm::Triple::systemz: {
- bool SoftFloat = CodeGenOpts.FloatABI == "soft";
- bool HasVector = !SoftFloat && getTarget().getABI() == "vector";
- return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector, SoftFloat));
- }
-
- case llvm::Triple::tce:
- case llvm::Triple::tcele:
- return SetCGInfo(new TCETargetCodeGenInfo(Types));
-
- case llvm::Triple::x86: {
- bool IsDarwinVectorABI = Triple.isOSDarwin();
- bool RetSmallStructInRegABI =
- X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
- bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
-
- if (Triple.getOS() == llvm::Triple::Win32) {
- return SetCGInfo(new WinX86_32TargetCodeGenInfo(
- Types, IsDarwinVectorABI, RetSmallStructInRegABI,
- IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
- } else {
- return SetCGInfo(new X86_32TargetCodeGenInfo(
- Types, IsDarwinVectorABI, RetSmallStructInRegABI,
- IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
- CodeGenOpts.FloatABI == "soft"));
- }
- }
-
- case llvm::Triple::x86_64: {
- StringRef ABI = getTarget().getABI();
- X86AVXABILevel AVXLevel =
- (ABI == "avx512"
- ? X86AVXABILevel::AVX512
- : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);
-
- switch (Triple.getOS()) {
- case llvm::Triple::Win32:
- return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
- default:
- return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
- }
- }
- case llvm::Triple::hexagon:
- return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
- case llvm::Triple::lanai:
- return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
- case llvm::Triple::r600:
- return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
- case llvm::Triple::amdgcn:
- return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
- case llvm::Triple::sparc:
- return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
- case llvm::Triple::sparcv9:
- return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
- case llvm::Triple::xcore:
- return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
- case llvm::Triple::arc:
- return SetCGInfo(new ARCTargetCodeGenInfo(Types));
- case llvm::Triple::spir:
- case llvm::Triple::spir64:
- return SetCGInfo(new CommonSPIRTargetCodeGenInfo(Types));
- case llvm::Triple::spirv32:
- case llvm::Triple::spirv64:
- return SetCGInfo(new SPIRVTargetCodeGenInfo(Types));
- case llvm::Triple::ve:
- return SetCGInfo(new VETargetCodeGenInfo(Types));
- case llvm::Triple::csky: {
- bool IsSoftFloat = !getTarget().hasFeature("hard-float-abi");
- bool hasFP64 = getTarget().hasFeature("fpuv2_df") ||
- getTarget().hasFeature("fpuv3_df");
- return SetCGInfo(new CSKYTargetCodeGenInfo(Types, IsSoftFloat ? 0
- : hasFP64 ? 64
- : 32));
- }
- case llvm::Triple::bpfeb:
- case llvm::Triple::bpfel:
- return SetCGInfo(new BPFTargetCodeGenInfo(Types));
- case llvm::Triple::loongarch32:
- case llvm::Triple::loongarch64: {
- StringRef ABIStr = getTarget().getABI();
- unsigned ABIFRLen = 0;
- if (ABIStr.endswith("f"))
- ABIFRLen = 32;
- else if (ABIStr.endswith("d"))
- ABIFRLen = 64;
- return SetCGInfo(new LoongArchTargetCodeGenInfo(
- Types, getTarget().getPointerWidth(LangAS::Default), ABIFRLen));
- }
- }
-}
-
/// Create an OpenCL kernel for an enqueued block.
///
/// The kernel has the same function type as the block invoke function. Its
/// name is the name of the block invoke function postfixed with "_kernel".
/// It simply calls the block invoke function then returns.
-llvm::Function *
-TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
- llvm::Function *Invoke,
- llvm::Type *BlockTy) const {
+llvm::Value *TargetCodeGenInfo::createEnqueuedBlockKernel(
+ CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
auto *InvokeFT = Invoke->getFunctionType();
auto &C = CGF.getLLVMContext();
std::string Name = Invoke->getName().str() + "_kernel";
@@ -12440,88 +183,38 @@ TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
InvokeFT->params(), false);
auto *F = llvm::Function::Create(FT, llvm::GlobalValue::ExternalLinkage, Name,
&CGF.CGM.getModule());
+ llvm::CallingConv::ID KernelCC =
+ CGF.getTypes().ClangCallConvToLLVMCallConv(CallingConv::CC_OpenCLKernel);
+ F->setCallingConv(KernelCC);
+
+ llvm::AttrBuilder KernelAttrs(C);
+
+ // FIXME: This is missing setTargetAttributes
+ CGF.CGM.addDefaultFunctionDefinitionAttributes(KernelAttrs);
+ F->addFnAttrs(KernelAttrs);
+
auto IP = CGF.Builder.saveIP();
auto *BB = llvm::BasicBlock::Create(C, "entry", F);
auto &Builder = CGF.Builder;
Builder.SetInsertPoint(BB);
llvm::SmallVector<llvm::Value *, 2> Args(llvm::make_pointer_range(F->args()));
- llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
- call->setCallingConv(Invoke->getCallingConv());
+ llvm::CallInst *Call = Builder.CreateCall(Invoke, Args);
+ Call->setCallingConv(Invoke->getCallingConv());
+
Builder.CreateRetVoid();
Builder.restoreIP(IP);
return F;
}
-/// Create an OpenCL kernel for an enqueued block.
-///
-/// The type of the first argument (the block literal) is the struct type
-/// of the block literal instead of a pointer type. The first argument
-/// (block literal) is passed directly by value to the kernel. The kernel
-/// allocates the same type of struct on stack and stores the block literal
-/// to it and passes its pointer to the block invoke function. The kernel
-/// has "enqueued-block" function attribute and kernel argument metadata.
-llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
- CodeGenFunction &CGF, llvm::Function *Invoke,
- llvm::Type *BlockTy) const {
- auto &Builder = CGF.Builder;
- auto &C = CGF.getLLVMContext();
-
- auto *InvokeFT = Invoke->getFunctionType();
- llvm::SmallVector<llvm::Type *, 2> ArgTys;
- llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
- llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
- llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
- llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
- llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
- llvm::SmallVector<llvm::Metadata *, 8> ArgNames;
-
- ArgTys.push_back(BlockTy);
- ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
- AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
- ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
- ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
- AccessQuals.push_back(llvm::MDString::get(C, "none"));
- ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
- for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
- ArgTys.push_back(InvokeFT->getParamType(I));
- ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
- AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
- AccessQuals.push_back(llvm::MDString::get(C, "none"));
- ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
- ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
- ArgNames.push_back(
- llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
- }
- std::string Name = Invoke->getName().str() + "_kernel";
- auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
- auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
- &CGF.CGM.getModule());
- F->addFnAttr("enqueued-block");
- auto IP = CGF.Builder.saveIP();
- auto *BB = llvm::BasicBlock::Create(C, "entry", F);
- Builder.SetInsertPoint(BB);
- const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
- auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
- BlockPtr->setAlignment(BlockAlign);
- Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
- auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
- llvm::SmallVector<llvm::Value *, 2> Args;
- Args.push_back(Cast);
- for (llvm::Argument &A : llvm::drop_begin(F->args()))
- Args.push_back(&A);
- llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
- call->setCallingConv(Invoke->getCallingConv());
- Builder.CreateRetVoid();
- Builder.restoreIP(IP);
-
- F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
- F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
- F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
- F->setMetadata("kernel_arg_base_type",
- llvm::MDNode::get(C, ArgBaseTypeNames));
- F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
- if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
- F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));
+namespace {
+class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
+};
+} // namespace
- return F;
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createDefaultTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<DefaultTargetCodeGenInfo>(CGM.getTypes());
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
index c7c1ec7fce7e..14ed5e5d2d2c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
@@ -52,6 +52,11 @@ protected:
// by returning true from TargetInfo::checkCallingConvention for them.
std::unique_ptr<SwiftABIInfo> SwiftInfo;
+ // Returns ABI info helper for the target. This is for use by derived classes.
+ template <typename T> const T &getABIInfo() const {
+ return static_cast<const T &>(*Info);
+ }
+
public:
TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info);
virtual ~TargetCodeGenInfo();
@@ -199,9 +204,10 @@ public:
/// Return a constant used by UBSan as a signature to identify functions
/// possessing type information, or 0 if the platform is unsupported.
+ /// This magic number is invalid instruction encoding in many targets.
virtual llvm::Constant *
getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const {
- return nullptr;
+ return llvm::ConstantInt::get(CGM.Int32Ty, 0xc105cafe);
}
/// Determine whether a call to an unprototyped functions under
@@ -339,7 +345,7 @@ public:
/// convention and ABI as an OpenCL kernel. The wrapper function accepts
/// block context and block arguments in target-specific way and calls
/// the original block invoke function.
- virtual llvm::Function *
+ virtual llvm::Value *
createEnqueuedBlockKernel(CodeGenFunction &CGF,
llvm::Function *BlockInvokeFunc,
llvm::Type *BlockTy) const;
@@ -349,6 +355,11 @@ public:
/// as 'used', and having internal linkage.
virtual bool shouldEmitStaticExternCAliases() const { return true; }
+ /// \return true if annonymous zero-sized bitfields should be emitted to
+ /// correctly distinguish between struct types whose memory layout is the
+ /// same, but whose layout may differ when used as argument passed by value
+ virtual bool shouldEmitDWARFBitFieldSeparators() const { return false; }
+
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const {}
/// Return the device-side type for the CUDA device builtin surface type.
@@ -362,6 +373,12 @@ public:
return nullptr;
}
+ /// Return the WebAssembly externref reference type.
+ virtual llvm::Type *getWasmExternrefReferenceType() const { return nullptr; }
+
+ /// Return the WebAssembly funcref reference type.
+ virtual llvm::Type *getWasmFuncrefReferenceType() const { return nullptr; }
+
/// Emit the device-side copy of the builtin surface type.
virtual bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF,
LValue Dst,
@@ -376,8 +393,163 @@ public:
// DO NOTHING by default.
return false;
}
+
+ /// Return an LLVM type that corresponds to an OpenCL type.
+ virtual llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const {
+ return nullptr;
+ }
+
+protected:
+ static std::string qualifyWindowsLibrary(StringRef Lib);
+
+ void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const;
+};
+
+std::unique_ptr<TargetCodeGenInfo>
+createDefaultTargetCodeGenInfo(CodeGenModule &CGM);
+
+enum class AArch64ABIKind {
+ AAPCS = 0,
+ DarwinPCS,
+ Win64,
+};
+
+std::unique_ptr<TargetCodeGenInfo>
+createAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind Kind);
+
+std::unique_ptr<TargetCodeGenInfo>
+createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind K);
+
+std::unique_ptr<TargetCodeGenInfo>
+createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createARCTargetCodeGenInfo(CodeGenModule &CGM);
+
+enum class ARMABIKind {
+ APCS = 0,
+ AAPCS = 1,
+ AAPCS_VFP = 2,
+ AAPCS16_VFP = 3,
};
+std::unique_ptr<TargetCodeGenInfo>
+createARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind Kind);
+
+std::unique_ptr<TargetCodeGenInfo>
+createWindowsARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind K);
+
+std::unique_ptr<TargetCodeGenInfo>
+createAVRTargetCodeGenInfo(CodeGenModule &CGM, unsigned NPR, unsigned NRR);
+
+std::unique_ptr<TargetCodeGenInfo>
+createBPFTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createCSKYTargetCodeGenInfo(CodeGenModule &CGM, unsigned FLen);
+
+std::unique_ptr<TargetCodeGenInfo>
+createHexagonTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createLanaiTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createLoongArchTargetCodeGenInfo(CodeGenModule &CGM, unsigned GRLen,
+ unsigned FLen);
+
+std::unique_ptr<TargetCodeGenInfo>
+createM68kTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createMIPSTargetCodeGenInfo(CodeGenModule &CGM, bool IsOS32);
+
+std::unique_ptr<TargetCodeGenInfo>
+createMSP430TargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createNVPTXTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createPNaClTargetCodeGenInfo(CodeGenModule &CGM);
+
+enum class PPC64_SVR4_ABIKind {
+ ELFv1 = 0,
+ ELFv2,
+};
+
+std::unique_ptr<TargetCodeGenInfo>
+createAIXTargetCodeGenInfo(CodeGenModule &CGM, bool Is64Bit);
+
+std::unique_ptr<TargetCodeGenInfo>
+createPPC32TargetCodeGenInfo(CodeGenModule &CGM, bool SoftFloatABI);
+
+std::unique_ptr<TargetCodeGenInfo>
+createPPC64TargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createPPC64_SVR4_TargetCodeGenInfo(CodeGenModule &CGM, PPC64_SVR4_ABIKind Kind,
+ bool SoftFloatABI);
+
+std::unique_ptr<TargetCodeGenInfo>
+createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen, unsigned FLen);
+
+std::unique_ptr<TargetCodeGenInfo>
+createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createSPIRVTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createSparcV8TargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createSparcV9TargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createSystemZTargetCodeGenInfo(CodeGenModule &CGM, bool HasVector,
+ bool SoftFloatABI);
+
+std::unique_ptr<TargetCodeGenInfo>
+createTCETargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createVETargetCodeGenInfo(CodeGenModule &CGM);
+
+enum class WebAssemblyABIKind {
+ MVP = 0,
+ ExperimentalMV = 1,
+};
+
+std::unique_ptr<TargetCodeGenInfo>
+createWebAssemblyTargetCodeGenInfo(CodeGenModule &CGM, WebAssemblyABIKind K);
+
+/// The AVX ABI level for X86 targets.
+enum class X86AVXABILevel {
+ None,
+ AVX,
+ AVX512,
+};
+
+std::unique_ptr<TargetCodeGenInfo> createX86_32TargetCodeGenInfo(
+ CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
+ unsigned NumRegisterParameters, bool SoftFloatABI);
+
+std::unique_ptr<TargetCodeGenInfo>
+createWinX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI,
+ bool Win32StructABI,
+ unsigned NumRegisterParameters);
+
+std::unique_ptr<TargetCodeGenInfo>
+createX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel);
+
+std::unique_ptr<TargetCodeGenInfo>
+createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel);
+
+std::unique_ptr<TargetCodeGenInfo>
+createXCoreTargetCodeGenInfo(CodeGenModule &CGM);
+
} // namespace CodeGen
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp
new file mode 100644
index 000000000000..561110ff8c0d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp
@@ -0,0 +1,824 @@
+//===- AArch64.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// AArch64 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
/// Argument/return classification for AArch64 (AAPCS64, Darwin PCS and the
/// Win64 variant).
class AArch64ABIInfo : public ABIInfo {
  // Which flavour of the AArch64 ABI this target uses.
  AArch64ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : ABIInfo(CGT), Kind(Kind) {}

private:
  AArch64ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }

  /// Decide how a value of type \p RetTy is returned.
  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
  /// Decide how an argument of type \p RetTy is passed.
  ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
                                  unsigned CallingConvention) const;
  /// Coerce a vector type that is not directly passable on AArch64 into a
  /// representation that is.
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  /// Whether \p Ty cannot be passed directly as a vector on this target.
  bool isIllegalVectorType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    // Give the C++ ABI the first chance at the return type (e.g. sret for
    // non-trivial classes); otherwise classify it ourselves.
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          classifyReturnType(FI.getReturnType(), FI.isVariadic());

    for (auto &it : FI.arguments())
      it.info = classifyArgumentType(it.type, FI.isVariadic(),
                                     FI.getCallingConvention());
  }

  Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                          CodeGenFunction &CGF) const;

  Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const;

  /// Dispatch va_arg emission to the ABI-variant-specific implementation.
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    // Scalable (SVE) types have no fixed size, so they cannot go through the
    // va_list machinery at all.
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    return Kind == AArch64ABIKind::Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
           : isDarwinPCS()               ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                                         : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
  }

  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                      QualType Ty) const override;

  bool allowBFloatArgsAndRet() const override {
    return getTarget().hasBFloat16Type();
  }
};
+
/// Swift calling-convention lowering for AArch64.
class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  /// Whether a vector of \p NumElts elements of \p EltTy, occupying
  /// \p VectorSize in total, is passable directly under the Swift CC.
  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};
+
/// IR-level target hooks for AArch64 beyond argument classification:
/// ObjC ARC markers, DWARF conventions, branch protection and inline-asm
/// operand handling.
class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // DWARF register number 31 is used for the stack pointer here.
    return 31;
  }

  bool doesReturnSlotInterfereWithArgs() const override { return false; }

  /// Translate a function-level "target" attribute's branch-protection
  /// options into the corresponding LLVM function attributes
  /// (sign-return-address, sign-return-address-key,
  /// branch-target-enforcement).
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const auto *TA = FD->getAttr<TargetAttr>();
    if (TA == nullptr)
      return;

    ParsedTargetAttr Attr =
        CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
    if (Attr.BranchProtection.empty())
      return;

    // NOTE(review): the validation result is only asserted on; presumably the
    // string was already checked earlier in the frontend — confirm.
    TargetInfo::BranchProtectionInfo BPI;
    StringRef Error;
    (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                   Attr.CPU, BPI, Error);
    assert(Error.empty());

    auto *Fn = cast<llvm::Function>(GV);
    // Indexed by the numeric value of BPI.SignReturnAddr.
    static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
    Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);

    if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
      Fn->addFnAttr("sign-return-address-key",
                    BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
                        ? "a_key"
                        : "b_key");
    }

    Fn->addFnAttr("branch-target-enforcement",
                  BPI.BranchTargetEnforcement ? "true" : "false");
  }

  /// Permit a single-element struct wrapping [8 x i64] as a direct inline-asm
  /// operand when the ls64 feature is available.
  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
    if (CGF.getTarget().hasFeature("ls64")) {
      auto *ST = dyn_cast<llvm::StructType>(Ty);
      if (ST && ST->getNumElements() == 1) {
        auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
        if (AT && AT->getNumElements() == 8 &&
            AT->getElementType()->isIntegerTy(64))
          return true;
      }
    }
    return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
  }
};
+
/// AArch64 hooks for Windows targets: adds MSVC-style embedded linker
/// options and stack-probe attributes on top of the common behaviour.
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  /// Emit an MSVC /DEFAULTLIB directive for an autolinked library.
  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  /// Emit an MSVC /FAILIFMISMATCH directive for a detect-mismatch pragma.
  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
+
+void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (GV->isDeclaration())
+ return;
+ addStackProbeTargetAttributes(D, GV, CGM);
+}
+}
+
+ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
+ assert(Ty->isVectorType() && "expected vector type!");
+
+ const auto *VT = Ty->castAs<VectorType>();
+ if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
+ assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
+ assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
+ BuiltinType::UChar &&
+ "unexpected builtin type for SVE predicate!");
+ return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
+ llvm::Type::getInt1Ty(getVMContext()), 16));
+ }
+
+ if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) {
+ assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
+
+ const auto *BT = VT->getElementType()->castAs<BuiltinType>();
+ llvm::ScalableVectorType *ResType = nullptr;
+ switch (BT->getKind()) {
+ default:
+ llvm_unreachable("unexpected builtin type for SVE vector!");
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getInt8Ty(getVMContext()), 16);
+ break;
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getInt16Ty(getVMContext()), 8);
+ break;
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), 4);
+ break;
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2);
+ break;
+ case BuiltinType::Half:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getHalfTy(getVMContext()), 8);
+ break;
+ case BuiltinType::Float:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getFloatTy(getVMContext()), 4);
+ break;
+ case BuiltinType::Double:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getDoubleTy(getVMContext()), 2);
+ break;
+ case BuiltinType::BFloat16:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getBFloatTy(getVMContext()), 8);
+ break;
+ }
+ return ABIArgInfo::getDirect(ResType);
+ }
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ // Android promotes <2 x i8> to i16, not i32
+ if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
+ llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size <= 32) {
+ llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 64) {
+ auto *ResType =
+ llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 128) {
+ auto *ResType =
+ llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
/// Decide how an argument of type \p Ty is passed under the active AArch64
/// ABI variant. \p IsVariadic and \p CallingConvention affect the Win64
/// variadic rules and HFA handling.
ABIArgInfo
AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
                                     unsigned CallingConvention) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // _BitInt wider than 128 bits cannot go in registers.
    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty);

    // Only Darwin PCS extends small integer arguments at the caller side.
    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty)
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Empty records are always ignored on Darwin, but actually passed in C++ mode
  // elsewhere for GNU compatibility.
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (IsEmpty || Size == 0) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    // GNU C mode. The only argument that gets ignored is an empty one with size
    // 0.
    if (IsEmpty && Size == 0)
      return ABIArgInfo::getIgnore();
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadic;
  // In variadic functions on Windows, all composite types are treated alike,
  // no special handling of HFAs/HVAs.
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    if (Kind != AArch64ABIKind::AAPCS)
      return ABIArgInfo::getDirect(
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For alignment adjusted HFAs, cap the argument alignment to 16, leave it
    // default otherwise.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
    Align = (Align > BaseAlign && Align >= 16) ? 16 : 0;
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, true, Align);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(Ty, getContext(), getVMContext());
    }
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      // AAPCS rounds the "natural" alignment to either 8 or 16 bytes.
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  // Anything larger than 16 bytes goes indirect by pointer.
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
+
/// Decide how a value of type \p RetTy is returned under the active AArch64
/// ABI variant.
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadic) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Fixed-length SVE vectors are returned as their scalable counterpart.
  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
        VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
      return coerceIllegalVector(RetTy);
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    // _BitInt wider than 128 bits is returned via memory.
    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy);

    // Only Darwin PCS extends small integer returns.
    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadic))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }

    if (Size <= 64 && getDataLayout().isLittleEndian()) {
      // Composite types are returned in lower bits of a 64-bit register for LE,
      // and in higher bits for BE. However, integer types are always returned
      // in lower bits for both LE and BE, and they are not rounded up to
      // 64-bits. We can skip rounding up of composite types for LE, but not for
      // BE, otherwise composite types will be indistinguishable from integer
      // types.
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  // Anything larger is returned via an sret-style pointer.
  return getNaturalAlignIndirect(RetTy);
}
+
+/// isIllegalVectorType - check whether the vector type is legal for AArch64.
+bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // Check whether VT is a fixed-length SVE vector. These types are
+ // represented as scalable vectors in function args/return and must be
+ // coerced from fixed vectors.
+ if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
+ VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ return true;
+
+ // Check whether VT is legal.
+ unsigned NumElements = VT->getNumElements();
+ uint64_t Size = getContext().getTypeSize(VT);
+ // NumElements should be power of 2.
+ if (!llvm::isPowerOf2_32(NumElements))
+ return true;
+
+ // arm64_32 has to be compatible with the ARM logic here, which allows huge
+ // vectors for some reason.
+ llvm::Triple Triple = getTarget().getTriple();
+ if (Triple.getArch() == llvm::Triple::aarch64_32 &&
+ Triple.isOSBinFormatMachO())
+ return Size <= 32;
+
+ return Size != 64 && (Size != 128 || NumElements == 1);
+ }
+ return false;
+}
+
+bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
+ llvm::Type *EltTy,
+ unsigned NumElts) const {
+ if (!llvm::isPowerOf2_32(NumElts))
+ return false;
+ if (VectorSize.getQuantity() != 8 &&
+ (VectorSize.getQuantity() != 16 || NumElts == 1))
+ return false;
+ return true;
+}
+
+bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ // Homogeneous aggregates for AAPCS64 must have base types of a floating
+ // point type or a short-vector type. This is the same as the 32-bit ABI,
+ // but with the difference that any floating-point type is allowed,
+ // including __fp16.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->isFloatingPoint())
+ return true;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ unsigned VecSize = getContext().getTypeSize(VT);
+ if (VecSize == 64 || VecSize == 128)
+ return true;
+ }
+ return false;
+}
+
+bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const {
+ return Members <= 4;
+}
+
+bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
+ const {
+ // AAPCS64 says that the rule for whether something is a homogeneous
+ // aggregate is applied to the output of the data layout decision. So
+ // anything that doesn't affect the data layout also does not affect
+ // homogeneity. In particular, zero-length bitfields don't stop a struct
+ // being homogeneous.
+ return true;
+}
+
/// Emit IR that loads a variadic argument of type \p Ty from an AAPCS64
/// va_list at \p VAListAddr. Produces a three-way control-flow diamond:
/// check the register-save area, fall back to the stack, then merge.
Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
                                       CGF.CurFnInfo->getCallingConvention());
  // Empty records are ignored for parameter passing purposes.
  if (AI.isIgnore()) {
    uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
    CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
    VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
    auto *Load = CGF.Builder.CreateLoad(VAListAddr);
    return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  // FP/SIMD values live in the V-register save area; everything else uses
  // the general-register save area.
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * NumRegs;
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
  // The fact that this is done unconditionally reflects the fact that
  // allocating an argument to the stack also uses up all the remaining
  // registers of the appropriate kind.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top = nullptr;
  Address reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
                   CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find from
    // stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements split
    // and stored 16-bytes apart regardless of size (they're notionally in qN,
    // qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.Align));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
          CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = LoadAddr.withElementType(BaseTy);

      Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = Tmp.withElementType(MemTy);
  } else {
    // Otherwise the object is contiguous in memory.

    // It might be right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = BaseAddr.withElementType(MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer and
  // floating-point ones might be affected.
  if (!IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);

    OnStackPtr = CGF.Builder.CreateAdd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackPtr = CGF.Builder.CreateAnd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
  }
  Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
                                std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg
  CGF.Builder.CreateStore(NewStack, stack_p);

  // Small scalars are right-aligned in their 8-byte slot on big-endian.
  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = OnStackAddr.withElementType(MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");

  // For indirect arguments the merged address holds a pointer to the real
  // object; load through it.
  if (IsIndirect)
    return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
                   TyAlign);

  return ResAddr;
}
+
/// Emit IR that loads a variadic argument of type \p Ty from a Darwin
/// (char *) style va_list at \p VAListAddr.
Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                        CodeGenFunction &CGF) const {
  // The backend's lowering doesn't support va_arg for aggregates or
  // illegal vector types. Lower VAArg here for these cases and use
  // the LLVM va_arg instruction for everything else.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());

  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true))
    return Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"),
                   CGF.ConvertTypeForMem(Ty), SlotSize);

  // The size of the actual thing passed, which might end up just
  // being a pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous
  // aggregates should be passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.Width.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          TyInfo, SlotSize, /*AllowHigherAlign*/ true);
}
+
+Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ bool IsIndirect = false;
+
+ // Composites larger than 16 bytes are passed by reference.
+ if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
+ IsIndirect = true;
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ CGF.getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(8),
+ /*allowHigherAlign*/ false);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
+ AArch64ABIKind Kind) {
+ return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
+ AArch64ABIKind K) {
+ return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/AMDGPU.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/AMDGPU.cpp
new file mode 100644
index 000000000000..796a2be81a09
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -0,0 +1,601 @@
+//===- AMDGPU.cpp ---------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// AMDGPU ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+// ABI classification for AMDGPU. Non-kernel arguments and returns draw from
+// a shared budget of 32-bit registers; kernel arguments use a dedicated
+// classification (see classifyKernelArgumentType).
+class AMDGPUABIInfo final : public DefaultABIInfo {
+private:
+  // Combined register budget (in 32-bit units) for direct argument/return
+  // passing before falling back to the default classification.
+  static const unsigned MaxNumRegsForArgsRet = 16;
+
+  // Estimate how many 32-bit registers Ty occupies when passed directly.
+  unsigned numRegsForType(QualType Ty) const;
+
+  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
+  bool isHomogeneousAggregateSmallEnough(const Type *Base,
+                                         uint64_t Members) const override;
+
+  // Coerce HIP scalar pointer arguments from generic pointers to global ones.
+  llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
+                                       unsigned ToAS) const {
+    // Single value types.
+    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
+    if (PtrTy && PtrTy->getAddressSpace() == FromAS)
+      return llvm::PointerType::get(Ty->getContext(), ToAS);
+    return Ty;
+  }
+
+public:
+  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
+    DefaultABIInfo(CGT) {}
+
+  ABIArgInfo classifyReturnType(QualType RetTy) const;
+  // Classify an argument of an AMDGPU_KERNEL-convention function.
+  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
+  // NumRegsLeft is an in/out running budget decremented per argument.
+  ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;
+
+  void computeInfo(CGFunctionInfo &FI) const override;
+  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                    QualType Ty) const override;
+};
+
+// Any member type may form a homogeneous aggregate on AMDGPU; the only
+// restriction is the register count checked in
+// isHomogeneousAggregateSmallEnough.
+bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+  return true;
+}
+
+bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
+    const Type *Base, uint64_t Members) const {
+  // Each member takes one 32-bit register per started dword of its size.
+  uint32_t RegsPerMember = (getContext().getTypeSize(Base) + 31) / 32;
+
+  // A homogeneous aggregate must fit the shared 16-register budget.
+  return Members * RegsPerMember <= MaxNumRegsForArgsRet;
+}
+
+/// Estimate number of registers the type will use when passed in registers.
+unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
+ unsigned NumRegs = 0;
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // Compute from the number of elements. The reported size is based on the
+ // in-memory size, which includes the padding 4th element for 3-vectors.
+ QualType EltTy = VT->getElementType();
+ unsigned EltSize = getContext().getTypeSize(EltTy);
+
+ // 16-bit element vectors should be passed as packed.
+ if (EltSize == 16)
+ return (VT->getNumElements() + 1) / 2;
+
+ unsigned EltNumRegs = (EltSize + 31) / 32;
+ return EltNumRegs * VT->getNumElements();
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ assert(!RD->hasFlexibleArrayMember());
+
+ for (const FieldDecl *Field : RD->fields()) {
+ QualType FieldTy = Field->getType();
+ NumRegs += numRegsForType(FieldTy);
+ }
+
+ return NumRegs;
+ }
+
+ return (getContext().getTypeSize(Ty) + 31) / 32;
+}
+
+void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
+  // Let the C++ ABI claim the return slot (e.g. sret) first.
+  if (!getCXXABI().classifyReturnType(FI))
+    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+  // Kernels use the dedicated kernarg classification; all other functions
+  // consume from a shared budget of registers.
+  const bool IsKernel =
+      FI.getCallingConvention() == llvm::CallingConv::AMDGPU_KERNEL;
+  unsigned RegsLeft = MaxNumRegsForArgsRet;
+  for (auto &Arg : FI.arguments())
+    Arg.info = IsKernel ? classifyKernelArgumentType(Arg.type)
+                        : classifyArgumentType(Arg.type, RegsLeft);
+}
+
+// AMDGPU has no variadic calling convention; reaching this is a front-end
+// error, so abort rather than emit bogus IR.
+Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                                 QualType Ty) const {
+  llvm_unreachable("AMDGPU does not support varargs");
+}
+
+// Classify a return value. Small aggregates are coerced into integer
+// register-shaped types; larger ones fall back to the default (possibly
+// indirect) classification.
+ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
+  if (isAggregateTypeForABI(RetTy)) {
+    // Records with non-trivial destructors/copy-constructors should not be
+    // returned by value.
+    if (!getRecordArgABI(RetTy, getCXXABI())) {
+      // Ignore empty structs/unions.
+      if (isEmptyRecord(getContext(), RetTy, true))
+        return ABIArgInfo::getIgnore();
+
+      // Lower single-element structs to just return a regular value.
+      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
+        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+      // Flexible array members preclude the coercions below.
+      if (const RecordType *RT = RetTy->getAs<RecordType>()) {
+        const RecordDecl *RD = RT->getDecl();
+        if (RD->hasFlexibleArrayMember())
+          return DefaultABIInfo::classifyReturnType(RetTy);
+      }
+
+      // Pack aggregates <= 8 bytes (64 bits) into a single VGPR or a pair.
+      uint64_t Size = getContext().getTypeSize(RetTy);
+      if (Size <= 16)
+        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+
+      if (Size <= 32)
+        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+
+      if (Size <= 64) {
+        llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
+        return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
+      }
+
+      // Larger aggregates stay direct only while they fit the register budget.
+      if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
+        return ABIArgInfo::getDirect();
+    }
+  }
+
+  // Otherwise just do the default thing.
+  return DefaultABIInfo::classifyReturnType(RetTy);
+}
+
+/// For kernels all parameters are really passed in a special buffer. It doesn't
+/// make sense to pass anything byval, so everything must be direct.
+ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // TODO: Can we omit empty structs?
+
+ if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
+ Ty = QualType(SeltTy, 0);
+
+ llvm::Type *OrigLTy = CGT.ConvertType(Ty);
+ llvm::Type *LTy = OrigLTy;
+ if (getContext().getLangOpts().HIP) {
+ LTy = coerceKernelArgumentType(
+ OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
+ /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
+ }
+
+ // FIXME: Should also use this for OpenCL, but it requires addressing the
+ // problem of kernels being called.
+ //
+ // FIXME: This doesn't apply the optimization of coercing pointers in structs
+ // to global address space when using byref. This would require implementing a
+ // new kind of coercion of the in-memory type when for indirect arguments.
+ if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
+ isAggregateTypeForABI(Ty)) {
+ return ABIArgInfo::getIndirectAliased(
+ getContext().getTypeAlignInChars(Ty),
+ getContext().getTargetAddressSpace(LangAS::opencl_constant),
+ false /*Realign*/, nullptr /*Padding*/);
+ }
+
+ // If we set CanBeFlattened to true, CodeGen will expand the struct to its
+ // individual elements, which confuses the Clover OpenCL backend; therefore we
+ // have to set it to false here. Other args of getDirect() are just defaults.
+ return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
+}
+
+// Classify a non-kernel argument. NumRegsLeft is a running budget of 32-bit
+// registers still available for this call; it is decremented for every
+// directly-passed argument.
+ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
+                                               unsigned &NumRegsLeft) const {
+  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
+
+  Ty = useFirstFieldIfTransparentUnion(Ty);
+
+  if (isAggregateTypeForABI(Ty)) {
+    // Records with non-trivial destructors/copy-constructors should not be
+    // passed by value.
+    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
+      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+    // Ignore empty structs/unions.
+    if (isEmptyRecord(getContext(), Ty, true))
+      return ABIArgInfo::getIgnore();
+
+    // Lower single-element structs to just pass a regular value. TODO: We
+    // could do reasonable-size multiple-element structs too, using getExpand(),
+    // though watch out for things like bitfields.
+    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
+      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+    // Flexible array members preclude the coercions below.
+    if (const RecordType *RT = Ty->getAs<RecordType>()) {
+      const RecordDecl *RD = RT->getDecl();
+      if (RD->hasFlexibleArrayMember())
+        return DefaultABIInfo::classifyArgumentType(Ty);
+    }
+
+    // Pack aggregates <= 8 bytes into single VGPR or pair.
+    uint64_t Size = getContext().getTypeSize(Ty);
+    if (Size <= 64) {
+      unsigned NumRegs = (Size + 31) / 32;
+      // Saturating decrement: never underflow the budget.
+      NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
+
+      if (Size <= 16)
+        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+
+      if (Size <= 32)
+        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+
+      // XXX: Should this be i64 instead, and should the limit increase?
+      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
+      return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
+    }
+
+    // Larger aggregates stay direct only if they fit the remaining budget.
+    if (NumRegsLeft > 0) {
+      unsigned NumRegs = numRegsForType(Ty);
+      if (NumRegsLeft >= NumRegs) {
+        NumRegsLeft -= NumRegs;
+        return ABIArgInfo::getDirect();
+      }
+    }
+  }
+
+  // Otherwise just do the default thing, still charging the budget for
+  // anything not passed indirectly.
+  ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
+  if (!ArgInfo.isIndirect()) {
+    unsigned NumRegs = numRegsForType(Ty);
+    NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
+  }
+
+  return ArgInfo;
+}
+
+// Target hooks for AMDGPU: kernel attributes, address-space handling,
+// sync-scope mapping and enqueued-block kernel creation.
+class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
+      : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}
+
+  // Translate source-level attributes (work-group size, waves-per-eu,
+  // num-sgpr/vgpr) into LLVM function attributes.
+  void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
+                                 CodeGenModule &CGM) const;
+
+  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+                           CodeGen::CodeGenModule &M) const override;
+  unsigned getOpenCLKernelCallingConv() const override;
+
+  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
+      llvm::PointerType *T, QualType QT) const override;
+
+  // Allocas live in the target's alloca (private) address space.
+  LangAS getASTAllocaAddressSpace() const override {
+    return getLangASFromTargetAS(
+        getABIInfo().getDataLayout().getAllocaAddrSpace());
+  }
+  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
+                                  const VarDecl *D) const override;
+  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
+                                         SyncScope Scope,
+                                         llvm::AtomicOrdering Ordering,
+                                         llvm::LLVMContext &Ctx) const override;
+  llvm::Value *createEnqueuedBlockKernel(CodeGenFunction &CGF,
+                                         llvm::Function *BlockInvokeFunc,
+                                         llvm::Type *BlockTy) const override;
+  bool shouldEmitStaticExternCAliases() const override;
+  bool shouldEmitDWARFBitFieldSeparators() const override;
+  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
+};
+}
+
+// Decide whether a hidden-visibility global must be promoted to protected
+// visibility for AMDGPU: kernels and device-side variables qualify.
+static bool requiresAMDGPUProtectedVisibility(const Decl *D,
+                                              llvm::GlobalValue *GV) {
+  // Only hidden-visibility values are candidates.
+  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
+    return false;
+
+  // OpenCL kernels and CUDA/HIP __global__ functions.
+  if (D->hasAttr<OpenCLKernelAttr>())
+    return true;
+  if (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>())
+    return true;
+
+  // Device-side variables, including surface/texture builtins.
+  if (const auto *VD = dyn_cast<VarDecl>(D))
+    return D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
+           VD->getType()->isCUDADeviceBuiltinSurfaceType() ||
+           VD->getType()->isCUDADeviceBuiltinTextureType();
+
+  return false;
+}
+
+// Lower source-level AMDGPU attributes on FD to LLVM function attributes on
+// F: flat work-group size, waves-per-eu, and SGPR/VGPR counts. Kernels
+// without explicit sizes get a language-dependent default maximum.
+void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
+    const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const {
+  // reqd_work_group_size is only honored for OpenCL.
+  const auto *ReqdWGS =
+      M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
+  const bool IsOpenCLKernel =
+      M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>();
+  const bool IsHIPKernel = M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>();
+
+  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
+  if (ReqdWGS || FlatWGS) {
+    unsigned Min = 0;
+    unsigned Max = 0;
+    // Explicit flat work-group size bounds take precedence.
+    if (FlatWGS) {
+      Min = FlatWGS->getMin()
+                ->EvaluateKnownConstInt(M.getContext())
+                .getExtValue();
+      Max = FlatWGS->getMax()
+                ->EvaluateKnownConstInt(M.getContext())
+                .getExtValue();
+    }
+    // Otherwise a required work-group size pins both bounds to its product.
+    if (ReqdWGS && Min == 0 && Max == 0)
+      Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
+
+    if (Min != 0) {
+      assert(Min <= Max && "Min must be less than or equal Max");
+
+      std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
+      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
+    } else
+      assert(Max == 0 && "Max must be zero");
+  } else if (IsOpenCLKernel || IsHIPKernel) {
+    // By default, restrict the maximum size to a value specified by
+    // --gpu-max-threads-per-block=n or its default value for HIP.
+    const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
+    const unsigned DefaultMaxWorkGroupSize =
+        IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
+                       : M.getLangOpts().GPUMaxThreadsPerBlock;
+    std::string AttrVal =
+        std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
+    F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
+  }
+
+  // waves-per-eu: Max is optional; 0 means "unspecified".
+  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
+    unsigned Min =
+        Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue();
+    unsigned Max = Attr->getMax() ? Attr->getMax()
+                                        ->EvaluateKnownConstInt(M.getContext())
+                                        .getExtValue()
+                                  : 0;
+
+    if (Min != 0) {
+      assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");
+
+      std::string AttrVal = llvm::utostr(Min);
+      if (Max != 0)
+        AttrVal = AttrVal + "," + llvm::utostr(Max);
+      F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
+    } else
+      assert(Max == 0 && "Max must be zero");
+  }
+
+  // Explicit SGPR/VGPR budgets; 0 means "no request".
+  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
+    unsigned NumSGPR = Attr->getNumSGPR();
+
+    if (NumSGPR != 0)
+      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
+  }
+
+  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
+    uint32_t NumVGPR = Attr->getNumVGPR();
+
+    if (NumVGPR != 0)
+      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
+  }
+}
+
+// Apply AMDGPU-specific attributes to a global: visibility promotion for
+// kernels/device variables, then function-level attributes for definitions.
+void AMDGPUTargetCodeGenInfo::setTargetAttributes(
+    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+  if (requiresAMDGPUProtectedVisibility(D, GV)) {
+    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
+    GV->setDSOLocal(true);
+  }
+
+  // Only definitions of functions get the attributes below.
+  if (GV->isDeclaration())
+    return;
+
+  llvm::Function *F = dyn_cast<llvm::Function>(GV);
+  if (!F)
+    return;
+
+  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+  if (FD)
+    setFunctionDeclAttributes(FD, F, M);
+
+  const bool IsHIPKernel =
+      M.getLangOpts().HIP && FD && FD->hasAttr<CUDAGlobalAttr>();
+
+  // TODO: This should be moved to language specific attributes instead.
+  if (IsHIPKernel)
+    F->addFnAttr("uniform-work-group-size", "true");
+
+  if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics())
+    F->addFnAttr("amdgpu-unsafe-fp-atomics", "true");
+
+  if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
+    F->addFnAttr("amdgpu-ieee", "false");
+}
+
+// OpenCL kernels use the dedicated AMDGPU kernel calling convention.
+unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
+  return llvm::CallingConv::AMDGPU_KERNEL;
+}
+
+// Currently LLVM assumes null pointers always have value 0,
+// which results in incorrectly transformed IR. Therefore, instead of
+// emitting null pointers in private and local address spaces, a null
+// pointer in generic address space is emitted which is casted to a
+// pointer in local or private address space.
+llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
+    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
+    QualType QT) const {
+  auto &Ctx = CGM.getContext();
+
+  // When null really is the all-zero bit pattern in this address space, a
+  // plain null constant is correct.
+  if (Ctx.getTargetNullPointerValue(QT) == 0)
+    return llvm::ConstantPointerNull::get(PT);
+
+  // Otherwise materialize null in the generic address space and
+  // addrspacecast it, since LLVM assumes a literal null is the value 0.
+  auto *GenericNull = llvm::ConstantPointerNull::get(llvm::PointerType::get(
+      PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic)));
+  return llvm::ConstantExpr::getAddrSpaceCast(GenericNull, PT);
+}
+
+// Pick the address space for a global variable in address-space-agnostic
+// languages; constant-initialized constants may be promoted to the target's
+// constant address space.
+LangAS
+AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
+                                                  const VarDecl *D) const {
+  assert(!CGM.getLangOpts().OpenCL &&
+         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
+         "Address space agnostic languages only");
+  // Default: the target's global address space.
+  LangAS DefaultGlobalAS = getLangASFromTargetAS(
+      CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
+  if (!D)
+    return DefaultGlobalAS;
+
+  // An explicit (target) address space on the declaration wins.
+  LangAS AddrSpace = D->getType().getAddressSpace();
+  assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace));
+  if (AddrSpace != LangAS::Default)
+    return AddrSpace;
+
+  // Only promote to address space 4 if VarDecl has constant initialization.
+  if (CGM.isTypeConstant(D->getType(), false, false) &&
+      D->hasConstantInitialization()) {
+    if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
+      return *ConstAS;
+  }
+  return DefaultGlobalAS;
+}
+
+// Map a language-level synchronization scope (and ordering) to the AMDGPU
+// LLVM sync-scope name; non-seq_cst orderings use the "one-as" variants.
+llvm::SyncScope::ID
+AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
+                                            SyncScope Scope,
+                                            llvm::AtomicOrdering Ordering,
+                                            llvm::LLVMContext &Ctx) const {
+  std::string Name;
+  switch (Scope) {
+  case SyncScope::HIPSingleThread:
+    Name = "singlethread";
+    break;
+  case SyncScope::HIPWavefront:
+  case SyncScope::OpenCLSubGroup:
+    Name = "wavefront";
+    break;
+  case SyncScope::HIPWorkgroup:
+  case SyncScope::OpenCLWorkGroup:
+    Name = "workgroup";
+    break;
+  case SyncScope::HIPAgent:
+  case SyncScope::OpenCLDevice:
+    Name = "agent";
+    break;
+  case SyncScope::HIPSystem:
+  case SyncScope::OpenCLAllSVMDevices:
+    // The empty name denotes system scope.
+    Name = "";
+    break;
+  }
+
+  // Weaker-than-seq_cst orderings only synchronize one address space.
+  if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
+    if (!Name.empty())
+      Name += "-";
+    Name += "one-as";
+  }
+
+  return Ctx.getOrInsertSyncScopeID(Name);
+}
+
+// AMDGPU opts out of emitting aliases for static extern "C" functions.
+bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
+  return false;
+}
+
+// AMDGPU opts into DWARF bit-field separator emission.
+bool AMDGPUTargetCodeGenInfo::shouldEmitDWARFBitFieldSeparators() const {
+  return true;
+}
+
+// CUDA/HIP kernels reuse the OpenCL kernel calling convention on AMDGPU;
+// rewrite the function type in place.
+void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
+    const FunctionType *&FT) const {
+  FT = getABIInfo().getContext().adjustFunctionType(
+      FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
+}
+
+/// Create an OpenCL kernel for an enqueued block.
+///
+/// The type of the first argument (the block literal) is the struct type
+/// of the block literal instead of a pointer type. The first argument
+/// (block literal) is passed directly by value to the kernel. The kernel
+/// allocates the same type of struct on stack and stores the block literal
+/// to it and passes its pointer to the block invoke function. The kernel
+/// has "enqueued-block" function attribute and kernel argument metadata.
+llvm::Value *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
+    CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
+  auto &Builder = CGF.Builder;
+  auto &C = CGF.getLLVMContext();
+
+  auto *InvokeFT = Invoke->getFunctionType();
+  // Parallel arrays of kernel-argument metadata, one entry per parameter.
+  llvm::SmallVector<llvm::Type *, 2> ArgTys;
+  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
+  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
+  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
+  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
+  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
+  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;
+
+  // First parameter: the block literal itself, passed by value.
+  ArgTys.push_back(BlockTy);
+  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
+  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
+  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
+  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
+  AccessQuals.push_back(llvm::MDString::get(C, "none"));
+  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
+  // Remaining parameters are forwarded from the invoke function (address
+  // qualifier 3 corresponds to OpenCL local).
+  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
+    ArgTys.push_back(InvokeFT->getParamType(I));
+    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
+    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
+    AccessQuals.push_back(llvm::MDString::get(C, "none"));
+    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
+    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
+    ArgNames.push_back(
+        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
+  }
+  std::string Name = Invoke->getName().str() + "_kernel";
+  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
+  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
+                                   &CGF.CGM.getModule());
+  F->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
+
+  llvm::AttrBuilder KernelAttrs(C);
+  // FIXME: The invoke isn't applying the right attributes either
+  // FIXME: This is missing setTargetAttributes
+  CGF.CGM.addDefaultFunctionDefinitionAttributes(KernelAttrs);
+  KernelAttrs.addAttribute("enqueued-block");
+  F->addFnAttrs(KernelAttrs);
+
+  // Build the body: spill the by-value block literal to a stack slot and
+  // call the invoke function with its address plus the forwarded arguments.
+  auto IP = CGF.Builder.saveIP();
+  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
+  Builder.SetInsertPoint(BB);
+  const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
+  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
+  BlockPtr->setAlignment(BlockAlign);
+  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
+  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
+  llvm::SmallVector<llvm::Value *, 2> Args;
+  Args.push_back(Cast);
+  for (llvm::Argument &A : llvm::drop_begin(F->args()))
+    Args.push_back(&A);
+  llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
+  call->setCallingConv(Invoke->getCallingConv());
+  Builder.CreateRetVoid();
+  Builder.restoreIP(IP);
+
+  // Attach the OpenCL kernel-argument metadata collected above.
+  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
+  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
+  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
+  F->setMetadata("kernel_arg_base_type",
+                 llvm::MDNode::get(C, ArgBaseTypeNames));
+  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
+  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
+    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));
+
+  return F;
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM) {
+  // Factory: wrap the module's type converter in the AMDGPU codegen info.
+  auto Info = std::make_unique<AMDGPUTargetCodeGenInfo>(CGM.getTypes());
+  return Info;
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/ARC.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/ARC.cpp
new file mode 100644
index 000000000000..550eb4068f25
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/ARC.cpp
@@ -0,0 +1,158 @@
+//===- ARC.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+// ARC ABI implementation.
+namespace {
+
+// ABI classification for ARC. Arguments draw from a pool of 8 registers;
+// updateState charges each classified argument/return against that pool.
+class ARCABIInfo : public DefaultABIInfo {
+  struct CCState {
+    // Remaining argument-passing registers for the current call.
+    unsigned FreeRegs;
+  };
+
+public:
+  using DefaultABIInfo::DefaultABIInfo;
+
+private:
+  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                    QualType Ty) const override;
+
+  // Deduct the registers consumed by an already-classified value from the
+  // per-call state (saturating at zero).
+  void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
+    if (!State.FreeRegs)
+      return;
+    // An in-register indirect value consumes one register (the pointer).
+    if (Info.isIndirect() && Info.getInReg())
+      State.FreeRegs--;
+    else if (Info.isDirect() && Info.getInReg()) {
+      // A direct in-register value consumes one register per started dword.
+      unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
+      if (sz < State.FreeRegs)
+        State.FreeRegs -= sz;
+      else
+        State.FreeRegs = 0;
+    }
+  }
+
+  void computeInfo(CGFunctionInfo &FI) const override {
+    CCState State;
+    // ARC uses 8 registers to pass arguments.
+    State.FreeRegs = 8;
+
+    if (!getCXXABI().classifyReturnType(FI))
+      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+    updateState(FI.getReturnInfo(), FI.getReturnType(), State);
+    for (auto &I : FI.arguments()) {
+      I.info = classifyArgumentType(I.type, State.FreeRegs);
+      updateState(I.info, I.type, State);
+    }
+  }
+
+  ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
+  ABIArgInfo getIndirectByValue(QualType Ty) const;
+  // NOTE(review): FreeRegs is uint8_t here but CCState::FreeRegs is unsigned;
+  // the narrowing is harmless for the current maximum of 8 — confirm.
+  ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
+  ABIArgInfo classifyReturnType(QualType RetTy) const;
+};
+
+// Target hooks for ARC; only the ABI classification above is customized.
+class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  ARCTargetCodeGenInfo(CodeGenTypes &CGT)
+      : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {}
+};
+
+
+ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
+  // Pass the pointer in a register when one is still free; otherwise the
+  // pointer itself goes on the stack.
+  if (HasFreeRegs)
+    return getNaturalAlignIndirectInReg(Ty);
+  return getNaturalAlignIndirect(Ty, false);
+}
+
+ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
+  // byval slots are 4-byte aligned; request stack realignment only when the
+  // type's natural alignment exceeds the minimum ABI stack alignment.
+  const unsigned MinABIStackAlignInBytes = 4;
+  const unsigned NaturalAlignInBytes = getContext().getTypeAlign(Ty) / 8;
+  const bool Realign = NaturalAlignInBytes > MinABIStackAlignInBytes;
+  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
+                                 Realign);
+}
+
+Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                              QualType Ty) const {
+  // The va_list is a simple pointer; arguments sit in 4-byte slots and may
+  // be placed with higher alignment when the type requires it.
+  CharUnits SlotSize = CharUnits::fromQuantity(4);
+  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+                          getContext().getTypeInfoInChars(Ty), SlotSize,
+                          /*AllowHigherAlign*/ true);
+}
+
+// Classify one argument against the remaining register count. Values that
+// fit the free registers are passed in-register; otherwise directly or
+// indirectly on the stack.
+ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
+                                            uint8_t FreeRegs) const {
+  // Handle the generic C++ ABI.
+  const RecordType *RT = Ty->getAs<RecordType>();
+  if (RT) {
+    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
+    if (RAA == CGCXXABI::RAA_Indirect)
+      return getIndirectByRef(Ty, FreeRegs > 0);
+
+    if (RAA == CGCXXABI::RAA_DirectInMemory)
+      return getIndirectByValue(Ty);
+  }
+
+  // Treat an enum type as its underlying type.
+  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+    Ty = EnumTy->getDecl()->getIntegerType();
+
+  // Registers needed: one per started 32-bit word.
+  auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;
+
+  if (isAggregateTypeForABI(Ty)) {
+    // Structures with flexible arrays are always indirect.
+    if (RT && RT->getDecl()->hasFlexibleArrayMember())
+      return getIndirectByValue(Ty);
+
+    // Ignore empty structs/unions.
+    if (isEmptyRecord(getContext(), Ty, true))
+      return ABIArgInfo::getIgnore();
+
+    llvm::LLVMContext &LLVMContext = getVMContext();
+
+    // Coerce the aggregate to a struct of i32s covering its size.
+    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+    SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
+    llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+
+    return FreeRegs >= SizeInRegs ?
+        ABIArgInfo::getDirectInReg(Result) :
+        ABIArgInfo::getDirect(Result, 0, nullptr, false);
+  }
+
+  // Wide _BitInt values don't fit registers and go indirect by value.
+  if (const auto *EIT = Ty->getAs<BitIntType>())
+    if (EIT->getNumBits() > 64)
+      return getIndirectByValue(Ty);
+
+  // Scalars: extend small promotable integers, otherwise pass directly;
+  // use the in-register variants while registers remain.
+  return isPromotableIntegerTypeForABI(Ty)
+             ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty)
+                                       : ABIArgInfo::getExtend(Ty))
+             : (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg()
+                                       : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
+  // Complex results are returned in registers.
+  if (RetTy->isAnyComplexType())
+    return ABIArgInfo::getDirectInReg();
+
+  // Results wider than 4 registers (16 bytes) are returned indirectly.
+  auto RegCount = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
+  return RegCount > 4 ? getIndirectByRef(RetTy, /*HasFreeRegs*/ true)
+                      : DefaultABIInfo::classifyReturnType(RetTy);
+}
+
+} // End anonymous namespace.
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createARCTargetCodeGenInfo(CodeGenModule &CGM) {
+  // Factory: wrap the module's type converter in the ARC codegen info.
+  auto Info = std::make_unique<ARCTargetCodeGenInfo>(CGM.getTypes());
+  return Info;
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/ARM.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/ARM.cpp
new file mode 100644
index 000000000000..d7d175ff1724
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/ARM.cpp
@@ -0,0 +1,819 @@
+//===- ARM.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// ARM ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ARMABIInfo : public ABIInfo {
+ ARMABIKind Kind;
+ bool IsFloatABISoftFP;
+
+public:
+ ARMABIInfo(CodeGenTypes &CGT, ARMABIKind Kind) : ABIInfo(CGT), Kind(Kind) {
+ setCCs();
+ IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
+ CGT.getCodeGenOpts().FloatABI == ""; // default
+ }
+
+ bool isEABI() const {
+ switch (getTarget().getTriple().getEnvironment()) {
+ case llvm::Triple::Android:
+ case llvm::Triple::EABI:
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABI:
+ case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::MuslEABI:
+ case llvm::Triple::MuslEABIHF:
+ return true;
+ default:
+ return getTarget().getTriple().isOHOSFamily();
+ }
+ }
+
+ bool isEABIHF() const {
+ switch (getTarget().getTriple().getEnvironment()) {
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::MuslEABIHF:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ ARMABIKind getABIKind() const { return Kind; }
+
+ bool allowBFloatArgsAndRet() const override {
+ return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
+ }
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
+ unsigned functionCallConv) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
+ unsigned functionCallConv) const;
+ ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
+ uint64_t Members) const;
+ ABIArgInfo coerceIllegalVector(QualType Ty) const;
+ bool isIllegalVectorType(QualType Ty) const;
+ bool containsAnyFP16Vectors(QualType Ty) const;
+
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override;
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t Members) const override;
+ bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;
+
+ bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ llvm::CallingConv::ID getLLVMDefaultCC() const;
+ llvm::CallingConv::ID getABIDefaultCC() const;
+ void setCCs();
+};
+
+class ARMSwiftABIInfo : public SwiftABIInfo {
+public:
+ explicit ARMSwiftABIInfo(CodeGenTypes &CGT)
+ : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}
+
+ bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
+ unsigned NumElts) const override;
+};
+
+class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
+ : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {
+ SwiftInfo = std::make_unique<ARMSwiftABIInfo>(CGT);
+ }
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 13;
+ }
+
+ StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
+ return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-15 are the 16 integer registers.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
+ return false;
+ }
+
+ unsigned getSizeOfUnwindException() const override {
+ if (getABIInfo<ARMABIInfo>().isEABI())
+ return 88;
+ return TargetCodeGenInfo::getSizeOfUnwindException();
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ if (GV->isDeclaration())
+ return;
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD)
+ return;
+ auto *Fn = cast<llvm::Function>(GV);
+
+ if (const auto *TA = FD->getAttr<TargetAttr>()) {
+ ParsedTargetAttr Attr =
+ CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
+ if (!Attr.BranchProtection.empty()) {
+ TargetInfo::BranchProtectionInfo BPI;
+ StringRef DiagMsg;
+ StringRef Arch =
+ Attr.CPU.empty() ? CGM.getTarget().getTargetOpts().CPU : Attr.CPU;
+ if (!CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
+ Arch, BPI, DiagMsg)) {
+ CGM.getDiags().Report(
+ D->getLocation(),
+ diag::warn_target_unsupported_branch_protection_attribute)
+ << Arch;
+ } else {
+ static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
+ assert(static_cast<unsigned>(BPI.SignReturnAddr) <= 2 &&
+ "Unexpected SignReturnAddressScopeKind");
+ Fn->addFnAttr(
+ "sign-return-address",
+ SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
+
+ Fn->addFnAttr("branch-target-enforcement",
+ BPI.BranchTargetEnforcement ? "true" : "false");
+ }
+ } else if (CGM.getLangOpts().BranchTargetEnforcement ||
+ CGM.getLangOpts().hasSignReturnAddress()) {
+ // If the Branch Protection attribute is missing, validate the target
+ // Architecture attribute against Branch Protection command line
+ // settings.
+ if (!CGM.getTarget().isBranchProtectionSupportedArch(Attr.CPU))
+ CGM.getDiags().Report(
+ D->getLocation(),
+ diag::warn_target_unsupported_branch_protection_attribute)
+ << Attr.CPU;
+ }
+ }
+
+ const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
+ if (!Attr)
+ return;
+
+ const char *Kind;
+ switch (Attr->getInterrupt()) {
+ case ARMInterruptAttr::Generic: Kind = ""; break;
+ case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
+ case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
+ case ARMInterruptAttr::SWI: Kind = "SWI"; break;
+ case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
+ case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
+ }
+
+ Fn->addFnAttr("interrupt", Kind);
+
+ ARMABIKind ABI = getABIInfo<ARMABIInfo>().getABIKind();
+ if (ABI == ARMABIKind::APCS)
+ return;
+
+ // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
+ // however this is not necessarily true on taking any interrupt. Instruct
+ // the backend to perform a realignment as part of the function prologue.
+ llvm::AttrBuilder B(Fn->getContext());
+ B.addStackAlignmentAttr(8);
+ Fn->addFnAttrs(B);
+ }
+};
+
+class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
+public:
+ WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
+ : ARMTargetCodeGenInfo(CGT, K) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+
+ void getDependentLibraryOption(llvm::StringRef Lib,
+ llvm::SmallString<24> &Opt) const override {
+ Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
+ }
+
+ void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
+ llvm::SmallString<32> &Opt) const override {
+ Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
+ }
+};
+
+void WindowsARMTargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (GV->isDeclaration())
+ return;
+ addStackProbeTargetAttributes(D, GV, CGM);
+}
+}
+
+void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (!::classifyReturnType(getCXXABI(), FI, *this))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
+ FI.getCallingConvention());
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type, FI.isVariadic(),
+ FI.getCallingConvention());
+
+
+ // Always honor user-specified calling convention.
+ if (FI.getCallingConvention() != llvm::CallingConv::C)
+ return;
+
+ llvm::CallingConv::ID cc = getRuntimeCC();
+ if (cc != llvm::CallingConv::C)
+ FI.setEffectiveCallingConvention(cc);
+}
+
+/// Return the default calling convention that LLVM will use.
+llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
+ // The default calling convention that LLVM will infer.
+ if (isEABIHF() || getTarget().getTriple().isWatchABI())
+ return llvm::CallingConv::ARM_AAPCS_VFP;
+ else if (isEABI())
+ return llvm::CallingConv::ARM_AAPCS;
+ else
+ return llvm::CallingConv::ARM_APCS;
+}
+
+/// Return the calling convention that our ABI would like us to use
+/// as the C calling convention.
+llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
+ switch (getABIKind()) {
+ case ARMABIKind::APCS:
+ return llvm::CallingConv::ARM_APCS;
+ case ARMABIKind::AAPCS:
+ return llvm::CallingConv::ARM_AAPCS;
+ case ARMABIKind::AAPCS_VFP:
+ return llvm::CallingConv::ARM_AAPCS_VFP;
+ case ARMABIKind::AAPCS16_VFP:
+ return llvm::CallingConv::ARM_AAPCS_VFP;
+ }
+ llvm_unreachable("bad ABI kind");
+}
+
+void ARMABIInfo::setCCs() {
+ assert(getRuntimeCC() == llvm::CallingConv::C);
+
+ // Don't muddy up the IR with a ton of explicit annotations if
+ // they'd just match what LLVM will infer from the triple.
+ llvm::CallingConv::ID abiCC = getABIDefaultCC();
+ if (abiCC != getLLVMDefaultCC())
+ RuntimeCC = abiCC;
+}
+
+ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size <= 32) {
+ llvm::Type *ResType =
+ llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 64 || Size == 128) {
+ auto *ResType = llvm::FixedVectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), Size / 32);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
+ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
+ const Type *Base,
+ uint64_t Members) const {
+ assert(Base && "Base class should be set for homogeneous aggregate");
+ // Base can be a floating-point or a vector.
+ if (const VectorType *VT = Base->getAs<VectorType>()) {
+ // FP16 vectors should be converted to integer vectors
+ if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
+ uint64_t Size = getContext().getTypeSize(VT);
+ auto *NewVecTy = llvm::FixedVectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), Size / 32);
+ llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
+ return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
+ }
+ }
+ unsigned Align = 0;
+ if (getABIKind() == ARMABIKind::AAPCS ||
+ getABIKind() == ARMABIKind::AAPCS_VFP) {
+ // For alignment adjusted HFAs, cap the argument alignment to 8, leave it
+ // default otherwise.
+ Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
+ unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
+ Align = (Align > BaseAlign && Align >= 8) ? 8 : 0;
+ }
+ return ABIArgInfo::getDirect(nullptr, 0, nullptr, false, Align);
+}
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
+ unsigned functionCallConv) const {
+ // 6.1.2.1 The following argument types are VFP CPRCs:
+ // A single-precision floating-point type (including promoted
+ // half-precision types); A double-precision floating-point type;
+ // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
+ // with a Base Type of a single- or double-precision floating-point type,
+ // 64-bit containerized vectors or 128-bit containerized vectors with one
+ // to four Elements.
+ // Variadic functions should always marshal to the base standard.
+ bool IsAAPCS_VFP =
+ !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
+
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Handle illegal vector types here.
+ if (isIllegalVectorType(Ty))
+ return coerceIllegalVector(Ty);
+
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
+ Ty = EnumTy->getDecl()->getIntegerType();
+ }
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+ }
+
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ if (IsAAPCS_VFP) {
+ // Homogeneous Aggregates need to be expanded when we can fit the aggregate
+ // into VFP registers.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(Ty, Base, Members))
+ return classifyHomogeneousAggregate(Ty, Base, Members);
+ } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
+ // WatchOS does have homogeneous aggregates. Note that we intentionally use
+ // this convention even for a variadic function: the backend will use GPRs
+ // if needed.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(Ty, Base, Members)) {
+ assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
+ llvm::Type *Ty =
+ llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
+ return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
+ }
+ }
+
+ if (getABIKind() == ARMABIKind::AAPCS16_VFP &&
+ getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
+ // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
+ // bigger than 128-bits, they get placed in space allocated by the caller,
+ // and a pointer is passed.
+ return ABIArgInfo::getIndirect(
+ CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
+ }
+
+ // Support byval for ARM.
+ // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
+ // most 8-byte. We realign the indirect argument if type alignment is bigger
+ // than ABI alignment.
+ uint64_t ABIAlign = 4;
+ uint64_t TyAlign;
+ if (getABIKind() == ARMABIKind::AAPCS_VFP ||
+ getABIKind() == ARMABIKind::AAPCS) {
+ TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
+ ABIAlign = std::clamp(TyAlign, (uint64_t)4, (uint64_t)8);
+ } else {
+ TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
+ }
+ if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
+ assert(getABIKind() != ARMABIKind::AAPCS16_VFP && "unexpected byval");
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
+ /*ByVal=*/true,
+ /*Realign=*/TyAlign > ABIAlign);
+ }
+
+ // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
+ // same size and alignment.
+ if (getTarget().isRenderScriptTarget()) {
+ return coerceToIntArray(Ty, getContext(), getVMContext());
+ }
+
+ // Otherwise, pass by coercing to a structure of the appropriate size.
+ llvm::Type* ElemTy;
+ unsigned SizeRegs;
+ // FIXME: Try to match the types of the arguments more accurately where
+ // we can.
+ if (TyAlign <= 4) {
+ ElemTy = llvm::Type::getInt32Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ } else {
+ ElemTy = llvm::Type::getInt64Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
+ }
+
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
+}
+
+static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
+ llvm::LLVMContext &VMContext) {
+ // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
+ // is called integer-like if its size is less than or equal to one word, and
+ // the offset of each of its addressable sub-fields is zero.
+
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Check that the type fits in a word.
+ if (Size > 32)
+ return false;
+
+ // FIXME: Handle vector types!
+ if (Ty->isVectorType())
+ return false;
+
+ // Float types are never treated as "integer like".
+ if (Ty->isRealFloatingType())
+ return false;
+
+ // If this is a builtin or pointer type then it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
+ return true;
+
+ // Small complex integer types are "integer like".
+ if (const ComplexType *CT = Ty->getAs<ComplexType>())
+ return isIntegerLikeType(CT->getElementType(), Context, VMContext);
+
+ // Single element and zero sized arrays should be allowed, by the definition
+ // above, but they are not.
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // Ignore records with flexible arrays.
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // Check that all sub-fields are at offset 0, and are themselves "integer
+ // like".
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ bool HadField = false;
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const FieldDecl *FD = *i;
+
+ // Bit-fields are not addressable, we only need to verify they are "integer
+ // like". We still have to disallow a subsequent non-bitfield, for example:
+ // struct { int : 0; int x }
+ // is non-integer like according to gcc.
+ if (FD->isBitField()) {
+ if (!RD->isUnion())
+ HadField = true;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ continue;
+ }
+
+ // Check if this field is at offset 0.
+ if (Layout.getFieldOffset(idx) != 0)
+ return false;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ // Only allow at most one field in a structure. This doesn't match the
+ // wording above, but follows gcc in situations with a field following an
+ // empty structure.
+ if (!RD->isUnion()) {
+ if (HadField)
+ return false;
+
+ HadField = true;
+ }
+ }
+
+ return true;
+}
+
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
+ unsigned functionCallConv) const {
+
+ // Variadic functions should always marshal to the base standard.
+ bool IsAAPCS_VFP =
+ !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);
+
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (const VectorType *VT = RetTy->getAs<VectorType>()) {
+ // Large vector types should be returned via memory.
+ if (getContext().getTypeSize(RetTy) > 128)
+ return getNaturalAlignIndirect(RetTy);
+ // TODO: FP16/BF16 vectors should be converted to integer vectors
+ // This check is similar to isIllegalVectorType - refactor?
+ if ((!getTarget().hasLegalHalfType() &&
+ (VT->getElementType()->isFloat16Type() ||
+ VT->getElementType()->isHalfType())) ||
+ (IsFloatABISoftFP &&
+ VT->getElementType()->isBFloat16Type()))
+ return coerceIllegalVector(RetTy);
+ }
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+
+ return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect();
+ }
+
+ // Are we following APCS?
+ if (getABIKind() == ARMABIKind::APCS) {
+ if (isEmptyRecord(getContext(), RetTy, false))
+ return ABIArgInfo::getIgnore();
+
+ // Complex types are all returned as packed integers.
+ //
+ // FIXME: Consider using 2 x vector types if the back end handles them
+ // correctly.
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(
+ getVMContext(), getContext().getTypeSize(RetTy)));
+
+ // Integer like structures are returned in r0.
+ if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
+ // Return in the smallest viable integer type.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ }
+
+ // Otherwise return in memory.
+ return getNaturalAlignIndirect(RetTy);
+ }
+
+ // Otherwise this is an AAPCS variant.
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Check for homogeneous aggregates with AAPCS-VFP.
+ if (IsAAPCS_VFP) {
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(RetTy, Base, Members))
+ return classifyHomogeneousAggregate(RetTy, Base, Members);
+ }
+
+ // Aggregates <= 4 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 32) {
+ // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
+ // same size and alignment.
+ if (getTarget().isRenderScriptTarget()) {
+ return coerceToIntArray(RetTy, getContext(), getVMContext());
+ }
+ if (getDataLayout().isBigEndian())
+ // Return in 32 bit integer integer type (as if loaded by LDR, AAPCS 5.4)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+
+ // Return in the smallest viable integer type.
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ } else if (Size <= 128 && getABIKind() == ARMABIKind::AAPCS16_VFP) {
+ llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
+ llvm::Type *CoerceTy =
+ llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ return getNaturalAlignIndirect(RetTy);
+}
+
+/// isIllegalVector - check whether Ty is an illegal vector type.
+bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VT = Ty->getAs<VectorType> ()) {
+ // On targets that don't support half, fp16 or bfloat, they are expanded
+ // into float, and we don't want the ABI to depend on whether or not they
+ // are supported in hardware. Thus return false to coerce vectors of these
+ // types into integer vectors.
+ // We do not depend on hasLegalHalfType for bfloat as it is a
+ // separate IR type.
+ if ((!getTarget().hasLegalHalfType() &&
+ (VT->getElementType()->isFloat16Type() ||
+ VT->getElementType()->isHalfType())) ||
+ (IsFloatABISoftFP &&
+ VT->getElementType()->isBFloat16Type()))
+ return true;
+ if (isAndroid()) {
+ // Android shipped using Clang 3.1, which supported a slightly different
+ // vector ABI. The primary differences were that 3-element vector types
+ // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
+ // accepts that legacy behavior for Android only.
+ // Check whether VT is legal.
+ unsigned NumElements = VT->getNumElements();
+ // NumElements should be power of 2 or equal to 3.
+ if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
+ return true;
+ } else {
+ // Check whether VT is legal.
+ unsigned NumElements = VT->getNumElements();
+ uint64_t Size = getContext().getTypeSize(VT);
+ // NumElements should be power of 2.
+ if (!llvm::isPowerOf2_32(NumElements))
+ return true;
+ // Size should be greater than 32 bits.
+ return Size <= 32;
+ }
+ }
+ return false;
+}
+
+/// Return true if a type contains any 16-bit floating point vectors
+bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ uint64_t NElements = AT->getSize().getZExtValue();
+ if (NElements == 0)
+ return false;
+ return containsAnyFP16Vectors(AT->getElementType());
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
+ return containsAnyFP16Vectors(B.getType());
+ }))
+ return true;
+
+ if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
+ return FD && containsAnyFP16Vectors(FD->getType());
+ }))
+ return true;
+
+ return false;
+ } else {
+ if (const VectorType *VT = Ty->getAs<VectorType>())
+ return (VT->getElementType()->isFloat16Type() ||
+ VT->getElementType()->isBFloat16Type() ||
+ VT->getElementType()->isHalfType());
+ return false;
+ }
+}
+
+bool ARMSwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
+ unsigned NumElts) const {
+ if (!llvm::isPowerOf2_32(NumElts))
+ return false;
+ unsigned size = CGT.getDataLayout().getTypeStoreSizeInBits(EltTy);
+ if (size > 64)
+ return false;
+ if (VectorSize.getQuantity() != 8 &&
+ (VectorSize.getQuantity() != 16 || NumElts == 1))
+ return false;
+ return true;
+}
+
+bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ // Homogeneous aggregates for AAPCS-VFP must have base types of float,
+ // double, or 64-bit or 128-bit vectors.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::Float ||
+ BT->getKind() == BuiltinType::Double ||
+ BT->getKind() == BuiltinType::LongDouble)
+ return true;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ unsigned VecSize = getContext().getTypeSize(VT);
+ if (VecSize == 64 || VecSize == 128)
+ return true;
+ }
+ return false;
+}
+
+bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const {
+ return Members <= 4;
+}
+
+bool ARMABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
+ // AAPCS32 says that the rule for whether something is a homogeneous
+ // aggregate is applied to the output of the data layout decision. So
+ // anything that doesn't affect the data layout also does not affect
+ // homogeneity. In particular, zero-length bitfields don't stop a struct
+ // being homogeneous.
+ return true;
+}
+
+bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
+ bool acceptHalf) const {
+ // Give precedence to user-specified calling conventions.
+ if (callConvention != llvm::CallingConv::C)
+ return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
+ else
+ return (getABIKind() == ARMABIKind::AAPCS_VFP) ||
+ (acceptHalf && (getABIKind() == ARMABIKind::AAPCS16_VFP));
+}
+
+Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CharUnits SlotSize = CharUnits::fromQuantity(4);
+
+ // Empty records are ignored for parameter passing purposes.
+ if (isEmptyRecord(getContext(), Ty, true)) {
+ VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
+ auto *Load = CGF.Builder.CreateLoad(VAListAddr);
+ return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
+ }
+
+ CharUnits TySize = getContext().getTypeSizeInChars(Ty);
+ CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);
+
+ // Use indirect if size of the illegal vector is bigger than 16 bytes.
+ bool IsIndirect = false;
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
+ IsIndirect = true;
+
+ // ARMv7k passes structs bigger than 16 bytes indirectly, in space
+ // allocated by the caller.
+ } else if (TySize > CharUnits::fromQuantity(16) &&
+ getABIKind() == ARMABIKind::AAPCS16_VFP &&
+ !isHomogeneousAggregate(Ty, Base, Members)) {
+ IsIndirect = true;
+
+ // Otherwise, bound the type's ABI alignment.
+ // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
+ // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
+ // Our callers should be prepared to handle an under-aligned address.
+ } else if (getABIKind() == ARMABIKind::AAPCS_VFP ||
+ getABIKind() == ARMABIKind::AAPCS) {
+ TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
+ TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
+ } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
+ // ARMv7k allows type alignment up to 16 bytes.
+ TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
+ TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
+ } else {
+ TyAlignForABI = CharUnits::fromQuantity(4);
+ }
+
+ TypeInfoChars TyInfo(TySize, TyAlignForABI, AlignRequirementKind::None);
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
+ SlotSize, /*AllowHigherAlign*/ true);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind Kind) {
+ return std::make_unique<ARMTargetCodeGenInfo>(CGM.getTypes(), Kind);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createWindowsARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind K) {
+ return std::make_unique<WindowsARMTargetCodeGenInfo>(CGM.getTypes(), K);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/AVR.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/AVR.cpp
new file mode 100644
index 000000000000..50547dd6dec5
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/AVR.cpp
@@ -0,0 +1,154 @@
+//===- AVR.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+#include "clang/Basic/DiagnosticFrontend.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// AVR ABI Implementation. Documented at
+// https://gcc.gnu.org/wiki/avr-gcc#Calling_Convention
+// https://gcc.gnu.org/wiki/avr-gcc#Reduced_Tiny
+//===----------------------------------------------------------------------===//
+
+namespace {
+class AVRABIInfo : public DefaultABIInfo {
+private:
+ // The total amount of registers can be used to pass parameters. It is 18 on
+ // AVR, or 6 on AVRTiny.
+ const unsigned ParamRegs;
+ // The total amount of registers can be used to pass return value. It is 8 on
+ // AVR, or 4 on AVRTiny.
+ const unsigned RetRegs;
+
+public:
+ AVRABIInfo(CodeGenTypes &CGT, unsigned NPR, unsigned NRR)
+ : DefaultABIInfo(CGT), ParamRegs(NPR), RetRegs(NRR) {}
+
+ ABIArgInfo classifyReturnType(QualType Ty, bool &LargeRet) const {
+ // On AVR, a return struct with size less than or equals to 8 bytes is
+ // returned directly via registers R18-R25. On AVRTiny, a return struct
+ // with size less than or equals to 4 bytes is returned directly via
+ // registers R22-R25.
+ if (isAggregateTypeForABI(Ty) &&
+ getContext().getTypeSize(Ty) <= RetRegs * 8)
+ return ABIArgInfo::getDirect();
+ // A return value (struct or scalar) with larger size is returned via a
+ // stack slot, along with a pointer as the function's implicit argument.
+ if (getContext().getTypeSize(Ty) > RetRegs * 8) {
+ LargeRet = true;
+ return getNaturalAlignIndirect(Ty);
+ }
+ // An i8 return value should not be extended to i16, since AVR has 8-bit
+ // registers.
+ if (Ty->isIntegralOrEnumerationType() && getContext().getTypeSize(Ty) <= 8)
+ return ABIArgInfo::getDirect();
+ // Otherwise we follow the default way which is compatible.
+ return DefaultABIInfo::classifyReturnType(Ty);
+ }
+
+ ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegs) const {
+ unsigned TySize = getContext().getTypeSize(Ty);
+
+ // An int8 type argument always costs two registers like an int16.
+ if (TySize == 8 && NumRegs >= 2) {
+ NumRegs -= 2;
+ return ABIArgInfo::getExtend(Ty);
+ }
+
+ // If the argument size is an odd number of bytes, round up the size
+ // to the next even number.
+ TySize = llvm::alignTo(TySize, 16);
+
+ // Any type including an array/struct type can be passed in rgisters,
+ // if there are enough registers left.
+ if (TySize <= NumRegs * 8) {
+ NumRegs -= TySize / 8;
+ return ABIArgInfo::getDirect();
+ }
+
+ // An argument is passed either completely in registers or completely in
+ // memory. Since there are not enough registers left, current argument
+ // and all other unprocessed arguments should be passed in memory.
+ // However we still need to return `ABIArgInfo::getDirect()` other than
+ // `ABIInfo::getNaturalAlignIndirect(Ty)`, otherwise an extra stack slot
+ // will be allocated, so the stack frame layout will be incompatible with
+ // avr-gcc.
+ NumRegs = 0;
+ return ABIArgInfo::getDirect();
+ }
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ // Decide the return type.
+ bool LargeRet = false;
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), LargeRet);
+
+ // Decide each argument type. The total number of registers can be used for
+ // arguments depends on several factors:
+ // 1. Arguments of varargs functions are passed on the stack. This applies
+ // even to the named arguments. So no register can be used.
+ // 2. Total 18 registers can be used on avr and 6 ones on avrtiny.
+ // 3. If the return type is a struct with too large size, two registers
+ // (out of 18/6) will be cost as an implicit pointer argument.
+ unsigned NumRegs = ParamRegs;
+ if (FI.isVariadic())
+ NumRegs = 0;
+ else if (LargeRet)
+ NumRegs -= 2;
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type, NumRegs);
+ }
+};
+
+class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ AVRTargetCodeGenInfo(CodeGenTypes &CGT, unsigned NPR, unsigned NRR)
+ : TargetCodeGenInfo(std::make_unique<AVRABIInfo>(CGT, NPR, NRR)) {}
+
+ LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
+ const VarDecl *D) const override {
+ // Check if global/static variable is defined in address space
+ // 1~6 (__flash, __flash1, __flash2, __flash3, __flash4, __flash5)
+ // but not constant.
+ if (D) {
+ LangAS AS = D->getType().getAddressSpace();
+ if (isTargetAddressSpace(AS) && 1 <= toTargetAddressSpace(AS) &&
+ toTargetAddressSpace(AS) <= 6 && !D->getType().isConstQualified())
+ CGM.getDiags().Report(D->getLocation(),
+ diag::err_verify_nonconst_addrspace)
+ << "__flash*";
+ }
+ return TargetCodeGenInfo::getGlobalVarAddressSpace(CGM, D);
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ if (GV->isDeclaration())
+ return;
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+ auto *Fn = cast<llvm::Function>(GV);
+
+ if (FD->getAttr<AVRInterruptAttr>())
+ Fn->addFnAttr("interrupt");
+
+ if (FD->getAttr<AVRSignalAttr>())
+ Fn->addFnAttr("signal");
+ }
+};
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createAVRTargetCodeGenInfo(CodeGenModule &CGM, unsigned NPR,
+ unsigned NRR) {
+ return std::make_unique<AVRTargetCodeGenInfo>(CGM.getTypes(), NPR, NRR);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/BPF.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/BPF.cpp
new file mode 100644
index 000000000000..2849222f7a18
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/BPF.cpp
@@ -0,0 +1,100 @@
+//===- BPF.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// BPF ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class BPFABIInfo : public DefaultABIInfo {
+public:
+ BPFABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+ ABIArgInfo classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (isAggregateTypeForABI(Ty)) {
+ uint64_t Bits = getContext().getTypeSize(Ty);
+ if (Bits == 0)
+ return ABIArgInfo::getIgnore();
+
+ // If the aggregate needs 1 or 2 registers, do not use reference.
+ if (Bits <= 128) {
+ llvm::Type *CoerceTy;
+ if (Bits <= 64) {
+ CoerceTy =
+ llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
+ } else {
+ llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), 64);
+ CoerceTy = llvm::ArrayType::get(RegTy, 2);
+ }
+ return ABIArgInfo::getDirect(CoerceTy);
+ } else {
+ return getNaturalAlignIndirect(Ty);
+ }
+ }
+
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ ASTContext &Context = getContext();
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() > Context.getTypeSize(Context.Int128Ty))
+ return getNaturalAlignIndirect(Ty);
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+ }
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ ASTContext &Context = getContext();
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
+ if (EIT->getNumBits() > Context.getTypeSize(Context.Int128Ty))
+ return getNaturalAlignIndirect(RetTy);
+
+ // Caller will do necessary sign/zero extension.
+ return ABIArgInfo::getDirect();
+ }
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+
+};
+
+class BPFTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ BPFTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<BPFABIInfo>(CGT)) {}
+};
+
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createBPFTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<BPFTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/CSKY.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/CSKY.cpp
new file mode 100644
index 000000000000..924eced700e1
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/CSKY.cpp
@@ -0,0 +1,175 @@
+//===- CSKY.cpp -----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// CSKY ABI Implementation
+//===----------------------------------------------------------------------===//
+namespace {
+class CSKYABIInfo : public DefaultABIInfo {
+ static const int NumArgGPRs = 4;
+ static const int NumArgFPRs = 4;
+
+ static const unsigned XLen = 32;
+ unsigned FLen;
+
+public:
+ CSKYABIInfo(CodeGen::CodeGenTypes &CGT, unsigned FLen)
+ : DefaultABIInfo(CGT), FLen(FLen) {}
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ ABIArgInfo classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
+ int &ArgFPRsLeft,
+ bool isReturnType = false) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+} // end anonymous namespace
+
+void CSKYABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ QualType RetTy = FI.getReturnType();
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(RetTy);
+
+ bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
+
+ // We must track the number of GPRs used in order to conform to the CSKY
+ // ABI, as integer scalars passed in registers should have signext/zeroext
+ // when promoted.
+ int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
+ int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
+
+ for (auto &ArgInfo : FI.arguments()) {
+ ArgInfo.info = classifyArgumentType(ArgInfo.type, ArgGPRsLeft, ArgFPRsLeft);
+ }
+}
+
+Address CSKYABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
+
+ // Empty records are ignored for parameter passing purposes.
+ if (isEmptyRecord(getContext(), Ty, true)) {
+ return Address(CGF.Builder.CreateLoad(VAListAddr),
+ CGF.ConvertTypeForMem(Ty), SlotSize);
+ }
+
+ auto TInfo = getContext().getTypeInfoInChars(Ty);
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, TInfo, SlotSize,
+ /*AllowHigherAlign=*/true);
+}
+
+ABIArgInfo CSKYABIInfo::classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
+ int &ArgFPRsLeft,
+ bool isReturnType) const {
+ assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always passed indirectly.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ if (ArgGPRsLeft)
+ ArgGPRsLeft -= 1;
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+ CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ if (!Ty->getAsUnionType())
+ if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ // Pass floating point values via FPRs if possible.
+ if (Ty->isFloatingType() && !Ty->isComplexType() && FLen >= Size &&
+ ArgFPRsLeft) {
+ ArgFPRsLeft--;
+ return ABIArgInfo::getDirect();
+ }
+
+ // Complex types for the hard float ABI must be passed direct rather than
+ // using CoerceAndExpand.
+ if (Ty->isComplexType() && FLen && !isReturnType) {
+ QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
+ if (getContext().getTypeSize(EltTy) <= FLen) {
+ ArgFPRsLeft -= 2;
+ return ABIArgInfo::getDirect();
+ }
+ }
+
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // All integral types are promoted to XLen width, unless passed on the
+ // stack.
+ if (Size < XLen && Ty->isIntegralOrEnumerationType())
+ return ABIArgInfo::getExtend(Ty);
+
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
+ if (EIT->getNumBits() < XLen)
+ return ABIArgInfo::getExtend(Ty);
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+ // For argument type, the first 4*XLen parts of aggregate will be passed
+ // in registers, and the rest will be passed in stack.
+ // So we can coerce to integers directly and let backend handle it correctly.
+ // For return type, aggregate which <= 2*XLen will be returned in registers.
+ // Otherwise, aggregate will be returned indirectly.
+ if (!isReturnType || (isReturnType && Size <= 2 * XLen)) {
+ if (Size <= XLen) {
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), XLen));
+ } else {
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(
+ llvm::IntegerType::get(getVMContext(), XLen), (Size + 31) / XLen));
+ }
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
+ABIArgInfo CSKYABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ int ArgGPRsLeft = 2;
+ int ArgFPRsLeft = FLen ? 1 : 0;
+
+ // The rules for return and argument types are the same, so defer to
+ // classifyArgumentType.
+ return classifyArgumentType(RetTy, ArgGPRsLeft, ArgFPRsLeft, true);
+}
+
+namespace {
+class CSKYTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ CSKYTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned FLen)
+ : TargetCodeGenInfo(std::make_unique<CSKYABIInfo>(CGT, FLen)) {}
+};
+} // end anonymous namespace
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createCSKYTargetCodeGenInfo(CodeGenModule &CGM, unsigned FLen) {
+ return std::make_unique<CSKYTargetCodeGenInfo>(CGM.getTypes(), FLen);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp
new file mode 100644
index 000000000000..944a8d002ecf
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp
@@ -0,0 +1,423 @@
+//===- Hexagon.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Hexagon ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class HexagonABIInfo : public DefaultABIInfo {
+public:
+ HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+ Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
+ QualType Ty) const;
+ Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
+ QualType Ty) const;
+ Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
+ QualType Ty) const;
+};
+
+class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 29;
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &GCM) const override {
+ if (GV->isDeclaration())
+ return;
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD)
+ return;
+ }
+};
+
+} // namespace
+
+void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ unsigned RegsLeft = 6;
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type, &RegsLeft);
+}
+
+static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
+ assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
+ " through registers");
+
+ if (*RegsLeft == 0)
+ return false;
+
+ if (Size <= 32) {
+ (*RegsLeft)--;
+ return true;
+ }
+
+ if (2 <= (*RegsLeft & (~1U))) {
+ *RegsLeft = (*RegsLeft & (~1U)) - 2;
+ return true;
+ }
+
+ // Next available register was r5 but candidate was greater than 32-bits so it
+ // has to go on the stack. However we still consume r5
+ if (*RegsLeft == 1)
+ *RegsLeft = 0;
+
+ return false;
+}
+
+ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
+ unsigned *RegsLeft) const {
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size <= 64)
+ HexagonAdjustRegsLeft(Size, RegsLeft);
+
+ if (Size > 64 && Ty->isBitIntType())
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect();
+ }
+
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ unsigned Align = getContext().getTypeAlign(Ty);
+
+ if (Size > 64)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ if (HexagonAdjustRegsLeft(Size, RegsLeft))
+ Align = Size <= 32 ? 32 : 64;
+ if (Size <= Align) {
+ // Pass in the smallest viable integer type.
+ Size = llvm::bit_ceil(Size);
+ return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
+ }
+ return DefaultABIInfo::classifyArgumentType(Ty);
+}
+
+ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ const TargetInfo &T = CGT.getTarget();
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ if (RetTy->getAs<VectorType>()) {
+ // HVX vectors are returned in vector registers or register pairs.
+ if (T.hasFeature("hvx")) {
+ assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"));
+ uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
+ if (Size == VecSize || Size == 2*VecSize)
+ return ABIArgInfo::getDirectInReg();
+ }
+ // Large vector types should be returned via memory.
+ if (Size > 64)
+ return getNaturalAlignIndirect(RetTy);
+ }
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (Size > 64 && RetTy->isBitIntType())
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+
+ return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect();
+ }
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Aggregates <= 8 bytes are returned in registers, other aggregates
+ // are returned indirectly.
+ if (Size <= 64) {
+ // Return in the smallest viable integer type.
+ Size = llvm::bit_ceil(Size);
+ return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
+ }
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
+}
+
+Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
+ Address VAListAddr,
+ QualType Ty) const {
+ // Load the overflow area pointer.
+ Address __overflow_area_pointer_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
+ llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
+ __overflow_area_pointer_p, "__overflow_area_pointer");
+
+ uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (Align > 4) {
+ // Alignment should be a power of 2.
+ assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!");
+
+ // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
+
+ // Add offset to the current pointer to access the argument.
+ __overflow_area_pointer =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer, Offset);
+ llvm::Value *AsInt =
+ CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
+
+ // Create a mask which should be "AND"ed
+ // with (overflow_arg_area + align - 1)
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
+ __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
+ CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
+ "__overflow_area_pointer.align");
+ }
+
+ // Get the type of the argument from memory and bitcast
+ // overflow area pointer to the argument type.
+ llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
+ Address AddrTyped =
+ Address(__overflow_area_pointer, PTy, CharUnits::fromQuantity(Align));
+
+ // Round up to the minimum stack alignment for varargs which is 4 bytes.
+ uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
+
+ __overflow_area_pointer = CGF.Builder.CreateGEP(
+ CGF.Int8Ty, __overflow_area_pointer,
+ llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "__overflow_area_pointer.next");
+ CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);
+
+ return AddrTyped;
+}
+
+Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
+ Address VAListAddr,
+ QualType Ty) const {
+ // FIXME: Need to handle alignment
+ llvm::Type *BP = CGF.Int8PtrTy;
+ CGBuilderTy &Builder = CGF.Builder;
+ Address VAListAddrAsBPP = VAListAddr.withElementType(BP);
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ // Handle address alignment for type alignment > 32 bits
+ uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (TyAlign > 4) {
+ assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
+ llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
+ AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
+ AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
+ Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+ }
+ Address AddrTyped =
+ Address(Addr, CGF.ConvertType(Ty), CharUnits::fromQuantity(TyAlign));
+
+ uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr = Builder.CreateGEP(
+ CGF.Int8Ty, Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
+ Address VAListAddr,
+ QualType Ty) const {
+ int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;
+
+ if (ArgSize > 8)
+ return EmitVAArgFromMemory(CGF, VAListAddr, Ty);
+
+ // Here we have check if the argument is in register area or
+ // in overflow area.
+ // If the saved register area pointer + argsize rounded up to alignment >
+ // saved register area end pointer, argument is in overflow area.
+ unsigned RegsLeft = 6;
+ Ty = CGF.getContext().getCanonicalType(Ty);
+ (void)classifyArgumentType(Ty, &RegsLeft);
+
+ llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+
+ // Get rounded size of the argument.GCC does not allow vararg of
+ // size < 4 bytes. We follow the same logic here.
+ ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
+ int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
+
+ // Argument may be in saved register area
+ CGF.EmitBlock(MaybeRegBlock);
+
+ // Load the current saved register area pointer.
+ Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
+ VAListAddr, 0, "__current_saved_reg_area_pointer_p");
+ llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
+ __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");
+
+ // Load the saved register area end pointer.
+ Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
+ VAListAddr, 1, "__saved_reg_area_end_pointer_p");
+ llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
+ __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");
+
+ // If the size of argument is > 4 bytes, check if the stack
+ // location is aligned to 8 bytes
+ if (ArgAlign > 4) {
+
+ llvm::Value *__current_saved_reg_area_pointer_int =
+ CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
+ CGF.Int32Ty);
+
+ __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
+ __current_saved_reg_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
+ "align_current_saved_reg_area_pointer");
+
+ __current_saved_reg_area_pointer_int =
+ CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
+ "align_current_saved_reg_area_pointer");
+
+ __current_saved_reg_area_pointer =
+ CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
+ __current_saved_reg_area_pointer->getType(),
+ "align_current_saved_reg_area_pointer");
+ }
+
+ llvm::Value *__new_saved_reg_area_pointer =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, __current_saved_reg_area_pointer,
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
+ "__new_saved_reg_area_pointer");
+
+ llvm::Value *UsingStack = nullptr;
+ UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
+ __saved_reg_area_end_pointer);
+
+ CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);
+
+ // Argument in saved register area
+ // Implement the block where argument is in register saved area
+ CGF.EmitBlock(InRegBlock);
+
+ llvm::Type *PTy = CGF.ConvertType(Ty);
+ llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
+ __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));
+
+ CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
+ __current_saved_reg_area_pointer_p);
+
+ CGF.EmitBranch(ContBlock);
+
+ // Argument in overflow area
+ // Implement the block where the argument is in overflow area.
+ CGF.EmitBlock(OnStackBlock);
+
+ // Load the overflow area pointer
+ Address __overflow_area_pointer_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
+ llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
+ __overflow_area_pointer_p, "__overflow_area_pointer");
+
+ // Align the overflow area pointer according to the alignment of the argument
+ if (ArgAlign > 4) {
+ llvm::Value *__overflow_area_pointer_int =
+ CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
+
+ __overflow_area_pointer_int =
+ CGF.Builder.CreateAdd(__overflow_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
+ "align_overflow_area_pointer");
+
+ __overflow_area_pointer_int =
+ CGF.Builder.CreateAnd(__overflow_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
+ "align_overflow_area_pointer");
+
+ __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
+ __overflow_area_pointer_int, __overflow_area_pointer->getType(),
+ "align_overflow_area_pointer");
+ }
+
+ // Get the pointer for next argument in overflow area and store it
+ // to overflow area pointer.
+ llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
+ CGF.Int8Ty, __overflow_area_pointer,
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
+ "__overflow_area_pointer.next");
+
+ CGF.Builder.CreateStore(__new_overflow_area_pointer,
+ __overflow_area_pointer_p);
+
+ CGF.Builder.CreateStore(__new_overflow_area_pointer,
+ __current_saved_reg_area_pointer_p);
+
+ // Bitcast the overflow area pointer to the type of argument.
+ llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
+ __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));
+
+ CGF.EmitBranch(ContBlock);
+
+ // Get the correct pointer to load the variable argument
+ // Implement the ContBlock
+ CGF.EmitBlock(ContBlock);
+
+ llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *MemPTy = llvm::PointerType::getUnqual(MemTy);
+ llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
+ ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
+ ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);
+
+ return Address(ArgAddr, MemTy, CharUnits::fromQuantity(ArgAlign));
+}
+
+Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+
+ if (getTarget().getTriple().isMusl())
+ return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);
+
+ return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createHexagonTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<HexagonTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/Lanai.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/Lanai.cpp
new file mode 100644
index 000000000000..2578fc0291e7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/Lanai.cpp
@@ -0,0 +1,154 @@
+//===- Lanai.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Lanai ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
// LanaiABIInfo: argument/return classification for the Lanai ABI. Extends
// DefaultABIInfo with a register-passing scheme: up to four 32-bit registers
// carry arguments, overridable via the function's regparm attribute.
+class LanaiABIInfo : public DefaultABIInfo {
  // Per-function mutable state: number of argument registers still free.
+  struct CCState {
+    unsigned FreeRegs;
+  };
+
+public:
+  LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
  // Returns true (consuming registers from State) if Ty fits in the
  // remaining argument registers; on overflow zeroes State.FreeRegs.
+  bool shouldUseInReg(QualType Ty, CCState &State) const;
+
  // Classify the return type and every argument, threading the register
  // budget through CCState in argument order.
+  void computeInfo(CGFunctionInfo &FI) const override {
+    CCState State;
+    // Lanai uses 4 registers to pass arguments unless the function has the
+    // regparm attribute set.
+    if (FI.getHasRegParm()) {
+      State.FreeRegs = FI.getRegParm();
+    } else {
+      State.FreeRegs = 4;
+    }
+
+    if (!getCXXABI().classifyReturnType(FI))
+      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+    for (auto &I : FI.arguments())
+      I.info = classifyArgumentType(I.type, State);
+  }
+
  // Build an indirect classification; see definitions below.
+  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
+  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
+};
+} // end anonymous namespace
+
+// Decide whether Ty can be passed in the remaining argument registers.
+// On success the required registers are consumed from State. If Ty does
+// not fit, the remaining register budget is forfeited (set to zero) so no
+// later argument can use registers either.
+bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
+  const unsigned BitSize = getContext().getTypeSize(Ty);
+  const unsigned RegsNeeded = llvm::alignTo(BitSize, 32U) / 32U;
+
+  if (RegsNeeded == 0)
+    return false;
+
+  if (RegsNeeded <= State.FreeRegs) {
+    State.FreeRegs -= RegsNeeded;
+    return true;
+  }
+
+  // Argument overflows the register budget: it goes to the stack.
+  State.FreeRegs = 0;
+  return false;
+}
+
// Build an indirect (pass-by-pointer or byval) classification for Ty.
// A non-byval indirect argument still consumes one register for the
// pointer itself when one is available.
+ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
+                                           CCState &State) const {
+  if (!ByVal) {
+    if (State.FreeRegs) {
+      --State.FreeRegs; // Non-byval indirects just use one pointer.
+      return getNaturalAlignIndirectInReg(Ty);
+    }
+    return getNaturalAlignIndirect(Ty, false);
+  }
+
+  // Compute the byval alignment.
+  const unsigned MinABIStackAlignInBytes = 4;
+  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  // Byval slots are 4-byte aligned; types aligned more strictly than the
  // ABI stack alignment must be realigned by the callee.
+  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
+                                 /*Realign=*/TypeAlign >
+                                     MinABIStackAlignInBytes);
+}
+
// Classify a single argument for the Lanai calling convention, consuming
// registers from State as it goes. Order matters: C++ ABI constraints
// first, then aggregates, then scalars (with enum/bit-int normalization).
+ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
+                                              CCState &State) const {
+  // Check with the C++ ABI first.
+  const RecordType *RT = Ty->getAs<RecordType>();
+  if (RT) {
+    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
+    if (RAA == CGCXXABI::RAA_Indirect) {
+      return getIndirectResult(Ty, /*ByVal=*/false, State);
+    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
+      return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+    }
+  }
+
+  if (isAggregateTypeForABI(Ty)) {
+    // Structures with flexible arrays are always indirect.
+    if (RT && RT->getDecl()->hasFlexibleArrayMember())
+      return getIndirectResult(Ty, /*ByVal=*/true, State);
+
+    // Ignore empty structs/unions.
+    if (isEmptyRecord(getContext(), Ty, true))
+      return ABIArgInfo::getIgnore();
+
    // Aggregates that fit in the remaining registers are coerced to a
    // struct of i32s and passed in registers; otherwise the register
    // budget is forfeited and the aggregate goes indirect byval.
+    llvm::LLVMContext &LLVMContext = getVMContext();
+    unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+    if (SizeInRegs <= State.FreeRegs) {
+      llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
+      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+      State.FreeRegs -= SizeInRegs;
+      return ABIArgInfo::getDirectInReg(Result);
+    } else {
+      State.FreeRegs = 0;
+    }
+    return getIndirectResult(Ty, true, State);
+  }
+
+  // Treat an enum type as its underlying type.
+  if (const auto *EnumTy = Ty->getAs<EnumType>())
+    Ty = EnumTy->getDecl()->getIntegerType();
+
  // Note: register consumption happens here even if a later check (e.g.
  // the >64-bit bit-int case below) sends the argument indirect.
+  bool InReg = shouldUseInReg(Ty, State);
+
+  // Don't pass >64 bit integers in registers.
+  if (const auto *EIT = Ty->getAs<BitIntType>())
+    if (EIT->getNumBits() > 64)
+      return getIndirectResult(Ty, /*ByVal=*/true, State);
+
  // Small integers are extended to full width; others pass direct.
+  if (isPromotableIntegerTypeForABI(Ty)) {
+    if (InReg)
+      return ABIArgInfo::getDirectInReg();
+    return ABIArgInfo::getExtend(Ty);
+  }
+  if (InReg)
+    return ABIArgInfo::getDirectInReg();
+  return ABIArgInfo::getDirect();
+}
+
+namespace {
// Target hooks for Lanai: only the ABI classification differs from the
// defaults, so this is a thin wrapper installing LanaiABIInfo.
+class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+      : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {}
+};
+} // namespace
+
+// Factory used by CodeGenModule to construct the Lanai target hooks.
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createLanaiTargetCodeGenInfo(CodeGenModule &CGM) {
+  auto Info = std::make_unique<LanaiTargetCodeGenInfo>(CGM.getTypes());
+  return Info;
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/LoongArch.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/LoongArch.cpp
new file mode 100644
index 000000000000..6391a8aeaa67
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/LoongArch.cpp
@@ -0,0 +1,449 @@
+//===- LoongArch.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+// LoongArch ABI Implementation. Documented at
+// https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html
+//
+//===----------------------------------------------------------------------===//
+
+namespace {
// LoongArchABIInfo: argument/return classification for the LoongArch
// psABI, parameterized by the GPR width (GRLen) and FPR width (FRLen).
+class LoongArchABIInfo : public DefaultABIInfo {
+private:
+  // Size of the integer ('r') registers in bits.
+  unsigned GRLen;
+  // Size of the floating point ('f') registers in bits.
+  unsigned FRLen;
+  // Number of general-purpose argument registers.
+  static const int NumGARs = 8;
+  // Number of floating-point argument registers.
+  static const int NumFARs = 8;
  // Recursive worker for detectFARsEligibleStruct; see its definition.
+  bool detectFARsEligibleStructHelper(QualType Ty, CharUnits CurOff,
+                                      llvm::Type *&Field1Ty,
+                                      CharUnits &Field1Off,
+                                      llvm::Type *&Field2Ty,
+                                      CharUnits &Field2Off) const;
+
+public:
+  LoongArchABIInfo(CodeGen::CodeGenTypes &CGT, unsigned GRLen, unsigned FRLen)
+      : DefaultABIInfo(CGT), GRLen(GRLen), FRLen(FRLen) {}
+
  // Classify the return type and all arguments, tracking GAR/FAR budgets.
+  void computeInfo(CGFunctionInfo &FI) const override;
+
  // Classify one argument; GARsLeft/FARsLeft are in-out register budgets.
+  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &GARsLeft,
+                                  int &FARsLeft) const;
+  ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                    QualType Ty) const override;
+
  // Extend a sub-GRLen scalar to register width (see definition for the
  // LA64 unsigned-32-bit special case).
+  ABIArgInfo extendType(QualType Ty) const;
+
  // Struct-flattening analysis for the FP calling convention; see the
  // definitions below for the eligibility rules.
+  bool detectFARsEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
+                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
+                                CharUnits &Field2Off, int &NeededArgGPRs,
+                                int &NeededArgFPRs) const;
+  ABIArgInfo coerceAndExpandFARsEligibleStruct(llvm::Type *Field1Ty,
+                                               CharUnits Field1Off,
+                                               llvm::Type *Field2Ty,
+                                               CharUnits Field2Off) const;
+};
+} // end anonymous namespace
+
// Top-level classification: classify the return value first (its
// indirectness reserves one GAR for the sret pointer), then classify each
// argument in order against the remaining GAR/FAR budgets.
+void LoongArchABIInfo::computeInfo(CGFunctionInfo &FI) const {
+  QualType RetTy = FI.getReturnType();
+  if (!getCXXABI().classifyReturnType(FI))
+    FI.getReturnInfo() = classifyReturnType(RetTy);
+
+  // IsRetIndirect is true if classifyArgumentType indicated the value should
+  // be passed indirect, or if the type size is a scalar greater than 2*GRLen
+  // and not a complex type with elements <= FRLen. e.g. fp128 is passed direct
+  // in LLVM IR, relying on the backend lowering code to rewrite the argument
+  // list and pass indirectly on LA32.
+  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
+  if (!IsRetIndirect && RetTy->isScalarType() &&
+      getContext().getTypeSize(RetTy) > (2 * GRLen)) {
+    if (RetTy->isComplexType() && FRLen) {
+      QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
+      IsRetIndirect = getContext().getTypeSize(EltTy) > FRLen;
+    } else {
+      // This is a normal scalar > 2*GRLen, such as fp128 on LA32.
+      IsRetIndirect = true;
+    }
+  }
+
+  // We must track the number of GARs and FARs used in order to conform to the
+  // LoongArch ABI. As GAR usage is different for variadic arguments, we must
+  // also track whether we are examining a vararg or not.
+  int GARsLeft = IsRetIndirect ? NumGARs - 1 : NumGARs;
+  int FARsLeft = FRLen ? NumFARs : 0;
+  int NumFixedArgs = FI.getNumRequiredArgs();
+
+  int ArgNum = 0;
+  for (auto &ArgInfo : FI.arguments()) {
+    ArgInfo.info = classifyArgumentType(
+        ArgInfo.type, /*IsFixed=*/ArgNum < NumFixedArgs, GARsLeft, FARsLeft);
+    ArgNum++;
+  }
+}
+
+// Returns true if the struct is a potential candidate to be passed in FARs (and
+// GARs). If this function returns true, the caller is responsible for checking
+// that if there is only a single field then that field is a float.
// Recursively flattens Ty at byte offset CurOff into at most two scalar
// fields (Field1*/Field2* act as accumulator out-parameters across
// recursive calls); returns false as soon as Ty cannot be flattened.
+bool LoongArchABIInfo::detectFARsEligibleStructHelper(
+    QualType Ty, CharUnits CurOff, llvm::Type *&Field1Ty, CharUnits &Field1Off,
+    llvm::Type *&Field2Ty, CharUnits &Field2Off) const {
+  bool IsInt = Ty->isIntegralOrEnumerationType();
+  bool IsFloat = Ty->isRealFloatingType();
+
+  if (IsInt || IsFloat) {
+    uint64_t Size = getContext().getTypeSize(Ty);
+    if (IsInt && Size > GRLen)
+      return false;
+    // Can't be eligible if larger than the FP registers. Half precision isn't
+    // currently supported on LoongArch and the ABI hasn't been confirmed, so
+    // default to the integer ABI in that case.
+    if (IsFloat && (Size > FRLen || Size < 32))
+      return false;
+    // Can't be eligible if an integer type was already found (int+int pairs
+    // are not eligible).
+    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
+      return false;
    // Record the scalar in the first empty field slot; a third scalar
    // means the type cannot be flattened into two registers.
+    if (!Field1Ty) {
+      Field1Ty = CGT.ConvertType(Ty);
+      Field1Off = CurOff;
+      return true;
+    }
+    if (!Field2Ty) {
+      Field2Ty = CGT.ConvertType(Ty);
+      Field2Off = CurOff;
+      return true;
+    }
+    return false;
+  }
+
  // A complex value occupies both field slots (real part, then imaginary
  // part immediately after it).
+  if (auto CTy = Ty->getAs<ComplexType>()) {
+    if (Field1Ty)
+      return false;
+    QualType EltTy = CTy->getElementType();
+    if (getContext().getTypeSize(EltTy) > FRLen)
+      return false;
+    Field1Ty = CGT.ConvertType(EltTy);
+    Field1Off = CurOff;
+    Field2Ty = Field1Ty;
+    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
+    return true;
+  }
+
  // Arrays flatten element by element at increasing offsets.
+  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
+    uint64_t ArraySize = ATy->getSize().getZExtValue();
+    QualType EltTy = ATy->getElementType();
+    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
+    for (uint64_t i = 0; i < ArraySize; ++i) {
+      if (!detectFARsEligibleStructHelper(EltTy, CurOff, Field1Ty, Field1Off,
+                                          Field2Ty, Field2Off))
+        return false;
+      CurOff += EltSize;
+    }
+    return true;
+  }
+
+  if (const auto *RTy = Ty->getAs<RecordType>()) {
+    // Structures with either a non-trivial destructor or a non-trivial
+    // copy constructor are not eligible for the FP calling convention.
+    if (getRecordArgABI(Ty, CGT.getCXXABI()))
+      return false;
+    if (isEmptyRecord(getContext(), Ty, true))
+      return true;
+    const RecordDecl *RD = RTy->getDecl();
+    // Unions aren't eligible unless they're empty (which is caught above).
+    if (RD->isUnion())
+      return false;
+    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+    // If this is a C++ record, check the bases first.
+    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+      for (const CXXBaseSpecifier &B : CXXRD->bases()) {
+        const auto *BDecl =
+            cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
+        if (!detectFARsEligibleStructHelper(
+                B.getType(), CurOff + Layout.getBaseClassOffset(BDecl),
+                Field1Ty, Field1Off, Field2Ty, Field2Off))
+          return false;
+      }
+    }
+    for (const FieldDecl *FD : RD->fields()) {
+      QualType QTy = FD->getType();
+      if (FD->isBitField()) {
+        unsigned BitWidth = FD->getBitWidthValue(getContext());
+        // Zero-width bitfields are ignored.
+        if (BitWidth == 0)
+          continue;
+        // Allow a bitfield with a type greater than GRLen as long as the
+        // bitwidth is GRLen or less.
+        if (getContext().getTypeSize(QTy) > GRLen && BitWidth <= GRLen) {
+          QTy = getContext().getIntTypeForBitwidth(GRLen, false);
+        }
+      }
+
+      if (!detectFARsEligibleStructHelper(
+              QTy,
+              CurOff + getContext().toCharUnitsFromBits(
+                           Layout.getFieldOffset(FD->getFieldIndex())),
+              Field1Ty, Field1Off, Field2Ty, Field2Off))
+        return false;
+    }
    // An all-empty record flattened to zero fields is not eligible here.
+    return Field1Ty != nullptr;
+  }
+
  // Anything else (pointers, vectors, ...) is not eligible.
+  return false;
+}
+
+// Determine whether Ty is eligible to be passed in FARs (and GARs): when
+// flattened it must contain a single fp value, fp+fp, or int+fp of
+// appropriate size. On success the out-parameters describe the flattened
+// fields and NeededGARs/NeededFARs hold the register demand.
+bool LoongArchABIInfo::detectFARsEligibleStruct(
+    QualType Ty, llvm::Type *&Field1Ty, CharUnits &Field1Off,
+    llvm::Type *&Field2Ty, CharUnits &Field2Off, int &NeededGARs,
+    int &NeededFARs) const {
+  Field1Ty = nullptr;
+  Field2Ty = nullptr;
+  NeededGARs = 0;
+  NeededFARs = 0;
+
+  if (!detectFARsEligibleStructHelper(Ty, CharUnits::Zero(), Field1Ty,
+                                      Field1Off, Field2Ty, Field2Off))
+    return false;
+
+  // A lone integer field with no floating-point part is not a candidate.
+  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
+    return false;
+
+  // Tally the register demand of each flattened field.
+  auto Tally = [&](llvm::Type *FieldTy) {
+    if (!FieldTy)
+      return;
+    if (FieldTy->isFloatingPointTy())
+      ++NeededFARs;
+    else
+      ++NeededGARs;
+  };
+  Tally(Field1Ty);
+  Tally(Field2Ty);
+  return true;
+}
+
+// Call getCoerceAndExpand for the two-element flattened struct described by
+// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
+// appropriate coerceToType and unpaddedCoerceToType.
+ABIArgInfo LoongArchABIInfo::coerceAndExpandFARsEligibleStruct(
+    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
+    CharUnits Field2Off) const {
+  SmallVector<llvm::Type *, 3> CoerceElts;
+  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  // Leading padding before the first field is modeled as an i8 array.
+  if (!Field1Off.isZero())
+    CoerceElts.push_back(llvm::ArrayType::get(
+        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));
+
+  CoerceElts.push_back(Field1Ty);
+  UnpaddedCoerceElts.push_back(Field1Ty);
+
  // Single-field case: coerce type is {pad?, Field1}, unpadded is Field1.
+  if (!Field2Ty) {
+    return ABIArgInfo::getCoerceAndExpand(
+        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
+        UnpaddedCoerceElts[0]);
+  }
+
  // Compute where the second field would land with natural layout
  // (Field1End rounded up to Field2's ABI alignment)...
+  CharUnits Field2Align =
+      CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
+  CharUnits Field1End =
+      Field1Off +
+      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
+  CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);
+
  // ...and insert explicit i8 padding if the real offset is further out.
+  CharUnits Padding = CharUnits::Zero();
+  if (Field2Off > Field2OffNoPadNoPack)
+    Padding = Field2Off - Field2OffNoPadNoPack;
+  else if (Field2Off != Field2Align && Field2Off > Field1End)
+    Padding = Field2Off - Field1End;
+
  // If Field2's real offset breaks its natural alignment, the coerce
  // struct must be packed so LLVM doesn't re-align it.
+  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
+
+  if (!Padding.isZero())
+    CoerceElts.push_back(llvm::ArrayType::get(
+        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));
+
+  CoerceElts.push_back(Field2Ty);
+  UnpaddedCoerceElts.push_back(Field2Ty);
+
+  return ABIArgInfo::getCoerceAndExpand(
+      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked),
+      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked));
+}
+
// Classify one argument for the LoongArch calling convention. IsFixed
// distinguishes named parameters from varargs (varargs never use FARs and
// have different alignment-driven GAR pairing). GARsLeft/FARsLeft are
// in-out register budgets shared across the whole argument list.
+ABIArgInfo LoongArchABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
+                                                  int &GARsLeft,
+                                                  int &FARsLeft) const {
+  assert(GARsLeft <= NumGARs && "GAR tracking underflow");
+  Ty = useFirstFieldIfTransparentUnion(Ty);
+
+  // Structures with either a non-trivial destructor or a non-trivial
+  // copy constructor are always passed indirectly.
+  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    // The pointer to the indirect object consumes one GAR.
+    if (GARsLeft)
+      GARsLeft -= 1;
+    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+                                           CGCXXABI::RAA_DirectInMemory);
+  }
+
+  // Ignore empty structs/unions.
+  if (isEmptyRecord(getContext(), Ty, true))
+    return ABIArgInfo::getIgnore();
+
+  uint64_t Size = getContext().getTypeSize(Ty);
+
+  // Pass floating point values via FARs if possible.
+  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
+      FRLen >= Size && FARsLeft) {
+    FARsLeft--;
+    return ABIArgInfo::getDirect();
+  }
+
+  // Complex types for the *f or *d ABI must be passed directly rather than
+  // using CoerceAndExpand.
+  if (IsFixed && Ty->isComplexType() && FRLen && FARsLeft >= 2) {
+    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
+    if (getContext().getTypeSize(EltTy) <= FRLen) {
+      FARsLeft -= 2;
+      return ABIArgInfo::getDirect();
+    }
+  }
+
  // Small structs that flatten to fp / fp+fp / int+fp may be split across
  // FARs and GARs when enough of each remain.
+  if (IsFixed && FRLen && Ty->isStructureOrClassType()) {
+    llvm::Type *Field1Ty = nullptr;
+    llvm::Type *Field2Ty = nullptr;
+    CharUnits Field1Off = CharUnits::Zero();
+    CharUnits Field2Off = CharUnits::Zero();
+    int NeededGARs = 0;
+    int NeededFARs = 0;
+    bool IsCandidate = detectFARsEligibleStruct(
+        Ty, Field1Ty, Field1Off, Field2Ty, Field2Off, NeededGARs, NeededFARs);
+    if (IsCandidate && NeededGARs <= GARsLeft && NeededFARs <= FARsLeft) {
+      GARsLeft -= NeededGARs;
+      FARsLeft -= NeededFARs;
+      return coerceAndExpandFARsEligibleStruct(Field1Ty, Field1Off, Field2Ty,
+                                               Field2Off);
+    }
+  }
+
+  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
+  // Determine the number of GARs needed to pass the current argument
+  // according to the ABI. 2*GRLen-aligned varargs are passed in "aligned"
+  // register pairs, so may consume 3 registers.
+  int NeededGARs = 1;
+  if (!IsFixed && NeededAlign == 2 * GRLen)
+    NeededGARs = 2 + (GARsLeft % 2);
+  else if (Size > GRLen && Size <= 2 * GRLen)
+    NeededGARs = 2;
+
  // Clamp rather than fail: the remainder of the argument spills to the
  // stack, but the budget must never go negative.
+  if (NeededGARs > GARsLeft)
+    NeededGARs = GARsLeft;
+
+  GARsLeft -= NeededGARs;
+
+  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
+    // Treat an enum type as its underlying type.
+    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+      Ty = EnumTy->getDecl()->getIntegerType();
+
+    // All integral types are promoted to GRLen width.
+    if (Size < GRLen && Ty->isIntegralOrEnumerationType())
+      return extendType(Ty);
+
+    if (const auto *EIT = Ty->getAs<BitIntType>()) {
+      if (EIT->getNumBits() < GRLen)
+        return extendType(Ty);
+      if (EIT->getNumBits() > 128 ||
+          (!getContext().getTargetInfo().hasInt128Type() &&
+           EIT->getNumBits() > 64))
+        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+    }
+
+    return ABIArgInfo::getDirect();
+  }
+
+  // Aggregates which are <= 2*GRLen will be passed in registers if possible,
+  // so coerce to integers.
+  if (Size <= 2 * GRLen) {
+    // Use a single GRLen int if possible, 2*GRLen if 2*GRLen alignment is
+    // required, and a 2-element GRLen array if only GRLen alignment is
+    // required.
+    if (Size <= GRLen) {
+      return ABIArgInfo::getDirect(
+          llvm::IntegerType::get(getVMContext(), GRLen));
+    }
+    if (getContext().getTypeAlign(Ty) == 2 * GRLen) {
+      return ABIArgInfo::getDirect(
+          llvm::IntegerType::get(getVMContext(), 2 * GRLen));
+    }
+    return ABIArgInfo::getDirect(
+        llvm::ArrayType::get(llvm::IntegerType::get(getVMContext(), GRLen), 2));
+  }
+  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
+// Classify a return value. The return rules mirror the argument rules, so
+// this models the return as a call with a budget of two general-purpose
+// registers and, when hardware FP is present, two floating-point registers.
+ABIArgInfo LoongArchABIInfo::classifyReturnType(QualType RetTy) const {
+  if (RetTy->isVoidType())
+    return ABIArgInfo::getIgnore();
+
+  int RetGARs = 2;
+  int RetFARs = FRLen != 0 ? 2 : 0;
+  return classifyArgumentType(RetTy, /*IsFixed=*/true, RetGARs, RetFARs);
+}
+
// Lower va_arg for LoongArch: all varargs live in GRLen-sized stack slots;
// oversized arguments are passed by pointer (indirect).
+Address LoongArchABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                                    QualType Ty) const {
+  CharUnits SlotSize = CharUnits::fromQuantity(GRLen / 8);
+
+  // Empty records are ignored for parameter passing purposes.
+  if (isEmptyRecord(getContext(), Ty, true))
+    return Address(CGF.Builder.CreateLoad(VAListAddr),
+                   CGF.ConvertTypeForMem(Ty), SlotSize);
+
+  auto TInfo = getContext().getTypeInfoInChars(Ty);
+
+  // Arguments bigger than 2*GRLen bytes are passed indirectly.
+  return emitVoidPtrVAArg(CGF, VAListAddr, Ty,
+                          /*IsIndirect=*/TInfo.Width > 2 * SlotSize, TInfo,
+                          SlotSize,
+                          /*AllowHigherAlign=*/true);
+}
+
+// Extend a scalar narrower than GRLen to full register width. On LA64 the
+// ABI requires unsigned 32-bit integers to be *sign* extended; every other
+// type takes the default extension for its signedness.
+ABIArgInfo LoongArchABIInfo::extendType(QualType Ty) const {
+  const int BitSize = getContext().getTypeSize(Ty);
+  const bool IsUnsigned32 =
+      Ty->isUnsignedIntegerOrEnumerationType() && BitSize == 32;
+  if (GRLen == 64 && IsUnsigned32)
+    return ABIArgInfo::getSignExtend(Ty);
+  return ABIArgInfo::getExtend(Ty);
+}
+
+namespace {
// Target hooks for LoongArch: only the ABI classification differs from the
// defaults, so this is a thin wrapper installing LoongArchABIInfo with the
// register widths selected by the driver.
+class LoongArchTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  LoongArchTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned GRLen,
+                             unsigned FRLen)
+      : TargetCodeGenInfo(
+            std::make_unique<LoongArchABIInfo>(CGT, GRLen, FRLen)) {}
+};
+} // namespace
+
+// Factory used by CodeGenModule; GRLen and FLen are the integer and
+// floating-point register widths (in bits) of the selected ABI variant.
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createLoongArchTargetCodeGenInfo(CodeGenModule &CGM, unsigned GRLen,
+                                          unsigned FLen) {
+  auto Info =
+      std::make_unique<LoongArchTargetCodeGenInfo>(CGM.getTypes(), GRLen, FLen);
+  return Info;
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/M68k.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/M68k.cpp
new file mode 100644
index 000000000000..120022105f34
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/M68k.cpp
@@ -0,0 +1,55 @@
+//===- M68k.cpp -----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// M68k ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
// Target hooks for M68k. Argument classification uses DefaultABIInfo
// unchanged; only setTargetAttributes is customized (to implement the
// 'interrupt' function attribute — see definition below).
+class M68kTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  M68kTargetCodeGenInfo(CodeGenTypes &CGT)
+      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
+  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+                           CodeGen::CodeGenModule &M) const override;
+};
+
+} // namespace
+
// Apply M68k-specific IR attributes to a function: lower the 'interrupt'
// source attribute into the ISR calling convention, noinline, and an
// "__isr_<N>" alias used to populate the interrupt vector.
+void M68kTargetCodeGenInfo::setTargetAttributes(
+    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+    if (const auto *attr = FD->getAttr<M68kInterruptAttr>()) {
+      // Handle 'interrupt' attribute:
+      llvm::Function *F = cast<llvm::Function>(GV);
+
+      // Step 1: Set ISR calling convention.
+      F->setCallingConv(llvm::CallingConv::M68k_INTR);
+
+      // Step 2: Add attributes goodness.
+      F->addFnAttr(llvm::Attribute::NoInline);
+
+      // Step 3: Emit ISR vector alias.
      // The attribute stores the vector address; dividing by the entry
      // size (2 bytes) yields the vector number used in the alias name.
+      unsigned Num = attr->getNumber() / 2;
+      llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
+                                "__isr_" + Twine(Num), F);
+    }
+  }
+}
+
+// Factory used by CodeGenModule to construct the M68k target hooks.
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createM68kTargetCodeGenInfo(CodeGenModule &CGM) {
+  auto Info = std::make_unique<M68kTargetCodeGenInfo>(CGM.getTypes());
+  return Info;
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/MSP430.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/MSP430.cpp
new file mode 100644
index 000000000000..bb67d97f4421
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/MSP430.cpp
@@ -0,0 +1,94 @@
+//===- MSP430.cpp ---------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// MSP430 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
// MSP430 ABI: identical to the default ABI except that complex values are
// passed/returned direct without being flattened into separate scalars.
+class MSP430ABIInfo : public DefaultABIInfo {
  // Direct classification with flattening disabled, used for _Complex.
+  static ABIArgInfo complexArgInfo() {
+    ABIArgInfo Info = ABIArgInfo::getDirect();
+    Info.setCanBeFlattened(false);
+    return Info;
+  }
+
+public:
+  MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+  ABIArgInfo classifyReturnType(QualType RetTy) const {
+    if (RetTy->isAnyComplexType())
+      return complexArgInfo();
+
+    return DefaultABIInfo::classifyReturnType(RetTy);
+  }
+
+  ABIArgInfo classifyArgumentType(QualType RetTy) const {
+    if (RetTy->isAnyComplexType())
+      return complexArgInfo();
+
+    return DefaultABIInfo::classifyArgumentType(RetTy);
+  }
+
+  // Just copy the original implementations because
+  // DefaultABIInfo::classify{Return,Argument}Type() are not virtual
+  void computeInfo(CGFunctionInfo &FI) const override {
+    if (!getCXXABI().classifyReturnType(FI))
+      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+    for (auto &I : FI.arguments())
+      I.info = classifyArgumentType(I.type);
+  }
+
+  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                    QualType Ty) const override {
+    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
+  }
+};
+
// Target hooks for MSP430; customizes function attributes to implement
// the 'interrupt' source attribute (see definition below).
+class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
+      : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {}
+  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+                           CodeGen::CodeGenModule &M) const override;
+};
+
+} // namespace
+
// Apply MSP430-specific IR attributes: for functions carrying the
// 'interrupt' attribute, install the MSP430 ISR calling convention,
// noinline, and an "interrupt"=<vector number> string attribute.
+void MSP430TargetCodeGenInfo::setTargetAttributes(
+    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  // Only definitions get attributes; declarations are left untouched.
+  if (GV->isDeclaration())
+    return;
+  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+    const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
+    if (!InterruptAttr)
+      return;
+
+    // Handle 'interrupt' attribute:
+    llvm::Function *F = cast<llvm::Function>(GV);
+
+    // Step 1: Set ISR calling convention.
+    F->setCallingConv(llvm::CallingConv::MSP430_INTR);
+
+    // Step 2: Add attributes goodness.
+    F->addFnAttr(llvm::Attribute::NoInline);
+    F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
+  }
+}
+
+// Factory used by CodeGenModule to construct the MSP430 target hooks.
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createMSP430TargetCodeGenInfo(CodeGenModule &CGM) {
+  auto Info = std::make_unique<MSP430TargetCodeGenInfo>(CGM.getTypes());
+  return Info;
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/Mips.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/Mips.cpp
new file mode 100644
index 000000000000..8f11c63dcd85
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/Mips.cpp
@@ -0,0 +1,441 @@
+//===- Mips.cpp -----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// MIPS ABI Implementation. This works for both little-endian and
+// big-endian variants.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class MipsABIInfo : public ABIInfo {
+ bool IsO32;
+ const unsigned MinABIStackAlignInBytes, StackAlignInBytes;
+ void CoerceToIntArgs(uint64_t TySize,
+ SmallVectorImpl<llvm::Type *> &ArgList) const;
+ llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
+ llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
+ llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
+public:
+ MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
+ ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
+ StackAlignInBytes(IsO32 ? 8 : 16) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+ ABIArgInfo extendType(QualType Ty) const;
+};
+
+class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
+ unsigned SizeOfUnwindException;
+public:
+ MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
+ : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)),
+ SizeOfUnwindException(IsO32 ? 24 : 32) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
+ return 29;
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+
+ if (FD->hasAttr<MipsLongCallAttr>())
+ Fn->addFnAttr("long-call");
+ else if (FD->hasAttr<MipsShortCallAttr>())
+ Fn->addFnAttr("short-call");
+
+ // Other attributes do not have a meaning for declarations.
+ if (GV->isDeclaration())
+ return;
+
+ if (FD->hasAttr<Mips16Attr>()) {
+ Fn->addFnAttr("mips16");
+ }
+ else if (FD->hasAttr<NoMips16Attr>()) {
+ Fn->addFnAttr("nomips16");
+ }
+
+ if (FD->hasAttr<MicroMipsAttr>())
+ Fn->addFnAttr("micromips");
+ else if (FD->hasAttr<NoMicroMipsAttr>())
+ Fn->addFnAttr("nomicromips");
+
+ const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
+ if (!Attr)
+ return;
+
+ const char *Kind;
+ switch (Attr->getInterrupt()) {
+ case MipsInterruptAttr::eic: Kind = "eic"; break;
+ case MipsInterruptAttr::sw0: Kind = "sw0"; break;
+ case MipsInterruptAttr::sw1: Kind = "sw1"; break;
+ case MipsInterruptAttr::hw0: Kind = "hw0"; break;
+ case MipsInterruptAttr::hw1: Kind = "hw1"; break;
+ case MipsInterruptAttr::hw2: Kind = "hw2"; break;
+ case MipsInterruptAttr::hw3: Kind = "hw3"; break;
+ case MipsInterruptAttr::hw4: Kind = "hw4"; break;
+ case MipsInterruptAttr::hw5: Kind = "hw5"; break;
+ }
+
+ Fn->addFnAttr("interrupt", Kind);
+
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+
+ unsigned getSizeOfUnwindException() const override {
+ return SizeOfUnwindException;
+ }
+};
+}
+
+void MipsABIInfo::CoerceToIntArgs(
+ uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
+ llvm::IntegerType *IntTy =
+ llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
+
+ // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
+ for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
+ ArgList.push_back(IntTy);
+
+ // If necessary, add one more integer type to ArgList.
+ unsigned R = TySize % (MinABIStackAlignInBytes * 8);
+
+ if (R)
+ ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
+}
+
+// In N32/64, an aligned double precision floating point field is passed in
+// a register.
+llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
+ SmallVector<llvm::Type*, 8> ArgList, IntArgList;
+
+ if (IsO32) {
+ CoerceToIntArgs(TySize, ArgList);
+ return llvm::StructType::get(getVMContext(), ArgList);
+ }
+
+ if (Ty->isComplexType())
+ return CGT.ConvertType(Ty);
+
+ const RecordType *RT = Ty->getAs<RecordType>();
+
+ // Unions/vectors are passed in integer registers.
+ if (!RT || !RT->isStructureOrClassType()) {
+ CoerceToIntArgs(TySize, ArgList);
+ return llvm::StructType::get(getVMContext(), ArgList);
+ }
+
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
+
+ uint64_t LastOffset = 0;
+ unsigned idx = 0;
+ llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
+
+ // Iterate over fields in the struct/class and check if there are any aligned
+ // double fields.
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const QualType Ty = i->getType();
+ const BuiltinType *BT = Ty->getAs<BuiltinType>();
+
+ if (!BT || BT->getKind() != BuiltinType::Double)
+ continue;
+
+ uint64_t Offset = Layout.getFieldOffset(idx);
+ if (Offset % 64) // Ignore doubles that are not aligned.
+ continue;
+
+ // Add ((Offset - LastOffset) / 64) args of type i64.
+ for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
+ ArgList.push_back(I64);
+
+ // Add double type.
+ ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
+ LastOffset = Offset + 64;
+ }
+
+ CoerceToIntArgs(TySize - LastOffset, IntArgList);
+ ArgList.append(IntArgList.begin(), IntArgList.end());
+
+ return llvm::StructType::get(getVMContext(), ArgList);
+}
+
+llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
+ uint64_t Offset) const {
+ if (OrigOffset + MinABIStackAlignInBytes > Offset)
+ return nullptr;
+
+ return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
+}
+
+ABIArgInfo
+MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ uint64_t OrigOffset = Offset;
+ uint64_t TySize = getContext().getTypeSize(Ty);
+ uint64_t Align = getContext().getTypeAlign(Ty) / 8;
+
+ Align = std::clamp(Align, (uint64_t)MinABIStackAlignInBytes,
+ (uint64_t)StackAlignInBytes);
+ unsigned CurrOffset = llvm::alignTo(Offset, Align);
+ Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
+
+ if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
+ // Ignore empty aggregates.
+ if (TySize == 0)
+ return ABIArgInfo::getIgnore();
+
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ Offset = OrigOffset + MinABIStackAlignInBytes;
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // If we have reached here, aggregates are passed directly by coercing to
+ // another structure type. Padding is inserted if the offset of the
+ // aggregate is unaligned.
+ ABIArgInfo ArgInfo =
+ ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
+ getPaddingType(OrigOffset, CurrOffset));
+ ArgInfo.setInReg(true);
+ return ArgInfo;
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Make sure we pass indirectly things that are too large.
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() > 128 ||
+ (EIT->getNumBits() > 64 &&
+ !getContext().getTargetInfo().hasInt128Type()))
+ return getNaturalAlignIndirect(Ty);
+
+ // All integral types are promoted to the GPR width.
+ if (Ty->isIntegralOrEnumerationType())
+ return extendType(Ty);
+
+ return ABIArgInfo::getDirect(
+ nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
+}
+
+llvm::Type*
+MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
+ const RecordType *RT = RetTy->getAs<RecordType>();
+ SmallVector<llvm::Type*, 8> RTList;
+
+ if (RT && RT->isStructureOrClassType()) {
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ unsigned FieldCnt = Layout.getFieldCount();
+
+ // N32/64 returns struct/classes in floating point registers if the
+ // following conditions are met:
+ // 1. The size of the struct/class is no larger than 128-bit.
+ // 2. The struct/class has one or two fields all of which are floating
+ // point types.
+ // 3. The offset of the first field is zero (this follows what gcc does).
+ //
+ // Any other composite results are returned in integer registers.
+ //
+ if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
+ RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
+ for (; b != e; ++b) {
+ const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
+
+ if (!BT || !BT->isFloatingPoint())
+ break;
+
+ RTList.push_back(CGT.ConvertType(b->getType()));
+ }
+
+ if (b == e)
+ return llvm::StructType::get(getVMContext(), RTList,
+ RD->hasAttr<PackedAttr>());
+
+ RTList.clear();
+ }
+ }
+
+ CoerceToIntArgs(Size, RTList);
+ return llvm::StructType::get(getVMContext(), RTList);
+}
+
+ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // O32 doesn't treat zero-sized structs differently from other structs.
+ // However, N32/N64 ignores zero sized return values.
+ if (!IsO32 && Size == 0)
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
+ if (Size <= 128) {
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ // O32 returns integer vectors in registers and N32/N64 returns all small
+ // aggregates in registers.
+ if (!IsO32 ||
+ (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
+ ABIArgInfo ArgInfo =
+ ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
+ ArgInfo.setInReg(true);
+ return ArgInfo;
+ }
+ }
+
+ return getNaturalAlignIndirect(RetTy);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ // Make sure we pass indirectly things that are too large.
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
+ if (EIT->getNumBits() > 128 ||
+ (EIT->getNumBits() > 64 &&
+ !getContext().getTargetInfo().hasInt128Type()))
+ return getNaturalAlignIndirect(RetTy);
+
+ if (isPromotableIntegerTypeForABI(RetTy))
+ return ABIArgInfo::getExtend(RetTy);
+
+ if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
+ RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32)
+ return ABIArgInfo::getSignExtend(RetTy);
+
+ return ABIArgInfo::getDirect();
+}
+
+void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ ABIArgInfo &RetInfo = FI.getReturnInfo();
+ if (!getCXXABI().classifyReturnType(FI))
+ RetInfo = classifyReturnType(FI.getReturnType());
+
+ // Check if a pointer to an aggregate is passed as a hidden argument.
+ uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type, Offset);
+}
+
+Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType OrigTy) const {
+ QualType Ty = OrigTy;
+
+ // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
+ // Pointers are also promoted in the same way but this only matters for N32.
+ unsigned SlotSizeInBits = IsO32 ? 32 : 64;
+ unsigned PtrWidth = getTarget().getPointerWidth(LangAS::Default);
+ bool DidPromote = false;
+ if ((Ty->isIntegerType() &&
+ getContext().getIntWidth(Ty) < SlotSizeInBits) ||
+ (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
+ DidPromote = true;
+ Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
+ Ty->isSignedIntegerType());
+ }
+
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+
+ // The alignment of things in the argument area is never larger than
+ // StackAlignInBytes.
+ TyInfo.Align =
+ std::min(TyInfo.Align, CharUnits::fromQuantity(StackAlignInBytes));
+
+ // MinABIStackAlignInBytes is the size of argument slots on the stack.
+ CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
+
+ Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
+
+
+ // If there was a promotion, "unpromote" into a temporary.
+ // TODO: can we just use a pointer into a subset of the original slot?
+ if (DidPromote) {
+ Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
+ llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
+
+ // Truncate down to the right width.
+ llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
+ : CGF.IntPtrTy);
+ llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
+ if (OrigTy->isPointerType())
+ V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
+
+ CGF.Builder.CreateStore(V, Temp);
+ Addr = Temp;
+ }
+
+ return Addr;
+}
+
+ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
+ int TySize = getContext().getTypeSize(Ty);
+
+ // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
+ if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
+ return ABIArgInfo::getSignExtend(Ty);
+
+ return ABIArgInfo::getExtend(Ty);
+}
+
+bool
+MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+  // This information comes from gcc's implementation, which seems to be
+  // about as canonical as it gets.
+
+ // Everything on MIPS is 4 bytes. Double-precision FP registers
+ // are aliased to pairs of single-precision FP registers.
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-31 are the general purpose registers, $0 - $31.
+ // 32-63 are the floating-point registers, $f0 - $f31.
+ // 64 and 65 are the multiply/divide registers, $hi and $lo.
+ // 66 is the (notional, I think) register for signal-handler return.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
+
+ // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
+ // They are one bit wide and ignored here.
+
+ // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
+ // (coprocessor 1 is the FP unit)
+ // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
+ // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
+ // 176-181 are the DSP accumulator registers.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
+ return false;
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createMIPSTargetCodeGenInfo(CodeGenModule &CGM, bool IsOS32) {
+ return std::make_unique<MIPSTargetCodeGenInfo>(CGM.getTypes(), IsOS32);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/NVPTX.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/NVPTX.cpp
new file mode 100644
index 000000000000..1ca0192333a0
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/NVPTX.cpp
@@ -0,0 +1,309 @@
+//===- NVPTX.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+#include "llvm/IR/IntrinsicsNVPTX.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// NVPTX ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class NVPTXTargetCodeGenInfo;
+
+class NVPTXABIInfo : public ABIInfo {
+ NVPTXTargetCodeGenInfo &CGInfo;
+
+public:
+ NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info)
+ : ABIInfo(CGT), CGInfo(Info) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+ bool isUnsupportedType(QualType T) const;
+ ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
+};
+
+class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+ bool shouldEmitStaticExternCAliases() const override;
+
+ llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
+ // On the device side, surface reference is represented as an object handle
+ // in 64-bit integer.
+ return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
+ }
+
+ llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override {
+ // On the device side, texture reference is represented as an object handle
+ // in 64-bit integer.
+ return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
+ }
+
+ bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst,
+ LValue Src) const override {
+ emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
+ return true;
+ }
+
+ bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst,
+ LValue Src) const override {
+ emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
+ return true;
+ }
+
+private:
+ // Adds a NamedMDNode with GV, Name, and Operand as operands, and adds the
+ // resulting MDNode to the nvvm.annotations MDNode.
+ static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
+ int Operand);
+
+ static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
+ LValue Src) {
+ llvm::Value *Handle = nullptr;
+ llvm::Constant *C =
+ llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
+ // Lookup `addrspacecast` through the constant pointer if any.
+ if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
+ C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
+ if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) {
+ // Load the handle from the specific global variable using
+ // `nvvm.texsurf.handle.internal` intrinsic.
+ Handle = CGF.EmitRuntimeCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
+ {GV->getType()}),
+ {GV}, "texsurf_handle");
+ } else
+ Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
+ CGF.EmitStoreOfScalar(Handle, Dst);
+ }
+};
+
+/// Checks if the type is unsupported directly by the current target.
+bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
+ ASTContext &Context = getContext();
+ if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
+ return true;
+ if (!Context.getTargetInfo().hasFloat128Type() &&
+ (T->isFloat128Type() ||
+ (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
+ return true;
+ if (const auto *EIT = T->getAs<BitIntType>())
+ return EIT->getNumBits() >
+ (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
+ if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
+ Context.getTypeSize(T) > 64U)
+ return true;
+ if (const auto *AT = T->getAsArrayTypeUnsafe())
+ return isUnsupportedType(AT->getElementType());
+ const auto *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (const CXXBaseSpecifier &I : CXXRD->bases())
+ if (isUnsupportedType(I.getType()))
+ return true;
+
+ for (const FieldDecl *I : RD->fields())
+ if (isUnsupportedType(I->getType()))
+ return true;
+ return false;
+}
+
+/// Coerce the given type into an array with maximum allowed size of elements.
+ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
+ unsigned MaxSize) const {
+ // Alignment and Size are measured in bits.
+ const uint64_t Size = getContext().getTypeSize(Ty);
+ const uint64_t Alignment = getContext().getTypeAlign(Ty);
+ const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
+ llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
+ const uint64_t NumElements = (Size + Div - 1) / Div;
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
+}
+
+ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (getContext().getLangOpts().OpenMP &&
+ getContext().getLangOpts().OpenMPIsTargetDevice &&
+ isUnsupportedType(RetTy))
+ return coerceToIntArrayWithLimit(RetTy, 64);
+
+  // Note: unlike the default ABI, non-scalar types are returned directly.
+ if (!RetTy->isScalarType())
+ return ABIArgInfo::getDirect();
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Return aggregates type as indirect by value
+ if (isAggregateTypeForABI(Ty)) {
+ // Under CUDA device compilation, tex/surf builtin types are replaced with
+ // object types and passed directly.
+ if (getContext().getLangOpts().CUDAIsDevice) {
+ if (Ty->isCUDADeviceBuiltinSurfaceType())
+ return ABIArgInfo::getDirect(
+ CGInfo.getCUDADeviceBuiltinSurfaceDeviceType());
+ if (Ty->isCUDADeviceBuiltinTextureType())
+ return ABIArgInfo::getDirect(
+ CGInfo.getCUDADeviceBuiltinTextureDeviceType());
+ }
+ return getNaturalAlignIndirect(Ty, /* byval */ true);
+ }
+
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
+ if ((EIT->getNumBits() > 128) ||
+ (!getContext().getTargetInfo().hasInt128Type() &&
+ EIT->getNumBits() > 64))
+ return getNaturalAlignIndirect(Ty, /* byval */ true);
+ }
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+}
+
+void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+
+ // Always honor user-specified calling convention.
+ if (FI.getCallingConvention() != llvm::CallingConv::C)
+ return;
+
+ FI.setEffectiveCallingConvention(getRuntimeCC());
+}
+
+Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ llvm_unreachable("NVPTX does not support varargs");
+}
+
+void NVPTXTargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (GV->isDeclaration())
+ return;
+ const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
+ if (VD) {
+ if (M.getLangOpts().CUDA) {
+ if (VD->getType()->isCUDADeviceBuiltinSurfaceType())
+ addNVVMMetadata(GV, "surface", 1);
+ else if (VD->getType()->isCUDADeviceBuiltinTextureType())
+ addNVVMMetadata(GV, "texture", 1);
+ return;
+ }
+ }
+
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Perform special handling in OpenCL mode
+ if (M.getLangOpts().OpenCL) {
+ // Use OpenCL function attributes to check for kernel functions
+ // By default, all functions are device functions
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ // OpenCL __kernel functions get kernel metadata
+ // Create !{<func-ref>, metadata !"kernel", i32 1} node
+ addNVVMMetadata(F, "kernel", 1);
+ // And kernel functions are not subject to inlining
+ F->addFnAttr(llvm::Attribute::NoInline);
+ }
+ }
+
+ // Perform special handling in CUDA mode.
+ if (M.getLangOpts().CUDA) {
+ // CUDA __global__ functions get a kernel metadata entry. Since
+ // __global__ functions cannot be called from the device, we do not
+ // need to set the noinline attribute.
+ if (FD->hasAttr<CUDAGlobalAttr>()) {
+ // Create !{<func-ref>, metadata !"kernel", i32 1} node
+ addNVVMMetadata(F, "kernel", 1);
+ }
+ if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
+ // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
+ llvm::APSInt MaxThreads(32);
+ MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
+ if (MaxThreads > 0)
+ addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
+
+ // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
+ // not specified in __launch_bounds__ or if the user specified a 0 value,
+ // we don't have to add a PTX directive.
+ if (Attr->getMinBlocks()) {
+ llvm::APSInt MinBlocks(32);
+ MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
+ if (MinBlocks > 0)
+ // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
+ addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
+ }
+ }
+ }
+
+ // Attach kernel metadata directly if compiling for NVPTX.
+ if (FD->hasAttr<NVPTXKernelAttr>()) {
+ addNVVMMetadata(F, "kernel", 1);
+ }
+}
+
+void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
+ StringRef Name, int Operand) {
+ llvm::Module *M = GV->getParent();
+ llvm::LLVMContext &Ctx = M->getContext();
+
+ // Get "nvvm.annotations" metadata node
+ llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
+
+ llvm::Metadata *MDVals[] = {
+ llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name),
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
+ // Append metadata to nvvm.annotations
+ MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
+}
+
+bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
+ return false;
+}
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createNVPTXTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<NVPTXTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/PNaCl.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/PNaCl.cpp
new file mode 100644
index 000000000000..771aa7469da2
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/PNaCl.cpp
@@ -0,0 +1,109 @@
+//===- PNaCl.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// le32/PNaCl bitcode ABI Implementation
+//
+// This is a simplified version of the x86_32 ABI. Arguments and return values
+// are always passed on the stack.
+//===----------------------------------------------------------------------===//
+
+class PNaClABIInfo : public ABIInfo {
+ public:
+ PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) const override;
+};
+
+class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
+ public:
+ PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
+};
+
+void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+}
+
+Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+  // The PNaCl ABI is a bit odd, in that varargs don't use normal
+  // function classification. Structs get passed directly for varargs
+  // functions, through a rewriting transform in
+  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
+  // this target to actually support va_arg instructions with an
+  // aggregate type, unlike other targets.
+ return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
+}
+
+/// Classify argument of given type \p Ty.
+ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty)) {
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty);
+ } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
+ // Treat an enum type as its underlying type.
+ Ty = EnumTy->getDecl()->getIntegerType();
+ } else if (Ty->isFloatingType()) {
+ // Floating-point types don't go inreg.
+ return ABIArgInfo::getDirect();
+ } else if (const auto *EIT = Ty->getAs<BitIntType>()) {
+ // Treat bit-precise integers as integers if <= 64, otherwise pass
+ // indirectly.
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(Ty);
+ return ABIArgInfo::getDirect();
+ }
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // In the PNaCl ABI we always return records/structures on the stack.
+ if (isAggregateTypeForABI(RetTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ // Treat bit-precise integers as integers if <= 64, otherwise pass indirectly.
+ if (const auto *EIT = RetTy->getAs<BitIntType>()) {
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(RetTy);
+ return ABIArgInfo::getDirect();
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createPNaClTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<PNaClTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp
new file mode 100644
index 000000000000..9cdd2aa07791
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp
@@ -0,0 +1,972 @@
+//===- PPC.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+static Address complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, CharUnits SlotSize,
+ CharUnits EltSize, const ComplexType *CTy) {
+ Address Addr =
+ emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, SlotSize * 2,
+ SlotSize, SlotSize, /*AllowHigher*/ true);
+
+ Address RealAddr = Addr;
+ Address ImagAddr = RealAddr;
+ if (CGF.CGM.getDataLayout().isBigEndian()) {
+ RealAddr =
+ CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize - EltSize);
+ ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
+ 2 * SlotSize - EltSize);
+ } else {
+ ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
+ }
+
+ llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
+ RealAddr = RealAddr.withElementType(EltTy);
+ ImagAddr = ImagAddr.withElementType(EltTy);
+ llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
+ llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
+
+ Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
+ CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
+ /*init*/ true);
+ return Temp;
+}
+
+static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address, bool Is64Bit,
+ bool IsAIX) {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all PPC ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
+
+ // 64-67 are various 4-byte or 8-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);
+
+ // 68-76 are various 4-byte special-purpose registers:
+ // 68-75 cr0-7
+ // 76: xer
+ AssignToArrayRange(Builder, Address, Four8, 68, 76);
+
+ // 77-108: v0-31, the 16-byte vector registers
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+
+ // 109: vrsave
+ // 110: vscr
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);
+
+ // AIX does not utilize the rest of the registers.
+ if (IsAIX)
+ return false;
+
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);
+
+ if (!Is64Bit)
+ return false;
+
+ // TODO: Need to verify if these registers are used on 64 bit AIX with Power8
+ // or above CPU.
+ // 64-bit only registers:
+ // 114: tfhar
+ // 115: tfiar
+ // 116: texasr
+ AssignToArrayRange(Builder, Address, Eight8, 114, 116);
+
+ return false;
+}
+
+// AIX
+namespace {
+/// AIXABIInfo - The AIX XCOFF ABI information.
+class AIXABIInfo : public ABIInfo {
+ const bool Is64Bit;
+ const unsigned PtrByteSize;
+ CharUnits getParamTypeAlignment(QualType Ty) const;
+
+public:
+ AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
+ : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}
+
+ bool isPromotableTypeForABI(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
+ const bool Is64Bit;
+
+public:
+ AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
+ : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
+ Is64Bit(Is64Bit) {}
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+};
+} // namespace
+
+// Return true if the ABI requires Ty to be passed sign- or zero-
+// extended to 32/64 bits.
+bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Promotable integer types are required to be promoted by the ABI.
+ if (getContext().isPromotableIntegerType(Ty))
+ return true;
+
+ if (!Is64Bit)
+ return false;
+
+ // For 64 bit mode, in addition to the usual promotable integer types, we also
+ // need to extend all 32-bit types, since the ABI requires promotion to 64
+ // bits.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ if (RetTy->isVectorType())
+ return ABIArgInfo::getDirect();
+
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (Ty->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ if (Ty->isVectorType())
+ return ABIArgInfo::getDirect();
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // passed by value.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ CharUnits CCAlign = getParamTypeAlignment(Ty);
+ CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
+
+ return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
+ /*Realign*/ TyAlign > CCAlign);
+ }
+
+ return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+}
+
+CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
+ // Complex types are passed just like their elements.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>())
+ Ty = CTy->getElementType();
+
+ if (Ty->isVectorType())
+ return CharUnits::fromQuantity(16);
+
+ // If the structure contains a vector type, the alignment is 16.
+ if (isRecordWithSIMDVectorType(getContext(), Ty))
+ return CharUnits::fromQuantity(16);
+
+ return CharUnits::fromQuantity(PtrByteSize);
+}
+
+Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+ TypeInfo.Align = getParamTypeAlignment(Ty);
+
+ CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
+
+ // If we have a complex type and the base type is smaller than the register
+ // size, the ABI calls for the real and imaginary parts to be right-adjusted
+ // in separate words in 32bit mode or doublewords in 64bit mode. However,
+ // Clang expects us to produce a pointer to a structure with the two parts
+ // packed tightly. So generate loads of the real and imaginary parts relative
+ // to the va_list pointer, and store them to a temporary structure. We do the
+ // same as the PPC64ABI here.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ CharUnits EltSize = TypeInfo.Width / 2;
+ if (EltSize < SlotSize)
+ return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
+ }
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
+ SlotSize, /*AllowHigher*/ true);
+}
+
+bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
+}
+
+// PowerPC-32
+namespace {
+/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
+class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
+ bool IsSoftFloatABI;
+ bool IsRetSmallStructInRegABI;
+
+ CharUnits getParamTypeAlignment(QualType Ty) const;
+
+public:
+ PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
+ bool RetSmallStructInRegABI)
+ : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
+ IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
+ bool RetSmallStructInRegABI)
+ : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
+ CGT, SoftFloatABI, RetSmallStructInRegABI)) {}
+
+ static bool isStructReturnInRegABI(const llvm::Triple &Triple,
+ const CodeGenOptions &Opts);
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+};
+}
+
+CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
+ // Complex types are passed just like their elements.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>())
+ Ty = CTy->getElementType();
+
+ if (Ty->isVectorType())
+ return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
+ : 4);
+
+ // For single-element float/vector structs, we consider the whole type
+ // to have the same alignment requirements as its single element.
+ const Type *AlignTy = nullptr;
+ if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
+ const BuiltinType *BT = EltType->getAs<BuiltinType>();
+ if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
+ (BT && BT->isFloatingPoint()))
+ AlignTy = EltType;
+ }
+
+ if (AlignTy)
+ return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
+ return CharUnits::fromQuantity(4);
+}
+
+ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
+ uint64_t Size;
+
+ // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
+ if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
+ (Size = getContext().getTypeSize(RetTy)) <= 64) {
+ // System V ABI (1995), page 3-22, specified:
+ // > A structure or union whose size is less than or equal to 8 bytes
+ // > shall be returned in r3 and r4, as if it were first stored in the
+ // > 8-byte aligned memory area and then the low addressed word were
+ // > loaded into r3 and the high-addressed word into r4. Bits beyond
+ // > the last member of the structure or union are not defined.
+ //
+ // GCC for big-endian PPC32 inserts the pad before the first member,
+ // not "beyond the last member" of the struct. To stay compatible
+ // with GCC, we coerce the struct to an integer of the same size.
+ // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
+ if (Size == 0)
+ return ABIArgInfo::getIgnore();
+ else {
+ llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+ }
+
+ return DefaultABIInfo::classifyReturnType(RetTy);
+}
+
+// TODO: this implementation is now likely redundant with
+// DefaultABIInfo::EmitVAArg.
+Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
+ QualType Ty) const {
+ if (getTarget().getTriple().isOSDarwin()) {
+ auto TI = getContext().getTypeInfoInChars(Ty);
+ TI.Align = getParamTypeAlignment(Ty);
+
+ CharUnits SlotSize = CharUnits::fromQuantity(4);
+ return emitVoidPtrVAArg(CGF, VAList, Ty,
+ classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
+ /*AllowHigherAlign=*/true);
+ }
+
+ const unsigned OverflowLimit = 8;
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ // TODO: Implement this. For now ignore.
+ (void)CTy;
+ return Address::invalid(); // FIXME?
+ }
+
+ // struct __va_list_tag {
+ // unsigned char gpr;
+ // unsigned char fpr;
+ // unsigned short reserved;
+ // void *overflow_arg_area;
+ // void *reg_save_area;
+ // };
+
+ bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
+ bool isInt = !Ty->isFloatingType();
+ bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
+
+ // All aggregates are passed indirectly? That doesn't seem consistent
+ // with the argument-lowering code.
+ bool isIndirect = isAggregateTypeForABI(Ty);
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // The calling convention either uses 1-2 GPRs or 1 FPR.
+ Address NumRegsAddr = Address::invalid();
+ if (isInt || IsSoftFloatABI) {
+ NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
+ } else {
+ NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
+ }
+
+ llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
+
+ // "Align" the register count when TY is i64.
+ if (isI64 || (isF64 && IsSoftFloatABI)) {
+ NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
+ NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
+ }
+
+ llvm::Value *CC =
+ Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
+
+ llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
+ llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+
+ Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
+
+ llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
+ if (isIndirect)
+ DirectTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
+
+ // Case 1: consume registers.
+ Address RegAddr = Address::invalid();
+ {
+ CGF.EmitBlock(UsingRegs);
+
+ Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
+ RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), CGF.Int8Ty,
+ CharUnits::fromQuantity(8));
+ assert(RegAddr.getElementType() == CGF.Int8Ty);
+
+ // Floating-point registers start after the general-purpose registers.
+ if (!(isInt || IsSoftFloatABI)) {
+ RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
+ CharUnits::fromQuantity(32));
+ }
+
+ // Get the address of the saved value by scaling the number of
+ // registers we've used by the number of
+ CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
+ llvm::Value *RegOffset =
+ Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
+ RegAddr = Address(
+ Builder.CreateInBoundsGEP(CGF.Int8Ty, RegAddr.getPointer(), RegOffset),
+ DirectTy, RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
+
+ // Increase the used-register count.
+ NumRegs =
+ Builder.CreateAdd(NumRegs,
+ Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
+ Builder.CreateStore(NumRegs, NumRegsAddr);
+
+ CGF.EmitBranch(Cont);
+ }
+
+ // Case 2: consume space in the overflow area.
+ Address MemAddr = Address::invalid();
+ {
+ CGF.EmitBlock(UsingOverflow);
+
+ Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
+
+ // Everything in the overflow area is rounded up to a size of at least 4.
+ CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
+
+ CharUnits Size;
+ if (!isIndirect) {
+ auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
+ Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
+ } else {
+ Size = CGF.getPointerSize();
+ }
+
+ Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
+ Address OverflowArea =
+ Address(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), CGF.Int8Ty,
+ OverflowAreaAlign);
+ // Round up address of argument to alignment
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
+ if (Align > OverflowAreaAlign) {
+ llvm::Value *Ptr = OverflowArea.getPointer();
+ OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
+ OverflowArea.getElementType(), Align);
+ }
+
+ MemAddr = OverflowArea.withElementType(DirectTy);
+
+ // Increase the overflow area.
+ OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
+ Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
+ CGF.EmitBranch(Cont);
+ }
+
+ CGF.EmitBlock(Cont);
+
+ // Merge the cases with a phi.
+ Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
+ "vaarg.addr");
+
+ // Load the pointer if the argument was passed indirectly.
+ if (isIndirect) {
+ Result = Address(Builder.CreateLoad(Result, "aggr"), ElementTy,
+ getContext().getTypeAlignInChars(Ty));
+ }
+
+ return Result;
+}
+
+bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
+ const llvm::Triple &Triple, const CodeGenOptions &Opts) {
+ assert(Triple.isPPC32());
+
+ switch (Opts.getStructReturnConvention()) {
+ case CodeGenOptions::SRCK_Default:
+ break;
+ case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
+ return false;
+ case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
+ return true;
+ }
+
+ if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
+ return true;
+
+ return false;
+}
+
+bool
+PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
+ /*IsAIX*/ false);
+}
+
+// PowerPC-64
+
+namespace {
+
+/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
+class PPC64_SVR4_ABIInfo : public ABIInfo {
+ static const unsigned GPRBits = 64;
+ PPC64_SVR4_ABIKind Kind;
+ bool IsSoftFloatABI;
+
+public:
+ PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
+ bool SoftFloatABI)
+ : ABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {}
+
+ bool isPromotableTypeForABI(QualType Ty) const;
+ CharUnits getParamTypeAlignment(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override;
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t Members) const override;
+
+ // TODO: We can add more logic to computeInfo to improve performance.
+ // Example: For aggregate arguments that fit in a register, we could
+ // use getDirectInReg (as is done below for structs containing a single
+ // floating-point value) to avoid pushing them to memory on function
+ // entry. This would require changing the logic in PPCISelLowering
+ // when lowering the parameters in the caller and args in the callee.
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments()) {
+ // We rely on the default argument classification for the most part.
+ // One exception: An aggregate containing a single floating-point
+ // or vector item must be passed in a register if one is available.
+ const Type *T = isSingleElementStruct(I.type, getContext());
+ if (T) {
+ const BuiltinType *BT = T->getAs<BuiltinType>();
+ if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
+ (BT && BT->isFloatingPoint())) {
+ QualType QT(T, 0);
+ I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
+ continue;
+ }
+ }
+ I.info = classifyArgumentType(I.type);
+ }
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
+
+public:
+ PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
+ bool SoftFloatABI)
+ : TargetCodeGenInfo(
+ std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {
+ SwiftInfo =
+ std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
+ }
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+};
+
+class PPC64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PPC64TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+};
+}
+
+// Return true if the ABI requires Ty to be passed sign- or zero-
+// extended to 64 bits.
+bool
+PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Promotable integer types are required to be promoted by the ABI.
+ if (isPromotableIntegerTypeForABI(Ty))
+ return true;
+
+ // In addition to the usual promotable integer types, we also need to
+ // extend all 32-bit types, since the ABI requires promotion to 64 bits.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ break;
+ }
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() < 64)
+ return true;
+
+ return false;
+}
+
+/// isAlignedParamType - Determine whether a type requires 16-byte or
+/// higher alignment in the parameter area. Always returns at least 8.
+CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
+ // Complex types are passed just like their elements.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>())
+ Ty = CTy->getElementType();
+
+ auto FloatUsesVector = [this](QualType Ty){
+ return Ty->isRealFloatingType() && &getContext().getFloatTypeSemantics(
+ Ty) == &llvm::APFloat::IEEEquad();
+ };
+
+ // Only vector types of size 16 bytes need alignment (larger types are
+ // passed via reference, smaller types are not aligned).
+ if (Ty->isVectorType()) {
+ return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
+ } else if (FloatUsesVector(Ty)) {
+ // According to ABI document section 'Optional Save Areas': If extended
+ // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
+ // format are supported, map them to a single quadword, quadword aligned.
+ return CharUnits::fromQuantity(16);
+ }
+
+ // For single-element float/vector structs, we consider the whole type
+ // to have the same alignment requirements as its single element.
+ const Type *AlignAsType = nullptr;
+ const Type *EltType = isSingleElementStruct(Ty, getContext());
+ if (EltType) {
+ const BuiltinType *BT = EltType->getAs<BuiltinType>();
+ if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
+ (BT && BT->isFloatingPoint()))
+ AlignAsType = EltType;
+ }
+
+ // Likewise for ELFv2 homogeneous aggregates.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (!AlignAsType && Kind == PPC64_SVR4_ABIKind::ELFv2 &&
+ isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
+ AlignAsType = Base;
+
+ // With special case aggregates, only vector base types need alignment.
+ if (AlignAsType) {
+ bool UsesVector = AlignAsType->isVectorType() ||
+ FloatUsesVector(QualType(AlignAsType, 0));
+ return CharUnits::fromQuantity(UsesVector ? 16 : 8);
+ }
+
+ // Otherwise, we only need alignment for any aggregate type that
+ // has an alignment requirement of >= 16 bytes.
+ if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
+ return CharUnits::fromQuantity(16);
+ }
+
+ return CharUnits::fromQuantity(8);
+}
+
+bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ // Homogeneous aggregates for ELFv2 must have base types of float,
+ // double, long double, or 128-bit vectors.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::Float ||
+ BT->getKind() == BuiltinType::Double ||
+ BT->getKind() == BuiltinType::LongDouble ||
+ BT->getKind() == BuiltinType::Ibm128 ||
+ (getContext().getTargetInfo().hasFloat128Type() &&
+ (BT->getKind() == BuiltinType::Float128))) {
+ if (IsSoftFloatABI)
+ return false;
+ return true;
+ }
+ }
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ if (getContext().getTypeSize(VT) == 128)
+ return true;
+ }
+ return false;
+}
+
+bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
+ const Type *Base, uint64_t Members) const {
+ // Vector and fp128 types require one register, other floating point types
+ // require one or two registers depending on their size.
+ uint32_t NumRegs =
+ ((getContext().getTargetInfo().hasFloat128Type() &&
+ Base->isFloat128Type()) ||
+ Base->isVectorType()) ? 1
+ : (getContext().getTypeSize(Base) + 63) / 64;
+
+ // Homogeneous Aggregates may occupy at most 8 registers.
+ return Members * NumRegs <= 8;
+}
+
+ABIArgInfo
+PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (Ty->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
+ // or via reference (larger than 16 bytes).
+ if (Ty->isVectorType()) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size > 128)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+ else if (Size < 128) {
+ llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+ }
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ if (isAggregateTypeForABI(Ty)) {
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
+ uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
+
+ // ELFv2 homogeneous aggregates are passed as array types.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
+ isHomogeneousAggregate(Ty, Base, Members)) {
+ llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
+ llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // If an aggregate may end up fully in registers, we do not
+ // use the ByVal method, but pass the aggregate as array.
+ // This is usually beneficial since we avoid forcing the
+ // back-end to store the argument to memory.
+ uint64_t Bits = getContext().getTypeSize(Ty);
+ if (Bits > 0 && Bits <= 8 * GPRBits) {
+ llvm::Type *CoerceTy;
+
+ // Types up to 8 bytes are passed as integer type (which will be
+ // properly aligned in the argument save area doubleword).
+ if (Bits <= GPRBits)
+ CoerceTy =
+ llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
+ // Larger types are passed as arrays, with the base type selected
+ // according to the required alignment in the save area.
+ else {
+ uint64_t RegBits = ABIAlign * 8;
+ uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
+ llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
+ CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
+ }
+
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // All other aggregates are passed ByVal.
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
+ /*ByVal=*/true,
+ /*Realign=*/TyAlign > ABIAlign);
+ }
+
+ return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo
+PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
+ // or via reference (larger than 16 bytes).
+ if (RetTy->isVectorType()) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size > 128)
+ return getNaturalAlignIndirect(RetTy);
+ else if (Size < 128) {
+ llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+ }
+
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+
+ if (isAggregateTypeForABI(RetTy)) {
+ // ELFv2 homogeneous aggregates are returned as array types.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
+ isHomogeneousAggregate(RetTy, Base, Members)) {
+ llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
+ llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // ELFv2 small aggregates are returned in up to two registers.
+ uint64_t Bits = getContext().getTypeSize(RetTy);
+ if (Kind == PPC64_SVR4_ABIKind::ELFv2 && Bits <= 2 * GPRBits) {
+ if (Bits == 0)
+ return ABIArgInfo::getIgnore();
+
+ llvm::Type *CoerceTy;
+ if (Bits > GPRBits) {
+ CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
+ CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
+ } else
+ CoerceTy =
+ llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // All other aggregates are returned indirectly.
+ return getNaturalAlignIndirect(RetTy);
+ }
+
+ return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
+Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+ TypeInfo.Align = getParamTypeAlignment(Ty);
+
+ CharUnits SlotSize = CharUnits::fromQuantity(8);
+
+ // If we have a complex type and the base type is smaller than 8 bytes,
+ // the ABI calls for the real and imaginary parts to be right-adjusted
+ // in separate doublewords. However, Clang expects us to produce a
+ // pointer to a structure with the two parts packed tightly. So generate
+ // loads of the real and imaginary parts relative to the va_list pointer,
+ // and store them to a temporary structure.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ CharUnits EltSize = TypeInfo.Width / 2;
+ if (EltSize < SlotSize)
+ return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
+ }
+
+ // Otherwise, just use the general rule.
+ //
+ // The PPC64 ABI passes some arguments in integer registers, even to variadic
+ // functions. To allow va_list to use the simple "void*" representation,
+ // variadic calls allocate space in the argument area for the integer argument
+ // registers, and variadic functions spill their integer argument registers to
+ // this area in their prologues. When aggregates smaller than a register are
+ // passed this way, they are passed in the least significant bits of the
+ // register, which means that after spilling on big-endian targets they will
+ // be right-aligned in their argument slot. This is uncommon; for a variety of
+ // reasons, other big-endian targets don't end up right-aligning aggregate
+ // types this way, and so right-alignment only applies to fundamental types.
+ // So on PPC64, we must force the use of right-alignment even for aggregates.
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
+ SlotSize, /*AllowHigher*/ true,
+ /*ForceRightAdjust*/ true);
+}
+
+bool
+PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
+ /*IsAIX*/ false);
+}
+
+bool
+PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
+ /*IsAIX*/ false);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createAIXTargetCodeGenInfo(CodeGenModule &CGM, bool Is64Bit) {
+ return std::make_unique<AIXTargetCodeGenInfo>(CGM.getTypes(), Is64Bit);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createPPC32TargetCodeGenInfo(CodeGenModule &CGM, bool SoftFloatABI) {
+ bool RetSmallStructInRegABI = PPC32TargetCodeGenInfo::isStructReturnInRegABI(
+ CGM.getTriple(), CGM.getCodeGenOpts());
+ return std::make_unique<PPC32TargetCodeGenInfo>(CGM.getTypes(), SoftFloatABI,
+ RetSmallStructInRegABI);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createPPC64TargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<PPC64TargetCodeGenInfo>(CGM.getTypes());
+}
+
+std::unique_ptr<TargetCodeGenInfo> CodeGen::createPPC64_SVR4_TargetCodeGenInfo(
+ CodeGenModule &CGM, PPC64_SVR4_ABIKind Kind, bool SoftFloatABI) {
+ return std::make_unique<PPC64_SVR4_TargetCodeGenInfo>(CGM.getTypes(), Kind,
+ SoftFloatABI);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp
new file mode 100644
index 000000000000..b6d8ae462675
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -0,0 +1,519 @@
+//===- RISCV.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// RISC-V ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RISCVABIInfo : public DefaultABIInfo {
+private:
+ // Size of the integer ('x') registers in bits.
+ unsigned XLen;
+ // Size of the floating point ('f') registers in bits. Note that the target
+ // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
+ // with soft float ABI has FLen==0).
+ unsigned FLen;
+ static const int NumArgGPRs = 8;
+ static const int NumArgFPRs = 8;
+ bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
+ llvm::Type *&Field1Ty,
+ CharUnits &Field1Off,
+ llvm::Type *&Field2Ty,
+ CharUnits &Field2Off) const;
+
+public:
+ RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
+ : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}
+
+ // DefaultABIInfo's classifyReturnType and classifyArgumentType are
+ // non-virtual, but computeInfo is virtual, so we overload it.
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
+ int &ArgFPRsLeft) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ ABIArgInfo extendType(QualType Ty) const;
+
+ bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
+ CharUnits &Field1Off, llvm::Type *&Field2Ty,
+ CharUnits &Field2Off, int &NeededArgGPRs,
+ int &NeededArgFPRs) const;
+ ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
+ CharUnits Field1Off,
+ llvm::Type *Field2Ty,
+ CharUnits Field2Off) const;
+
+ ABIArgInfo coerceVLSVector(QualType Ty) const;
+};
+} // end anonymous namespace
+
+void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ QualType RetTy = FI.getReturnType();
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(RetTy);
+
+ // IsRetIndirect is true if classifyArgumentType indicated the value should
+ // be passed indirect, or if the type size is a scalar greater than 2*XLen
+ // and not a complex type with elements <= FLen. e.g. fp128 is passed direct
+ // in LLVM IR, relying on the backend lowering code to rewrite the argument
+ // list and pass indirectly on RV32.
+ bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
+ if (!IsRetIndirect && RetTy->isScalarType() &&
+ getContext().getTypeSize(RetTy) > (2 * XLen)) {
+ if (RetTy->isComplexType() && FLen) {
+ QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
+ IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
+ } else {
+ // This is a normal scalar > 2*XLen, such as fp128 on RV32.
+ IsRetIndirect = true;
+ }
+ }
+
+ int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
+ int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
+ int NumFixedArgs = FI.getNumRequiredArgs();
+
+ int ArgNum = 0;
+ for (auto &ArgInfo : FI.arguments()) {
+ bool IsFixed = ArgNum < NumFixedArgs;
+ ArgInfo.info =
+ classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
+ ArgNum++;
+ }
+}
+
+// Returns true if the struct is a potential candidate for the floating point
+// calling convention. If this function returns true, the caller is
+// responsible for checking that if there is only a single field then that
+// field is a float.
+bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
+ llvm::Type *&Field1Ty,
+ CharUnits &Field1Off,
+ llvm::Type *&Field2Ty,
+ CharUnits &Field2Off) const {
+ bool IsInt = Ty->isIntegralOrEnumerationType();
+ bool IsFloat = Ty->isRealFloatingType();
+
+ if (IsInt || IsFloat) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (IsInt && Size > XLen)
+ return false;
+ // Can't be eligible if larger than the FP registers. Handling of half
+ // precision values has been specified in the ABI, so don't block those.
+ if (IsFloat && Size > FLen)
+ return false;
+ // Can't be eligible if an integer type was already found (int+int pairs
+ // are not eligible).
+ if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
+ return false;
+ if (!Field1Ty) {
+ Field1Ty = CGT.ConvertType(Ty);
+ Field1Off = CurOff;
+ return true;
+ }
+ if (!Field2Ty) {
+ Field2Ty = CGT.ConvertType(Ty);
+ Field2Off = CurOff;
+ return true;
+ }
+ return false;
+ }
+
+ if (auto CTy = Ty->getAs<ComplexType>()) {
+ if (Field1Ty)
+ return false;
+ QualType EltTy = CTy->getElementType();
+ if (getContext().getTypeSize(EltTy) > FLen)
+ return false;
+ Field1Ty = CGT.ConvertType(EltTy);
+ Field1Off = CurOff;
+ Field2Ty = Field1Ty;
+ Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
+ return true;
+ }
+
+ if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
+ uint64_t ArraySize = ATy->getSize().getZExtValue();
+ QualType EltTy = ATy->getElementType();
+ CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
+ for (uint64_t i = 0; i < ArraySize; ++i) {
+ bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
+ Field1Off, Field2Ty, Field2Off);
+ if (!Ret)
+ return false;
+ CurOff += EltSize;
+ }
+ return true;
+ }
+
+ if (const auto *RTy = Ty->getAs<RecordType>()) {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are not eligible for the FP calling convention.
+ if (getRecordArgABI(Ty, CGT.getCXXABI()))
+ return false;
+ if (isEmptyRecord(getContext(), Ty, true))
+ return true;
+ const RecordDecl *RD = RTy->getDecl();
+ // Unions aren't eligible unless they're empty (which is caught above).
+ if (RD->isUnion())
+ return false;
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const CXXBaseSpecifier &B : CXXRD->bases()) {
+ const auto *BDecl =
+ cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
+ CharUnits BaseOff = Layout.getBaseClassOffset(BDecl);
+ bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
+ Field1Ty, Field1Off, Field2Ty,
+ Field2Off);
+ if (!Ret)
+ return false;
+ }
+ }
+ int ZeroWidthBitFieldCount = 0;
+ for (const FieldDecl *FD : RD->fields()) {
+ uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
+ QualType QTy = FD->getType();
+ if (FD->isBitField()) {
+ unsigned BitWidth = FD->getBitWidthValue(getContext());
+ // Allow a bitfield with a type greater than XLen as long as the
+ // bitwidth is XLen or less.
+ if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
+ QTy = getContext().getIntTypeForBitwidth(XLen, false);
+ if (BitWidth == 0) {
+ ZeroWidthBitFieldCount++;
+ continue;
+ }
+ }
+
+ bool Ret = detectFPCCEligibleStructHelper(
+ QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
+ Field1Ty, Field1Off, Field2Ty, Field2Off);
+ if (!Ret)
+ return false;
+
+ // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
+ // or int+fp structs, but are ignored for a struct with an fp field and
+ // any number of zero-width bitfields.
+ if (Field2Ty && ZeroWidthBitFieldCount > 0)
+ return false;
+ }
+ return Field1Ty != nullptr;
+ }
+
+ return false;
+}
+
+// Determine if a struct is eligible for passing according to the floating
+// point calling convention (i.e., when flattened it contains a single fp
+// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
+// NeededArgGPRs are incremented appropriately.
+bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
+ CharUnits &Field1Off,
+ llvm::Type *&Field2Ty,
+ CharUnits &Field2Off,
+ int &NeededArgGPRs,
+ int &NeededArgFPRs) const {
+ Field1Ty = nullptr;
+ Field2Ty = nullptr;
+ NeededArgGPRs = 0;
+ NeededArgFPRs = 0;
+ bool IsCandidate = detectFPCCEligibleStructHelper(
+ Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
+ // Not really a candidate if we have a single int but no float.
+ if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
+ return false;
+ if (!IsCandidate)
+ return false;
+ if (Field1Ty && Field1Ty->isFloatingPointTy())
+ NeededArgFPRs++;
+ else if (Field1Ty)
+ NeededArgGPRs++;
+ if (Field2Ty && Field2Ty->isFloatingPointTy())
+ NeededArgFPRs++;
+ else if (Field2Ty)
+ NeededArgGPRs++;
+ return true;
+}
+
+// Call getCoerceAndExpand for the two-element flattened struct described by
+// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
+// appropriate coerceToType and unpaddedCoerceToType.
+ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
+ llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
+ CharUnits Field2Off) const {
+ SmallVector<llvm::Type *, 3> CoerceElts;
+ SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
+ if (!Field1Off.isZero())
+ CoerceElts.push_back(llvm::ArrayType::get(
+ llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));
+
+ CoerceElts.push_back(Field1Ty);
+ UnpaddedCoerceElts.push_back(Field1Ty);
+
+ if (!Field2Ty) {
+ return ABIArgInfo::getCoerceAndExpand(
+ llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
+ UnpaddedCoerceElts[0]);
+ }
+
+ CharUnits Field2Align =
+ CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
+ CharUnits Field1End = Field1Off +
+ CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
+ CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);
+
+ CharUnits Padding = CharUnits::Zero();
+ if (Field2Off > Field2OffNoPadNoPack)
+ Padding = Field2Off - Field2OffNoPadNoPack;
+ else if (Field2Off != Field2Align && Field2Off > Field1End)
+ Padding = Field2Off - Field1End;
+
+ bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
+
+ if (!Padding.isZero())
+ CoerceElts.push_back(llvm::ArrayType::get(
+ llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));
+
+ CoerceElts.push_back(Field2Ty);
+ UnpaddedCoerceElts.push_back(Field2Ty);
+
+ auto CoerceToType =
+ llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
+ auto UnpaddedCoerceToType =
+ llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);
+
+ return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
+}
+
+// Fixed-length RVV vectors are represented as scalable vectors in function
+// args/return and must be coerced from fixed vectors.
+ABIArgInfo RISCVABIInfo::coerceVLSVector(QualType Ty) const {
+ assert(Ty->isVectorType() && "expected vector type!");
+
+ const auto *VT = Ty->castAs<VectorType>();
+ assert(VT->getVectorKind() == VectorType::RVVFixedLengthDataVector &&
+ "Unexpected vector kind");
+
+ assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
+
+ const auto *BT = VT->getElementType()->castAs<BuiltinType>();
+ unsigned EltSize = getContext().getTypeSize(BT);
+ llvm::ScalableVectorType *ResType =
+ llvm::ScalableVectorType::get(CGT.ConvertType(VT->getElementType()),
+ llvm::RISCV::RVVBitsPerBlock / EltSize);
+ return ABIArgInfo::getDirect(ResType);
+}
+
+ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
+ int &ArgGPRsLeft,
+ int &ArgFPRsLeft) const {
+ assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always passed indirectly.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ if (ArgGPRsLeft)
+ ArgGPRsLeft -= 1;
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+ CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // Pass floating point values via FPRs if possible.
+ if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
+ FLen >= Size && ArgFPRsLeft) {
+ ArgFPRsLeft--;
+ return ABIArgInfo::getDirect();
+ }
+
+ // Complex types for the hard float ABI must be passed direct rather than
+ // using CoerceAndExpand.
+ if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
+ QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
+ if (getContext().getTypeSize(EltTy) <= FLen) {
+ ArgFPRsLeft -= 2;
+ return ABIArgInfo::getDirect();
+ }
+ }
+
+ if (IsFixed && FLen && Ty->isStructureOrClassType()) {
+ llvm::Type *Field1Ty = nullptr;
+ llvm::Type *Field2Ty = nullptr;
+ CharUnits Field1Off = CharUnits::Zero();
+ CharUnits Field2Off = CharUnits::Zero();
+ int NeededArgGPRs = 0;
+ int NeededArgFPRs = 0;
+ bool IsCandidate =
+ detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
+ NeededArgGPRs, NeededArgFPRs);
+ if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
+ NeededArgFPRs <= ArgFPRsLeft) {
+ ArgGPRsLeft -= NeededArgGPRs;
+ ArgFPRsLeft -= NeededArgFPRs;
+ return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
+ Field2Off);
+ }
+ }
+
+ uint64_t NeededAlign = getContext().getTypeAlign(Ty);
+ // Determine the number of GPRs needed to pass the current argument
+ // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
+ // register pairs, so may consume 3 registers.
+ int NeededArgGPRs = 1;
+ if (!IsFixed && NeededAlign == 2 * XLen)
+ NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
+ else if (Size > XLen && Size <= 2 * XLen)
+ NeededArgGPRs = 2;
+
+ if (NeededArgGPRs > ArgGPRsLeft) {
+ NeededArgGPRs = ArgGPRsLeft;
+ }
+
+ ArgGPRsLeft -= NeededArgGPRs;
+
+ if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // All integral types are promoted to XLen width
+ if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
+ return extendType(Ty);
+ }
+
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
+ if (EIT->getNumBits() < XLen)
+ return extendType(Ty);
+ if (EIT->getNumBits() > 128 ||
+ (!getContext().getTargetInfo().hasInt128Type() &&
+ EIT->getNumBits() > 64))
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>())
+ if (VT->getVectorKind() == VectorType::RVVFixedLengthDataVector)
+ return coerceVLSVector(Ty);
+
+ // Aggregates which are <= 2*XLen will be passed in registers if possible,
+ // so coerce to integers.
+ if (Size <= 2 * XLen) {
+ unsigned Alignment = getContext().getTypeAlign(Ty);
+
+ // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
+ // required, and a 2-element XLen array if only XLen alignment is required.
+ if (Size <= XLen) {
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), XLen));
+ } else if (Alignment == 2 * XLen) {
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), 2 * XLen));
+ } else {
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(
+ llvm::IntegerType::get(getVMContext(), XLen), 2));
+ }
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
+ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ int ArgGPRsLeft = 2;
+ int ArgFPRsLeft = FLen ? 2 : 0;
+
+ // The rules for return and argument types are the same, so defer to
+ // classifyArgumentType.
+ return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
+ ArgFPRsLeft);
+}
+
+Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
+
+ // Empty records are ignored for parameter passing purposes.
+ if (isEmptyRecord(getContext(), Ty, true)) {
+ return Address(CGF.Builder.CreateLoad(VAListAddr),
+ CGF.ConvertTypeForMem(Ty), SlotSize);
+ }
+
+ auto TInfo = getContext().getTypeInfoInChars(Ty);
+
+  // Arguments bigger than 2*XLen bits (i.e. two register slots) are passed
+ bool IsIndirect = TInfo.Width > 2 * SlotSize;
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
+ SlotSize, /*AllowHigherAlign=*/true);
+}
+
+ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
+ int TySize = getContext().getTypeSize(Ty);
+ // RV64 ABI requires unsigned 32 bit integers to be sign extended.
+ if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
+ return ABIArgInfo::getSignExtend(Ty);
+ return ABIArgInfo::getExtend(Ty);
+}
+
+namespace {
+class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
+ unsigned FLen)
+ : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+
+ const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
+ if (!Attr)
+ return;
+
+ const char *Kind;
+ switch (Attr->getInterrupt()) {
+ case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
+ case RISCVInterruptAttr::machine: Kind = "machine"; break;
+ }
+
+ auto *Fn = cast<llvm::Function>(GV);
+
+ Fn->addFnAttr("interrupt", Kind);
+ }
+};
+} // namespace
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen,
+ unsigned FLen) {
+ return std::make_unique<RISCVTargetCodeGenInfo>(CGM.getTypes(), XLen, FLen);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/SPIR.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/SPIR.cpp
new file mode 100644
index 000000000000..8bacba65617e
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -0,0 +1,218 @@
+//===- SPIR.cpp -----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Base ABI and target codegen info implementation common between SPIR and
+// SPIR-V.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CommonSPIRABIInfo : public DefaultABIInfo {
+public:
+ CommonSPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }
+
+private:
+ void setCCs();
+};
+
+class SPIRVABIInfo : public CommonSPIRABIInfo {
+public:
+ SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+private:
+ ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
+};
+} // end anonymous namespace
+namespace {
+class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
+ CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo)
+ : TargetCodeGenInfo(std::move(ABIInfo)) {}
+
+ LangAS getASTAllocaAddressSpace() const override {
+ return getLangASFromTargetAS(
+ getABIInfo().getDataLayout().getAllocaAddrSpace());
+ }
+
+ unsigned getOpenCLKernelCallingConv() const override;
+ llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const override;
+};
+class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
+public:
+ SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {}
+ void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
+};
+} // End anonymous namespace.
+
+void CommonSPIRABIInfo::setCCs() {
+ assert(getRuntimeCC() == llvm::CallingConv::C);
+ RuntimeCC = llvm::CallingConv::SPIR_FUNC;
+}
+
+ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
+ if (getContext().getLangOpts().CUDAIsDevice) {
+ // Coerce pointer arguments with default address space to CrossWorkGroup
+ // pointers for HIPSPV/CUDASPV. When the language mode is HIP/CUDA, the
+ // SPIRTargetInfo maps cuda_device to SPIR-V's CrossWorkGroup address space.
+ llvm::Type *LTy = CGT.ConvertType(Ty);
+ auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
+ auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device);
+ auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
+ if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
+ LTy = llvm::PointerType::get(PtrTy->getContext(), GlobalAS);
+ return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
+ }
+
+ // Force copying aggregate type in kernel arguments by value when
+ // compiling CUDA targeting SPIR-V. This is required for the object
+ // copied to be valid on the device.
+ // This behavior follows the CUDA spec
+ // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#global-function-argument-processing,
+ // and matches the NVPTX implementation.
+ if (isAggregateTypeForABI(Ty))
+ return getNaturalAlignIndirect(Ty, /* byval */ true);
+ }
+ return classifyArgumentType(Ty);
+}
+
+void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ // The logic is same as in DefaultABIInfo with an exception on the kernel
+ // arguments handling.
+ llvm::CallingConv::ID CC = FI.getCallingConvention();
+
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ for (auto &I : FI.arguments()) {
+ if (CC == llvm::CallingConv::SPIR_KERNEL) {
+ I.info = classifyKernelArgumentType(I.type);
+ } else {
+ I.info = classifyArgumentType(I.type);
+ }
+ }
+}
+
+namespace clang {
+namespace CodeGen {
+void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
+ if (CGM.getTarget().getTriple().isSPIRV())
+ SPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
+ else
+ CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
+}
+}
+}
+
+unsigned CommonSPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
+ return llvm::CallingConv::SPIR_KERNEL;
+}
+
+void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
+ const FunctionType *&FT) const {
+ // Convert HIP kernels to SPIR-V kernels.
+ if (getABIInfo().getContext().getLangOpts().HIP) {
+ FT = getABIInfo().getContext().adjustFunctionType(
+ FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
+ return;
+ }
+}
+
+/// Construct a SPIR-V target extension type for the given OpenCL image type.
+static llvm::Type *getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType,
+ StringRef OpenCLName,
+ unsigned AccessQualifier) {
+ // These parameters compare to the operands of OpTypeImage (see
+ // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage
+ // for more details). The first 6 integer parameters all default to 0, and
+ // will be changed to 1 only for the image type(s) that set the parameter to
+ // one. The 7th integer parameter is the access qualifier, which is tacked on
+ // at the end.
+ SmallVector<unsigned, 7> IntParams = {0, 0, 0, 0, 0, 0};
+
+ // Choose the dimension of the image--this corresponds to the Dim enum in
+ // SPIR-V (first integer parameter of OpTypeImage).
+ if (OpenCLName.startswith("image2d"))
+    IntParams[0] = 1; // 2D
+ else if (OpenCLName.startswith("image3d"))
+    IntParams[0] = 2; // 3D
+ else if (OpenCLName == "image1d_buffer")
+ IntParams[0] = 5; // Buffer
+ else
+ assert(OpenCLName.startswith("image1d") && "Unknown image type");
+
+ // Set the other integer parameters of OpTypeImage if necessary. Note that the
+ // OpenCL image types don't provide any information for the Sampled or
+ // Image Format parameters.
+ if (OpenCLName.contains("_depth"))
+ IntParams[1] = 1;
+ if (OpenCLName.contains("_array"))
+ IntParams[2] = 1;
+ if (OpenCLName.contains("_msaa"))
+ IntParams[3] = 1;
+
+ // Access qualifier
+ IntParams.push_back(AccessQualifier);
+
+ return llvm::TargetExtType::get(Ctx, BaseType, {llvm::Type::getVoidTy(Ctx)},
+ IntParams);
+}
+
+llvm::Type *CommonSPIRTargetCodeGenInfo::getOpenCLType(CodeGenModule &CGM,
+ const Type *Ty) const {
+ llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+ if (auto *PipeTy = dyn_cast<PipeType>(Ty))
+ return llvm::TargetExtType::get(Ctx, "spirv.Pipe", {},
+ {!PipeTy->isReadOnly()});
+ if (auto *BuiltinTy = dyn_cast<BuiltinType>(Ty)) {
+ enum AccessQualifier : unsigned { AQ_ro = 0, AQ_wo = 1, AQ_rw = 2 };
+ switch (BuiltinTy->getKind()) {
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
+ case BuiltinType::Id: \
+ return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix);
+#include "clang/Basic/OpenCLImageTypes.def"
+ case BuiltinType::OCLSampler:
+ return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
+ case BuiltinType::OCLEvent:
+ return llvm::TargetExtType::get(Ctx, "spirv.Event");
+ case BuiltinType::OCLClkEvent:
+ return llvm::TargetExtType::get(Ctx, "spirv.DeviceEvent");
+ case BuiltinType::OCLQueue:
+ return llvm::TargetExtType::get(Ctx, "spirv.Queue");
+ case BuiltinType::OCLReserveID:
+ return llvm::TargetExtType::get(Ctx, "spirv.ReserveId");
+#define INTEL_SUBGROUP_AVC_TYPE(Name, Id) \
+ case BuiltinType::OCLIntelSubgroupAVC##Id: \
+ return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL");
+#include "clang/Basic/OpenCLExtensionTypes.def"
+ default:
+ return nullptr;
+ }
+ }
+
+ return nullptr;
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<CommonSPIRTargetCodeGenInfo>(CGM.getTypes());
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createSPIRVTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<SPIRVTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/Sparc.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/Sparc.cpp
new file mode 100644
index 000000000000..f5cafaa97315
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/Sparc.cpp
@@ -0,0 +1,409 @@
+//===- Sparc.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// SPARC v8 ABI Implementation.
+// Based on the SPARC Compliance Definition version 2.4.1.
+//
+// Ensures that complex values are passed in registers.
+//
+namespace {
+class SparcV8ABIInfo : public DefaultABIInfo {
+public:
+ SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ void computeInfo(CGFunctionInfo &FI) const override;
+};
+} // end anonymous namespace
+
+
+ABIArgInfo
+SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
+ if (Ty->isAnyComplexType()) {
+ return ABIArgInfo::getDirect();
+ }
+ else {
+ return DefaultABIInfo::classifyReturnType(Ty);
+ }
+}
+
+void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &Arg : FI.arguments())
+ Arg.info = classifyArgumentType(Arg.type);
+}
+
+namespace {
+class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {}
+
+ llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ int Offset;
+ if (isAggregateTypeForABI(CGF.CurFnInfo->getReturnType()))
+ Offset = 12;
+ else
+ Offset = 8;
+ return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
+ llvm::ConstantInt::get(CGF.Int32Ty, Offset));
+ }
+
+ llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ int Offset;
+ if (isAggregateTypeForABI(CGF.CurFnInfo->getReturnType()))
+ Offset = -12;
+ else
+ Offset = -8;
+ return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
+ llvm::ConstantInt::get(CGF.Int32Ty, Offset));
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// SPARC v9 ABI Implementation.
+// Based on the SPARC Compliance Definition version 2.4.1.
+//
+// Function arguments are mapped to a nominal "parameter array" and promoted to
+// registers depending on their type. Each argument occupies 8 or 16 bytes in
+// the array, structs larger than 16 bytes are passed indirectly.
+//
+// One case requires special care:
+//
+// struct mixed {
+// int i;
+// float f;
+// };
+//
+// When a struct mixed is passed by value, it only occupies 8 bytes in the
+// parameter array, but the int is passed in an integer register, and the float
+// is passed in a floating point register. This is represented as two arguments
+// with the LLVM IR inreg attribute:
+//
+// declare void f(i32 inreg %i, float inreg %f)
+//
+// The code generator will only allocate 4 bytes from the parameter array for
+// the inreg arguments. All other arguments are allocated a multiple of 8
+// bytes.
+//
+namespace {
+class SparcV9ABIInfo : public ABIInfo {
+public:
+ SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ // Coercion type builder for structs passed in registers. The coercion type
+ // serves two purposes:
+ //
+ // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
+ // in registers.
+ // 2. Expose aligned floating point elements as first-level elements, so the
+ // code generator knows to pass them in floating point registers.
+ //
+ // We also compute the InReg flag which indicates that the struct contains
+ // aligned 32-bit floats.
+ //
+ struct CoerceBuilder {
+ llvm::LLVMContext &Context;
+ const llvm::DataLayout &DL;
+ SmallVector<llvm::Type*, 8> Elems;
+ uint64_t Size;
+ bool InReg;
+
+ CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
+ : Context(c), DL(dl), Size(0), InReg(false) {}
+
+ // Pad Elems with integers until Size is ToSize.
+ void pad(uint64_t ToSize) {
+ assert(ToSize >= Size && "Cannot remove elements");
+ if (ToSize == Size)
+ return;
+
+ // Finish the current 64-bit word.
+ uint64_t Aligned = llvm::alignTo(Size, 64);
+ if (Aligned > Size && Aligned <= ToSize) {
+ Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
+ Size = Aligned;
+ }
+
+ // Add whole 64-bit words.
+ while (Size + 64 <= ToSize) {
+ Elems.push_back(llvm::Type::getInt64Ty(Context));
+ Size += 64;
+ }
+
+ // Final in-word padding.
+ if (Size < ToSize) {
+ Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
+ Size = ToSize;
+ }
+ }
+
+ // Add a floating point element at Offset.
+ void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
+ // Unaligned floats are treated as integers.
+ if (Offset % Bits)
+ return;
+ // The InReg flag is only required if there are any floats < 64 bits.
+ if (Bits < 64)
+ InReg = true;
+ pad(Offset);
+ Elems.push_back(Ty);
+ Size = Offset + Bits;
+ }
+
+ // Add a struct type to the coercion type, starting at Offset (in bits).
+ void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
+ const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
+ for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
+ llvm::Type *ElemTy = StrTy->getElementType(i);
+ uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
+ switch (ElemTy->getTypeID()) {
+ case llvm::Type::StructTyID:
+ addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
+ break;
+ case llvm::Type::FloatTyID:
+ addFloat(ElemOffset, ElemTy, 32);
+ break;
+ case llvm::Type::DoubleTyID:
+ addFloat(ElemOffset, ElemTy, 64);
+ break;
+ case llvm::Type::FP128TyID:
+ addFloat(ElemOffset, ElemTy, 128);
+ break;
+ case llvm::Type::PointerTyID:
+ if (ElemOffset % 64 == 0) {
+ pad(ElemOffset);
+ Elems.push_back(ElemTy);
+ Size += 64;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ // Check if Ty is a usable substitute for the coercion type.
+ bool isUsableType(llvm::StructType *Ty) const {
+ return llvm::ArrayRef(Elems) == Ty->elements();
+ }
+
+ // Get the coercion type as a literal struct type.
+ llvm::Type *getType() const {
+ if (Elems.size() == 1)
+ return Elems.front();
+ else
+ return llvm::StructType::get(Context, Elems);
+ }
+ };
+};
+} // end anonymous namespace
+
+ABIArgInfo
+SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
+ if (Ty->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // Anything too big to fit in registers is passed with an explicit indirect
+ // pointer / sret pointer.
+ if (Size > SizeLimit)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Integer types smaller than a register are extended.
+ if (Size < 64 && Ty->isIntegerType())
+ return ABIArgInfo::getExtend(Ty);
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() < 64)
+ return ABIArgInfo::getExtend(Ty);
+
+ // Other non-aggregates go in registers.
+ if (!isAggregateTypeForABI(Ty))
+ return ABIArgInfo::getDirect();
+
+ // If a C++ object has either a non-trivial copy constructor or a non-trivial
+ // destructor, it is passed with an explicit indirect pointer / sret pointer.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ // This is a small aggregate type that should be passed in registers.
+ // Build a coercion type from the LLVM struct type.
+ llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
+ if (!StrTy)
+ return ABIArgInfo::getDirect();
+
+ CoerceBuilder CB(getVMContext(), getDataLayout());
+ CB.addStruct(0, StrTy);
+ CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
+
+ // Try to use the original type for coercion.
+ llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
+
+ if (CB.InReg)
+ return ABIArgInfo::getDirectInReg(CoerceTy);
+ else
+ return ABIArgInfo::getDirect(CoerceTy);
+}
+
+Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ ABIArgInfo AI = classifyType(Ty, 16 * 8);
+ llvm::Type *ArgTy = CGT.ConvertType(Ty);
+ if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
+ AI.setCoerceToType(ArgTy);
+
+ CharUnits SlotSize = CharUnits::fromQuantity(8);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ Address Addr = Address(Builder.CreateLoad(VAListAddr, "ap.cur"),
+ getVAListElementType(CGF), SlotSize);
+ llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
+
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+
+ Address ArgAddr = Address::invalid();
+ CharUnits Stride;
+ switch (AI.getKind()) {
+ case ABIArgInfo::Expand:
+ case ABIArgInfo::CoerceAndExpand:
+ case ABIArgInfo::InAlloca:
+ llvm_unreachable("Unsupported ABI kind for va_arg");
+
+ case ABIArgInfo::Extend: {
+ Stride = SlotSize;
+ CharUnits Offset = SlotSize - TypeInfo.Width;
+ ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
+ break;
+ }
+
+ case ABIArgInfo::Direct: {
+ auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
+ Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
+ ArgAddr = Addr;
+ break;
+ }
+
+ case ABIArgInfo::Indirect:
+ case ABIArgInfo::IndirectAliased:
+ Stride = SlotSize;
+ ArgAddr = Addr.withElementType(ArgPtrTy);
+ ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), ArgTy,
+ TypeInfo.Align);
+ break;
+
+ case ABIArgInfo::Ignore:
+ return Address(llvm::UndefValue::get(ArgPtrTy), ArgTy, TypeInfo.Align);
+ }
+
+ // Update VAList.
+ Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
+ Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
+
+ return ArgAddr.withElementType(ArgTy);
+}
+
+void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
+ for (auto &I : FI.arguments())
+ I.info = classifyType(I.type, 16 * 8);
+}
+
+namespace {
+class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 14;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+
+ llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
+ llvm::ConstantInt::get(CGF.Int32Ty, 8));
+ }
+
+ llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
+ llvm::ConstantInt::get(CGF.Int32Ty, -8));
+ }
+};
+} // end anonymous namespace
+
+bool
+SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+
+ // 0-31: the 8-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Eight8, 0, 31);
+
+ // 32-63: f0-31, the 4-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Four8, 32, 63);
+
+ // Y = 64
+ // PSR = 65
+ // WIM = 66
+ // TBR = 67
+ // PC = 68
+ // NPC = 69
+ // FSR = 70
+ // CSR = 71
+ AssignToArrayRange(Builder, Address, Eight8, 64, 71);
+
+ // 72-87: d0-15, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 72, 87);
+
+ return false;
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createSparcV8TargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<SparcV8TargetCodeGenInfo>(CGM.getTypes());
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createSparcV9TargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<SparcV9TargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/SystemZ.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/SystemZ.cpp
new file mode 100644
index 000000000000..6eb0c6ef2f7d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/SystemZ.cpp
@@ -0,0 +1,538 @@
+//===- SystemZ.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+#include "clang/Basic/Builtins.h"
+#include "llvm/IR/IntrinsicsS390.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// SystemZ ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class SystemZABIInfo : public ABIInfo {
+ bool HasVector;
+ bool IsSoftFloatABI;
+
+public:
+ SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF)
+ : ABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {}
+
+ bool isPromotableIntegerTypeForABI(QualType Ty) const;
+ bool isCompoundType(QualType Ty) const;
+ bool isVectorArgumentType(QualType Ty) const;
+ bool isFPArgumentType(QualType Ty) const;
+ QualType GetSingleElementType(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType ArgTy) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
+ ASTContext &Ctx;
+
+ // These are used for speeding up the search for a visible vector ABI.
+ mutable bool HasVisibleVecABIFlag = false;
+ mutable std::set<const Type *> SeenTypes;
+
+ // Returns true (the first time) if Ty is, or is found to include, a vector
+ // type that exposes the vector ABI. This is any vector >=16 bytes which
+ // with vector support are aligned to only 8 bytes. When IsParam is true,
+ // the type belongs to a value as passed between functions. If it is a
+ // vector <=16 bytes it will be passed in a vector register (if supported).
+ bool isVectorTypeBased(const Type *Ty, bool IsParam) const;
+
+public:
+ SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
+ : TargetCodeGenInfo(
+ std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)),
+ Ctx(CGT.getContext()) {
+ SwiftInfo =
+ std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
+ }
+
+ // The vector ABI is different when the vector facility is present and when
+ // a module e.g. defines an externally visible vector variable, a flag
+ // indicating a visible vector ABI is added. Eventually this will result in
+ // a GNU attribute indicating the vector ABI of the module. Ty is the type
+ // of a variable or function parameter that is globally visible.
+ void handleExternallyVisibleObjABI(const Type *Ty, CodeGen::CodeGenModule &M,
+ bool IsParam) const {
+ if (!HasVisibleVecABIFlag && isVectorTypeBased(Ty, IsParam)) {
+ M.getModule().addModuleFlag(llvm::Module::Warning,
+ "s390x-visible-vector-ABI", 1);
+ HasVisibleVecABIFlag = true;
+ }
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override {
+ if (!D)
+ return;
+
+ // Check if the vector ABI becomes visible by an externally visible
+ // variable or function.
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->isExternallyVisible())
+ handleExternallyVisibleObjABI(VD->getType().getTypePtr(), M,
+ /*IsParam*/false);
+ }
+ else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isExternallyVisible())
+ handleExternallyVisibleObjABI(FD->getType().getTypePtr(), M,
+ /*IsParam*/false);
+ }
+ }
+
+ llvm::Value *testFPKind(llvm::Value *V, unsigned BuiltinID,
+ CGBuilderTy &Builder,
+ CodeGenModule &CGM) const override {
+ assert(V->getType()->isFloatingPointTy() && "V should have an FP type.");
+ // Only use TDC in constrained FP mode.
+ if (!Builder.getIsFPConstrained())
+ return nullptr;
+
+ llvm::Type *Ty = V->getType();
+ if (Ty->isFloatTy() || Ty->isDoubleTy() || Ty->isFP128Ty()) {
+ llvm::Module &M = CGM.getModule();
+ auto &Ctx = M.getContext();
+ llvm::Function *TDCFunc =
+ llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::s390_tdc, Ty);
+ unsigned TDCBits = 0;
+ switch (BuiltinID) {
+ case Builtin::BI__builtin_isnan:
+ TDCBits = 0xf;
+ break;
+ case Builtin::BIfinite:
+ case Builtin::BI__finite:
+ case Builtin::BIfinitef:
+ case Builtin::BI__finitef:
+ case Builtin::BIfinitel:
+ case Builtin::BI__finitel:
+ case Builtin::BI__builtin_isfinite:
+ TDCBits = 0xfc0;
+ break;
+ case Builtin::BI__builtin_isinf:
+ TDCBits = 0x30;
+ break;
+ default:
+ break;
+ }
+ if (TDCBits)
+ return Builder.CreateCall(
+ TDCFunc,
+ {V, llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), TDCBits)});
+ }
+ return nullptr;
+ }
+};
+}
+
+bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Promotable integer types are required to be promoted by the ABI.
+ if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
+ return true;
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() < 64)
+ return true;
+
+ // 32-bit values must also be promoted.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool SystemZABIInfo::isCompoundType(QualType Ty) const {
+ return (Ty->isAnyComplexType() ||
+ Ty->isVectorType() ||
+ isAggregateTypeForABI(Ty));
+}
+
+bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
+ return (HasVector &&
+ Ty->isVectorType() &&
+ getContext().getTypeSize(Ty) <= 128);
+}
+
+bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
+ if (IsSoftFloatABI)
+ return false;
+
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ return true;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
+ const RecordType *RT = Ty->getAs<RecordType>();
+
+ if (RT && RT->isStructureOrClassType()) {
+ const RecordDecl *RD = RT->getDecl();
+ QualType Found;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (CXXRD->hasDefinition())
+ for (const auto &I : CXXRD->bases()) {
+ QualType Base = I.getType();
+
+ // Empty bases don't affect things either way.
+ if (isEmptyRecord(getContext(), Base, true))
+ continue;
+
+ if (!Found.isNull())
+ return Ty;
+ Found = GetSingleElementType(Base);
+ }
+
+ // Check the fields.
+ for (const auto *FD : RD->fields()) {
+ // Unlike isSingleElementStruct(), empty structure and array fields
+ // do count. So do anonymous bitfields that aren't zero-sized.
+
+ // Like isSingleElementStruct(), ignore C++20 empty data members.
+ if (FD->hasAttr<NoUniqueAddressAttr>() &&
+ isEmptyRecord(getContext(), FD->getType(), true))
+ continue;
+
+ // Unlike isSingleElementStruct(), arrays do not count.
+ // Nested structures still do though.
+ if (!Found.isNull())
+ return Ty;
+ Found = GetSingleElementType(FD->getType());
+ }
+
+ // Unlike isSingleElementStruct(), trailing padding is allowed.
+ // An 8-byte aligned struct s { float f; } is passed as a double.
+ if (!Found.isNull())
+ return Found;
+ }
+
+ return Ty;
+}
+
+Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i64 __gpr;
+ // i64 __fpr;
+ // i8 *__overflow_arg_area;
+ // i8 *__reg_save_area;
+ // };
+
+ // Every non-vector argument occupies 8 bytes and is passed by preference
+ // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
+ // always passed on the stack.
+ const SystemZTargetCodeGenInfo &SZCGI =
+ static_cast<const SystemZTargetCodeGenInfo &>(
+ CGT.getCGM().getTargetCodeGenInfo());
+ Ty = getContext().getCanonicalType(Ty);
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+ llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *DirectTy = ArgTy;
+ ABIArgInfo AI = classifyArgumentType(Ty);
+ bool IsIndirect = AI.isIndirect();
+ bool InFPRs = false;
+ bool IsVector = false;
+ CharUnits UnpaddedSize;
+ CharUnits DirectAlign;
+ SZCGI.handleExternallyVisibleObjABI(Ty.getTypePtr(), CGT.getCGM(),
+ /*IsParam*/true);
+ if (IsIndirect) {
+ DirectTy = llvm::PointerType::getUnqual(DirectTy);
+ UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
+ } else {
+ if (AI.getCoerceToType())
+ ArgTy = AI.getCoerceToType();
+ InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy()));
+ IsVector = ArgTy->isVectorTy();
+ UnpaddedSize = TyInfo.Width;
+ DirectAlign = TyInfo.Align;
+ }
+ CharUnits PaddedSize = CharUnits::fromQuantity(8);
+ if (IsVector && UnpaddedSize > PaddedSize)
+ PaddedSize = CharUnits::fromQuantity(16);
+ assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
+
+ CharUnits Padding = (PaddedSize - UnpaddedSize);
+
+ llvm::Type *IndexTy = CGF.Int64Ty;
+ llvm::Value *PaddedSizeV =
+ llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
+
+ if (IsVector) {
+ // Work out the address of a vector argument on the stack.
+ // Vector arguments are always passed in the high bits of a
+ // single (8 byte) or double (16 byte) stack slot.
+ Address OverflowArgAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
+ Address OverflowArgArea =
+ Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
+ CGF.Int8Ty, TyInfo.Align);
+ Address MemAddr = OverflowArgArea.withElementType(DirectTy);
+
+ // Update overflow_arg_area_ptr pointer
+ llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP(
+ OverflowArgArea.getElementType(), OverflowArgArea.getPointer(),
+ PaddedSizeV, "overflow_arg_area");
+ CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
+
+ return MemAddr;
+ }
+
+ assert(PaddedSize.getQuantity() == 8);
+
+ unsigned MaxRegs, RegCountField, RegSaveIndex;
+ CharUnits RegPadding;
+ if (InFPRs) {
+ MaxRegs = 4; // Maximum of 4 FPR arguments
+ RegCountField = 1; // __fpr
+ RegSaveIndex = 16; // save offset for f0
+ RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
+ } else {
+ MaxRegs = 5; // Maximum of 5 GPR arguments
+ RegCountField = 0; // __gpr
+ RegSaveIndex = 2; // save offset for r2
+ RegPadding = Padding; // values are passed in the low bits of a GPR
+ }
+
+ Address RegCountPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
+ llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
+ llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
+ llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
+ "fits_in_regs");
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+ CGF.EmitBlock(InRegBlock);
+
+ // Work out the address of an argument register.
+ llvm::Value *ScaledRegCount =
+ CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
+ llvm::Value *RegBase =
+ llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
+ + RegPadding.getQuantity());
+ llvm::Value *RegOffset =
+ CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
+ Address RegSaveAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
+ llvm::Value *RegSaveArea =
+ CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
+ Address RawRegAddr(
+ CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, RegOffset, "raw_reg_addr"),
+ CGF.Int8Ty, PaddedSize);
+ Address RegAddr = RawRegAddr.withElementType(DirectTy);
+
+ // Update the register count
+ llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
+ llvm::Value *NewRegCount =
+ CGF.Builder.CreateAdd(RegCount, One, "reg_count");
+ CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+ CGF.EmitBlock(InMemBlock);
+
+ // Work out the address of a stack argument.
+ Address OverflowArgAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
+ Address OverflowArgArea =
+ Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
+ CGF.Int8Ty, PaddedSize);
+ Address RawMemAddr =
+ CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
+ Address MemAddr = RawMemAddr.withElementType(DirectTy);
+
+ // Update overflow_arg_area_ptr pointer
+ llvm::Value *NewOverflowArgArea =
+ CGF.Builder.CreateGEP(OverflowArgArea.getElementType(),
+ OverflowArgArea.getPointer(), PaddedSizeV,
+ "overflow_arg_area");
+ CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
+ CGF.EmitBranch(ContBlock);
+
+ // Return the appropriate result.
+ CGF.EmitBlock(ContBlock);
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
+ "va_arg.addr");
+
+ if (IsIndirect)
+ ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), ArgTy,
+ TyInfo.Align);
+
+ return ResAddr;
+}
+
+ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+ if (isVectorArgumentType(RetTy))
+ return ABIArgInfo::getDirect();
+ if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
+ return getNaturalAlignIndirect(RetTy);
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
+ // Handle the generic C++ ABI.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ // Integers and enums are extended to full register width.
+ if (isPromotableIntegerTypeForABI(Ty))
+ return ABIArgInfo::getExtend(Ty);
+
+ // Handle vector types and vector-like structure types. Note that
+ // as opposed to float-like structure types, we do not allow any
+ // padding for vector-like structures, so verify the sizes match.
+ uint64_t Size = getContext().getTypeSize(Ty);
+ QualType SingleElementTy = GetSingleElementType(Ty);
+ if (isVectorArgumentType(SingleElementTy) &&
+ getContext().getTypeSize(SingleElementTy) == Size)
+ return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
+
+ // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
+ if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ // Handle small structures.
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ // Structures with flexible arrays have variable length, so really
+ // fail the size test above.
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ // The structure is passed as an unextended integer, a float, or a double.
+ llvm::Type *PassTy;
+ if (isFPArgumentType(SingleElementTy)) {
+ assert(Size == 32 || Size == 64);
+ if (Size == 32)
+ PassTy = llvm::Type::getFloatTy(getVMContext());
+ else
+ PassTy = llvm::Type::getDoubleTy(getVMContext());
+ } else
+ PassTy = llvm::IntegerType::get(getVMContext(), Size);
+ return ABIArgInfo::getDirect(PassTy);
+ }
+
+ // Non-structure compounds are passed indirectly.
+ if (isCompoundType(Ty))
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ return ABIArgInfo::getDirect(nullptr);
+}
+
+void SystemZABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ const SystemZTargetCodeGenInfo &SZCGI =
+ static_cast<const SystemZTargetCodeGenInfo &>(
+ CGT.getCGM().getTargetCodeGenInfo());
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ unsigned Idx = 0;
+ for (auto &I : FI.arguments()) {
+ I.info = classifyArgumentType(I.type);
+ if (FI.isVariadic() && Idx++ >= FI.getNumRequiredArgs())
+ // Check if a vararg vector argument is passed, in which case the
+ // vector ABI becomes visible as the va_list could be passed on to
+ // other functions.
+ SZCGI.handleExternallyVisibleObjABI(I.type.getTypePtr(), CGT.getCGM(),
+ /*IsParam*/true);
+ }
+}
+
+bool SystemZTargetCodeGenInfo::isVectorTypeBased(const Type *Ty,
+ bool IsParam) const {
+ if (!SeenTypes.insert(Ty).second)
+ return false;
+
+ if (IsParam) {
+ // A narrow (<16 bytes) vector will as a parameter also expose the ABI as
+ // it will be passed in a vector register. A wide (>16 bytes) vector will
+ // be passed via "hidden" pointer where any extra alignment is not
+ // required (per GCC).
+ const Type *SingleEltTy = getABIInfo<SystemZABIInfo>()
+ .GetSingleElementType(QualType(Ty, 0))
+ .getTypePtr();
+ bool SingleVecEltStruct = SingleEltTy != Ty && SingleEltTy->isVectorType() &&
+ Ctx.getTypeSize(SingleEltTy) == Ctx.getTypeSize(Ty);
+ if (Ty->isVectorType() || SingleVecEltStruct)
+ return Ctx.getTypeSize(Ty) / 8 <= 16;
+ }
+
+ // Assume pointers are dereferenced.
+ while (Ty->isPointerType() || Ty->isArrayType())
+ Ty = Ty->getPointeeOrArrayElementType();
+
+ // Vectors >= 16 bytes expose the ABI through alignment requirements.
+ if (Ty->isVectorType() && Ctx.getTypeSize(Ty) / 8 >= 16)
+ return true;
+
+ if (const auto *RecordTy = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RecordTy->getDecl();
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (CXXRD->hasDefinition())
+ for (const auto &I : CXXRD->bases())
+ if (isVectorTypeBased(I.getType().getTypePtr(), /*IsParam*/false))
+ return true;
+ for (const auto *FD : RD->fields())
+ if (isVectorTypeBased(FD->getType().getTypePtr(), /*IsParam*/false))
+ return true;
+ }
+
+ if (const auto *FT = Ty->getAs<FunctionType>())
+ if (isVectorTypeBased(FT->getReturnType().getTypePtr(), /*IsParam*/true))
+ return true;
+ if (const FunctionProtoType *Proto = Ty->getAs<FunctionProtoType>())
+ for (const auto &ParamType : Proto->getParamTypes())
+ if (isVectorTypeBased(ParamType.getTypePtr(), /*IsParam*/true))
+ return true;
+
+ return false;
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createSystemZTargetCodeGenInfo(CodeGenModule &CGM, bool HasVector,
+ bool SoftFloatABI) {
+ return std::make_unique<SystemZTargetCodeGenInfo>(CGM.getTypes(), HasVector,
+ SoftFloatABI);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/TCE.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/TCE.cpp
new file mode 100644
index 000000000000..d7178b4b8a94
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/TCE.cpp
@@ -0,0 +1,82 @@
+//===- TCE.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
+// Currently subclassed only to implement custom OpenCL C function attribute
+// handling.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class TCETargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ TCETargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+};
+
+void TCETargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (GV->isDeclaration())
+ return;
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ if (M.getLangOpts().OpenCL) {
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ // OpenCL C Kernel functions are not subject to inlining
+ F->addFnAttr(llvm::Attribute::NoInline);
+ const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
+ if (Attr) {
+ // Convert the reqd_work_group_size() attributes to metadata.
+ llvm::LLVMContext &Context = F->getContext();
+ llvm::NamedMDNode *OpenCLMetadata =
+ M.getModule().getOrInsertNamedMetadata(
+ "opencl.kernel_wg_size_info");
+
+ SmallVector<llvm::Metadata *, 5> Operands;
+ Operands.push_back(llvm::ConstantAsMetadata::get(F));
+
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
+ M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
+ M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
+ M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
+
+ // Add a boolean constant operand for "required" (true) or "hint"
+ // (false) for implementing the work_group_size_hint attr later.
+ // Currently always true as the hint is not yet implemented.
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
+ OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
+ }
+ }
+ }
+}
+
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createTCETargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<TCETargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/VE.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/VE.cpp
new file mode 100644
index 000000000000..a7acc249cc2b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/VE.cpp
@@ -0,0 +1,71 @@
+//===- VE.cpp -------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// VE ABI Implementation.
+//
+namespace {
+class VEABIInfo : public DefaultABIInfo {
+public:
+ VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ void computeInfo(CGFunctionInfo &FI) const override;
+};
+} // end anonymous namespace
+
+ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
+ if (Ty->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size < 64 && Ty->isIntegerType())
+ return ABIArgInfo::getExtend(Ty);
+ return DefaultABIInfo::classifyReturnType(Ty);
+}
+
+ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const {
+ if (Ty->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size < 64 && Ty->isIntegerType())
+ return ABIArgInfo::getExtend(Ty);
+ return DefaultABIInfo::classifyArgumentType(Ty);
+}
+
+void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &Arg : FI.arguments())
+ Arg.info = classifyArgumentType(Arg.type);
+}
+
+namespace {
+class VETargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ VETargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {}
+ // VE ABI requires the arguments of variadic and prototype-less functions
+ // are passed in both registers and memory.
+ bool isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const override {
+ return true;
+ }
+};
+} // end anonymous namespace
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createVETargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<VETargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/WebAssembly.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/WebAssembly.cpp
new file mode 100644
index 000000000000..bd332228ce5b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/WebAssembly.cpp
@@ -0,0 +1,173 @@
+//===- WebAssembly.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// WebAssembly ABI Implementation
+//
+// This is a very simple ABI that relies a lot on DefaultABIInfo.
+//===----------------------------------------------------------------------===//
+
+// ABI classification for WebAssembly. Delegates most cases to an embedded
+// DefaultABIInfo; `Kind` selects the experimental multivalue ABI behavior
+// used in classifyArgumentType/classifyReturnType below.
+class WebAssemblyABIInfo final : public ABIInfo {
+  DefaultABIInfo defaultInfo;
+  WebAssemblyABIKind Kind;
+
+public:
+  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT,
+                              WebAssemblyABIKind Kind)
+      : ABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}
+
+private:
+  ABIArgInfo classifyReturnType(QualType RetTy) const;
+  ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
+  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
+  // overload them.
+  void computeInfo(CGFunctionInfo &FI) const override {
+    if (!getCXXABI().classifyReturnType(FI))
+      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+    for (auto &Arg : FI.arguments())
+      Arg.info = classifyArgumentType(Arg.type);
+  }
+
+  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                    QualType Ty) const override;
+};
+
+// Target hooks for WebAssembly: translates the wasm import/export attributes
+// into IR function attributes and exposes the externref/funcref IR types.
+class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
+public:
+  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
+                                        WebAssemblyABIKind K)
+      : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {
+    SwiftInfo =
+        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
+  }
+
+  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+                           CodeGen::CodeGenModule &CGM) const override {
+    TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+    // Each wasm attribute maps to a string function attribute consumed by
+    // the WebAssembly backend.
+    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+      if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
+        llvm::Function *Fn = cast<llvm::Function>(GV);
+        llvm::AttrBuilder B(GV->getContext());
+        B.addAttribute("wasm-import-module", Attr->getImportModule());
+        Fn->addFnAttrs(B);
+      }
+      if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
+        llvm::Function *Fn = cast<llvm::Function>(GV);
+        llvm::AttrBuilder B(GV->getContext());
+        B.addAttribute("wasm-import-name", Attr->getImportName());
+        Fn->addFnAttrs(B);
+      }
+      if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
+        llvm::Function *Fn = cast<llvm::Function>(GV);
+        llvm::AttrBuilder B(GV->getContext());
+        B.addAttribute("wasm-export-name", Attr->getExportName());
+        Fn->addFnAttrs(B);
+      }
+    }
+
+    // NOTE(review): this second dyn_cast_or_null<FunctionDecl> duplicates the
+    // one above and could be folded into it.
+    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+      llvm::Function *Fn = cast<llvm::Function>(GV);
+      if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
+        Fn->addFnAttr("no-prototype");
+    }
+  }
+
+  /// Return the WebAssembly externref reference type.
+  virtual llvm::Type *getWasmExternrefReferenceType() const override {
+    return llvm::Type::getWasm_ExternrefTy(getABIInfo().getVMContext());
+  }
+  /// Return the WebAssembly funcref reference type.
+  virtual llvm::Type *getWasmFuncrefReferenceType() const override {
+    return llvm::Type::getWasm_FuncrefTy(getABIInfo().getVMContext());
+  }
+};
+
+/// Classify argument of given type \p Ty.
+///
+/// Aggregates are passed indirectly (non-trivial records), ignored (empty),
+/// unwrapped (single-element structs), expanded (multivalue ABI, no
+/// bit-fields), or otherwise handed to DefaultABIInfo.
+ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
+  Ty = useFirstFieldIfTransparentUnion(Ty);
+
+  if (isAggregateTypeForABI(Ty)) {
+    // Records with non-trivial destructors/copy-constructors should not be
+    // passed by value.
+    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
+      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+    // Ignore empty structs/unions.
+    if (isEmptyRecord(getContext(), Ty, true))
+      return ABIArgInfo::getIgnore();
+    // Lower single-element structs to just pass a regular value. TODO: We
+    // could do reasonable-size multiple-element structs too, using getExpand(),
+    // though watch out for things like bitfields.
+    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
+      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+    // For the experimental multivalue ABI, fully expand all other aggregates
+    // (any aggregate reaching here is a record — hence the assert).
+    if (Kind == WebAssemblyABIKind::ExperimentalMV) {
+      const RecordType *RT = Ty->getAs<RecordType>();
+      assert(RT);
+      bool HasBitField = false;
+      for (auto *Field : RT->getDecl()->fields()) {
+        if (Field->isBitField()) {
+          HasBitField = true;
+          break;
+        }
+      }
+      if (!HasBitField)
+        return ABIArgInfo::getExpand();
+    }
+  }
+
+  // Otherwise just do the default thing.
+  return defaultInfo.classifyArgumentType(Ty);
+}
+
+// Classify the return type; parallels classifyArgumentType above, except
+// non-trivial records fall through to defaultInfo rather than being handled
+// here (getRecordArgABI() != 0 skips the whole special-case block).
+ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
+  if (isAggregateTypeForABI(RetTy)) {
+    // Records with non-trivial destructors/copy-constructors should not be
+    // returned by value.
+    if (!getRecordArgABI(RetTy, getCXXABI())) {
+      // Ignore empty structs/unions.
+      if (isEmptyRecord(getContext(), RetTy, true))
+        return ABIArgInfo::getIgnore();
+      // Lower single-element structs to just return a regular value. TODO: We
+      // could do reasonable-size multiple-element structs too, using
+      // ABIArgInfo::getDirect().
+      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
+        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+      // For the experimental multivalue ABI, return all other aggregates
+      if (Kind == WebAssemblyABIKind::ExperimentalMV)
+        return ABIArgInfo::getDirect();
+    }
+  }
+
+  // Otherwise just do the default thing.
+  return defaultInfo.classifyReturnType(RetTy);
+}
+
+// Emit a va_arg load. An argument is fetched indirectly exactly when the
+// classification above would not pass it directly/ignored (aggregate that is
+// neither empty nor single-element); slot size is 4 bytes.
+Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                                      QualType Ty) const {
+  bool IsIndirect = isAggregateTypeForABI(Ty) &&
+                    !isEmptyRecord(getContext(), Ty, true) &&
+                    !isSingleElementStruct(Ty, getContext());
+  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+                          getContext().getTypeInfoInChars(Ty),
+                          CharUnits::fromQuantity(4),
+                          /*AllowHigherAlign=*/true);
+}
+
+// Public factory: builds the WebAssembly target codegen info for ABI kind K.
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createWebAssemblyTargetCodeGenInfo(CodeGenModule &CGM,
+                                            WebAssemblyABIKind K) {
+  return std::make_unique<WebAssemblyTargetCodeGenInfo>(CGM.getTypes(), K);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/X86.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/X86.cpp
new file mode 100644
index 000000000000..31679d899a44
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/X86.cpp
@@ -0,0 +1,3402 @@
+//===- X86.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+#include "clang/Basic/DiagnosticFrontend.h"
+#include "llvm/ADT/SmallBitVector.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+namespace {
+
+/// IsX86_MMXType - Return true if this is an MMX type.
+bool IsX86_MMXType(llvm::Type *IRType) {
+  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
+  // The final check excludes <1 x i64>, whose scalar size is 64.
+  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
+    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
+    IRType->getScalarSizeInBits() != 64;
+}
+
+// Adjust an inline-asm operand type for x86 MMX constraints ("y", "&y",
+// "^Ym"): 64-bit vectors become the IR x86_mmx type; any other vector width
+// under an MMX constraint is invalid (nullptr). Non-MMX constraints pass
+// through unchanged.
+static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+                                          StringRef Constraint,
+                                          llvm::Type* Ty) {
+  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
+                     .Cases("y", "&y", "^Ym", true)
+                     .Default(false);
+  if (IsMMXCons && Ty->isVectorTy()) {
+    if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedValue() !=
+        64) {
+      // Invalid MMX constraint
+      return nullptr;
+    }
+
+    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
+  }
+
+  // No operation needed
+  return Ty;
+}
+
+/// Returns true if this type can be passed in SSE registers with the
+/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
+/// Accepts floating-point builtins except half and x87 80-bit long double,
+/// and 128/256/512-bit vectors.
+static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
+  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
+      if (BT->getKind() == BuiltinType::LongDouble) {
+        if (&Context.getTargetInfo().getLongDoubleFormat() ==
+            &llvm::APFloat::x87DoubleExtended())
+          return false;
+      }
+      return true;
+    }
+  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
+    // registers specially.
+    unsigned VecSize = Context.getTypeSize(VT);
+    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
+      return true;
+  }
+  return false;
+}
+
+/// Returns true if this aggregate is small enough to be passed in SSE registers
+/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
+/// The limit is four homogeneous members.
+static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
+  return NumMembers <= 4;
+}
+
+/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
+/// Direct + InReg, with flattening disabled so the HVA is kept intact.
+static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
+  auto AI = ABIArgInfo::getDirect(T);
+  AI.setInReg(true);
+  AI.setCanBeFlattened(false);
+  return AI;
+}
+
+//===----------------------------------------------------------------------===//
+// X86-32 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+/// Similar to llvm::CCState, but for Clang.
+/// Tracks per-call-site register budget and which arguments were already
+/// assigned by the vectorcall first pass.
+struct CCState {
+  CCState(CGFunctionInfo &FI)
+      : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}
+
+  llvm::SmallBitVector IsPreassigned; // args handled by runVectorCallFirstPass
+  unsigned CC = CallingConv::CC_C;
+  unsigned FreeRegs = 0;              // remaining integer registers
+  unsigned FreeSSERegs = 0;           // remaining SSE registers
+};
+
+/// X86_32ABIInfo - The X86-32 ABI information.
+/// The boolean flags capture the many i386 ABI variants (Darwin, Win32, MCU,
+/// Linux/Cygwin, soft-float); they are set once in the constructor from the
+/// target triple and the frontend options.
+class X86_32ABIInfo : public ABIInfo {
+  enum Class {
+    Integer,
+    Float
+  };
+
+  static const unsigned MinABIStackAlignInBytes = 4;
+
+  bool IsDarwinVectorABI;
+  bool IsRetSmallStructInRegABI;
+  bool IsWin32StructABI;
+  bool IsSoftFloatABI;
+  bool IsMCUABI;
+  bool IsLinuxABI;
+  unsigned DefaultNumRegisterParameters;
+
+  // Register-sized on i386 means 8/16/32/64 bits.
+  static bool isRegisterSize(unsigned Size) {
+    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
+  }
+
+  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
+    // FIXME: Assumes vectorcall is in use.
+    return isX86VectorTypeForVectorCall(getContext(), Ty);
+  }
+
+  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+                                         uint64_t NumMembers) const override {
+    // FIXME: Assumes vectorcall is in use.
+    return isX86VectorCallAggregateSmallEnough(NumMembers);
+  }
+
+  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
+
+  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
+  /// such that the argument will be passed in memory.
+  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
+
+  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
+
+  /// Return the alignment to use for the given type on the stack.
+  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
+
+  Class classify(QualType Ty) const;
+  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
+  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
+
+  /// Updates the number of available free registers, returns
+  /// true if any registers were allocated.
+  bool updateFreeRegs(QualType Ty, CCState &State) const;
+
+  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
+                                bool &NeedsPadding) const;
+  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
+
+  bool canExpandIndirectArgument(QualType Ty) const;
+
+  /// Rewrite the function info so that all memory arguments use
+  /// inalloca.
+  void rewriteWithInAlloca(CGFunctionInfo &FI) const;
+
+  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
+                           CharUnits &StackOffset, ABIArgInfo &Info,
+                           QualType Type) const;
+  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;
+
+public:
+
+  void computeInfo(CGFunctionInfo &FI) const override;
+  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+                    QualType Ty) const override;
+
+  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
+                bool RetSmallStructInRegABI, bool Win32StructABI,
+                unsigned NumRegisterParameters, bool SoftFloatABI)
+      : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
+        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
+        IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
+        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
+        IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
+                   CGT.getTarget().getTriple().isOSCygMing()),
+        DefaultNumRegisterParameters(NumRegisterParameters) {}
+};
+
+// Swift calling-convention lowering for x86-32; Swift error values are not
+// passed in a register on this target.
+class X86_32SwiftABIInfo : public SwiftABIInfo {
+public:
+  explicit X86_32SwiftABIInfo(CodeGenTypes &CGT)
+      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/false) {}
+
+  bool shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
+                            bool AsReturnValue) const override {
+    // LLVM's x86-32 lowering currently only assigns up to three
+    // integer registers and three fp registers. Oddly, it'll use up to
+    // four vector registers for vectors, but those can overlap with the
+    // scalar registers.
+    return occupiesMoreThan(ComponentTys, /*total=*/3);
+  }
+};
+
+// Target hooks for x86-32: DWARF EH register numbering, inline-asm type
+// adjustment, and EAX/EDX return-register output constraints.
+class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
+                          bool RetSmallStructInRegABI, bool Win32StructABI,
+                          unsigned NumRegisterParameters, bool SoftFloatABI)
+      : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
+            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
+            NumRegisterParameters, SoftFloatABI)) {
+    SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
+  }
+
+  static bool isStructReturnInRegABI(
+      const llvm::Triple &Triple, const CodeGenOptions &Opts);
+
+  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+                           CodeGen::CodeGenModule &CGM) const override;
+
+  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
+    // Darwin uses different dwarf register numbers for EH.
+    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
+    return 4;
+  }
+
+  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+                               llvm::Value *Address) const override;
+
+  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+                                  StringRef Constraint,
+                                  llvm::Type* Ty) const override {
+    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
+  }
+
+  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
+                                std::string &Constraints,
+                                std::vector<llvm::Type *> &ResultRegTypes,
+                                std::vector<llvm::Type *> &ResultTruncRegTypes,
+                                std::vector<LValue> &ResultRegDests,
+                                std::string &AsmString,
+                                unsigned NumOutputs) const override;
+
+  // A no-op mov that ARC's runtime recognizes as the autorelease-return
+  // handshake marker.
+  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
+    return "movl\t%ebp, %ebp"
+           "\t\t// marker for objc_retainAutoreleaseReturnValue";
+  }
+};
+
+}
+
+/// Rewrite input constraint references after adding some output constraints.
+/// In the case where there is one output and one input and we add one output,
+/// we need to replace all operand references greater than or equal to 1:
+///     mov $0, $1
+///     mov eax, $1
+/// The result will be:
+///     mov $0, $2
+///     mov eax, $2
+static void rewriteInputConstraintReferences(unsigned FirstIn,
+                                             unsigned NumNewOuts,
+                                             std::string &AsmString) {
+  std::string Buf;
+  llvm::raw_string_ostream OS(Buf);
+  size_t Pos = 0;
+  while (Pos < AsmString.size()) {
+    size_t DollarStart = AsmString.find('$', Pos);
+    if (DollarStart == std::string::npos)
+      DollarStart = AsmString.size();
+    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
+    if (DollarEnd == std::string::npos)
+      DollarEnd = AsmString.size();
+    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
+    Pos = DollarEnd;
+    size_t NumDollars = DollarEnd - DollarStart;
+    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
+      // We have an operand reference. (An even run of '$'s is escaped dollar
+      // signs, not a reference.)
+      size_t DigitStart = Pos;
+      if (AsmString[DigitStart] == '{') {
+        OS << '{';
+        ++DigitStart;
+      }
+      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
+      if (DigitEnd == std::string::npos)
+        DigitEnd = AsmString.size();
+      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
+      unsigned OperandIndex;
+      if (!OperandStr.getAsInteger(10, OperandIndex)) {
+        // Shift references at or past the first input by the number of
+        // outputs we inserted.
+        if (OperandIndex >= FirstIn)
+          OperandIndex += NumNewOuts;
+        OS << OperandIndex;
+      } else {
+        OS << OperandStr;
+      }
+      Pos = DigitEnd;
+    }
+  }
+  AsmString = std::move(OS.str());
+}
+
+/// Add output constraints for EAX:EDX because they are return registers.
+void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
+    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
+    std::vector<llvm::Type *> &ResultRegTypes,
+    std::vector<llvm::Type *> &ResultTruncRegTypes,
+    std::vector<LValue> &ResultRegDests, std::string &AsmString,
+    unsigned NumOutputs) const {
+  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
+
+  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
+  // larger.
+  if (!Constraints.empty())
+    Constraints += ',';
+  if (RetWidth <= 32) {
+    Constraints += "={eax}";
+    ResultRegTypes.push_back(CGF.Int32Ty);
+  } else {
+    // Use the 'A' constraint for EAX:EDX.
+    Constraints += "=A";
+    ResultRegTypes.push_back(CGF.Int64Ty);
+  }
+
+  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
+  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
+  ResultTruncRegTypes.push_back(CoerceTy);
+
+  // Coerce the integer by bitcasting the return slot pointer.
+  ReturnSlot.setAddress(ReturnSlot.getAddress(CGF).withElementType(CoerceTy));
+  ResultRegDests.push_back(ReturnSlot);
+
+  // Adding one output shifts every existing input reference up by one.
+  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
+}
+
+/// shouldReturnTypeInRegister - Determine if the given type should be
+/// returned in a register (for the Darwin and MCU ABI).
+bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
+                                               ASTContext &Context) const {
+  uint64_t Size = Context.getTypeSize(Ty);
+
+  // For i386, type must be register sized.
+  // For the MCU ABI, it only needs to be <= 8-byte
+  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
+    return false;
+
+  if (Ty->isVectorType()) {
+    // 64- and 128- bit vectors inside structures are not returned in
+    // registers.
+    if (Size == 64 || Size == 128)
+      return false;
+
+    return true;
+  }
+
+  // If this is a builtin, pointer, enum, complex type, member pointer, or
+  // member function pointer it is ok.
+  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
+      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
+      Ty->isBlockPointerType() || Ty->isMemberPointerType())
+    return true;
+
+  // Arrays are treated like records.
+  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
+    return shouldReturnTypeInRegister(AT->getElementType(), Context);
+
+  // Otherwise, it must be a record type.
+  const RecordType *RT = Ty->getAs<RecordType>();
+  if (!RT) return false;
+
+  // FIXME: Traverse bases here too.
+
+  // Structure types are passed in register if all fields would be
+  // passed in a register.
+  for (const auto *FD : RT->getDecl()->fields()) {
+    // Empty fields are ignored.
+    if (isEmptyField(Context, FD, true))
+      continue;
+
+    // Check fields recursively.
+    if (!shouldReturnTypeInRegister(FD->getType(), Context))
+      return false;
+  }
+  return true;
+}
+
+// Returns true for scalar types with a simple 32- or 64-bit argument-passing
+// convention; used by addFieldSizes to decide whether struct expansion would
+// introduce padding.
+static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
+  // Treat complex types as the element type.
+  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
+    Ty = CTy->getElementType();
+
+  // Check for a type which we know has a simple scalar argument-passing
+  // convention without any padding. (We're specifically looking for 32
+  // and 64-bit integer and integer-equivalents, float, and double.)
+  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
+      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
+    return false;
+
+  uint64_t Size = Context.getTypeSize(Ty);
+  return Size == 32 || Size == 64;
+}
+
+// Accumulate the bit sizes of RD's fields into Size; returns false (leaving
+// Size partially updated) if any field disqualifies the record from
+// expansion.
+static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
+                          uint64_t &Size) {
+  for (const auto *FD : RD->fields()) {
+    // Scalar arguments on the stack get 4 byte alignment on x86. If the
+    // argument is smaller than 32-bits, expanding the struct will create
+    // alignment padding.
+    if (!is32Or64BitBasicType(FD->getType(), Context))
+      return false;
+
+    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
+    // how to expand them yet, and the predicate for telling if a bitfield still
+    // counts as "basic" is more complicated than what we were doing previously.
+    if (FD->isBitField())
+      return false;
+
+    Size += Context.getTypeSize(FD->getType());
+  }
+  return true;
+}
+
+// Recursive companion to addFieldSizes for C++ records: folds in all base
+// classes before the record's own fields.
+static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
+                                 uint64_t &Size) {
+  // Don't do this if there are any non-empty bases.
+  for (const CXXBaseSpecifier &Base : RD->bases()) {
+    if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
+                              Size))
+      return false;
+  }
+  if (!addFieldSizes(Context, RD, Size))
+    return false;
+  return true;
+}
+
+/// Test whether an argument type which is to be passed indirectly (on the
+/// stack) would have the equivalent layout if it was expanded into separate
+/// arguments. If so, we prefer to do the latter to avoid inhibiting
+/// optimizations.
+bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
+  // We can only expand structure types.
+  const RecordType *RT = Ty->getAs<RecordType>();
+  if (!RT)
+    return false;
+  const RecordDecl *RD = RT->getDecl();
+  uint64_t Size = 0;
+  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+    if (!IsWin32StructABI) {
+      // On non-Windows, we have to conservatively match our old bitcode
+      // prototypes in order to be ABI-compatible at the bitcode level.
+      if (!CXXRD->isCLike())
+        return false;
+    } else {
+      // Don't do this for dynamic classes.
+      if (CXXRD->isDynamicClass())
+        return false;
+    }
+    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
+      return false;
+  } else {
+    if (!addFieldSizes(getContext(), RD, Size))
+      return false;
+  }
+
+  // We can do this if there was no alignment padding: the summed field sizes
+  // must equal the record's full size.
+  return Size == getContext().getTypeSize(Ty);
+}
+
+// Return the type indirectly via a hidden sret pointer; that pointer itself
+// may occupy an integer register (inreg) except on MCU.
+ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
+  // If the return value is indirect, then the hidden argument is consuming one
+  // integer register.
+  if (State.FreeRegs) {
+    --State.FreeRegs;
+    if (!IsMCUABI)
+      return getNaturalAlignIndirectInReg(RetTy);
+  }
+  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+}
+
+// Classify the return type for the i386 ABI variants: HVAs for
+// vectorcall/regcall, Darwin vector special cases, then aggregates (indirect,
+// ignored, or coerced to an integer when register-sized), then scalars.
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
+                                             CCState &State) const {
+  if (RetTy->isVoidType())
+    return ABIArgInfo::getIgnore();
+
+  const Type *Base = nullptr;
+  uint64_t NumElts = 0;
+  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
+       State.CC == llvm::CallingConv::X86_RegCall) &&
+      isHomogeneousAggregate(RetTy, Base, NumElts)) {
+    // The LLVM struct type for such an aggregate should lower properly.
+    return ABIArgInfo::getDirect();
+  }
+
+  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
+    // On Darwin, some vectors are returned in registers.
+    if (IsDarwinVectorABI) {
+      uint64_t Size = getContext().getTypeSize(RetTy);
+
+      // 128-bit vectors are a special case; they are returned in
+      // registers and we need to make sure to pick a type the LLVM
+      // backend will like.
+      if (Size == 128)
+        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
+            llvm::Type::getInt64Ty(getVMContext()), 2));
+
+      // Always return in register if it fits in a general purpose
+      // register, or if it is 64 bits and has a single element.
+      if ((Size == 8 || Size == 16 || Size == 32) ||
+          (Size == 64 && VT->getNumElements() == 1))
+        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+                                                            Size));
+
+      return getIndirectReturnResult(RetTy, State);
+    }
+
+    return ABIArgInfo::getDirect();
+  }
+
+  if (isAggregateTypeForABI(RetTy)) {
+    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
+      // Structures with flexible arrays are always indirect.
+      if (RT->getDecl()->hasFlexibleArrayMember())
+        return getIndirectReturnResult(RetTy, State);
+    }
+
+    // If specified, structs and unions are always indirect.
+    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
+      return getIndirectReturnResult(RetTy, State);
+
+    // Ignore empty structs/unions.
+    if (isEmptyRecord(getContext(), RetTy, true))
+      return ABIArgInfo::getIgnore();
+
+    // Return complex of _Float16 as <2 x half> so the backend will use xmm0.
+    if (const ComplexType *CT = RetTy->getAs<ComplexType>()) {
+      QualType ET = getContext().getCanonicalType(CT->getElementType());
+      if (ET->isFloat16Type())
+        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
+            llvm::Type::getHalfTy(getVMContext()), 2));
+    }
+
+    // Small structures which are register sized are generally returned
+    // in a register.
+    if (shouldReturnTypeInRegister(RetTy, getContext())) {
+      uint64_t Size = getContext().getTypeSize(RetTy);
+
+      // As a special-case, if the struct is a "single-element" struct, and
+      // the field is of type "float" or "double", return it in a
+      // floating-point register. (MSVC does not apply this special case.)
+      // We apply a similar transformation for pointer types to improve the
+      // quality of the generated IR.
+      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
+        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
+            || SeltTy->hasPointerRepresentation())
+          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+      // FIXME: We should be able to narrow this integer in cases with dead
+      // padding.
+      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
+    }
+
+    return getIndirectReturnResult(RetTy, State);
+  }
+
+  // Treat an enum type as its underlying type.
+  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+    RetTy = EnumTy->getDecl()->getIntegerType();
+
+  if (const auto *EIT = RetTy->getAs<BitIntType>())
+    if (EIT->getNumBits() > 64)
+      return getIndirectReturnResult(RetTy, State);
+
+  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+                                               : ABIArgInfo::getDirect());
+}
+
+// Compute the stack alignment (bytes) for Ty given its natural alignment;
+// 0 means "use the default". Per-OS exceptions: Linux/CygMing keep
+// 16/32/64-byte vector alignment; Darwin bumps SIMD-containing types to 16.
+unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
+                                                 unsigned Align) const {
+  // Otherwise, if the alignment is less than or equal to the minimum ABI
+  // alignment, just use the default; the backend will handle this.
+  if (Align <= MinABIStackAlignInBytes)
+    return 0; // Use default alignment.
+
+  if (IsLinuxABI) {
+    // Exclude other System V OS (e.g Darwin, PS4 and FreeBSD) since we don't
+    // want to spend any effort dealing with the ramifications of ABI breaks.
+    //
+    // If the vector type is __m128/__m256/__m512, return the default alignment.
+    if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
+      return Align;
+  }
+  // On non-Darwin, the stack type alignment is always 4.
+  if (!IsDarwinVectorABI) {
+    // Set explicit alignment, since we may need to realign the top.
+    return MinABIStackAlignInBytes;
+  }
+
+  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
+  if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
+                      isRecordWithSIMDVectorType(getContext(), Ty)))
+    return 16;
+
+  return MinABIStackAlignInBytes;
+}
+
+// Build an indirect (in-memory) argument classification. Non-byval indirects
+// consume one integer register for the pointer; byval arguments get an
+// explicit stack alignment and may require realignment.
+ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
+                                            CCState &State) const {
+  if (!ByVal) {
+    if (State.FreeRegs) {
+      --State.FreeRegs; // Non-byval indirects just use one pointer.
+      if (!IsMCUABI)
+        return getNaturalAlignIndirectInReg(Ty);
+    }
+    return getNaturalAlignIndirect(Ty, false);
+  }
+
+  // Compute the byval alignment.
+  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
+  if (StackAlign == 0)
+    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
+
+  // If the stack alignment is less than the type alignment, realign the
+  // argument.
+  bool Realign = TypeAlign > StackAlign;
+  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
+                                 /*ByVal=*/true, Realign);
+}
+
+// Coarse register-class classification: float/double (including as the sole
+// member of a single-element struct) are Float; everything else is Integer.
+X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
+  const Type *T = isSingleElementStruct(Ty, getContext());
+  if (!T)
+    T = Ty.getTypePtr();
+
+  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
+    BuiltinType::Kind K = BT->getKind();
+    if (K == BuiltinType::Float || K == BuiltinType::Double)
+      return Float;
+  }
+  return Integer;
+}
+
+// Try to allocate enough 32-bit registers for Ty from State.FreeRegs.
+// Returns true iff registers were consumed. Float-class types never use
+// integer registers (unless soft-float).
+bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
+  if (!IsSoftFloatABI) {
+    Class C = classify(Ty);
+    if (C == Float)
+      return false;
+  }
+
+  unsigned Size = getContext().getTypeSize(Ty);
+  unsigned SizeInRegs = (Size + 31) / 32;
+
+  if (SizeInRegs == 0)
+    return false;
+
+  if (!IsMCUABI) {
+    if (SizeInRegs > State.FreeRegs) {
+      // Once we run out, no later argument may use registers either.
+      State.FreeRegs = 0;
+      return false;
+    }
+  } else {
+    // The MCU psABI allows passing parameters in-reg even if there are
+    // earlier parameters that are passed on the stack. Also,
+    // it does not allow passing >8-byte structs in-register,
+    // even if there are 3 free registers available.
+    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
+      return false;
+  }
+
+  State.FreeRegs -= SizeInRegs;
+  return true;
+}
+
+// Decide whether an aggregate should be passed directly (possibly inreg).
+// Outputs: InReg — apply the inreg attribute; NeedsPadding — insert a padding
+// register for fastcall/vectorcall/regcall when the aggregate goes to memory
+// but small values could still have used a register.
+bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
+                                             bool &InReg,
+                                             bool &NeedsPadding) const {
+  // On Windows, aggregates other than HFAs are never passed in registers, and
+  // they do not consume register slots. Homogenous floating-point aggregates
+  // (HFAs) have already been dealt with at this point.
+  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
+    return false;
+
+  NeedsPadding = false;
+  InReg = !IsMCUABI;
+
+  if (!updateFreeRegs(Ty, State))
+    return false;
+
+  if (IsMCUABI)
+    return true;
+
+  if (State.CC == llvm::CallingConv::X86_FastCall ||
+      State.CC == llvm::CallingConv::X86_VectorCall ||
+      State.CC == llvm::CallingConv::X86_RegCall) {
+    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
+      NeedsPadding = true;
+
+    return false;
+  }
+
+  return true;
+}
+
+// Decide whether a scalar argument gets the inreg attribute. fastcall and
+// vectorcall only put <=32-bit integer/pointer/reference values in registers;
+// regcall additionally rejects non-integral scalars after consuming registers.
+bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
+  bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
+                    (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
+                     Ty->isReferenceType());
+
+  if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
+                      State.CC == llvm::CallingConv::X86_VectorCall))
+    return false;
+
+  if (!updateFreeRegs(Ty, State))
+    return false;
+
+  if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
+    return false;
+
+  // Return true to apply inreg to all legal parameters except for MCU targets.
+  return !IsMCUABI;
+}
+
+void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
+  // Vectorcall x86 works subtly different than in x64, so the format is
+  // a bit different than the x64 version. First, all vector types (not HVAs)
+  // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
+  // This differs from the x64 implementation, where the first 6 by INDEX get
+  // registers.
+  // In the second pass over the arguments, HVAs are passed in the remaining
+  // vector registers if possible, or indirectly by address. The address will be
+  // passed in ECX/EDX if available. Any other arguments are passed according to
+  // the usual fastcall rules.
+  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
+  for (int I = 0, E = Args.size(); I < E; ++I) {
+    const Type *Base = nullptr;
+    uint64_t NumElts = 0;
+    const QualType &Ty = Args[I].type;
+    if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
+        isHomogeneousAggregate(Ty, Base, NumElts)) {
+      if (State.FreeSSERegs >= NumElts) {
+        State.FreeSSERegs -= NumElts;
+        Args[I].info = ABIArgInfo::getDirectInReg();
+        // Mark as assigned so the main classification loop skips this arg.
+        State.IsPreassigned.set(I);
+      }
+    }
+  }
+}
+
/// Classify one x86-32 argument: decide whether it is passed in registers
/// (inreg), directly on the stack, expanded into scalar pieces, indirectly
/// through a pointer, or via an inalloca struct. Consumes integer/SSE
/// registers from \p State as it assigns them.
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.
  bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
  bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
  bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;

  // A transparent union is passed like its first member.
  Ty = useFirstFieldIfTransparentUnion(Ty);
  TypeInfo TI = getContext().getTypeInfo(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // Regcall uses the concept of a homogenous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((IsRegCall || IsVectorCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;

      // Vectorcall passes HVAs directly and does not flatten them, but regcall
      // does.
      if (IsVectorCall)
        return getDirectX86Hva();

      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    // Not enough SSE registers remain for the whole HVA: pass it indirectly.
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    // FIXME: This should not be byval!
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, true, State);

    // Ignore empty structs/unions on non-Windows.
    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    bool InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      // Pass the aggregate directly as a struct of i32s, one per register.
      unsigned SizeInRegs = (TI.Width + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      if (InReg)
        return ABIArgInfo::getDirectInReg(Result);
      else
        return ABIArgInfo::getDirect(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Pass over-aligned aggregates on Windows indirectly. This behavior was
    // added in MSVC 2015. Use the required alignment from the record layout,
    // since that may be less than the regular type alignment, and types with
    // required alignment of less than 4 bytes are not passed indirectly.
    if (IsWin32StructABI) {
      unsigned AlignInBits = 0;
      if (RT) {
        const ASTRecordLayout &Layout =
          getContext().getASTRecordLayout(RT->getDecl());
        AlignInBits = getContext().toBits(Layout.getRequiredAlignment());
      } else if (TI.isAlignRequired()) {
        AlignInBits = TI.Align;
      }
      if (AlignInBits > 32)
        return getIndirectResult(Ty, /*ByVal=*/false, State);
    }

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    // Don't do this for the MCU if there are still free integer registers
    // (see X86_64 ABI for full explanation).
    if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
        canExpandIndirectArgument(Ty))
      return ABIArgInfo::getExpandWithPadding(
          IsFastCall || IsVectorCall || IsRegCall, PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Windows, vectors are passed directly if registers are available, or
    // indirectly if not. This avoids the need to align argument memory. Pass
    // user-defined vector types larger than 512 bits indirectly for simplicity.
    if (IsWin32StructABI) {
      if (TI.Width <= 512 && State.FreeSSERegs > 0) {
        --State.FreeSSERegs;
        return ABIArgInfo::getDirectInReg();
      }
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    }

    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
          (TI.Width == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(
            llvm::IntegerType::get(getVMContext(), TI.Width));
    }

    // MMX-sized vector types are coerced to i64 rather than an MMX IR type.
    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  // Scalars from here on. Classify an enum by its underlying integer type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  // Promotable small integers are passed with the extend attribute.
  if (isPromotableIntegerTypeForABI(Ty)) {
    if (InReg)
      return ABIArgInfo::getExtendInReg(Ty);
    return ABIArgInfo::getExtend(Ty);
  }

  // _BitInt of up to 64 bits is passed directly; wider ones go in memory.
  if (const auto *EIT = Ty->getAs<BitIntType>()) {
    if (EIT->getNumBits() <= 64) {
      if (InReg)
        return ABIArgInfo::getDirectInReg();
      return ABIArgInfo::getDirect();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
+
/// Classify the return value and every argument of \p FI for the x86-32
/// ABI, seeding the free integer/SSE register budget from the calling
/// convention first.
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI);
  // Seed the register budget. Note: regparm (getHasRegParm) is checked
  // before regcall and Win32 defaults, so an explicit regparm wins.
  if (IsMCUABI)
    State.FreeRegs = 3;
  else if (State.CC == llvm::CallingConv::X86_FastCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 3;
  } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else if (State.CC == llvm::CallingConv::X86_RegCall) {
    State.FreeRegs = 5;
    State.FreeSSERegs = 8;
  } else if (IsWin32StructABI) {
    // Since MSVC 2015, the first three SSE vectors have been passed in
    // registers. The rest are passed indirectly.
    State.FreeRegs = DefaultNumRegisterParameters;
    State.FreeSSERegs = 3;
  } else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      if (!IsMCUABI)
        FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  // For vectorcall, do a first pass over the arguments, assigning FP and vector
  // arguments to XMM registers as available.
  if (State.CC == llvm::CallingConv::X86_VectorCall)
    runVectorCallFirstPass(FI, State);

  bool UsedInAlloca = false;
  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
  for (int I = 0, E = Args.size(); I < E; ++I) {
    // Skip arguments that have already been assigned.
    if (State.IsPreassigned.test(I))
      continue;

    Args[I].info = classifyArgumentType(Args[I].type, State);
    UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and rewrite
  // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}
+
+void
+X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
+ CharUnits &StackOffset, ABIArgInfo &Info,
+ QualType Type) const {
+ // Arguments are always 4-byte-aligned.
+ CharUnits WordSize = CharUnits::fromQuantity(4);
+ assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
+
+ // sret pointers and indirect things will require an extra pointer
+ // indirection, unless they are byval. Most things are byval, and will not
+ // require this indirection.
+ bool IsIndirect = false;
+ if (Info.isIndirect() && !Info.getIndirectByVal())
+ IsIndirect = true;
+ Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
+ llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
+ if (IsIndirect)
+ LLTy = llvm::PointerType::getUnqual(getVMContext());
+ FrameFields.push_back(LLTy);
+ StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
+
+ // Insert padding bytes to respect alignment.
+ CharUnits FieldEnd = StackOffset;
+ StackOffset = FieldEnd.alignTo(WordSize);
+ if (StackOffset != FieldEnd) {
+ CharUnits NumBytes = StackOffset - FieldEnd;
+ llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
+ Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
+ FrameFields.push_back(Ty);
+ }
+}
+
/// Returns true if an argument with this classification must live in the
/// packed inalloca parameter struct (i.e. it is passed in memory rather
/// than in a register).
static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Ignore:
  case ABIArgInfo::IndirectAliased:
    return false;
  case ABIArgInfo::Indirect:
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
    // In memory unless the argument was assigned a register.
    return !Info.getInReg();
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
    // These are aggregate types which are never passed in registers when
    // inalloca is involved.
    return true;
  }
  llvm_unreachable("invalid enum");
}
+
/// Rewrite the memory-passed arguments of \p FI to live in one packed
/// 'inalloca' struct. Field order follows the stack layout: optionally
/// 'this' before the sret slot, then the sret slot, then the remaining
/// memory arguments.
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  // The stack alignment is always 4.
  CharUnits StackAlign = CharUnits::fromQuantity(4);

  CharUnits StackOffset;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true),
                  StackAlign);
}
+
+Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) const {
+
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+
+ // x86-32 changes the alignment of certain arguments on the stack.
+ //
+ // Just messing with TypeInfo like this works because we never pass
+ // anything indirectly.
+ TypeInfo.Align = CharUnits::fromQuantity(
+ getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
+ TypeInfo, CharUnits::fromQuantity(4),
+ /*AllowHigherAlign*/ true);
+}
+
+bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
+ const llvm::Triple &Triple, const CodeGenOptions &Opts) {
+ assert(Triple.getArch() == llvm::Triple::x86);
+
+ switch (Opts.getStructReturnConvention()) {
+ case CodeGenOptions::SRCK_Default:
+ break;
+ case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
+ return false;
+ case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
+ return true;
+ }
+
+ if (Triple.isOSDarwin() || Triple.isOSIAMCU())
+ return true;
+
+ switch (Triple.getOS()) {
+ case llvm::Triple::DragonFly:
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::OpenBSD:
+ case llvm::Triple::Win32:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) {
+ if (!FD->hasAttr<AnyX86InterruptAttr>())
+ return;
+
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ Fn->setCallingConv(llvm::CallingConv::X86_INTR);
+ if (FD->getNumParams() == 0)
+ return;
+
+ auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType());
+ llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());
+ llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
+ Fn->getContext(), ByValTy);
+ Fn->addParamAttr(0, NewAttr);
+}
+
+void X86_32TargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ if (GV->isDeclaration())
+ return;
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ Fn->addFnAttr("stackrealign");
+ }
+
+ addX86InterruptAttrs(FD, GV, CGM);
+ }
+}
+
/// Fill in the DWARF EH register-size table at \p Address: 4 bytes for each
/// integer register and %eip, and the platform's long-double size for the
/// x87 stack registers.
bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF,
    llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateAlignedStore(
        Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
        CharUnits::One());

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  // Returning false indicates the table was initialized (no fallback needed).
  return false;
}
+
+//===----------------------------------------------------------------------===//
+// X86-64 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+
+namespace {
+
+/// \p returns the size in bits of the largest (native) vector for \p AVXLevel.
+static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
+ switch (AVXLevel) {
+ case X86AVXABILevel::AVX512:
+ return 512;
+ case X86AVXABILevel::AVX:
+ return 256;
+ case X86AVXABILevel::None:
+ return 128;
+ }
+ llvm_unreachable("Unknown AVXLevel");
+}
+
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  // Register classes from the SysV x86-64 classification algorithm; each
  // eightbyte of an argument/return value is assigned one of these.
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified different
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// \param IsRegCall - Whether the calling conversion is regcall.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg, bool IsRegCall = false) const;

  /// Returns the IR vector type with which \p Ty is passed in SSE registers.
  llvm::Type *GetByteVectorType(QualType Ty) const;
  /// Returns the IR type used for the SSE-classified eightbyte of
  /// \p SourceTy found at \p SourceOffset, given the converted \p IRType
  /// and the corresponding \p IROffset into it.
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  /// Returns the IR type used for the INTEGER-classified eightbyte of
  /// \p SourceTy found at \p SourceOffset, given the converted \p IRType
  /// and the corresponding \p IROffset into it.
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a
  /// suitable result such that the value will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  /// Classify the return value of a function with return type \p RetTy.
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  /// Classify one argument; \p neededInt / \p neededSSE are set to the
  /// number of registers the argument consumes if passed in registers.
  ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
                                  unsigned &neededInt, unsigned &neededSSE,
                                  bool isNamedArg,
                                  bool IsRegCall = false) const;

  /// Classify a struct argument under the regcall convention.
  ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                       unsigned &NeededSSE,
                                       unsigned &MaxVectorWidth) const;

  // Recursive worker for classifyRegCallStructType.
  ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                           unsigned &NeededSSE,
                                           unsigned &MaxVectorWidth) const;

  // Whether \p Ty is a vector type that cannot be passed in registers
  // as-is.
  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
  /// classify it as INTEGER (for compatibility with older clang compilers).
  bool classifyIntegerMMXAsSSE() const {
    // Clang <= 3.8 did not do this.
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver3_8)
      return false;

    const llvm::Triple &Triple = getTarget().getTriple();
    if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
      return false;
    return true;
  }

  // GCC classifies vectors of __int128 as memory.
  bool passInt128VectorsInMem() const {
    // Clang <= 9.0 did not do this.
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver9)
      return false;

    const llvm::Triple &T = getTarget().getTriple();
    return T.isOSLinux() || T.isOSNetBSD();
  }

  // The maximum vector-register width available to the ABI (none/AVX/AVX512).
  X86AVXABILevel AVXLevel;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : ABIInfo(CGT), AVXLevel(AVXLevel),
        Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}

  /// Returns true if \p type is passed directly as a vector wider than
  /// 128 bits (i.e. in a YMM/ZMM register).
  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                      QualType Ty) const override;

  bool has64BitPointers() const {
    return Has64BitPointers;
  }
};
+
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {
public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : ABIInfo(CGT), AVXLevel(AVXLevel),
        IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

private:
  /// Classify a return value or argument; \p FreeSSERegs is the remaining
  /// SSE register budget, updated as registers are consumed.
  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
                      bool IsVectorCall, bool IsRegCall) const;
  /// Second vectorcall pass: reconsider an HVA argument once the
  /// register-assignment state from the first pass is known.
  ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
                                           const ABIArgInfo &current) const;

  // The maximum vector-register width available to the ABI.
  X86AVXABILevel AVXLevel;

  // True for the mingw-w64 (Windows/GNU) environment.
  bool IsMingw64;
};
+
// Target hooks for SysV x86-64: DWARF EH table setup, inline-asm type
// adjustment, unprototyped-variadic handling, and AVX ABI diagnostics.
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
  }

  /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
  /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations.
  bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // 7 is the DWARF register number of %rsp in the x86-64 psABI.
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSA
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      // force_align_arg_pointer becomes the "stackrealign" fn attribute.
      if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->addFnAttr("stackrealign");
      }

      addX86InterruptAttrs(FD, GV, CGM);
    }
  }

  /// Diagnose caller/callee mismatches in the "avx"/"avx512f" features
  /// for calls passing or returning wide vectors.
  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee,
                            const CallArgList &Args) const override;
};
+} // namespace
+
+static void initFeatureMaps(const ASTContext &Ctx,
+ llvm::StringMap<bool> &CallerMap,
+ const FunctionDecl *Caller,
+ llvm::StringMap<bool> &CalleeMap,
+ const FunctionDecl *Callee) {
+ if (CalleeMap.empty() && CallerMap.empty()) {
+ // The caller is potentially nullptr in the case where the call isn't in a
+ // function. In this case, the getFunctionFeatureMap ensures we just get
+ // the TU level setting (since it cannot be modified by 'target'..
+ Ctx.getFunctionFeatureMap(CallerMap, Caller);
+ Ctx.getFunctionFeatureMap(CalleeMap, Callee);
+ }
+}
+
+static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
+ SourceLocation CallLoc,
+ const llvm::StringMap<bool> &CallerMap,
+ const llvm::StringMap<bool> &CalleeMap,
+ QualType Ty, StringRef Feature,
+ bool IsArgument) {
+ bool CallerHasFeat = CallerMap.lookup(Feature);
+ bool CalleeHasFeat = CalleeMap.lookup(Feature);
+ if (!CallerHasFeat && !CalleeHasFeat)
+ return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
+ << IsArgument << Ty << Feature;
+
+ // Mixing calling conventions here is very clearly an error.
+ if (!CallerHasFeat || !CalleeHasFeat)
+ return Diag.Report(CallLoc, diag::err_avx_calling_convention)
+ << IsArgument << Ty << Feature;
+
+ // Else, both caller and callee have the required feature, so there is no need
+ // to diagnose.
+ return false;
+}
+
+static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
+ SourceLocation CallLoc,
+ const llvm::StringMap<bool> &CallerMap,
+ const llvm::StringMap<bool> &CalleeMap, QualType Ty,
+ bool IsArgument) {
+ uint64_t Size = Ctx.getTypeSize(Ty);
+ if (Size > 256)
+ return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
+ "avx512f", IsArgument);
+
+ if (Size > 128)
+ return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
+ IsArgument);
+
+ return false;
+}
+
/// Diagnose call sites whose wide-vector arguments or return value would be
/// passed differently depending on whether caller and callee have the
/// "avx"/"avx512f" target features enabled.
void X86_64TargetCodeGenInfo::checkFunctionCallABI(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee, const CallArgList &Args) const {
  llvm::StringMap<bool> CallerMap;
  llvm::StringMap<bool> CalleeMap;
  unsigned ArgIndex = 0;

  // We need to loop through the actual call arguments rather than the
  // function's parameters, in case this is variadic.
  for (const CallArg &Arg : Args) {
    // The "avx" feature changes how vectors >128 in size are passed. "avx512f"
    // additionally changes how vectors >256 in size are passed. Like GCC, we
    // warn when a function is called with an argument where this will change.
    // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
    // the caller and callee features are mismatched.
    // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can
    // change its ABI with attribute-target after this call.
    if (Arg.getType()->isVectorType() &&
        CGM.getContext().getTypeSize(Arg.getType()) > 128) {
      // Feature maps are computed lazily on the first wide-vector argument.
      initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
      QualType Ty = Arg.getType();
      // The CallArg seems to have desugared the type already, so for clearer
      // diagnostics, replace it with the type in the FunctionDecl if possible.
      if (ArgIndex < Callee->getNumParams())
        Ty = Callee->getParamDecl(ArgIndex)->getType();

      // Stop after the first hard error to avoid diagnostic spew.
      if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
                        CalleeMap, Ty, /*IsArgument*/ true))
        return;
    }
    ++ArgIndex;
  }

  // Check return always, as we don't have a good way of knowing in codegen
  // whether this value is used, tail-called, etc.
  if (Callee->getReturnType()->isVectorType() &&
      CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
    initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
    checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
                  CalleeMap, Callee->getReturnType(),
                  /*IsArgument*/ false);
  }
}
+
+std::string TargetCodeGenInfo::qualifyWindowsLibrary(StringRef Lib) {
+ // If the argument does not end in .lib, automatically add the suffix.
+ // If the argument contains a space, enclose it in quotes.
+ // This matches the behavior of MSVC.
+ bool Quote = Lib.contains(' ');
+ std::string ArgStr = Quote ? "\"" : "";
+ ArgStr += Lib;
+ if (!Lib.ends_with_insensitive(".lib") && !Lib.ends_with_insensitive(".a"))
+ ArgStr += ".lib";
+ ArgStr += Quote ? "\"" : "";
+ return ArgStr;
+}
+
+namespace {
// Target hooks for 32-bit Windows: reuses the x86-32 code-gen info (with the
// Win32 struct ABI enabled) and adds MSVC-style linker directives.
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
        bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
        unsigned NumRegisterParameters)
    : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
        Win32StructABI, NumRegisterParameters, false) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  // Emit an MSVC-style /DEFAULTLIB directive for #pragma comment(lib, ...).
  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  // Emit an MSVC-style /FAILIFMISMATCH directive for detect_mismatch.
  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
+} // namespace
+
+void WinX86_32TargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (GV->isDeclaration())
+ return;
+ addStackProbeTargetAttributes(D, GV, CGM);
+}
+
+namespace {
// Target hooks for 64-bit Windows: Win64 ABI info plus DWARF EH setup and
// MSVC-style linker directives.
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // 7 is the DWARF register number of %rsp in the x86-64 psABI.
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  // Emit an MSVC-style /DEFAULTLIB directive for #pragma comment(lib, ...).
  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  // Emit an MSVC-style /FAILIFMISMATCH directive for detect_mismatch.
  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
+} // namespace
+
+void WinX86_64TargetCodeGenInfo::setTargetAttributes(
+    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+  // Common attribute handling first.
+  TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+  // Nothing below applies to mere declarations.
+  if (GV->isDeclaration())
+    return;
+
+  const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
+  if (FD) {
+    // force_align_arg_pointer requests stack realignment on entry.
+    if (FD->hasAttr<X86ForceAlignArgPointerAttr>())
+      cast<llvm::Function>(GV)->addFnAttr("stackrealign");
+
+    addX86InterruptAttrs(FD, GV, CGM);
+  }
+
+  addStackProbeTargetAttributes(D, GV, CGM);
+}
+
+/// postMerge - Apply the AMD64-ABI post-merger cleanup (3.2.3p2 Rule 5) to a
+/// computed (Lo, Hi) eightbyte classification pair for an aggregate of
+/// AggregateSize bits.  The checks are order-sensitive: later rules observe
+/// the results of earlier ones.
+void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
+                              Class &Hi) const {
+  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+  //
+  // (a) If one of the classes is Memory, the whole argument is passed in
+  // memory.
+  //
+  // (b) If X87UP is not preceded by X87, the whole argument is passed in
+  // memory.
+  //
+  // (c) If the size of the aggregate exceeds two eightbytes and the first
+  // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
+  // argument is passed in memory. NOTE: This is necessary to keep the
+  // ABI working for processors that don't support the __m256 type.
+  //
+  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
+  //
+  // Some of these are enforced by the merging logic. Others can arise
+  // only with unions; for example:
+  // union { _Complex double; unsigned; }
+  //
+  // Note that clauses (b) and (c) were added in 0.98.
+  //
+  // Rule (a): Memory in either half forces the whole argument to memory.
+  if (Hi == Memory)
+    Lo = Memory;
+  // Rule (b): X87UP without a preceding X87 (skipped for targets that follow
+  // pre-0.98 ABI revisions).
+  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
+    Lo = Memory;
+  // Rule (c): anything wider than two eightbytes must be one SSE+SSEUp run.
+  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
+    Lo = Memory;
+  // Rule (d): a stray SSEUP degrades to SSE.
+  if (Hi == SSEUp && Lo != SSE)
+    Hi = SSE;
+}
+
+/// merge - Combine the classifications of two fields that occupy the same
+/// eightbyte (AMD64-ABI 3.2.3p2 Rule 4), returning the class of that
+/// eightbyte.  The checks below implement the rules as a priority chain, so
+/// their order is significant.
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
+  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
+  // classified recursively so that always two fields are
+  // considered. The resulting class is calculated according to
+  // the classes of the fields in the eightbyte:
+  //
+  // (a) If both classes are equal, this is the resulting class.
+  //
+  // (b) If one of the classes is NO_CLASS, the resulting class is
+  // the other class.
+  //
+  // (c) If one of the classes is MEMORY, the result is the MEMORY
+  // class.
+  //
+  // (d) If one of the classes is INTEGER, the result is the
+  // INTEGER.
+  //
+  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
+  // MEMORY is used as class.
+  //
+  // (f) Otherwise class SSE is used.
+
+  // Accum should never be memory (we should have returned) or
+  // ComplexX87 (because this cannot be passed in a structure).
+  assert((Accum != Memory && Accum != ComplexX87) &&
+         "Invalid accumulated classification during merge.");
+  // Rules (a) and (b) for Field == NoClass.
+  if (Accum == Field || Field == NoClass)
+    return Accum;
+  // Rule (c); Accum == Memory is impossible per the assert above.
+  if (Field == Memory)
+    return Memory;
+  // Rule (b) for Accum == NoClass.
+  if (Accum == NoClass)
+    return Field;
+  // Rule (d).
+  if (Accum == Integer || Field == Integer)
+    return Integer;
+  // Rule (e).
+  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+      Accum == X87 || Accum == X87Up)
+    return Memory;
+  // Rule (f).
+  return SSE;
+}
+
+/// classify - Compute the AMD64-ABI eightbyte classes of Ty: Lo covers bits
+/// [0, 64) and Hi bits [64, 128) of the containing object.  OffsetBase is the
+/// bit offset of Ty within that object (0 for a top-level value) and selects
+/// which half "Current" aliases.  isNamedArg is true for non-variadic
+/// ("named") arguments, which alone may be passed in 256/512-bit vector
+/// registers; IsRegCall lifts the 512-bit array size limit for __regcall.
+void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
+                             Class &Hi, bool isNamedArg, bool IsRegCall) const {
+  // FIXME: This code can be simplified by introducing a simple value class for
+  // Class pairs with appropriate constructor methods for the various
+  // situations.
+
+  // FIXME: Some of the split computations are wrong; unaligned vectors
+  // shouldn't be passed in registers for example, so there is no chance they
+  // can straddle an eightbyte. Verify & simplify.
+
+  Lo = Hi = NoClass;
+
+  // Start from the most conservative class; each recognized type below
+  // overrides it.  Unrecognized types therefore end up in memory.
+  Class &Current = OffsetBase < 64 ? Lo : Hi;
+  Current = Memory;
+
+  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+    BuiltinType::Kind k = BT->getKind();
+
+    if (k == BuiltinType::Void) {
+      Current = NoClass;
+    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
+      Lo = Integer;
+      Hi = Integer;
+    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
+      Current = Integer;
+    } else if (k == BuiltinType::Float || k == BuiltinType::Double ||
+               k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
+      Current = SSE;
+    } else if (k == BuiltinType::LongDouble) {
+      // long double's class depends on the target's chosen representation.
+      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
+      if (LDF == &llvm::APFloat::IEEEquad()) {
+        Lo = SSE;
+        Hi = SSEUp;
+      } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
+        Lo = X87;
+        Hi = X87Up;
+      } else if (LDF == &llvm::APFloat::IEEEdouble()) {
+        Current = SSE;
+      } else
+        llvm_unreachable("unexpected long double representation!");
+    }
+    // FIXME: _Decimal32 and _Decimal64 are SSE.
+    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
+    return;
+  }
+
+  if (const EnumType *ET = Ty->getAs<EnumType>()) {
+    // Classify the underlying integer type.
+    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
+    return;
+  }
+
+  if (Ty->hasPointerRepresentation()) {
+    Current = Integer;
+    return;
+  }
+
+  if (Ty->isMemberPointerType()) {
+    if (Ty->isMemberFunctionPointerType()) {
+      if (Has64BitPointers) {
+        // If Has64BitPointers, this is an {i64, i64}, so classify both
+        // Lo and Hi now.
+        Lo = Hi = Integer;
+      } else {
+        // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
+        // straddles an eightbyte boundary, Hi should be classified as well.
+        uint64_t EB_FuncPtr = (OffsetBase) / 64;
+        uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
+        if (EB_FuncPtr != EB_ThisAdj) {
+          Lo = Hi = Integer;
+        } else {
+          Current = Integer;
+        }
+      }
+    } else {
+      Current = Integer;
+    }
+    return;
+  }
+
+  if (const VectorType *VT = Ty->getAs<VectorType>()) {
+    uint64_t Size = getContext().getTypeSize(VT);
+    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
+      // gcc passes the following as integer:
+      // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
+      // 2 bytes - <2 x char>, <1 x short>
+      // 1 byte - <1 x char>
+      Current = Integer;
+
+      // If this type crosses an eightbyte boundary, it should be
+      // split.
+      uint64_t EB_Lo = (OffsetBase) / 64;
+      uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
+      if (EB_Lo != EB_Hi)
+        Hi = Lo;
+    } else if (Size == 64) {
+      QualType ElementType = VT->getElementType();
+
+      // gcc passes <1 x double> in memory. :(
+      if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
+        return;
+
+      // gcc passes <1 x long long> as SSE but clang used to unconditionally
+      // pass them as integer. For platforms where clang is the de facto
+      // platform compiler, we must continue to use integer.
+      if (!classifyIntegerMMXAsSSE() &&
+          (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
+           ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
+           ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
+           ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
+        Current = Integer;
+      else
+        Current = SSE;
+
+      // If this type crosses an eightbyte boundary, it should be
+      // split.
+      if (OffsetBase && OffsetBase != 64)
+        Hi = Lo;
+    } else if (Size == 128 ||
+               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
+      QualType ElementType = VT->getElementType();
+
+      // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :(
+      if (passInt128VectorsInMem() && Size != 128 &&
+          (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
+           ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
+        return;
+
+      // Arguments of 256-bits are split into four eightbyte chunks. The
+      // least significant one belongs to class SSE and all the others to class
+      // SSEUP. The original Lo and Hi design considers that types can't be
+      // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
+      // This design isn't correct for 256-bits, but since there're no cases
+      // where the upper parts would need to be inspected, avoid adding
+      // complexity and just consider Hi to match the 64-256 part.
+      //
+      // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
+      // registers if they are "named", i.e. not part of the "..." of a
+      // variadic function.
+      //
+      // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
+      // split into eight eightbyte chunks, one SSE and seven SSEUP.
+      Lo = SSE;
+      Hi = SSEUp;
+    }
+    return;
+  }
+
+  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+    QualType ET = getContext().getCanonicalType(CT->getElementType());
+
+    uint64_t Size = getContext().getTypeSize(Ty);
+    if (ET->isIntegralOrEnumerationType()) {
+      if (Size <= 64)
+        Current = Integer;
+      else if (Size <= 128)
+        Lo = Hi = Integer;
+    } else if (ET->isFloat16Type() || ET == getContext().FloatTy ||
+               ET->isBFloat16Type()) {
+      Current = SSE;
+    } else if (ET == getContext().DoubleTy) {
+      Lo = Hi = SSE;
+    } else if (ET == getContext().LongDoubleTy) {
+      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
+      if (LDF == &llvm::APFloat::IEEEquad())
+        Current = Memory;
+      else if (LDF == &llvm::APFloat::x87DoubleExtended())
+        Current = ComplexX87;
+      else if (LDF == &llvm::APFloat::IEEEdouble())
+        Lo = Hi = SSE;
+      else
+        llvm_unreachable("unexpected long double representation!");
+    }
+
+    // If this complex type crosses an eightbyte boundary then it
+    // should be split.
+    uint64_t EB_Real = (OffsetBase) / 64;
+    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
+    if (Hi == NoClass && EB_Real != EB_Imag)
+      Hi = Lo;
+
+    return;
+  }
+
+  // _BitInt: one GPR up to 64 bits, two GPRs up to 128 bits.
+  if (const auto *EITy = Ty->getAs<BitIntType>()) {
+    if (EITy->getNumBits() <= 64)
+      Current = Integer;
+    else if (EITy->getNumBits() <= 128)
+      Lo = Hi = Integer;
+    // Larger values need to get passed in memory.
+    return;
+  }
+
+  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+    // Arrays are treated like structures.
+
+    uint64_t Size = getContext().getTypeSize(Ty);
+
+    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+    // than eight eightbytes, ..., it has class MEMORY.
+    // regcall ABI doesn't have limitation to an object. The only limitation
+    // is the free registers, which will be checked in computeInfo.
+    if (!IsRegCall && Size > 512)
+      return;
+
+    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+    // fields, it has class MEMORY.
+    //
+    // Only need to check alignment of array base.
+    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
+      return;
+
+    // Otherwise implement simplified merge. We could be smarter about
+    // this, but it isn't worth it and would be harder to verify.
+    Current = NoClass;
+    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
+    uint64_t ArraySize = AT->getSize().getZExtValue();
+
+    // The only case a 256-bit wide vector could be used is when the array
+    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+    // to work for sizes wider than 128, early check and fallback to memory.
+    //
+    if (Size > 128 &&
+        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
+      return;
+
+    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
+      Class FieldLo, FieldHi;
+      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
+      Lo = merge(Lo, FieldLo);
+      Hi = merge(Hi, FieldHi);
+      if (Lo == Memory || Hi == Memory)
+        break;
+    }
+
+    postMerge(Size, Lo, Hi);
+    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
+    return;
+  }
+
+  if (const RecordType *RT = Ty->getAs<RecordType>()) {
+    uint64_t Size = getContext().getTypeSize(Ty);
+
+    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+    // than eight eightbytes, ..., it has class MEMORY.
+    if (Size > 512)
+      return;
+
+    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
+    // copy constructor or a non-trivial destructor, it is passed by invisible
+    // reference.
+    if (getRecordArgABI(RT, getCXXABI()))
+      return;
+
+    const RecordDecl *RD = RT->getDecl();
+
+    // Assume variable sized types are passed in memory.
+    if (RD->hasFlexibleArrayMember())
+      return;
+
+    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+
+    // Reset Lo class, this will be recomputed.
+    Current = NoClass;
+
+    // If this is a C++ record, classify the bases first.
+    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+      for (const auto &I : CXXRD->bases()) {
+        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
+               "Unexpected base class!");
+        const auto *Base =
+            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+
+        // Classify this field.
+        //
+        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
+        // single eightbyte, each is classified separately. Each eightbyte gets
+        // initialized to class NO_CLASS.
+        Class FieldLo, FieldHi;
+        uint64_t Offset =
+          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
+        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
+        Lo = merge(Lo, FieldLo);
+        Hi = merge(Hi, FieldHi);
+        if (Lo == Memory || Hi == Memory) {
+          postMerge(Size, Lo, Hi);
+          return;
+        }
+      }
+    }
+
+    // Classify the fields one at a time, merging the results.
+    unsigned idx = 0;
+    bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
+                                LangOptions::ClangABI::Ver11 ||
+                            getContext().getTargetInfo().getTriple().isPS();
+    // Unions (unless in Clang<=11 / PlayStation compat mode) skip the
+    // single-element size check below, since all their members overlap at
+    // offset 0.
+    bool IsUnion = RT->isUnionType() && !UseClang11Compat;
+
+    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+         i != e; ++i, ++idx) {
+      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+      bool BitField = i->isBitField();
+
+      // Ignore padding bit-fields.
+      if (BitField && i->isUnnamedBitfield())
+        continue;
+
+      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
+      // eight eightbytes, or it contains unaligned fields, it has class MEMORY.
+      //
+      // The only case a 256-bit or a 512-bit wide vector could be used is when
+      // the struct contains a single 256-bit or 512-bit element. Early check
+      // and fallback to memory.
+      //
+      // FIXME: Extended the Lo and Hi logic properly to work for size wider
+      // than 128.
+      if (Size > 128 &&
+          ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
+           Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
+        Lo = Memory;
+        postMerge(Size, Lo, Hi);
+        return;
+      }
+      // Note, skip this test for bit-fields, see below.
+      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
+        Lo = Memory;
+        postMerge(Size, Lo, Hi);
+        return;
+      }
+
+      // Classify this field.
+      //
+      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
+      // exceeds a single eightbyte, each is classified
+      // separately. Each eightbyte gets initialized to class
+      // NO_CLASS.
+      Class FieldLo, FieldHi;
+
+      // Bit-fields require special handling, they do not force the
+      // structure to be passed in memory even if unaligned, and
+      // therefore they can straddle an eightbyte.
+      if (BitField) {
+        assert(!i->isUnnamedBitfield());
+        // Note: these deliberately shadow the outer Offset/Size with the
+        // bit-field's own bit span.
+        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+        uint64_t Size = i->getBitWidthValue(getContext());
+
+        uint64_t EB_Lo = Offset / 64;
+        uint64_t EB_Hi = (Offset + Size - 1) / 64;
+
+        if (EB_Lo) {
+          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
+          FieldLo = NoClass;
+          FieldHi = Integer;
+        } else {
+          FieldLo = Integer;
+          FieldHi = EB_Hi ? Integer : NoClass;
+        }
+      } else
+        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
+      Lo = merge(Lo, FieldLo);
+      Hi = merge(Hi, FieldHi);
+      if (Lo == Memory || Hi == Memory)
+        break;
+    }
+
+    postMerge(Size, Lo, Hi);
+  }
+}
+
+/// Return Ty indirectly (sret) when it is an aggregate; scalars are handed
+/// back direct/extend since LLVM already places them correctly.
+ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
+  // Aggregates genuinely need the hidden sret pointer.
+  if (isAggregateTypeForABI(Ty))
+    return getNaturalAlignIndirect(Ty);
+
+  // Treat an enum type as its underlying type.
+  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+    Ty = EnumTy->getDecl()->getIntegerType();
+
+  // _BitInt returns also go through memory here.
+  if (Ty->isBitIntType())
+    return getNaturalAlignIndirect(Ty);
+
+  return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+                                           : ABIArgInfo::getDirect();
+}
+
+/// Return true if Ty is a vector that cannot be passed in vector registers.
+bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
+  const VectorType *VecTy = Ty->getAs<VectorType>();
+  if (!VecTy)
+    return false;
+
+  // Vectors at or below 64 bits, or wider than the native vector width for
+  // the active AVX level, are illegal.
+  uint64_t BitSize = getContext().getTypeSize(VecTy);
+  if (BitSize <= 64 || BitSize > getNativeVectorSizeForAVXABI(AVXLevel))
+    return true;
+
+  // Some targets pass __int128-element vectors in memory.
+  QualType EltTy = VecTy->getElementType();
+  return passInt128VectorsInMem() &&
+         (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
+          EltTy->isSpecificBuiltinType(BuiltinType::UInt128));
+}
+
+/// getIndirectResult - Pass an argument that was classified MEMORY (or is an
+/// illegal vector / oversized _BitInt).  Scalars come back direct/extend;
+/// aggregates are passed indirect, usually byval, with an explicit alignment.
+/// freeIntRegs is the number of integer argument registers still available
+/// for this call.
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+                                            unsigned freeIntRegs) const {
+  // If this is a scalar LLVM value then assume LLVM will pass it in the right
+  // place naturally.
+  //
+  // This assumption is optimistic, as there could be free registers available
+  // when we need to pass this argument in memory, and LLVM could try to pass
+  // the argument in the free register. This does not seem to happen currently,
+  // but this code would be much safer if we could mark the argument with
+  // 'onstack'. See PR12193.
+  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
+      !Ty->isBitIntType()) {
+    // Treat an enum type as its underlying type.
+    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+      Ty = EnumTy->getDecl()->getIntegerType();
+
+    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+                                              : ABIArgInfo::getDirect());
+  }
+
+  // C++ records with non-trivial copy/destroy are passed by reference.
+  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+  // Compute the byval alignment. We specify the alignment of the byval in all
+  // cases so that the mid-level optimizer knows the alignment of the byval.
+  // The floor of 8 bytes matches the stack slot alignment.
+  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
+
+  // Attempt to avoid passing indirect results using byval when possible. This
+  // is important for good codegen.
+  //
+  // We do this by coercing the value into a scalar type which the backend can
+  // handle naturally (i.e., without using byval).
+  //
+  // For simplicity, we currently only do this when we have exhausted all of the
+  // free integer registers. Doing this when there are free integer registers
+  // would require more care, as we would have to ensure that the coerced value
+  // did not claim the unused register. That would require either reordering the
+  // arguments to the function (so that any subsequent inreg values came first),
+  // or only doing this optimization when there were no following arguments that
+  // might be inreg.
+  //
+  // We currently expect it to be rare (particularly in well written code) for
+  // arguments to be passed on the stack when there are still free integer
+  // registers available (this would typically imply large structs being passed
+  // by value), so this seems like a fair tradeoff for now.
+  //
+  // We can revisit this if the backend grows support for 'onstack' parameter
+  // attributes. See PR12193.
+  if (freeIntRegs == 0) {
+    uint64_t Size = getContext().getTypeSize(Ty);
+
+    // If this type fits in an eightbyte, coerce it into the matching integral
+    // type, which will end up on the stack (with alignment 8).
+    if (Align == 8 && Size <= 64)
+      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+                                                          Size));
+  }
+
+  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
+}
+
+/// The ABI specifies that a value should be passed in a full vector XMM/YMM
+/// register. Pick an LLVM IR type that will be passed as a vector register.
+llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
+  // Wrapper structs/arrays that only contain vectors are passed just like
+  // vectors; strip them off if present.
+  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
+    Ty = QualType(InnerTy, 0);
+
+  llvm::Type *IRType = CGT.ConvertType(Ty);
+  if (auto *VecTy = dyn_cast<llvm::VectorType>(IRType)) {
+    // Don't pass vXi128 vectors in their native type, the backend can't
+    // legalize them.
+    if (passInt128VectorsInMem() &&
+        VecTy->getElementType()->isIntegerTy(128)) {
+      // Use a vXi64 vector instead.
+      uint64_t Size = getContext().getTypeSize(Ty);
+      return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
+                                        Size / 64);
+    }
+
+    return IRType;
+  }
+
+  // fp128 already occupies a full XMM register; pass it through.
+  if (IRType->getTypeID() == llvm::Type::FP128TyID)
+    return IRType;
+
+  // We couldn't find the preferred IR vector type for 'Ty'; synthesize a
+  // <N x double> vector of the same total size.
+  uint64_t Size = getContext().getTypeSize(Ty);
+  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
+
+  return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
+                                    Size / 64);
+}
+
+/// BitsContainNoUserData - Return true if the specified [start,end) bit range
+/// is known to either be off the end of the specified type or being in
+/// alignment padding. The user type specified is known to be at most 128 bits
+/// in size, and have passed through X86_64ABIInfo::classify with a successful
+/// classification that put one of the two halves in the INTEGER class.
+///
+/// It is conservatively correct to return false.
+static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
+                                  unsigned EndBit, ASTContext &Context) {
+  // If the bytes being queried are off the end of the type, there is no user
+  // data hiding here. This handles analysis of builtins, vectors and other
+  // types that don't contain interesting padding.
+  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
+  if (TySize <= StartBit)
+    return true;
+
+  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
+    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
+
+    // Check each element to see if the element overlaps with the queried range.
+    for (unsigned i = 0; i != NumElts; ++i) {
+      // If the element is after the span we care about, then we're done..
+      unsigned EltOffset = i*EltSize;
+      if (EltOffset >= EndBit) break;
+
+      // Recurse with the range re-based to the element's start.
+      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
+      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
+                                 EndBit-EltOffset, Context))
+        return false;
+    }
+    // If it overlaps no elements, then it is safe to process as padding.
+    return true;
+  }
+
+  if (const RecordType *RT = Ty->getAs<RecordType>()) {
+    const RecordDecl *RD = RT->getDecl();
+    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+    // If this is a C++ record, check the bases first.
+    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+      for (const auto &I : CXXRD->bases()) {
+        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
+               "Unexpected base class!");
+        const auto *Base =
+            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+
+        // If the base is after the span we care about, ignore it.
+        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
+        if (BaseOffset >= EndBit) continue;
+
+        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
+        if (!BitsContainNoUserData(I.getType(), BaseStart,
+                                   EndBit-BaseOffset, Context))
+          return false;
+      }
+    }
+
+    // Verify that no field has data that overlaps the region of interest. Yes
+    // this could be sped up a lot by being smarter about queried fields,
+    // however we're only looking at structs up to 16 bytes, so we don't care
+    // much.
+    unsigned idx = 0;
+    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+         i != e; ++i, ++idx) {
+      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
+
+      // If we found a field after the region we care about, then we're done.
+      if (FieldOffset >= EndBit) break;
+
+      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
+      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
+                                 Context))
+        return false;
+    }
+
+    // If nothing in this record overlapped the area of interest, then we're
+    // clean.
+    return true;
+  }
+
+  // Any other kind of type that overlaps the range: conservatively assume it
+  // contains user data.
+  return false;
+}
+
+/// getFPTypeAtOffset - Return a floating point type at the specified offset,
+/// or null when no floating-point type starts there.
+static llvm::Type *getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+                                     const llvm::DataLayout &TD) {
+  // A floating-point type found exactly at the requested offset is the answer.
+  if (IROffset == 0 && IRType->isFloatingPointTy())
+    return IRType;
+
+  // Struct: descend into the member covering the offset.
+  if (auto *StructTy = dyn_cast<llvm::StructType>(IRType)) {
+    if (!StructTy->getNumContainedTypes())
+      return nullptr;
+
+    const llvm::StructLayout *Layout = TD.getStructLayout(StructTy);
+    unsigned Member = Layout->getElementContainingOffset(IROffset);
+    return getFPTypeAtOffset(StructTy->getElementType(Member),
+                             IROffset - Layout->getElementOffset(Member), TD);
+  }
+
+  // Array: descend into the element covering the offset.
+  if (auto *ArrTy = dyn_cast<llvm::ArrayType>(IRType)) {
+    llvm::Type *EltTy = ArrTy->getElementType();
+    unsigned EltSize = TD.getTypeAllocSize(EltTy);
+    return getFPTypeAtOffset(EltTy, IROffset % EltSize, TD);
+  }
+
+  return nullptr;
+}
+
+/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
+/// low 8 bytes of an XMM register, corresponding to the SSE class.
+///
+/// IROffset is a byte offset into IRType; SourceOffset is the matching byte
+/// offset into the source-level type SourceTy.
+llvm::Type *X86_64ABIInfo::
+GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+                   QualType SourceTy, unsigned SourceOffset) const {
+  const llvm::DataLayout &TD = getDataLayout();
+  // Bytes of source data remaining from SourceOffset to the end of SourceTy.
+  unsigned SourceSize =
+      (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
+  llvm::Type *T0 = getFPTypeAtOffset(IRType, IROffset, TD);
+  // No FP type at the offset (or already a double): a double covers the
+  // whole eightbyte.
+  if (!T0 || T0->isDoubleTy())
+    return llvm::Type::getDoubleTy(getVMContext());
+
+  // Get the adjacent FP type.
+  llvm::Type *T1 = nullptr;
+  unsigned T0Size = TD.getTypeAllocSize(T0);
+  if (SourceSize > T0Size)
+    T1 = getFPTypeAtOffset(IRType, IROffset + T0Size, TD);
+  if (T1 == nullptr) {
+    // Check if IRType is a half/bfloat + float. float type will be in IROffset+4 due
+    // to its alignment.
+    if (T0->is16bitFPTy() && SourceSize > 4)
+      T1 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
+    // If we can't get a second FP type, return a simple half or float.
+    // avx512fp16-abi.c:pr51813_2 shows it works to return float for
+    // {float, i8} too.
+    if (T1 == nullptr)
+      return T0;
+  }
+
+  // Two floats pack into a <2 x float>.
+  if (T0->isFloatTy() && T1->isFloatTy())
+    return llvm::FixedVectorType::get(T0, 2);
+
+  // Two 16-bit FP values: check for a third/fourth to pick <2 x half-ish>
+  // versus <4 x half-ish>.
+  if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
+    llvm::Type *T2 = nullptr;
+    if (SourceSize > 4)
+      T2 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
+    if (T2 == nullptr)
+      return llvm::FixedVectorType::get(T0, 2);
+    return llvm::FixedVectorType::get(T0, 4);
+  }
+
+  // Mixed 16-bit FP and float: pass as <4 x half>.
+  if (T0->is16bitFPTy() || T1->is16bitFPTy())
+    return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);
+
+  return llvm::Type::getDoubleTy(getVMContext());
+}
+
+
+/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
+/// an 8-byte GPR. This means that we either have a scalar or we are talking
+/// about the high or low part of an up-to-16-byte struct. This routine picks
+/// the best LLVM IR type to represent this, which may be i64 or may be anything
+/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
+/// etc).
+///
+/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
+/// the source type. IROffset is an offset in bytes into the LLVM IR type that
+/// the 8-byte value references. PrefType may be null.
+///
+/// SourceTy is the source-level type for the entire argument. SourceOffset is
+/// an offset into this that we're processing (which is always either 0 or 8).
+///
+llvm::Type *X86_64ABIInfo::
+GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+                       QualType SourceTy, unsigned SourceOffset) const {
+  // If we're dealing with an un-offset LLVM IR type, then it means that we're
+  // returning an 8-byte unit starting with it. See if we can safely use it.
+  if (IROffset == 0) {
+    // Pointers and int64's always fill the 8-byte unit.
+    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
+        IRType->isIntegerTy(64))
+      return IRType;
+
+    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
+    // goodness in the source type is just tail padding. This is allowed to
+    // kick in for struct {double,int} on the int, but not on
+    // struct{double,int,int} because we wouldn't return the second int. We
+    // have to do this analysis on the source type because we can't depend on
+    // unions being lowered a specific way etc.
+    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
+        IRType->isIntegerTy(32) ||
+        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
+      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
+          cast<llvm::IntegerType>(IRType)->getBitWidth();
+
+      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
+                                SourceOffset*8+64, getContext()))
+        return IRType;
+    }
+  }
+
+  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+    // If this is a struct, recurse into the field at the specified offset.
+    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
+    if (IROffset < SL->getSizeInBytes()) {
+      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
+      IROffset -= SL->getElementOffset(FieldIdx);
+
+      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
+                                    SourceTy, SourceOffset);
+    }
+  }
+
+  // If this is an array, recurse into the element covering the offset.
+  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+    llvm::Type *EltTy = ATy->getElementType();
+    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
+    unsigned EltOffset = IROffset/EltSize*EltSize;
+    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
+                                  SourceOffset);
+  }
+
+  // Okay, we don't have any better idea of what to pass, so we pass this in an
+  // integer register that isn't too big to fit the rest of the struct.
+  unsigned TySizeInBytes =
+      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
+
+  assert(TySizeInBytes != SourceOffset && "Empty field?");
+
+  // It is always safe to classify this as an integer type up to i64 that
+  // isn't larger than the structure.
+  return llvm::IntegerType::get(getVMContext(),
+                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
+}
+
+
+/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
+/// be used as elements of a two register pair to pass or return, return a
+/// first class aggregate to represent them. For example, if the low part of
+/// a by-value argument should be passed as i32* and the high part as float,
+/// return {i32*, float}.
+static llvm::Type *
+GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
+                           const llvm::DataLayout &TD) {
+  // In order to correctly satisfy the ABI, we need the high part to start
+  // at offset 8. If the high and low parts we inferred are both 4-byte types
+  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
+  // the second element at offset 8. Check for this:
+  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
+  llvm::Align HiAlign = TD.getABITypeAlign(Hi);
+  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
+  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
+
+  // To handle this, we have to increase the size of the low part so that the
+  // second element will start at an 8 byte offset. We can't increase the size
+  // of the second element because it might make us access off the end of the
+  // struct.
+  if (HiStart != 8) {
+    // There are usually two sorts of types the ABI generation code can produce
+    // for the low part of a pair that aren't 8 bytes in size: half, float or
+    // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
+    // NaCl).
+    // Promote these to a larger type.
+    if (Lo->isHalfTy() || Lo->isFloatTy())
+      Lo = llvm::Type::getDoubleTy(Lo->getContext());
+    else {
+      assert((Lo->isIntegerTy() || Lo->isPointerTy())
+             && "Invalid/unknown lo type");
+      Lo = llvm::Type::getInt64Ty(Lo->getContext());
+    }
+  }
+
+  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
+
+  // Verify that the second element is at an 8-byte offset.
+  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
+         "Invalid x86-64 argument pair!");
+  return Result;
+}
+
+ABIArgInfo X86_64ABIInfo::
+classifyReturnType(QualType RetTy) const {
+ // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
+ // classification algorithm.
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
+
+ // Check some invariants.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ llvm::Type *ResType = nullptr;
+ switch (Lo) {
+ case NoClass:
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register, leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
+ case SSEUp:
+ case X87Up:
+ llvm_unreachable("Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
+ // hidden argument.
+ case Memory:
+ return getIndirectReturnResult(RetTy);
+
+ // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
+ // available register of the sequence %rax, %rdx is used.
+ case Integer:
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (RetTy->isIntegralOrEnumerationType() &&
+ isPromotableIntegerTypeForABI(RetTy))
+ return ABIArgInfo::getExtend(RetTy);
+ }
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
+ // available SSE register of the sequence %xmm0, %xmm1 is used.
+ case SSE:
+ ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
+ // returned on the X87 stack in %st0 as 80-bit x87 number.
+ case X87:
+ ResType = llvm::Type::getX86_FP80Ty(getVMContext());
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
+ // part of the value is returned in %st0 and the imaginary part in
+ // %st1.
+ case ComplexX87:
+ assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
+ ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
+ llvm::Type::getX86_FP80Ty(getVMContext()));
+ break;
+ }
+
+ llvm::Type *HighPart = nullptr;
+ switch (Hi) {
+ // Memory was handled previously and X87 should
+ // never occur as a hi class.
+ case Memory:
+ case X87:
+ llvm_unreachable("Invalid classification for hi word.");
+
+ case ComplexX87: // Previously handled.
+ case NoClass:
+ break;
+
+ case Integer:
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+ case SSE:
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
+ // is passed in the next available eightbyte chunk if the last used
+ // vector register.
+ //
+ // SSEUP should always be preceded by SSE, just widen.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = GetByteVectorType(RetTy);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
+ // returned together with the previous X87 value in %st0.
+ case X87Up:
+ // If X87Up is preceded by X87, we don't need to do
+ // anything. However, in some cases with unions it may not be
+ // preceded by X87. In such situations we follow gcc and pass the
+ // extra bits in an SSE reg.
+ if (Lo != X87) {
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ }
+ break;
+ }
+
+ // If a high part was specified, merge it together with the low part. It is
+ // known to pass in the high eightbyte of the result. We do this by forming a
+ // first class struct aggregate with the high and low part: {low, high}
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
+
+ return ABIArgInfo::getDirect(ResType);
+}
+
+ABIArgInfo
+X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
+ unsigned &neededInt, unsigned &neededSSE,
+ bool isNamedArg, bool IsRegCall) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);
+
+ // Check some invariants.
+ // FIXME: Enforce these by construction.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ neededInt = 0;
+ neededSSE = 0;
+ llvm::Type *ResType = nullptr;
+ switch (Lo) {
+ case NoClass:
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register, leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
+ // on the stack.
+ case Memory:
+
+ // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
+ // COMPLEX_X87, it is passed in memory.
+ case X87:
+ case ComplexX87:
+ if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
+ ++neededInt;
+ return getIndirectResult(Ty, freeIntRegs);
+
+ case SSEUp:
+ case X87Up:
+ llvm_unreachable("Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
+ // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
+ // and %r9 is used.
+ case Integer:
+ ++neededInt;
+
+ // Pick an 8-byte type based on the preferred type.
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ if (Ty->isIntegralOrEnumerationType() &&
+ isPromotableIntegerTypeForABI(Ty))
+ return ABIArgInfo::getExtend(Ty);
+ }
+
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
+ // available SSE register is used, the registers are taken in the
+ // order from %xmm0 to %xmm7.
+ case SSE: {
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+ ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
+ ++neededSSE;
+ break;
+ }
+ }
+
+ llvm::Type *HighPart = nullptr;
+ switch (Hi) {
+ // Memory was handled previously, ComplexX87 and X87 should
+ // never occur as hi classes, and X87Up must be preceded by X87,
+ // which is passed in memory.
+ case Memory:
+ case X87:
+ case ComplexX87:
+ llvm_unreachable("Invalid classification for hi word.");
+
+ case NoClass: break;
+
+ case Integer:
+ ++neededInt;
+ // Pick an 8-byte type based on the preferred type.
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+
+ // X87Up generally doesn't occur here (long double is passed in
+ // memory), except in situations involving unions.
+ case X87Up:
+ case SSE:
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+
+ ++neededSSE;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
+ // eightbyte is passed in the upper half of the last used SSE
+ // register. This only happens when 128-bit vectors are passed.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification");
+ ResType = GetByteVectorType(Ty);
+ break;
+ }
+
+ // If a high part was specified, merge it together with the low part. It is
+ // known to pass in the high eightbyte of the result. We do this by forming a
+ // first class struct aggregate with the high and low part: {low, high}
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
+
+ return ABIArgInfo::getDirect(ResType);
+}
+
+ABIArgInfo
+X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
+ unsigned &NeededSSE,
+ unsigned &MaxVectorWidth) const {
+ auto RT = Ty->getAs<RecordType>();
+ assert(RT && "classifyRegCallStructType only valid with struct types");
+
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectReturnResult(Ty);
+
+ // Sum up bases
+ if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (CXXRD->isDynamicClass()) {
+ NeededInt = NeededSSE = 0;
+ return getIndirectReturnResult(Ty);
+ }
+
+ for (const auto &I : CXXRD->bases())
+ if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
+ MaxVectorWidth)
+ .isIndirect()) {
+ NeededInt = NeededSSE = 0;
+ return getIndirectReturnResult(Ty);
+ }
+ }
+
+ // Sum up members
+ for (const auto *FD : RT->getDecl()->fields()) {
+ QualType MTy = FD->getType();
+ if (MTy->isRecordType() && !MTy->isUnionType()) {
+ if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
+ MaxVectorWidth)
+ .isIndirect()) {
+ NeededInt = NeededSSE = 0;
+ return getIndirectReturnResult(Ty);
+ }
+ } else {
+ unsigned LocalNeededInt, LocalNeededSSE;
+ if (classifyArgumentType(MTy, UINT_MAX, LocalNeededInt, LocalNeededSSE,
+ true, true)
+ .isIndirect()) {
+ NeededInt = NeededSSE = 0;
+ return getIndirectReturnResult(Ty);
+ }
+ if (const auto *AT = getContext().getAsConstantArrayType(MTy))
+ MTy = AT->getElementType();
+ if (const auto *VT = MTy->getAs<VectorType>())
+ if (getContext().getTypeSize(VT) > MaxVectorWidth)
+ MaxVectorWidth = getContext().getTypeSize(VT);
+ NeededInt += LocalNeededInt;
+ NeededSSE += LocalNeededSSE;
+ }
+ }
+
+ return ABIArgInfo::getDirect();
+}
+
+ABIArgInfo
+X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
+ unsigned &NeededSSE,
+ unsigned &MaxVectorWidth) const {
+
+ NeededInt = 0;
+ NeededSSE = 0;
+ MaxVectorWidth = 0;
+
+ return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
+ MaxVectorWidth);
+}
+
+void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+ const unsigned CallingConv = FI.getCallingConvention();
+ // It is possible to force Win64 calling convention on any x86_64 target by
+ // using __attribute__((ms_abi)). In such case to correctly emit Win64
+ // compatible code delegate this call to WinX86_64ABIInfo::computeInfo.
+ if (CallingConv == llvm::CallingConv::Win64) {
+ WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
+ Win64ABIInfo.computeInfo(FI);
+ return;
+ }
+
+ bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
+
+ // Keep track of the number of assigned registers.
+ unsigned FreeIntRegs = IsRegCall ? 11 : 6;
+ unsigned FreeSSERegs = IsRegCall ? 16 : 8;
+ unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;
+
+ if (!::classifyReturnType(getCXXABI(), FI, *this)) {
+ if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
+ !FI.getReturnType()->getTypePtr()->isUnionType()) {
+ FI.getReturnInfo() = classifyRegCallStructType(
+ FI.getReturnType(), NeededInt, NeededSSE, MaxVectorWidth);
+ if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
+ FreeIntRegs -= NeededInt;
+ FreeSSERegs -= NeededSSE;
+ } else {
+ FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
+ }
+ } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
+ getContext().getCanonicalType(FI.getReturnType()
+ ->getAs<ComplexType>()
+ ->getElementType()) ==
+ getContext().LongDoubleTy)
+ // Complex Long Double Type is passed in Memory when Regcall
+ // calling convention is used.
+ FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
+ else
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ }
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect())
+ --FreeIntRegs;
+ else if (NeededSSE && MaxVectorWidth > 0)
+ FI.setMaxVectorWidth(MaxVectorWidth);
+
+ // The chain argument effectively gives us another free register.
+ if (FI.isChainCall())
+ ++FreeIntRegs;
+
+ unsigned NumRequiredArgs = FI.getNumRequiredArgs();
+ // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
+ // get assigned (in left-to-right order) for passing as follows...
+ unsigned ArgNo = 0;
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it, ++ArgNo) {
+ bool IsNamedArg = ArgNo < NumRequiredArgs;
+
+ if (IsRegCall && it->type->isStructureOrClassType())
+ it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
+ MaxVectorWidth);
+ else
+ it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
+ NeededSSE, IsNamedArg);
+
+ // AMD64-ABI 3.2.3p3: If there are no registers available for any
+ // eightbyte of an argument, the whole argument is passed on the
+ // stack. If registers have already been assigned for some
+ // eightbytes of such an argument, the assignments get reverted.
+ if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
+ FreeIntRegs -= NeededInt;
+ FreeSSERegs -= NeededSSE;
+ if (MaxVectorWidth > FI.getMaxVectorWidth())
+ FI.setMaxVectorWidth(MaxVectorWidth);
+ } else {
+ it->info = getIndirectResult(it->type, FreeIntRegs);
+ }
+ }
+}
+
+static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) {
+ Address overflow_arg_area_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
+ llvm::Value *overflow_arg_area =
+ CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
+
+ // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
+ // byte boundary if alignment needed by type exceeds 8 byte boundary.
+ // It isn't stated explicitly in the standard, but in practice we use
+ // alignment greater than 16 where necessary.
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
+ if (Align > CharUnits::fromQuantity(8)) {
+ overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
+ Align);
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *Res =
+ CGF.Builder.CreateBitCast(overflow_arg_area,
+ llvm::PointerType::getUnqual(LTy));
+
+ // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
+ // l->overflow_arg_area + sizeof(type).
+ // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
+ // an 8 byte boundary.
+
+ uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
+ overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
+ Offset, "overflow_arg_area.next");
+ CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
+
+ // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
+ return Address(Res, LTy, Align);
+}
+
+Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i32 gp_offset;
+ // i32 fp_offset;
+ // i8* overflow_arg_area;
+ // i8* reg_save_area;
+ // };
+ unsigned neededInt, neededSSE;
+
+ Ty = getContext().getCanonicalType(Ty);
+ ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
+ /*isNamedArg*/false);
+
+ // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
+ // in the registers. If not go to step 7.
+ if (!neededInt && !neededSSE)
+ return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
+
+ // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
+ // general purpose registers needed to pass type and num_fp to hold
+ // the number of floating point registers needed.
+
+ // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
+ // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
+ // l->fp_offset > 304 - num_fp * 16 go to step 7.
+ //
+ // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
+ // register save space).
+
+ llvm::Value *InRegs = nullptr;
+ Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
+ llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
+ if (neededInt) {
+ gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
+ gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
+ InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
+ InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
+ }
+
+ if (neededSSE) {
+ fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
+ fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
+ llvm::Value *FitsInFP =
+ llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
+ FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
+ InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
+ }
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+
+ CGF.EmitBlock(InRegBlock);
+
+ // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
+ // an offset of l->gp_offset and/or l->fp_offset. This may require
+ // copying to a temporary location in case the parameter is passed
+ // in different register classes or requires an alignment greater
+ // than 8 for general purpose registers and 16 for XMM registers.
+ //
+ // FIXME: This really results in shameful code when we end up needing to
+ // collect arguments from different places; often what should result in a
+ // simple assembling of a structure from scattered addresses has many more
+ // loads than necessary. Can we clean this up?
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
+
+ Address RegAddr = Address::invalid();
+ if (neededInt && neededSSE) {
+ // FIXME: Cleanup.
+ assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
+ llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ Tmp = Tmp.withElementType(ST);
+ assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
+ llvm::Type *TyLo = ST->getElementType(0);
+ llvm::Type *TyHi = ST->getElementType(1);
+ assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
+ "Unexpected ABI info for mixed regs");
+ llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
+ llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
+ llvm::Value *GPAddr =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
+ llvm::Value *FPAddr =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
+ llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
+ llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
+
+ // Copy the first element.
+ // FIXME: Our choice of alignment here and below is probably pessimistic.
+ llvm::Value *V = CGF.Builder.CreateAlignedLoad(
+ TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
+ CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyLo)));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+
+ // Copy the second element.
+ V = CGF.Builder.CreateAlignedLoad(
+ TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
+ CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyHi)));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+
+ RegAddr = Tmp.withElementType(LTy);
+ } else if (neededInt) {
+ RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset),
+ LTy, CharUnits::fromQuantity(8));
+
+ // Copy to a temporary if necessary to ensure the appropriate alignment.
+ auto TInfo = getContext().getTypeInfoInChars(Ty);
+ uint64_t TySize = TInfo.Width.getQuantity();
+ CharUnits TyAlign = TInfo.Align;
+
+ // Copy into a temporary if the type is more aligned than the
+ // register save area.
+ if (TyAlign.getQuantity() > 8) {
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
+ RegAddr = Tmp;
+ }
+
+ } else if (neededSSE == 1) {
+ RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
+ LTy, CharUnits::fromQuantity(16));
+ } else {
+ assert(neededSSE == 2 && "Invalid number of needed registers!");
+ // SSE registers are spaced 16 bytes apart in the register save
+ // area, we need to collect the two eightbytes together.
+ // The ABI isn't explicit about this, but it seems reasonable
+ // to assume that the slots are 16-byte aligned, since the stack is
+ // naturally 16-byte aligned and the prologue is expected to store
+ // all the SSE registers to the RSA.
+ Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea,
+ fp_offset),
+ CGF.Int8Ty, CharUnits::fromQuantity(16));
+ Address RegAddrHi =
+ CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
+ CharUnits::fromQuantity(16));
+ llvm::Type *ST = AI.canHaveCoerceToType()
+ ? AI.getCoerceToType()
+ : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
+ llvm::Value *V;
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ Tmp = Tmp.withElementType(ST);
+ V = CGF.Builder.CreateLoad(
+ RegAddrLo.withElementType(ST->getStructElementType(0)));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(
+ RegAddrHi.withElementType(ST->getStructElementType(1)));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+
+ RegAddr = Tmp.withElementType(LTy);
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 5. Set:
+ // l->gp_offset = l->gp_offset + num_gp * 8
+ // l->fp_offset = l->fp_offset + num_fp * 16.
+ if (neededInt) {
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
+ gp_offset_p);
+ }
+ if (neededSSE) {
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
+ fp_offset_p);
+ }
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+
+ CGF.EmitBlock(InMemBlock);
+ Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
+
+ // Return the appropriate result.
+
+ CGF.EmitBlock(ContBlock);
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
+ "vaarg.addr");
+ return ResAddr;
+}
+
+Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ uint64_t Width = getContext().getTypeSize(Ty);
+ bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ CGF.getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(8),
+ /*allowHigherAlign*/ false);
+}
+
+ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
+ QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+
+ if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
+ isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
+ FreeSSERegs -= NumElts;
+ return getDirectX86Hva();
+ }
+ return current;
+}
+
+ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
+ bool IsReturnType, bool IsVectorCall,
+ bool IsRegCall) const {
+
+ if (Ty->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ TypeInfo Info = getContext().getTypeInfo(Ty);
+ uint64_t Width = Info.Width;
+ CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
+
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (RT) {
+ if (!IsReturnType) {
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ }
+
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ }
+
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+ // vectorcall adds the concept of a homogenous vector aggregate, similar to
+ // other targets.
+ if ((IsVectorCall || IsRegCall) &&
+ isHomogeneousAggregate(Ty, Base, NumElts)) {
+ if (IsRegCall) {
+ if (FreeSSERegs >= NumElts) {
+ FreeSSERegs -= NumElts;
+ if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
+ return ABIArgInfo::getDirect();
+ return ABIArgInfo::getExpand();
+ }
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ } else if (IsVectorCall) {
+ if (FreeSSERegs >= NumElts &&
+ (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
+ FreeSSERegs -= NumElts;
+ return ABIArgInfo::getDirect();
+ } else if (IsReturnType) {
+ return ABIArgInfo::getExpand();
+ } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
+ // HVAs are delayed and reclassified in the 2nd step.
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
+ }
+ }
+
+ if (Ty->isMemberPointerType()) {
+ // If the member pointer is represented by an LLVM int or ptr, pass it
+ // directly.
+ llvm::Type *LLTy = CGT.ConvertType(Ty);
+ if (LLTy->isPointerTy() || LLTy->isIntegerTy())
+ return ABIArgInfo::getDirect();
+ }
+
+ if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ if (Width > 64 || !llvm::isPowerOf2_64(Width))
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ // Otherwise, coerce it to a small integer.
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
+ }
+
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ // Bool type is always extended to the ABI, other builtin types are not
+ // extended.
+ return ABIArgInfo::getExtend(Ty);
+
+ case BuiltinType::LongDouble:
+ // Mingw64 GCC uses the old 80 bit extended precision floating point
+ // unit. It passes them indirectly through memory.
+ if (IsMingw64) {
+ const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
+ if (LDF == &llvm::APFloat::x87DoubleExtended())
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
+ break;
+
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ // If it's a parameter type, the normal ABI rule is that arguments larger
+ // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
+ // even though it isn't particularly efficient.
+ if (!IsReturnType)
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+
+ // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
+ // Clang matches them for compatibility.
+ return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2));
+
+ default:
+ break;
+ }
+ }
+
+ if (Ty->isBitIntType()) {
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ // However, non-power-of-two bit-precise integers will be passed as 1, 2, 4,
+ // or 8 bytes anyway as long is it fits in them, so we don't have to check
+ // the power of 2.
+ if (Width <= 64)
+ return ABIArgInfo::getDirect();
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
+
+ return ABIArgInfo::getDirect();
+}
+
+void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ const unsigned CC = FI.getCallingConvention();
+ bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
+ bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
+
+ // If __attribute__((sysv_abi)) is in use, use the SysV argument
+ // classification rules.
+ if (CC == llvm::CallingConv::X86_64_SysV) {
+ X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
+ SysVABIInfo.computeInfo(FI);
+ return;
+ }
+
+ unsigned FreeSSERegs = 0;
+ if (IsVectorCall) {
+ // We can use up to 4 SSE return registers with vectorcall.
+ FreeSSERegs = 4;
+ } else if (IsRegCall) {
+ // RegCall gives us 16 SSE registers.
+ FreeSSERegs = 16;
+ }
+
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
+ IsVectorCall, IsRegCall);
+
+ if (IsVectorCall) {
+ // We can use up to 6 SSE register parameters with vectorcall.
+ FreeSSERegs = 6;
+ } else if (IsRegCall) {
+ // RegCall gives us 16 SSE registers, we can reuse the return registers.
+ FreeSSERegs = 16;
+ }
+
+ unsigned ArgNum = 0;
+ unsigned ZeroSSERegs = 0;
+ for (auto &I : FI.arguments()) {
+ // Vectorcall in x64 only permits the first 6 arguments to be passed as
+ // XMM/YMM registers. After the sixth argument, pretend no vector
+ // registers are left.
+ unsigned *MaybeFreeSSERegs =
+ (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
+ I.info =
+ classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
+ ++ArgNum;
+ }
+
+ if (IsVectorCall) {
+ // For vectorcall, assign aggregate HVAs to any free vector registers in a
+ // second pass.
+ for (auto &I : FI.arguments())
+ I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
+ }
+}
+
+Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ uint64_t Width = getContext().getTypeSize(Ty);
+ bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ CGF.getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(8),
+ /*allowHigherAlign*/ false);
+}
+
+std::unique_ptr<TargetCodeGenInfo> CodeGen::createX86_32TargetCodeGenInfo(
+ CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
+ unsigned NumRegisterParameters, bool SoftFloatABI) {
+ bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
+ CGM.getTriple(), CGM.getCodeGenOpts());
+ return std::make_unique<X86_32TargetCodeGenInfo>(
+ CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
+ NumRegisterParameters, SoftFloatABI);
+}
+
+std::unique_ptr<TargetCodeGenInfo> CodeGen::createWinX86_32TargetCodeGenInfo(
+ CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
+ unsigned NumRegisterParameters) {
+ bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
+ CGM.getTriple(), CGM.getCodeGenOpts());
+ return std::make_unique<WinX86_32TargetCodeGenInfo>(
+ CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
+ NumRegisterParameters);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createX86_64TargetCodeGenInfo(CodeGenModule &CGM,
+ X86AVXABILevel AVXLevel) {
+ return std::make_unique<X86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM,
+ X86AVXABILevel AVXLevel) {
+ return std::make_unique<WinX86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/XCore.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/XCore.cpp
new file mode 100644
index 000000000000..8be240c018d0
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/XCore.cpp
@@ -0,0 +1,662 @@
+//===- XCore.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// XCore ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+/// A SmallStringEnc instance is used to build up the TypeString by passing
+/// it by reference between functions that append to it.
+typedef llvm::SmallString<128> SmallStringEnc;
+
+/// TypeStringCache caches the meta encodings of Types.
+///
+/// The reason for caching TypeStrings is two fold:
+/// 1. To cache a type's encoding for later uses;
+/// 2. As a means to break recursive member type inclusion.
+///
+/// A cache Entry can have a Status of:
+/// NonRecursive: The type encoding is not recursive;
+/// Recursive: The type encoding is recursive;
+/// Incomplete: An incomplete TypeString;
+/// IncompleteUsed: An incomplete TypeString that has been used in a
+/// Recursive type encoding.
+///
+/// A NonRecursive entry will have all of its sub-members expanded as fully
+/// as possible. Whilst it may contain types which are recursive, the type
+/// itself is not recursive and thus its encoding may be safely used whenever
+/// the type is encountered.
+///
+/// A Recursive entry will have all of its sub-members expanded as fully as
+/// possible. The type itself is recursive and it may contain other types which
+/// are recursive. The Recursive encoding must not be used during the expansion
+/// of a recursive type's recursive branch. For simplicity the code uses
+/// IncompleteCount to reject all usage of Recursive encodings for member types.
+///
+/// An Incomplete entry is always a RecordType and only encodes its
+/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
+/// are placed into the cache during type expansion as a means to identify and
+/// handle recursive inclusion of types as sub-members. If there is recursion
+/// the entry becomes IncompleteUsed.
+///
+/// During the expansion of a RecordType's members:
+///
+/// If the cache contains a NonRecursive encoding for the member type, the
+/// cached encoding is used;
+///
+/// If the cache contains a Recursive encoding for the member type, the
+/// cached encoding is 'Swapped' out, as it may be incorrect, and...
+///
+/// If the member is a RecordType, an Incomplete encoding is placed into the
+/// cache to break potential recursive inclusion of itself as a sub-member;
+///
+/// Once a member RecordType has been expanded, its temporary incomplete
+/// entry is removed from the cache. If a Recursive encoding was swapped out
+/// it is swapped back in;
+///
+/// If an incomplete entry is used to expand a sub-member, the incomplete
+/// entry is marked as IncompleteUsed. The cache keeps count of how many
+/// IncompleteUsed entries it currently contains in IncompleteUsedCount;
+///
+/// If a member's encoding is found to be a NonRecursive or Recursive viz:
+/// IncompleteUsedCount==0, the member's encoding is added to the cache.
+/// Else the member is part of a recursive type and thus the recursion has
+/// been exited too soon for the encoding to be correct for the member.
+///
+class TypeStringCache {
+ enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
+ struct Entry {
+ std::string Str; // The encoded TypeString for the type.
+ enum Status State; // Information about the encoding in 'Str'.
+ std::string Swapped; // A temporary place holder for a Recursive encoding
+ // during the expansion of RecordType's members.
+ };
+ std::map<const IdentifierInfo *, struct Entry> Map;
+ unsigned IncompleteCount; // Number of Incomplete entries in the Map.
+ unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
+public:
+ TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
+ void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
+ bool removeIncomplete(const IdentifierInfo *ID);
+ void addIfComplete(const IdentifierInfo *ID, StringRef Str,
+ bool IsRecursive);
+ StringRef lookupStr(const IdentifierInfo *ID);
+};
+
+/// TypeString encodings for enum & union fields must be order.
+/// FieldEncoding is a helper for this ordering process.
+class FieldEncoding {
+ bool HasName;
+ std::string Enc;
+public:
+ FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
+ StringRef str() { return Enc; }
+ bool operator<(const FieldEncoding &rhs) const {
+ if (HasName != rhs.HasName) return HasName;
+ return Enc < rhs.Enc;
+ }
+};
+
+class XCoreABIInfo : public DefaultABIInfo {
+public:
+ XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
+ mutable TypeStringCache TSC;
+ void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
+ const CodeGen::CodeGenModule &M) const;
+
+public:
+ XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {}
+ void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
+ const llvm::MapVector<GlobalDecl, StringRef>
+ &MangledDeclNames) const override;
+};
+
+} // End anonymous namespace.
+
+// TODO: this implementation is likely now redundant with the default
+// EmitVAArg.
+Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // Get the VAList.
+ CharUnits SlotSize = CharUnits::fromQuantity(4);
+ Address AP = Address(Builder.CreateLoad(VAListAddr),
+ getVAListElementType(CGF), SlotSize);
+
+ // Handle the argument.
+ ABIArgInfo AI = classifyArgumentType(Ty);
+ CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
+ llvm::Type *ArgTy = CGT.ConvertType(Ty);
+ if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
+ AI.setCoerceToType(ArgTy);
+ llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
+
+ Address Val = Address::invalid();
+ CharUnits ArgSize = CharUnits::Zero();
+ switch (AI.getKind()) {
+ case ABIArgInfo::Expand:
+ case ABIArgInfo::CoerceAndExpand:
+ case ABIArgInfo::InAlloca:
+ llvm_unreachable("Unsupported ABI kind for va_arg");
+ case ABIArgInfo::Ignore:
+ Val = Address(llvm::UndefValue::get(ArgPtrTy), ArgTy, TypeAlign);
+ ArgSize = CharUnits::Zero();
+ break;
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ Val = AP.withElementType(ArgTy);
+ ArgSize = CharUnits::fromQuantity(
+ getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
+ ArgSize = ArgSize.alignTo(SlotSize);
+ break;
+ case ABIArgInfo::Indirect:
+ case ABIArgInfo::IndirectAliased:
+ Val = AP.withElementType(ArgPtrTy);
+ Val = Address(Builder.CreateLoad(Val), ArgTy, TypeAlign);
+ ArgSize = SlotSize;
+ break;
+ }
+
+ // Increment the VAList.
+ if (!ArgSize.isZero()) {
+ Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
+ Builder.CreateStore(APN.getPointer(), VAListAddr);
+ }
+
+ return Val;
+}
+
+/// During the expansion of a RecordType, an incomplete TypeString is placed
+/// into the cache as a means to identify and break recursion.
+/// If there is a Recursive encoding in the cache, it is swapped out and will
+/// be reinserted by removeIncomplete().
+/// All other types of encoding should have been used rather than arriving here.
+void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
+ std::string StubEnc) {
+ if (!ID)
+ return;
+ Entry &E = Map[ID];
+ assert( (E.Str.empty() || E.State == Recursive) &&
+ "Incorrectly use of addIncomplete");
+ assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
+ E.Swapped.swap(E.Str); // swap out the Recursive
+ E.Str.swap(StubEnc);
+ E.State = Incomplete;
+ ++IncompleteCount;
+}
+
+/// Once the RecordType has been expanded, the temporary incomplete TypeString
+/// must be removed from the cache.
+/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
+/// Returns true if the RecordType was defined recursively.
+bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
+ if (!ID)
+ return false;
+ auto I = Map.find(ID);
+ assert(I != Map.end() && "Entry not present");
+ Entry &E = I->second;
+ assert( (E.State == Incomplete ||
+ E.State == IncompleteUsed) &&
+ "Entry must be an incomplete type");
+ bool IsRecursive = false;
+ if (E.State == IncompleteUsed) {
+ // We made use of our Incomplete encoding, thus we are recursive.
+ IsRecursive = true;
+ --IncompleteUsedCount;
+ }
+ if (E.Swapped.empty())
+ Map.erase(I);
+ else {
+ // Swap the Recursive back.
+ E.Swapped.swap(E.Str);
+ E.Swapped.clear();
+ E.State = Recursive;
+ }
+ --IncompleteCount;
+ return IsRecursive;
+}
+
+/// Add the encoded TypeString to the cache only if it is NonRecursive or
+/// Recursive (viz: all sub-members were expanded as fully as possible).
+void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
+ bool IsRecursive) {
+ if (!ID || IncompleteUsedCount)
+ return; // No key or it is an incomplete sub-type so don't add.
+ Entry &E = Map[ID];
+ if (IsRecursive && !E.Str.empty()) {
+ assert(E.State==Recursive && E.Str.size() == Str.size() &&
+ "This is not the same Recursive entry");
+ // The parent container was not recursive after all, so we could have used
+ // this Recursive sub-member entry after all, but we assumed the worse when
+ // we started viz: IncompleteCount!=0.
+ return;
+ }
+ assert(E.Str.empty() && "Entry already present");
+ E.Str = Str.str();
+ E.State = IsRecursive? Recursive : NonRecursive;
+}
+
+/// Return a cached TypeString encoding for the ID. If there isn't one, or we
+/// are recursively expanding a type (IncompleteCount != 0) and the cached
+/// encoding is Recursive, return an empty StringRef.
+StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
+ if (!ID)
+ return StringRef(); // We have no key.
+ auto I = Map.find(ID);
+ if (I == Map.end())
+ return StringRef(); // We have no encoding.
+ Entry &E = I->second;
+ if (E.State == Recursive && IncompleteCount)
+ return StringRef(); // We don't use Recursive encodings for member types.
+
+ if (E.State == Incomplete) {
+ // The incomplete type is being used to break out of recursion.
+ E.State = IncompleteUsed;
+ ++IncompleteUsedCount;
+ }
+ return E.Str;
+}
+
+/// The XCore ABI includes a type information section that communicates symbol
+/// type information to the linker. The linker uses this information to verify
+/// safety/correctness of things such as array bound and pointers et al.
+/// The ABI only requires C (and XC) language modules to emit TypeStrings.
+/// This type information (TypeString) is emitted into meta data for all global
+/// symbols: definitions, declarations, functions & variables.
+///
+/// The TypeString carries type, qualifier, name, size & value details.
+/// Please see 'Tools Development Guide' section 2.16.2 for format details:
+/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
+/// The output is tested by test/CodeGen/xcore-stringtype.c.
+///
+static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC);
+
+/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
+void XCoreTargetCodeGenInfo::emitTargetMD(
+ const Decl *D, llvm::GlobalValue *GV,
+ const CodeGen::CodeGenModule &CGM) const {
+ SmallStringEnc Enc;
+ if (getTypeString(Enc, D, CGM, TSC)) {
+ llvm::LLVMContext &Ctx = CGM.getModule().getContext();
+ llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
+ llvm::MDString::get(Ctx, Enc.str())};
+ llvm::NamedMDNode *MD =
+ CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
+ MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
+ }
+}
+
+void XCoreTargetCodeGenInfo::emitTargetMetadata(
+ CodeGen::CodeGenModule &CGM,
+ const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
+ // Warning, new MangledDeclNames may be appended within this loop.
+ // We rely on MapVector insertions adding new elements to the end
+ // of the container.
+ for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
+ auto Val = *(MangledDeclNames.begin() + I);
+ llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second);
+ if (GV) {
+ const Decl *D = Val.first.getDecl()->getMostRecentDecl();
+ emitTargetMD(D, GV, CGM);
+ }
+ }
+}
+
+static bool appendType(SmallStringEnc &Enc, QualType QType,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC);
+
+/// Helper function for appendRecordType().
+/// Builds a SmallVector containing the encoded field types in declaration
+/// order.
+static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
+ const RecordDecl *RD,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+ for (const auto *Field : RD->fields()) {
+ SmallStringEnc Enc;
+ Enc += "m(";
+ Enc += Field->getName();
+ Enc += "){";
+ if (Field->isBitField()) {
+ Enc += "b(";
+ llvm::raw_svector_ostream OS(Enc);
+ OS << Field->getBitWidthValue(CGM.getContext());
+ Enc += ':';
+ }
+ if (!appendType(Enc, Field->getType(), CGM, TSC))
+ return false;
+ if (Field->isBitField())
+ Enc += ')';
+ Enc += '}';
+ FE.emplace_back(!Field->getName().empty(), Enc);
+ }
+ return true;
+}
+
+/// Appends structure and union types to Enc and adds encoding to cache.
+/// Recursively calls appendType (via extractFieldType) for each field.
+/// Union types have their fields ordered according to the ABI.
+static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC, const IdentifierInfo *ID) {
+ // Append the cached TypeString if we have one.
+ StringRef TypeString = TSC.lookupStr(ID);
+ if (!TypeString.empty()) {
+ Enc += TypeString;
+ return true;
+ }
+
+ // Start to emit an incomplete TypeString.
+ size_t Start = Enc.size();
+ Enc += (RT->isUnionType()? 'u' : 's');
+ Enc += '(';
+ if (ID)
+ Enc += ID->getName();
+ Enc += "){";
+
+ // We collect all encoded fields and order as necessary.
+ bool IsRecursive = false;
+ const RecordDecl *RD = RT->getDecl()->getDefinition();
+ if (RD && !RD->field_empty()) {
+ // An incomplete TypeString stub is placed in the cache for this RecordType
+ // so that recursive calls to this RecordType will use it whilst building a
+ // complete TypeString for this RecordType.
+ SmallVector<FieldEncoding, 16> FE;
+ std::string StubEnc(Enc.substr(Start).str());
+ StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
+ TSC.addIncomplete(ID, std::move(StubEnc));
+ if (!extractFieldType(FE, RD, CGM, TSC)) {
+ (void) TSC.removeIncomplete(ID);
+ return false;
+ }
+ IsRecursive = TSC.removeIncomplete(ID);
+ // The ABI requires unions to be sorted but not structures.
+ // See FieldEncoding::operator< for sort algorithm.
+ if (RT->isUnionType())
+ llvm::sort(FE);
+ // We can now complete the TypeString.
+ unsigned E = FE.size();
+ for (unsigned I = 0; I != E; ++I) {
+ if (I)
+ Enc += ',';
+ Enc += FE[I].str();
+ }
+ }
+ Enc += '}';
+ TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
+ return true;
+}
+
+/// Appends enum types to Enc and adds the encoding to the cache.
+static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
+ TypeStringCache &TSC,
+ const IdentifierInfo *ID) {
+ // Append the cached TypeString if we have one.
+ StringRef TypeString = TSC.lookupStr(ID);
+ if (!TypeString.empty()) {
+ Enc += TypeString;
+ return true;
+ }
+
+ size_t Start = Enc.size();
+ Enc += "e(";
+ if (ID)
+ Enc += ID->getName();
+ Enc += "){";
+
+ // We collect all encoded enumerations and order them alphanumerically.
+ if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
+ SmallVector<FieldEncoding, 16> FE;
+ for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
+ ++I) {
+ SmallStringEnc EnumEnc;
+ EnumEnc += "m(";
+ EnumEnc += I->getName();
+ EnumEnc += "){";
+ I->getInitVal().toString(EnumEnc);
+ EnumEnc += '}';
+ FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
+ }
+ llvm::sort(FE);
+ unsigned E = FE.size();
+ for (unsigned I = 0; I != E; ++I) {
+ if (I)
+ Enc += ',';
+ Enc += FE[I].str();
+ }
+ }
+ Enc += '}';
+ TSC.addIfComplete(ID, Enc.substr(Start), false);
+ return true;
+}
+
+/// Appends type's qualifier to Enc.
+/// This is done prior to appending the type's encoding.
+static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
+ // Qualifiers are emitted in alphabetical order.
+ static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
+ int Lookup = 0;
+ if (QT.isConstQualified())
+ Lookup += 1<<0;
+ if (QT.isRestrictQualified())
+ Lookup += 1<<1;
+ if (QT.isVolatileQualified())
+ Lookup += 1<<2;
+ Enc += Table[Lookup];
+}
+
+/// Appends built-in types to Enc.
+static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
+ const char *EncType;
+ switch (BT->getKind()) {
+ case BuiltinType::Void:
+ EncType = "0";
+ break;
+ case BuiltinType::Bool:
+ EncType = "b";
+ break;
+ case BuiltinType::Char_U:
+ EncType = "uc";
+ break;
+ case BuiltinType::UChar:
+ EncType = "uc";
+ break;
+ case BuiltinType::SChar:
+ EncType = "sc";
+ break;
+ case BuiltinType::UShort:
+ EncType = "us";
+ break;
+ case BuiltinType::Short:
+ EncType = "ss";
+ break;
+ case BuiltinType::UInt:
+ EncType = "ui";
+ break;
+ case BuiltinType::Int:
+ EncType = "si";
+ break;
+ case BuiltinType::ULong:
+ EncType = "ul";
+ break;
+ case BuiltinType::Long:
+ EncType = "sl";
+ break;
+ case BuiltinType::ULongLong:
+ EncType = "ull";
+ break;
+ case BuiltinType::LongLong:
+ EncType = "sll";
+ break;
+ case BuiltinType::Float:
+ EncType = "ft";
+ break;
+ case BuiltinType::Double:
+ EncType = "d";
+ break;
+ case BuiltinType::LongDouble:
+ EncType = "ld";
+ break;
+ default:
+ return false;
+ }
+ Enc += EncType;
+ return true;
+}
+
+/// Appends a pointer encoding to Enc before calling appendType for the pointee.
+static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+ Enc += "p(";
+ if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
+ return false;
+ Enc += ')';
+ return true;
+}
+
+/// Appends array encoding to Enc before calling appendType for the element.
+static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
+ const ArrayType *AT,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC, StringRef NoSizeEnc) {
+ if (AT->getSizeModifier() != ArrayType::Normal)
+ return false;
+ Enc += "a(";
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
+ CAT->getSize().toStringUnsigned(Enc);
+ else
+ Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
+ Enc += ':';
+ // The Qualifiers should be attached to the type rather than the array.
+ appendQualifier(Enc, QT);
+ if (!appendType(Enc, AT->getElementType(), CGM, TSC))
+ return false;
+ Enc += ')';
+ return true;
+}
+
+/// Appends a function encoding to Enc, calling appendType for the return type
+/// and the arguments.
+static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+ Enc += "f{";
+ if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
+ return false;
+ Enc += "}(";
+ if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
+ // N.B. we are only interested in the adjusted param types.
+ auto I = FPT->param_type_begin();
+ auto E = FPT->param_type_end();
+ if (I != E) {
+ do {
+ if (!appendType(Enc, *I, CGM, TSC))
+ return false;
+ ++I;
+ if (I != E)
+ Enc += ',';
+ } while (I != E);
+ if (FPT->isVariadic())
+ Enc += ",va";
+ } else {
+ if (FPT->isVariadic())
+ Enc += "va";
+ else
+ Enc += '0';
+ }
+ }
+ Enc += ')';
+ return true;
+}
+
+/// Handles the type's qualifier before dispatching a call to handle specific
+/// type encodings.
+static bool appendType(SmallStringEnc &Enc, QualType QType,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+
+ QualType QT = QType.getCanonicalType();
+
+ if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
+ // The Qualifiers should be attached to the type rather than the array.
+ // Thus we don't call appendQualifier() here.
+ return appendArrayType(Enc, QT, AT, CGM, TSC, "");
+
+ appendQualifier(Enc, QT);
+
+ if (const BuiltinType *BT = QT->getAs<BuiltinType>())
+ return appendBuiltinType(Enc, BT);
+
+ if (const PointerType *PT = QT->getAs<PointerType>())
+ return appendPointerType(Enc, PT, CGM, TSC);
+
+ if (const EnumType *ET = QT->getAs<EnumType>())
+ return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
+
+ if (const RecordType *RT = QT->getAsStructureType())
+ return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
+
+ if (const RecordType *RT = QT->getAsUnionType())
+ return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
+
+ if (const FunctionType *FT = QT->getAs<FunctionType>())
+ return appendFunctionType(Enc, FT, CGM, TSC);
+
+ return false;
+}
+
+static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+ if (!D)
+ return false;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->getLanguageLinkage() != CLanguageLinkage)
+ return false;
+ return appendType(Enc, FD->getType(), CGM, TSC);
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->getLanguageLinkage() != CLanguageLinkage)
+ return false;
+ QualType QT = VD->getType().getCanonicalType();
+ if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
+ // Global ArrayTypes are given a size of '*' if the size is unknown.
+ // The Qualifiers should be attached to the type rather than the array.
+ // Thus we don't call appendQualifier() here.
+ return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
+ }
+ return appendType(Enc, QT, CGM, TSC);
+ }
+ return false;
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createXCoreTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<XCoreTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp b/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp
index c75b9b9c9e13..1ead01e49ec1 100644
--- a/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp
+++ b/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp
@@ -20,13 +20,13 @@
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Index/USRGeneration.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <fstream>
#include <optional>
@@ -568,7 +568,7 @@ CrossTranslationUnitContext::ASTLoader::loadFromDump(StringRef ASTDumpPath) {
return ASTUnit::LoadFromASTFile(
std::string(ASTDumpPath.str()),
CI.getPCHContainerOperations()->getRawReader(), ASTUnit::LoadEverything,
- Diags, CI.getFileSystemOpts());
+ Diags, CI.getFileSystemOpts(), CI.getHeaderSearchOptsPtr());
}
/// Load the AST from a source-file, which is supposed to be located inside the
@@ -609,10 +609,10 @@ CrossTranslationUnitContext::ASTLoader::loadFromSource(
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
new DiagnosticsEngine{DiagID, &*DiagOpts, DiagClient});
- return std::unique_ptr<ASTUnit>(ASTUnit::LoadFromCommandLine(
- CommandLineArgs.begin(), (CommandLineArgs.end()),
- CI.getPCHContainerOperations(), Diags,
- CI.getHeaderSearchOpts().ResourceDir));
+ return ASTUnit::LoadFromCommandLine(CommandLineArgs.begin(),
+ (CommandLineArgs.end()),
+ CI.getPCHContainerOperations(), Diags,
+ CI.getHeaderSearchOpts().ResourceDir);
}
llvm::Expected<InvocationListTy>
@@ -660,7 +660,7 @@ parseInvocationList(StringRef FileContent, llvm::sys::path::Style PathStyle) {
StringRef InvocationKey = NativeSourcePath;
- if (InvocationList.find(InvocationKey) != InvocationList.end())
+ if (InvocationList.contains(InvocationKey))
return llvm::make_error<IndexError>(
index_error_code::invocation_list_ambiguous);
diff --git a/contrib/llvm-project/clang/lib/Driver/Action.cpp b/contrib/llvm-project/clang/lib/Driver/Action.cpp
index 44b4715503f0..849bf6035ebd 100644
--- a/contrib/llvm-project/clang/lib/Driver/Action.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Action.cpp
@@ -48,6 +48,8 @@ const char *Action::getClassName(ActionClass AC) {
return "clang-linker-wrapper";
case StaticLibJobClass:
return "static-lib-linker";
+ case BinaryAnalyzeJobClass:
+ return "binary-analyzer";
}
llvm_unreachable("invalid class");
@@ -451,3 +453,8 @@ void StaticLibJobAction::anchor() {}
StaticLibJobAction::StaticLibJobAction(ActionList &Inputs, types::ID Type)
: JobAction(StaticLibJobClass, Inputs, Type) {}
+
+void BinaryAnalyzeJobAction::anchor() {}
+
+BinaryAnalyzeJobAction::BinaryAnalyzeJobAction(Action *Input, types::ID Type)
+ : JobAction(BinaryAnalyzeJobClass, Input, Type) {}
diff --git a/contrib/llvm-project/clang/lib/Driver/Compilation.cpp b/contrib/llvm-project/clang/lib/Driver/Compilation.cpp
index f6eb7f009e78..ad077d5bbfa6 100644
--- a/contrib/llvm-project/clang/lib/Driver/Compilation.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Compilation.cpp
@@ -17,12 +17,12 @@
#include "clang/Driver/Util.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptSpecifier.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <cassert>
#include <string>
#include <system_error>
diff --git a/contrib/llvm-project/clang/lib/Driver/Distro.cpp b/contrib/llvm-project/clang/lib/Driver/Distro.cpp
index 87a0c5a58511..6e0087565941 100644
--- a/contrib/llvm-project/clang/lib/Driver/Distro.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Distro.cpp
@@ -11,11 +11,11 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Threading.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
using namespace clang::driver;
using namespace clang;
@@ -93,6 +93,7 @@ static Distro::DistroType DetectLsbRelease(llvm::vfs::FileSystem &VFS) {
.Case("jammy", Distro::UbuntuJammy)
.Case("kinetic", Distro::UbuntuKinetic)
.Case("lunar", Distro::UbuntuLunar)
+ .Case("mantic", Distro::UbuntuMantic)
.Default(Distro::UnknownDistro);
return Version;
}
@@ -169,6 +170,7 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
.Case("buster/sid", Distro::DebianBuster)
.Case("bullseye/sid", Distro::DebianBullseye)
.Case("bookworm/sid", Distro::DebianBookworm)
+ .Case("trixie/sid", Distro::DebianTrixie)
.Default(Distro::UnknownDistro);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/Driver.cpp b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
index a268f2fa8fc5..488350169efa 100644
--- a/contrib/llvm-project/clang/lib/Driver/Driver.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
@@ -12,6 +12,7 @@
#include "ToolChains/AMDGPUOpenMP.h"
#include "ToolChains/AVR.h"
#include "ToolChains/Ananas.h"
+#include "ToolChains/Arch/RISCV.h"
#include "ToolChains/BareMetal.h"
#include "ToolChains/CSKYToolChain.h"
#include "ToolChains/Clang.h"
@@ -40,6 +41,7 @@
#include "ToolChains/Myriad.h"
#include "ToolChains/NaCl.h"
#include "ToolChains/NetBSD.h"
+#include "ToolChains/OHOS.h"
#include "ToolChains/OpenBSD.h"
#include "ToolChains/PPCFreeBSD.h"
#include "ToolChains/PPCLinux.h"
@@ -68,7 +70,6 @@
#include "clang/Driver/Types.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
@@ -85,7 +86,6 @@
#include "llvm/Support/ExitCodes.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatVariadic.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/PrettyStackTrace.h"
@@ -94,10 +94,12 @@
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
#include <cstdlib> // ::getenv
#include <map>
#include <memory>
#include <optional>
+#include <set>
#include <utility>
#if LLVM_ON_UNIX
#include <unistd.h> // getpid
@@ -182,6 +184,8 @@ std::string Driver::GetResourcesPath(StringRef BinaryPath,
// path of the embedding binary, which for LLVM binaries will be in bin/.
// ../lib gets us to lib/ in both cases.
P = llvm::sys::path::parent_path(Dir);
+ // This search path is also created in the COFF driver of lld, so any
+ // changes here also needs to happen in lld/COFF/Driver.cpp
llvm::sys::path::append(P, CLANG_INSTALL_LIBDIR_BASENAME, "clang",
CLANG_VERSION_MAJOR_STRING);
}
@@ -199,7 +203,8 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
ClangExecutable(ClangExecutable), SysRoot(DEFAULT_SYSROOT),
DriverTitle(Title), CCCPrintBindings(false), CCPrintOptions(false),
CCLogDiagnostics(false), CCGenDiagnostics(false),
- CCPrintProcessStats(false), TargetTriple(TargetTriple), Saver(Alloc),
+ CCPrintProcessStats(false), CCPrintInternalStats(false),
+ TargetTriple(TargetTriple), Saver(Alloc), PrependArg(nullptr),
CheckInputsExist(true), ProbePrecompiled(true),
SuppressMissingInputWarning(false) {
// Provide a sane fallback if no VFS is specified.
@@ -543,29 +548,28 @@ static llvm::Triple computeTargetTriple(const Driver &D,
if (Target.isOSBinFormatMachO()) {
// If an explicit Darwin arch name is given, that trumps all.
if (!DarwinArchName.empty()) {
- tools::darwin::setTripleTypeForMachOArchName(Target, DarwinArchName);
+ tools::darwin::setTripleTypeForMachOArchName(Target, DarwinArchName,
+ Args);
return Target;
}
// Handle the Darwin '-arch' flag.
if (Arg *A = Args.getLastArg(options::OPT_arch)) {
StringRef ArchName = A->getValue();
- tools::darwin::setTripleTypeForMachOArchName(Target, ArchName);
+ tools::darwin::setTripleTypeForMachOArchName(Target, ArchName, Args);
}
}
// Handle pseudo-target flags '-mlittle-endian'/'-EL' and
// '-mbig-endian'/'-EB'.
- if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
- options::OPT_mbig_endian)) {
- if (A->getOption().matches(options::OPT_mlittle_endian)) {
- llvm::Triple LE = Target.getLittleEndianArchVariant();
- if (LE.getArch() != llvm::Triple::UnknownArch)
- Target = std::move(LE);
- } else {
- llvm::Triple BE = Target.getBigEndianArchVariant();
- if (BE.getArch() != llvm::Triple::UnknownArch)
- Target = std::move(BE);
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_mlittle_endian,
+ options::OPT_mbig_endian)) {
+ llvm::Triple T = A->getOption().matches(options::OPT_mlittle_endian)
+ ? Target.getLittleEndianArchVariant()
+ : Target.getBigEndianArchVariant();
+ if (T.getArch() != llvm::Triple::UnknownArch) {
+ Target = std::move(T);
+ Args.claimAllArgs(options::OPT_mlittle_endian, options::OPT_mbig_endian);
}
}
@@ -594,13 +598,21 @@ static llvm::Triple computeTargetTriple(const Driver &D,
}
}
+ // The `-maix[32|64]` flags are only valid for AIX targets.
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_maix32, options::OPT_maix64);
+ A && !Target.isOSAIX())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << Target.str();
+
// Handle pseudo-target flags '-m64', '-mx32', '-m32' and '-m16'.
Arg *A = Args.getLastArg(options::OPT_m64, options::OPT_mx32,
- options::OPT_m32, options::OPT_m16);
+ options::OPT_m32, options::OPT_m16,
+ options::OPT_maix32, options::OPT_maix64);
if (A) {
llvm::Triple::ArchType AT = llvm::Triple::UnknownArch;
- if (A->getOption().matches(options::OPT_m64)) {
+ if (A->getOption().matches(options::OPT_m64) ||
+ A->getOption().matches(options::OPT_maix64)) {
AT = Target.get64BitArchVariant().getArch();
if (Target.getEnvironment() == llvm::Triple::GNUX32)
Target.setEnvironment(llvm::Triple::GNU);
@@ -613,7 +625,8 @@ static llvm::Triple computeTargetTriple(const Driver &D,
Target.setEnvironment(llvm::Triple::MuslX32);
else
Target.setEnvironment(llvm::Triple::GNUX32);
- } else if (A->getOption().matches(options::OPT_m32)) {
+ } else if (A->getOption().matches(options::OPT_m32) ||
+ A->getOption().matches(options::OPT_maix32)) {
AT = Target.get32BitArchVariant().getArch();
if (Target.getEnvironment() == llvm::Triple::GNUX32)
Target.setEnvironment(llvm::Triple::GNU);
@@ -678,11 +691,12 @@ static llvm::Triple computeTargetTriple(const Driver &D,
// If target is RISC-V adjust the target triple according to
// provided architecture name
if (Target.isRISCV()) {
- if ((A = Args.getLastArg(options::OPT_march_EQ))) {
- StringRef ArchName = A->getValue();
- if (ArchName.startswith_insensitive("rv32"))
+ if (Args.hasArg(options::OPT_march_EQ) ||
+ Args.hasArg(options::OPT_mcpu_EQ)) {
+ StringRef ArchName = tools::riscv::getRISCVArch(Args, Target);
+ if (ArchName.starts_with_insensitive("rv32"))
Target.setArch(llvm::Triple::riscv32);
- else if (ArchName.startswith_insensitive("rv64"))
+ else if (ArchName.starts_with_insensitive("rv64"))
Target.setArch(llvm::Triple::riscv64);
}
}
@@ -796,6 +810,12 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
if (!CudaTC) {
CudaTC = std::make_unique<toolchains::CudaToolChain>(
*this, *CudaTriple, *HostTC, C.getInputArgs());
+
+ // Emit a warning if the detected CUDA version is too new.
+ CudaInstallationDetector &CudaInstallation =
+ static_cast<toolchains::CudaToolChain &>(*CudaTC).CudaInstallation;
+ if (CudaInstallation.isValid())
+ CudaInstallation.WarnIfUnsupportedVersion();
}
C.addOffloadDeviceToolChain(CudaTC.get(), OFK);
} else if (IsHIP) {
@@ -838,7 +858,7 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
llvm::StringMap<llvm::DenseSet<StringRef>> DerivedArchs;
llvm::StringMap<StringRef> FoundNormalizedTriples;
- llvm::SmallVector<StringRef, 4> OpenMPTriples;
+ std::multiset<StringRef> OpenMPTriples;
// If the user specified -fopenmp-targets= we create a toolchain for each
// valid triple. Otherwise, if only --offload-arch= was specified we instead
@@ -850,7 +870,8 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
<< OpenMPTargets->getAsString(C.getInputArgs());
return;
}
- llvm::copy(OpenMPTargets->getValues(), std::back_inserter(OpenMPTriples));
+ for (StringRef T : OpenMPTargets->getValues())
+ OpenMPTriples.insert(T);
} else if (C.getInputArgs().hasArg(options::OPT_offload_arch_EQ) &&
!IsHIP && !IsCuda) {
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
@@ -905,7 +926,7 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
}
for (const auto &TripleAndArchs : DerivedArchs)
- OpenMPTriples.push_back(TripleAndArchs.first());
+ OpenMPTriples.insert(TripleAndArchs.first());
}
for (StringRef Val : OpenMPTriples) {
@@ -952,7 +973,7 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
} else
TC = &getToolChain(C.getInputArgs(), TT);
C.addOffloadDeviceToolChain(TC, Action::OFK_OpenMP);
- if (DerivedArchs.find(TT.getTriple()) != DerivedArchs.end())
+ if (DerivedArchs.contains(TT.getTriple()))
KnownArchs[TC] = DerivedArchs[TT.getTriple()];
}
}
@@ -1382,8 +1403,10 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
const Arg *Std = Args.getLastArg(options::OPT_std_EQ);
ModulesModeCXX20 =
!Args.hasArg(options::OPT_fmodules) && Std &&
- (Std->containsValue("c++20") || Std->containsValue("c++2b") ||
- Std->containsValue("c++2a") || Std->containsValue("c++latest"));
+ (Std->containsValue("c++20") || Std->containsValue("c++2a") ||
+ Std->containsValue("c++23") || Std->containsValue("c++2b") ||
+ Std->containsValue("c++26") || Std->containsValue("c++2c") ||
+ Std->containsValue("c++latest"));
// Process -fmodule-header{=} flags.
if (Arg *A = Args.getLastArg(options::OPT_fmodule_header_EQ,
@@ -1424,6 +1447,36 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
<< TC.getTriple().str();
}
+ // A common user mistake is specifying a target of aarch64-none-eabi or
+ // arm-none-elf whereas the correct names are aarch64-none-elf &
+ // arm-none-eabi. Detect these cases and issue a warning.
+ if (TC.getTriple().getOS() == llvm::Triple::UnknownOS &&
+ TC.getTriple().getVendor() == llvm::Triple::UnknownVendor) {
+ switch (TC.getTriple().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ if (TC.getTriple().getEnvironmentName() == "elf") {
+ Diag(diag::warn_target_unrecognized_env)
+ << TargetTriple
+ << (TC.getTriple().getArchName().str() + "-none-eabi");
+ }
+ break;
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ case llvm::Triple::aarch64_32:
+ if (TC.getTriple().getEnvironmentName().startswith("eabi")) {
+ Diag(diag::warn_target_unrecognized_env)
+ << TargetTriple
+ << (TC.getTriple().getArchName().str() + "-none-elf");
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
// The compilation takes ownership of Args.
Compilation *C = new Compilation(*this, TC, UArgs.release(), TranslatedArgs,
ContainsError);
@@ -1798,9 +1851,6 @@ void Driver::generateCompilationDiagnostics(
}
}
- for (const auto &A : C.getArgs().filtered(options::OPT_frewrite_map_file_EQ))
- Diag(clang::diag::note_drv_command_failed_diag_msg) << A->getValue();
-
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "\n\n********************";
}
@@ -2207,14 +2257,26 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
return false;
}
+ if (C.getArgs().hasArg(options::OPT_print_multi_flags)) {
+ Multilib::flags_list ArgFlags = TC.getMultilibFlags(C.getArgs());
+ llvm::StringSet<> ExpandedFlags = TC.getMultilibs().expandFlags(ArgFlags);
+ std::set<llvm::StringRef> SortedFlags;
+ for (const auto &FlagEntry : ExpandedFlags)
+ SortedFlags.insert(FlagEntry.getKey());
+ for (auto Flag : SortedFlags)
+ llvm::outs() << Flag << '\n';
+ return false;
+ }
+
if (C.getArgs().hasArg(options::OPT_print_multi_directory)) {
- const Multilib &Multilib = TC.getMultilib();
- if (Multilib.gccSuffix().empty())
- llvm::outs() << ".\n";
- else {
- StringRef Suffix(Multilib.gccSuffix());
- assert(Suffix.front() == '/');
- llvm::outs() << Suffix.substr(1) << "\n";
+ for (const Multilib &Multilib : TC.getSelectedMultilibs()) {
+ if (Multilib.gccSuffix().empty())
+ llvm::outs() << ".\n";
+ else {
+ StringRef Suffix(Multilib.gccSuffix());
+ assert(Suffix.front() == '/');
+ llvm::outs() << Suffix.substr(1) << "\n";
+ }
}
return false;
}
@@ -2564,17 +2626,21 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
}
if (ShowNote)
Diag(clang::diag::note_drv_t_option_is_global);
-
- // No driver mode exposes -x and /TC or /TP; we don't support mixing them.
- assert(!Args.hasArg(options::OPT_x) && "-x and /TC or /TP is not allowed");
}
// Warn -x after last input file has no effect
- {
+ if (!IsCLMode()) {
Arg *LastXArg = Args.getLastArgNoClaim(options::OPT_x);
Arg *LastInputArg = Args.getLastArgNoClaim(options::OPT_INPUT);
- if (LastXArg && LastInputArg && LastInputArg->getIndex() < LastXArg->getIndex())
+ if (LastXArg && LastInputArg &&
+ LastInputArg->getIndex() < LastXArg->getIndex())
Diag(clang::diag::warn_drv_unused_x) << LastXArg->getValue();
+ } else {
+ // In CL mode suggest /TC or /TP since -x doesn't make sense if passed via
+ // /clang:.
+ if (auto *A = Args.getLastArg(options::OPT_x))
+ Diag(diag::err_drv_unsupported_opt_with_suggestion)
+ << A->getAsString(Args) << "/TC' or '/TP";
}
for (Arg *A : Args) {
@@ -2890,7 +2956,12 @@ class OffloadingActionBuilder final {
CudaActionBuilderBase(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs,
Action::OffloadKind OFKind)
- : DeviceActionBuilder(C, Args, Inputs, OFKind) {}
+ : DeviceActionBuilder(C, Args, Inputs, OFKind) {
+
+ CompileDeviceOnly = C.getDriver().offloadDeviceOnly();
+ Relocatable = Args.hasFlag(options::OPT_fgpu_rdc,
+ options::OPT_fno_gpu_rdc, /*Default=*/false);
+ }
ActionBuilderReturnCode addDeviceDependences(Action *HostAction) override {
// While generating code for CUDA, we only depend on the host input action
@@ -3043,9 +3114,6 @@ class OffloadingActionBuilder final {
!C.hasOffloadToolChain<Action::OFK_HIP>())
return false;
- Relocatable = Args.hasFlag(options::OPT_fgpu_rdc,
- options::OPT_fno_gpu_rdc, /*Default=*/false);
-
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
assert(HostTC && "No toolchain for host compilation.");
if (HostTC->getTriple().isNVPTX() ||
@@ -3064,7 +3132,6 @@ class OffloadingActionBuilder final {
: C.getSingleOffloadToolChain<Action::OFK_HIP>());
CompileHostOnly = C.getDriver().offloadHostOnly();
- CompileDeviceOnly = C.getDriver().offloadDeviceOnly();
EmitLLVM = Args.getLastArg(options::OPT_emit_llvm);
EmitAsm = Args.getLastArg(options::OPT_S);
FixedCUID = Args.getLastArgValue(options::OPT_cuid_EQ);
@@ -3296,16 +3363,40 @@ class OffloadingActionBuilder final {
// only compilation. Bundle other type of output files only if
// --gpu-bundle-output is specified for device only compilation.
std::optional<bool> BundleOutput;
+ std::optional<bool> EmitReloc;
public:
HIPActionBuilder(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs)
: CudaActionBuilderBase(C, Args, Inputs, Action::OFK_HIP) {
+
DefaultCudaArch = CudaArch::GFX906;
+
+ if (Args.hasArg(options::OPT_fhip_emit_relocatable,
+ options::OPT_fno_hip_emit_relocatable)) {
+ EmitReloc = Args.hasFlag(options::OPT_fhip_emit_relocatable,
+ options::OPT_fno_hip_emit_relocatable, false);
+
+ if (*EmitReloc) {
+ if (Relocatable) {
+ C.getDriver().Diag(diag::err_opt_not_valid_with_opt)
+ << "-fhip-emit-relocatable"
+ << "-fgpu-rdc";
+ }
+
+ if (!CompileDeviceOnly) {
+ C.getDriver().Diag(diag::err_opt_not_valid_without_opt)
+ << "-fhip-emit-relocatable"
+ << "--cuda-device-only";
+ }
+ }
+ }
+
if (Args.hasArg(options::OPT_gpu_bundle_output,
options::OPT_no_gpu_bundle_output))
BundleOutput = Args.hasFlag(options::OPT_gpu_bundle_output,
- options::OPT_no_gpu_bundle_output, true);
+ options::OPT_no_gpu_bundle_output, true) &&
+ (!EmitReloc || !*EmitReloc);
}
bool canUseBundlerUnbundler() const override { return true; }
@@ -3352,8 +3443,10 @@ class OffloadingActionBuilder final {
assert(!CompileHostOnly &&
"Not expecting HIP actions in host-only compilation.");
+ bool ShouldLink = !EmitReloc || !*EmitReloc;
+
if (!Relocatable && CurPhase == phases::Backend && !EmitLLVM &&
- !EmitAsm) {
+ !EmitAsm && ShouldLink) {
// If we are in backend phase, we attempt to generate the fat binary.
// We compile each arch to IR and use a link action to generate code
// object containing ISA. Then we use a special "link" action to create
@@ -3429,6 +3522,8 @@ class OffloadingActionBuilder final {
return CompileDeviceOnly ? ABRT_Ignore_Host : ABRT_Success;
} else if (CurPhase == phases::Link) {
+ if (!ShouldLink)
+ return ABRT_Success;
// Save CudaDeviceActions to DeviceLinkerInputs for each GPU subarch.
// This happens to each device action originated from each input file.
// Later on, device actions in DeviceLinkerInputs are used to create
@@ -3466,8 +3561,11 @@ class OffloadingActionBuilder final {
CudaDeviceActions.clear();
}
- return (CompileDeviceOnly && CurPhase == FinalPhase) ? ABRT_Ignore_Host
- : ABRT_Success;
+ return (CompileDeviceOnly &&
+ (CurPhase == FinalPhase ||
+ (!ShouldLink && CurPhase == phases::Assemble)))
+ ? ABRT_Ignore_Host
+ : ABRT_Success;
}
void appendLinkDeviceActions(ActionList &AL) override {
@@ -3618,7 +3716,6 @@ public:
++InactiveBuilders;
continue;
}
-
auto RetCode =
SB->getDeviceDependences(DDeps, CurPhase, FinalPhase, Phases);
@@ -3863,6 +3960,22 @@ void Driver::handleArguments(Compilation &C, DerivedArgList &Args,
!Args.getLastArgValue(options::OPT_fuse_ld_EQ)
.equals_insensitive("lld"))
Diag(clang::diag::err_drv_lto_without_lld);
+
+ // If -dumpdir is not specified, give a default prefix derived from the link
+ // output filename. For example, `clang -g -gsplit-dwarf a.c -o x` passes
+ // `-dumpdir x-` to cc1. If -o is unspecified, use
+ // stem(getDefaultImageName()) (usually stem("a.out") = "a").
+ if (!Args.hasArg(options::OPT_dumpdir)) {
+ Arg *FinalOutput = Args.getLastArg(options::OPT_o, options::OPT__SLASH_o);
+ Arg *Arg = Args.MakeSeparateArg(
+ nullptr, getOpts().getOption(options::OPT_dumpdir),
+ Args.MakeArgString(
+ (FinalOutput ? FinalOutput->getValue()
+ : llvm::sys::path::stem(getDefaultImageName())) +
+ "-"));
+ Arg->claim();
+ Args.append(Arg);
+ }
}
if (FinalPhase == phases::Preprocess || Args.hasArg(options::OPT__SLASH_Y_)) {
@@ -4040,9 +4153,11 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
// Queue linker inputs.
if (Phase == phases::Link) {
assert(Phase == PL.back() && "linking must be final compilation step.");
- // We don't need to generate additional link commands if emitting AMD bitcode
+ // We don't need to generate additional link commands if emitting AMD
+ // bitcode or compiling only for the offload device
if (!(C.getInputArgs().hasArg(options::OPT_hip_link) &&
- (C.getInputArgs().hasArg(options::OPT_emit_llvm))))
+ (C.getInputArgs().hasArg(options::OPT_emit_llvm))) &&
+ !offloadDeviceOnly())
LinkerInputs.push_back(Current);
Current = nullptr;
break;
@@ -4080,16 +4195,15 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
Current = NewCurrent;
- // Use the current host action in any of the offloading actions, if
- // required.
- if (!UseNewOffloadingDriver)
- if (OffloadBuilder->addHostDependenceToDeviceActions(Current, InputArg))
- break;
-
// Try to build the offloading actions and add the result as a dependency
// to the host.
if (UseNewOffloadingDriver)
Current = BuildOffloadingActions(C, Args, I, Current);
+ // Use the current host action in any of the offloading actions, if
+ // required.
+ else if (OffloadBuilder->addHostDependenceToDeviceActions(Current,
+ InputArg))
+ break;
if (Current->getType() == types::TY_Nothing)
break;
@@ -4210,6 +4324,18 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
I.second->claim();
}
+ // Call validator for dxil when -Vd not in Args.
+ if (C.getDefaultToolChain().getTriple().isDXIL()) {
+ // Only add action when needValidation.
+ const auto &TC =
+ static_cast<const toolchains::HLSLToolChain &>(C.getDefaultToolChain());
+ if (TC.requiresValidation(Args)) {
+ Action *LastAction = Actions.back();
+ Actions.push_back(C.MakeAction<BinaryAnalyzeJobAction>(
+ LastAction, types::TY_DX_CONTAINER));
+ }
+ }
+
// Claim ignored clang-cl options.
Args.ClaimAllArgs(options::OPT_cl_ignored_Group);
}
@@ -4261,8 +4387,8 @@ static StringRef getCanonicalArchString(Compilation &C,
/// incompatible pair if a conflict occurs.
static std::optional<std::pair<llvm::StringRef, llvm::StringRef>>
getConflictOffloadArchCombination(const llvm::DenseSet<StringRef> &Archs,
- Action::OffloadKind Kind) {
- if (Kind != Action::OFK_HIP)
+ llvm::Triple Triple) {
+ if (!Triple.isAMDGPU())
return std::nullopt;
std::set<StringRef> ArchSet;
@@ -4288,7 +4414,7 @@ Driver::getOffloadArchs(Compilation &C, const llvm::opt::DerivedArgList &Args,
: "--no-offload-arch");
}
- if (KnownArchs.find(TC) != KnownArchs.end())
+ if (KnownArchs.contains(TC))
return KnownArchs.lookup(TC);
llvm::DenseSet<StringRef> Archs;
@@ -4347,7 +4473,8 @@ Driver::getOffloadArchs(Compilation &C, const llvm::opt::DerivedArgList &Args,
}
}
- if (auto ConflictingArchs = getConflictOffloadArchCombination(Archs, Kind)) {
+ if (auto ConflictingArchs =
+ getConflictOffloadArchCombination(Archs, TC->getTriple())) {
C.getDriver().Diag(clang::diag::err_drv_bad_offload_arch_combo)
<< ConflictingArchs->first << ConflictingArchs->second;
C.setContainsError();
@@ -4614,8 +4741,13 @@ Action *Driver::ConstructPhaseAction(
}
case phases::Backend: {
if (isUsingLTO() && TargetDeviceOffloadKind == Action::OFK_None) {
- types::ID Output =
- Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
+ types::ID Output;
+ if (Args.hasArg(options::OPT_S))
+ Output = types::TY_LTO_IR;
+ else if (Args.hasArg(options::OPT_ffat_lto_objects))
+ Output = types::TY_PP_Asm;
+ else
+ Output = types::TY_LTO_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
}
if (isUsingLTO(/* IsOffload */ true) &&
@@ -4678,6 +4810,7 @@ void Driver::BuildJobs(Compilation &C) const {
unsigned NumIfsOutputs = 0;
for (const Action *A : C.getActions()) {
if (A->getType() != types::TY_Nothing &&
+ A->getType() != types::TY_DX_CONTAINER &&
!(A->getKind() == Action::IfsMergeJobClass ||
(A->getType() == clang::driver::types::TY_IFS_CPP &&
A->getKind() == clang::driver::Action::CompileJobClass &&
@@ -4698,13 +4831,6 @@ void Driver::BuildJobs(Compilation &C) const {
}
const llvm::Triple &RawTriple = C.getDefaultToolChain().getTriple();
- if (RawTriple.isOSAIX()) {
- if (Arg *A = C.getArgs().getLastArg(options::OPT_G))
- Diag(diag::err_drv_unsupported_opt_for_target)
- << A->getSpelling() << RawTriple.str();
- if (LTOMode == LTOK_Thin)
- Diag(diag::err_drv_clang_unsupported) << "thinLTO on AIX";
- }
// Collect the list of architectures.
llvm::StringSet<> ArchNames;
@@ -4842,9 +4968,16 @@ void Driver::BuildJobs(Compilation &C) const {
// In clang-cl, don't mention unknown arguments here since they have
// already been warned about.
- if (!IsCLMode() || !A->getOption().matches(options::OPT_UNKNOWN))
- Diag(clang::diag::warn_drv_unused_argument)
- << A->getAsString(C.getArgs());
+ if (!IsCLMode() || !A->getOption().matches(options::OPT_UNKNOWN)) {
+ if (A->getOption().hasFlag(options::TargetSpecific) &&
+ !A->isIgnoredTargetSpecific()) {
+ Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << getTargetTriple();
+ } else {
+ Diag(clang::diag::warn_drv_unused_argument)
+ << A->getAsString(C.getArgs());
+ }
+ }
}
}
}
@@ -5192,6 +5325,37 @@ InputInfoList Driver::BuildJobsForAction(
return Result;
}
+static void handleTimeTrace(Compilation &C, const ArgList &Args,
+ const JobAction *JA, const char *BaseInput,
+ const InputInfo &Result) {
+ Arg *A =
+ Args.getLastArg(options::OPT_ftime_trace, options::OPT_ftime_trace_EQ);
+ if (!A)
+ return;
+ SmallString<128> Path;
+ if (A->getOption().matches(options::OPT_ftime_trace_EQ)) {
+ Path = A->getValue();
+ if (llvm::sys::fs::is_directory(Path)) {
+ SmallString<128> Tmp(Result.getFilename());
+ llvm::sys::path::replace_extension(Tmp, "json");
+ llvm::sys::path::append(Path, llvm::sys::path::filename(Tmp));
+ }
+ } else {
+ if (Arg *DumpDir = Args.getLastArgNoClaim(options::OPT_dumpdir)) {
+ // The trace file is ${dumpdir}${basename}.json. Note that dumpdir may not
+ // end with a path separator.
+ Path = DumpDir->getValue();
+ Path += llvm::sys::path::filename(BaseInput);
+ } else {
+ Path = Result.getFilename();
+ }
+ llvm::sys::path::replace_extension(Path, "json");
+ }
+ const char *ResultFile = C.getArgs().MakeArgString(Path);
+ C.addTimeTraceFile(ResultFile, JA);
+ C.addResultFile(ResultFile, JA);
+}
+
InputInfoList Driver::BuildJobsForActionNoCache(
Compilation &C, const Action *A, const ToolChain *TC, StringRef BoundArch,
bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
@@ -5441,6 +5605,8 @@ InputInfoList Driver::BuildJobsForActionNoCache(
AtTopLevel, MultipleArchs,
OffloadingPrefix),
BaseInput);
+ if (T->canEmitIR() && OffloadingPrefix.empty())
+ handleTimeTrace(C, Args, JA, BaseInput, Result);
}
if (CCCPrintBindings && !CCGenDiagnostics) {
@@ -5528,7 +5694,8 @@ static bool HasPreprocessOutput(const Action &JA) {
const char *Driver::CreateTempFile(Compilation &C, StringRef Prefix,
StringRef Suffix, bool MultipleArchs,
- StringRef BoundArch) const {
+ StringRef BoundArch,
+ bool NeedUniqueDirectory) const {
SmallString<128> TmpName;
Arg *A = C.getArgs().getLastArg(options::OPT_fcrash_diagnostics_dir);
std::optional<std::string> CrashDirectory =
@@ -5548,9 +5715,15 @@ const char *Driver::CreateTempFile(Compilation &C, StringRef Prefix,
}
} else {
if (MultipleArchs && !BoundArch.empty()) {
- TmpName = GetTemporaryDirectory(Prefix);
- llvm::sys::path::append(TmpName,
- Twine(Prefix) + "-" + BoundArch + "." + Suffix);
+ if (NeedUniqueDirectory) {
+ TmpName = GetTemporaryDirectory(Prefix);
+ llvm::sys::path::append(TmpName,
+ Twine(Prefix) + "-" + BoundArch + "." + Suffix);
+ } else {
+ TmpName =
+ GetTemporaryPath((Twine(Prefix) + "-" + BoundArch).str(), Suffix);
+ }
+
} else {
TmpName = GetTemporaryPath(Prefix, Suffix);
}
@@ -5666,7 +5839,16 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
StringRef Name = llvm::sys::path::filename(BaseInput);
std::pair<StringRef, StringRef> Split = Name.split('.');
const char *Suffix = types::getTypeTempSuffix(JA.getType(), IsCLMode());
- return CreateTempFile(C, Split.first, Suffix, MultipleArchs, BoundArch);
+ // The non-offloading toolchain on Darwin requires deterministic input
+ // file name for binaries to be deterministic, therefore it needs unique
+ // directory.
+ llvm::Triple Triple(C.getDriver().getTargetTriple());
+ bool NeedUniqueDirectory =
+ (JA.getOffloadingDeviceKind() == Action::OFK_None ||
+ JA.getOffloadingDeviceKind() == Action::OFK_Host) &&
+ Triple.isOSDarwin();
+ return CreateTempFile(C, Split.first, Suffix, MultipleArchs, BoundArch,
+ NeedUniqueDirectory);
}
SmallString<128> BasePath(BaseInput);
@@ -6031,7 +6213,8 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
Args);
else if (Target.getArch() == llvm::Triple::ve)
TC = std::make_unique<toolchains::VEToolChain>(*this, Target, Args);
-
+ else if (Target.isOHOSFamily())
+ TC = std::make_unique<toolchains::OHOS>(*this, Target, Args);
else
TC = std::make_unique<toolchains::Linux>(*this, Target, Args);
break;
@@ -6074,7 +6257,7 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::MSVC:
case llvm::Triple::UnknownEnvironment:
if (Args.getLastArgValue(options::OPT_fuse_ld_EQ)
- .startswith_insensitive("bfd"))
+ .starts_with_insensitive("bfd"))
TC = std::make_unique<toolchains::CrossWindowsToolChain>(
*this, Target, Args);
else
@@ -6095,6 +6278,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::Hurd:
TC = std::make_unique<toolchains::Hurd>(*this, Target, Args);
break;
+ case llvm::Triple::LiteOS:
+ TC = std::make_unique<toolchains::OHOS>(*this, Target, Args);
+ break;
case llvm::Triple::ZOS:
TC = std::make_unique<toolchains::ZOS>(*this, Target, Args);
break;
@@ -6392,3 +6578,58 @@ llvm::StringRef clang::driver::getDriverMode(StringRef ProgName,
}
bool driver::IsClangCL(StringRef DriverMode) { return DriverMode.equals("cl"); }
+
+llvm::Error driver::expandResponseFiles(SmallVectorImpl<const char *> &Args,
+ bool ClangCLMode,
+ llvm::BumpPtrAllocator &Alloc,
+ llvm::vfs::FileSystem *FS) {
+ // Parse response files using the GNU syntax, unless we're in CL mode. There
+ // are two ways to put clang in CL compatibility mode: ProgName is either
+ // clang-cl or cl, or --driver-mode=cl is on the command line. The normal
+ // command line parsing can't happen until after response file parsing, so we
+ // have to manually search for a --driver-mode=cl argument the hard way.
+ // Finally, our -cc1 tools don't care which tokenization mode we use because
+ // response files written by clang will tokenize the same way in either mode.
+ enum { Default, POSIX, Windows } RSPQuoting = Default;
+ for (const char *F : Args) {
+ if (strcmp(F, "--rsp-quoting=posix") == 0)
+ RSPQuoting = POSIX;
+ else if (strcmp(F, "--rsp-quoting=windows") == 0)
+ RSPQuoting = Windows;
+ }
+
+ // Determines whether we want nullptr markers in Args to indicate response
+ // files end-of-lines. We only use this for the /LINK driver argument with
+ // clang-cl.exe on Windows.
+ bool MarkEOLs = ClangCLMode;
+
+ llvm::cl::TokenizerCallback Tokenizer;
+ if (RSPQuoting == Windows || (RSPQuoting == Default && ClangCLMode))
+ Tokenizer = &llvm::cl::TokenizeWindowsCommandLine;
+ else
+ Tokenizer = &llvm::cl::TokenizeGNUCommandLine;
+
+ if (MarkEOLs && Args.size() > 1 && StringRef(Args[1]).startswith("-cc1"))
+ MarkEOLs = false;
+
+ llvm::cl::ExpansionContext ECtx(Alloc, Tokenizer);
+ ECtx.setMarkEOLs(MarkEOLs);
+ if (FS)
+ ECtx.setVFS(FS);
+
+ if (llvm::Error Err = ECtx.expandResponseFiles(Args))
+ return Err;
+
+ // If -cc1 came from a response file, remove the EOL sentinels.
+ auto FirstArg = llvm::find_if(llvm::drop_begin(Args),
+ [](const char *A) { return A != nullptr; });
+ if (FirstArg != Args.end() && StringRef(*FirstArg).startswith("-cc1")) {
+ // If -cc1 came from a response file, remove the EOL sentinels.
+ if (MarkEOLs) {
+ auto newEnd = std::remove(Args.begin(), Args.end(), nullptr);
+ Args.resize(newEnd - Args.begin());
+ }
+ }
+
+ return llvm::Error::success();
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/Job.cpp b/contrib/llvm-project/clang/lib/Driver/Job.cpp
index ec355ceb84a9..203400440f9f 100644
--- a/contrib/llvm-project/clang/lib/Driver/Job.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Job.cpp
@@ -16,6 +16,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
@@ -38,9 +39,10 @@ using namespace driver;
Command::Command(const Action &Source, const Tool &Creator,
ResponseFileSupport ResponseSupport, const char *Executable,
const llvm::opt::ArgStringList &Arguments,
- ArrayRef<InputInfo> Inputs, ArrayRef<InputInfo> Outputs)
+ ArrayRef<InputInfo> Inputs, ArrayRef<InputInfo> Outputs,
+ const char *PrependArg)
: Source(Source), Creator(Creator), ResponseSupport(ResponseSupport),
- Executable(Executable), Arguments(Arguments) {
+ Executable(Executable), PrependArg(PrependArg), Arguments(Arguments) {
for (const auto &II : Inputs)
if (II.isFilename())
InputInfoList.push_back(II);
@@ -144,6 +146,10 @@ void Command::buildArgvForResponseFile(
for (const auto *InputName : InputFileList)
Inputs.insert(InputName);
Out.push_back(Executable);
+
+ if (PrependArg)
+ Out.push_back(PrependArg);
+
// In a file list, build args vector ignoring parameters that will go in the
// response file (elements of the InputFileList vector)
bool FirstInput = true;
@@ -209,6 +215,9 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
if (ResponseFile != nullptr) {
buildArgvForResponseFile(ArgsRespFile);
Args = ArrayRef<const char *>(ArgsRespFile).slice(1); // no executable name
+ } else if (PrependArg) {
+ OS << ' ';
+ llvm::sys::printArg(OS, PrependArg, /*Quote=*/true);
}
bool HaveCrashVFS = CrashInfo && !CrashInfo->VFSPath.empty();
@@ -321,6 +330,8 @@ int Command::Execute(ArrayRef<std::optional<StringRef>> Redirects,
SmallVector<const char *, 128> Argv;
if (ResponseFile == nullptr) {
Argv.push_back(Executable);
+ if (PrependArg)
+ Argv.push_back(PrependArg);
Argv.append(Arguments.begin(), Arguments.end());
Argv.push_back(nullptr);
} else {
@@ -382,9 +393,10 @@ CC1Command::CC1Command(const Action &Source, const Tool &Creator,
ResponseFileSupport ResponseSupport,
const char *Executable,
const llvm::opt::ArgStringList &Arguments,
- ArrayRef<InputInfo> Inputs, ArrayRef<InputInfo> Outputs)
+ ArrayRef<InputInfo> Inputs, ArrayRef<InputInfo> Outputs,
+ const char *PrependArg)
: Command(Source, Creator, ResponseSupport, Executable, Arguments, Inputs,
- Outputs) {
+ Outputs, PrependArg) {
InProcess = true;
}
@@ -438,30 +450,6 @@ void CC1Command::setEnvironment(llvm::ArrayRef<const char *> NewEnvironment) {
"The CC1Command doesn't support changing the environment vars!");
}
-ForceSuccessCommand::ForceSuccessCommand(
- const Action &Source_, const Tool &Creator_,
- ResponseFileSupport ResponseSupport, const char *Executable_,
- const llvm::opt::ArgStringList &Arguments_, ArrayRef<InputInfo> Inputs,
- ArrayRef<InputInfo> Outputs)
- : Command(Source_, Creator_, ResponseSupport, Executable_, Arguments_,
- Inputs, Outputs) {}
-
-void ForceSuccessCommand::Print(raw_ostream &OS, const char *Terminator,
- bool Quote, CrashReportInfo *CrashInfo) const {
- Command::Print(OS, "", Quote, CrashInfo);
- OS << " || (exit 0)" << Terminator;
-}
-
-int ForceSuccessCommand::Execute(ArrayRef<std::optional<StringRef>> Redirects,
- std::string *ErrMsg,
- bool *ExecutionFailed) const {
- int Status = Command::Execute(Redirects, ErrMsg, ExecutionFailed);
- (void)Status;
- if (ExecutionFailed)
- *ExecutionFailed = false;
- return 0;
-}
-
void JobList::Print(raw_ostream &OS, const char *Terminator, bool Quote,
CrashReportInfo *CrashInfo) const {
for (const auto &Job : *this)
diff --git a/contrib/llvm-project/clang/lib/Driver/Multilib.cpp b/contrib/llvm-project/clang/lib/Driver/Multilib.cpp
index ec619874ad60..a37dffc8a6f1 100644
--- a/contrib/llvm-project/clang/lib/Driver/Multilib.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Multilib.cpp
@@ -8,14 +8,18 @@
#include "clang/Driver/Multilib.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/Version.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Regex.h"
+#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/YAMLParser.h"
+#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -25,56 +29,16 @@ using namespace clang;
using namespace driver;
using namespace llvm::sys;
-/// normalize Segment to "/foo/bar" or "".
-static void normalizePathSegment(std::string &Segment) {
- StringRef seg = Segment;
-
- // Prune trailing "/" or "./"
- while (true) {
- StringRef last = path::filename(seg);
- if (last != ".")
- break;
- seg = path::parent_path(seg);
- }
-
- if (seg.empty() || seg == "/") {
- Segment.clear();
- return;
- }
-
- // Add leading '/'
- if (seg.front() != '/') {
- Segment = "/" + seg.str();
- } else {
- Segment = std::string(seg);
- }
-}
-
Multilib::Multilib(StringRef GCCSuffix, StringRef OSSuffix,
- StringRef IncludeSuffix, int Priority)
+ StringRef IncludeSuffix, const flags_list &Flags)
: GCCSuffix(GCCSuffix), OSSuffix(OSSuffix), IncludeSuffix(IncludeSuffix),
- Priority(Priority) {
- normalizePathSegment(this->GCCSuffix);
- normalizePathSegment(this->OSSuffix);
- normalizePathSegment(this->IncludeSuffix);
-}
-
-Multilib &Multilib::gccSuffix(StringRef S) {
- GCCSuffix = std::string(S);
- normalizePathSegment(GCCSuffix);
- return *this;
-}
-
-Multilib &Multilib::osSuffix(StringRef S) {
- OSSuffix = std::string(S);
- normalizePathSegment(OSSuffix);
- return *this;
-}
-
-Multilib &Multilib::includeSuffix(StringRef S) {
- IncludeSuffix = std::string(S);
- normalizePathSegment(IncludeSuffix);
- return *this;
+ Flags(Flags) {
+ assert(GCCSuffix.empty() ||
+ (StringRef(GCCSuffix).front() == '/' && GCCSuffix.size() > 1));
+ assert(OSSuffix.empty() ||
+ (StringRef(OSSuffix).front() == '/' && OSSuffix.size() > 1));
+ assert(IncludeSuffix.empty() ||
+ (StringRef(IncludeSuffix).front() == '/' && IncludeSuffix.size() > 1));
}
LLVM_DUMP_METHOD void Multilib::dump() const {
@@ -82,7 +46,6 @@ LLVM_DUMP_METHOD void Multilib::dump() const {
}
void Multilib::print(raw_ostream &OS) const {
- assert(GCCSuffix.empty() || (StringRef(GCCSuffix).front() == '/'));
if (GCCSuffix.empty())
OS << ".";
else {
@@ -90,27 +53,11 @@ void Multilib::print(raw_ostream &OS) const {
}
OS << ";";
for (StringRef Flag : Flags) {
- if (Flag.front() == '+')
+ if (Flag.front() == '-')
OS << "@" << Flag.substr(1);
}
}
-bool Multilib::isValid() const {
- llvm::StringMap<int> FlagSet;
- for (unsigned I = 0, N = Flags.size(); I != N; ++I) {
- StringRef Flag(Flags[I]);
- llvm::StringMap<int>::iterator SI = FlagSet.find(Flag.substr(1));
-
- assert(StringRef(Flag).front() == '+' || StringRef(Flag).front() == '-');
-
- if (SI == FlagSet.end())
- FlagSet[Flag.substr(1)] = I;
- else if (Flags[I] != Flags[SI->getValue()])
- return false;
- }
- return true;
-}
-
bool Multilib::operator==(const Multilib &Other) const {
// Check whether the flags sets match
// allowing for the match to be order invariant
@@ -119,7 +66,7 @@ bool Multilib::operator==(const Multilib &Other) const {
MyFlags.insert(Flag);
for (const auto &Flag : Other.Flags)
- if (MyFlags.find(Flag) == MyFlags.end())
+ if (!MyFlags.contains(Flag))
return false;
if (osSuffix() != Other.osSuffix())
@@ -139,146 +86,140 @@ raw_ostream &clang::driver::operator<<(raw_ostream &OS, const Multilib &M) {
return OS;
}
-MultilibSet &MultilibSet::Maybe(const Multilib &M) {
- Multilib Opposite;
- // Negate any '+' flags
- for (StringRef Flag : M.flags()) {
- if (Flag.front() == '+')
- Opposite.flags().push_back(("-" + Flag.substr(1)).str());
- }
- return Either(M, Opposite);
-}
-
-MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2) {
- return Either({M1, M2});
-}
-
-MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2,
- const Multilib &M3) {
- return Either({M1, M2, M3});
-}
-
-MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2,
- const Multilib &M3, const Multilib &M4) {
- return Either({M1, M2, M3, M4});
-}
-
-MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2,
- const Multilib &M3, const Multilib &M4,
- const Multilib &M5) {
- return Either({M1, M2, M3, M4, M5});
-}
-
-static Multilib compose(const Multilib &Base, const Multilib &New) {
- SmallString<128> GCCSuffix;
- llvm::sys::path::append(GCCSuffix, "/", Base.gccSuffix(), New.gccSuffix());
- SmallString<128> OSSuffix;
- llvm::sys::path::append(OSSuffix, "/", Base.osSuffix(), New.osSuffix());
- SmallString<128> IncludeSuffix;
- llvm::sys::path::append(IncludeSuffix, "/", Base.includeSuffix(),
- New.includeSuffix());
-
- Multilib Composed(GCCSuffix, OSSuffix, IncludeSuffix);
-
- Multilib::flags_list &Flags = Composed.flags();
-
- Flags.insert(Flags.end(), Base.flags().begin(), Base.flags().end());
- Flags.insert(Flags.end(), New.flags().begin(), New.flags().end());
-
- return Composed;
-}
-
-MultilibSet &MultilibSet::Either(ArrayRef<Multilib> MultilibSegments) {
- multilib_list Composed;
-
- if (Multilibs.empty())
- Multilibs.insert(Multilibs.end(), MultilibSegments.begin(),
- MultilibSegments.end());
- else {
- for (const auto &New : MultilibSegments) {
- for (const auto &Base : *this) {
- Multilib MO = compose(Base, New);
- if (MO.isValid())
- Composed.push_back(MO);
- }
- }
-
- Multilibs = Composed;
- }
-
- return *this;
-}
-
MultilibSet &MultilibSet::FilterOut(FilterCallback F) {
- filterInPlace(F, Multilibs);
- return *this;
-}
-
-MultilibSet &MultilibSet::FilterOut(const char *Regex) {
- llvm::Regex R(Regex);
-#ifndef NDEBUG
- std::string Error;
- if (!R.isValid(Error)) {
- llvm::errs() << Error;
- llvm_unreachable("Invalid regex!");
- }
-#endif
-
- filterInPlace([&R](const Multilib &M) { return R.match(M.gccSuffix()); },
- Multilibs);
+ llvm::erase_if(Multilibs, F);
return *this;
}
void MultilibSet::push_back(const Multilib &M) { Multilibs.push_back(M); }
-void MultilibSet::combineWith(const MultilibSet &Other) {
- Multilibs.insert(Multilibs.end(), Other.begin(), Other.end());
+bool MultilibSet::select(const Multilib::flags_list &Flags,
+ llvm::SmallVector<Multilib> &Selected) const {
+ llvm::StringSet<> FlagSet(expandFlags(Flags));
+ Selected.clear();
+ llvm::copy_if(Multilibs, std::back_inserter(Selected),
+ [&FlagSet](const Multilib &M) {
+ for (const std::string &F : M.flags())
+ if (!FlagSet.contains(F))
+ return false;
+ return true;
+ });
+ return !Selected.empty();
+}
+
+llvm::StringSet<>
+MultilibSet::expandFlags(const Multilib::flags_list &InFlags) const {
+ llvm::StringSet<> Result;
+ for (const auto &F : InFlags)
+ Result.insert(F);
+ for (const FlagMatcher &M : FlagMatchers) {
+ std::string RegexString(M.Match);
+
+ // Make the regular expression match the whole string.
+ if (!StringRef(M.Match).starts_with("^"))
+ RegexString.insert(RegexString.begin(), '^');
+ if (!StringRef(M.Match).ends_with("$"))
+ RegexString.push_back('$');
+
+ const llvm::Regex Regex(RegexString);
+ assert(Regex.isValid());
+ if (llvm::find_if(InFlags, [&Regex](StringRef F) {
+ return Regex.match(F);
+ }) != InFlags.end()) {
+ Result.insert(M.Flags.begin(), M.Flags.end());
+ }
+ }
+ return Result;
}
-static bool isFlagEnabled(StringRef Flag) {
- char Indicator = Flag.front();
- assert(Indicator == '+' || Indicator == '-');
- return Indicator == '+';
-}
+namespace {
-bool MultilibSet::select(const Multilib::flags_list &Flags, Multilib &M) const {
- llvm::StringMap<bool> FlagSet;
+// When updating this also update MULTILIB_VERSION in MultilibTest.cpp
+static const VersionTuple MultilibVersionCurrent(1, 0);
- // Stuff all of the flags into the FlagSet such that a true mappend indicates
- // the flag was enabled, and a false mappend indicates the flag was disabled.
- for (StringRef Flag : Flags)
- FlagSet[Flag.substr(1)] = isFlagEnabled(Flag);
+struct MultilibSerialization {
+ std::string Dir;
+ std::vector<std::string> Flags;
+};
- multilib_list Filtered = filterCopy([&FlagSet](const Multilib &M) {
- for (StringRef Flag : M.flags()) {
- llvm::StringMap<bool>::const_iterator SI = FlagSet.find(Flag.substr(1));
- if (SI != FlagSet.end())
- if (SI->getValue() != isFlagEnabled(Flag))
- return true;
- }
- return false;
- }, Multilibs);
+struct MultilibSetSerialization {
+ llvm::VersionTuple MultilibVersion;
+ std::vector<MultilibSerialization> Multilibs;
+ std::vector<MultilibSet::FlagMatcher> FlagMatchers;
+};
- if (Filtered.empty())
- return false;
- if (Filtered.size() == 1) {
- M = Filtered[0];
- return true;
+} // end anonymous namespace
+
+template <> struct llvm::yaml::MappingTraits<MultilibSerialization> {
+ static void mapping(llvm::yaml::IO &io, MultilibSerialization &V) {
+ io.mapRequired("Dir", V.Dir);
+ io.mapRequired("Flags", V.Flags);
+ }
+ static std::string validate(IO &io, MultilibSerialization &V) {
+ if (StringRef(V.Dir).starts_with("/"))
+ return "paths must be relative but \"" + V.Dir + "\" starts with \"/\"";
+ return std::string{};
}
+};
- // Sort multilibs by priority and select the one with the highest priority.
- llvm::sort(Filtered, [](const Multilib &a, const Multilib &b) -> bool {
- return a.priority() > b.priority();
- });
+template <> struct llvm::yaml::MappingTraits<MultilibSet::FlagMatcher> {
+ static void mapping(llvm::yaml::IO &io, MultilibSet::FlagMatcher &M) {
+ io.mapRequired("Match", M.Match);
+ io.mapRequired("Flags", M.Flags);
+ }
+ static std::string validate(IO &io, MultilibSet::FlagMatcher &M) {
+ llvm::Regex Regex(M.Match);
+ std::string RegexError;
+ if (!Regex.isValid(RegexError))
+ return RegexError;
+ if (M.Flags.empty())
+ return "value required for 'Flags'";
+ return std::string{};
+ }
+};
- if (Filtered[0].priority() > Filtered[1].priority()) {
- M = Filtered[0];
- return true;
+template <> struct llvm::yaml::MappingTraits<MultilibSetSerialization> {
+ static void mapping(llvm::yaml::IO &io, MultilibSetSerialization &M) {
+ io.mapRequired("MultilibVersion", M.MultilibVersion);
+ io.mapRequired("Variants", M.Multilibs);
+ io.mapOptional("Mappings", M.FlagMatchers);
+ }
+ static std::string validate(IO &io, MultilibSetSerialization &M) {
+ if (M.MultilibVersion.empty())
+ return "missing required key 'MultilibVersion'";
+ if (M.MultilibVersion.getMajor() != MultilibVersionCurrent.getMajor())
+ return "multilib version " + M.MultilibVersion.getAsString() +
+ " is unsupported";
+ if (M.MultilibVersion.getMinor() > MultilibVersionCurrent.getMinor())
+ return "multilib version " + M.MultilibVersion.getAsString() +
+ " is unsupported";
+ return std::string{};
+ }
+};
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(MultilibSerialization)
+LLVM_YAML_IS_SEQUENCE_VECTOR(MultilibSet::FlagMatcher)
+
+llvm::ErrorOr<MultilibSet>
+MultilibSet::parseYaml(llvm::MemoryBufferRef Input,
+ llvm::SourceMgr::DiagHandlerTy DiagHandler,
+ void *DiagHandlerCtxt) {
+ MultilibSetSerialization MS;
+ llvm::yaml::Input YamlInput(Input, nullptr, DiagHandler, DiagHandlerCtxt);
+ YamlInput >> MS;
+ if (YamlInput.error())
+ return YamlInput.error();
+
+ multilib_list Multilibs;
+ Multilibs.reserve(MS.Multilibs.size());
+ for (const auto &M : MS.Multilibs) {
+ std::string Dir;
+ if (M.Dir != ".")
+ Dir = "/" + M.Dir;
+ Multilibs.emplace_back(Dir, Dir, Dir, M.Flags);
}
- // TODO: We should consider returning llvm::Error rather than aborting.
- assert(false && "More than one multilib with the same priority");
- return false;
+ return MultilibSet(std::move(Multilibs), std::move(MS.FlagMatchers));
}
LLVM_DUMP_METHOD void MultilibSet::dump() const {
@@ -290,17 +231,6 @@ void MultilibSet::print(raw_ostream &OS) const {
OS << M << "\n";
}
-MultilibSet::multilib_list MultilibSet::filterCopy(FilterCallback F,
- const multilib_list &Ms) {
- multilib_list Copy(Ms);
- filterInPlace(F, Copy);
- return Copy;
-}
-
-void MultilibSet::filterInPlace(FilterCallback F, multilib_list &Ms) {
- llvm::erase_if(Ms, F);
-}
-
raw_ostream &clang::driver::operator<<(raw_ostream &OS, const MultilibSet &MS) {
MS.print(OS);
return OS;
diff --git a/contrib/llvm-project/clang/lib/Driver/MultilibBuilder.cpp b/contrib/llvm-project/clang/lib/Driver/MultilibBuilder.cpp
new file mode 100644
index 000000000000..15adf5017780
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/MultilibBuilder.cpp
@@ -0,0 +1,197 @@
+//===- MultilibBuilder.cpp - MultilibBuilder Implementation -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/MultilibBuilder.h"
+#include "ToolChains/CommonArgs.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace driver;
+
+/// normalize Segment to "/foo/bar" or "".
+static void normalizePathSegment(std::string &Segment) {
+ StringRef seg = Segment;
+
+ // Prune trailing "/" or "./"
+ while (true) {
+ StringRef last = llvm::sys::path::filename(seg);
+ if (last != ".")
+ break;
+ seg = llvm::sys::path::parent_path(seg);
+ }
+
+ if (seg.empty() || seg == "/") {
+ Segment.clear();
+ return;
+ }
+
+ // Add leading '/'
+ if (seg.front() != '/') {
+ Segment = "/" + seg.str();
+ } else {
+ Segment = std::string(seg);
+ }
+}
+
+MultilibBuilder::MultilibBuilder(StringRef GCC, StringRef OS, StringRef Include)
+ : GCCSuffix(GCC), OSSuffix(OS), IncludeSuffix(Include) {
+ normalizePathSegment(GCCSuffix);
+ normalizePathSegment(OSSuffix);
+ normalizePathSegment(IncludeSuffix);
+}
+
+MultilibBuilder::MultilibBuilder(StringRef Suffix)
+ : MultilibBuilder(Suffix, Suffix, Suffix) {}
+
+MultilibBuilder &MultilibBuilder::gccSuffix(StringRef S) {
+ GCCSuffix = std::string(S);
+ normalizePathSegment(GCCSuffix);
+ return *this;
+}
+
+MultilibBuilder &MultilibBuilder::osSuffix(StringRef S) {
+ OSSuffix = std::string(S);
+ normalizePathSegment(OSSuffix);
+ return *this;
+}
+
+MultilibBuilder &MultilibBuilder::includeSuffix(StringRef S) {
+ IncludeSuffix = std::string(S);
+ normalizePathSegment(IncludeSuffix);
+ return *this;
+}
+
+bool MultilibBuilder::isValid() const {
+ llvm::StringMap<int> FlagSet;
+ for (unsigned I = 0, N = Flags.size(); I != N; ++I) {
+ StringRef Flag(Flags[I]);
+ llvm::StringMap<int>::iterator SI = FlagSet.find(Flag.substr(1));
+
+ assert(StringRef(Flag).front() == '-' || StringRef(Flag).front() == '!');
+
+ if (SI == FlagSet.end())
+ FlagSet[Flag.substr(1)] = I;
+ else if (Flags[I] != Flags[SI->getValue()])
+ return false;
+ }
+ return true;
+}
+
+MultilibBuilder &MultilibBuilder::flag(StringRef Flag, bool Disallow) {
+ tools::addMultilibFlag(!Disallow, Flag, Flags);
+ return *this;
+}
+
+Multilib MultilibBuilder::makeMultilib() const {
+ return Multilib(GCCSuffix, OSSuffix, IncludeSuffix, Flags);
+}
+
+MultilibSetBuilder &MultilibSetBuilder::Maybe(const MultilibBuilder &M) {
+ MultilibBuilder Opposite;
+ // Negate positive flags
+ for (StringRef Flag : M.flags()) {
+ if (Flag.front() == '-')
+ Opposite.flag(Flag, /*Disallow=*/true);
+ }
+ return Either(M, Opposite);
+}
+
+MultilibSetBuilder &MultilibSetBuilder::Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2) {
+ return Either({M1, M2});
+}
+
+MultilibSetBuilder &MultilibSetBuilder::Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2,
+ const MultilibBuilder &M3) {
+ return Either({M1, M2, M3});
+}
+
+MultilibSetBuilder &MultilibSetBuilder::Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2,
+ const MultilibBuilder &M3,
+ const MultilibBuilder &M4) {
+ return Either({M1, M2, M3, M4});
+}
+
+MultilibSetBuilder &MultilibSetBuilder::Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2,
+ const MultilibBuilder &M3,
+ const MultilibBuilder &M4,
+ const MultilibBuilder &M5) {
+ return Either({M1, M2, M3, M4, M5});
+}
+
+static MultilibBuilder compose(const MultilibBuilder &Base,
+ const MultilibBuilder &New) {
+ SmallString<128> GCCSuffix;
+ llvm::sys::path::append(GCCSuffix, "/", Base.gccSuffix(), New.gccSuffix());
+ SmallString<128> OSSuffix;
+ llvm::sys::path::append(OSSuffix, "/", Base.osSuffix(), New.osSuffix());
+ SmallString<128> IncludeSuffix;
+ llvm::sys::path::append(IncludeSuffix, "/", Base.includeSuffix(),
+ New.includeSuffix());
+
+ MultilibBuilder Composed(GCCSuffix, OSSuffix, IncludeSuffix);
+
+ MultilibBuilder::flags_list &Flags = Composed.flags();
+
+ Flags.insert(Flags.end(), Base.flags().begin(), Base.flags().end());
+ Flags.insert(Flags.end(), New.flags().begin(), New.flags().end());
+
+ return Composed;
+}
+
+MultilibSetBuilder &
+MultilibSetBuilder::Either(ArrayRef<MultilibBuilder> MultilibSegments) {
+ multilib_list Composed;
+
+ if (Multilibs.empty())
+ Multilibs.insert(Multilibs.end(), MultilibSegments.begin(),
+ MultilibSegments.end());
+ else {
+ for (const auto &New : MultilibSegments) {
+ for (const auto &Base : Multilibs) {
+ MultilibBuilder MO = compose(Base, New);
+ if (MO.isValid())
+ Composed.push_back(MO);
+ }
+ }
+
+ Multilibs = Composed;
+ }
+
+ return *this;
+}
+
+MultilibSetBuilder &MultilibSetBuilder::FilterOut(const char *Regex) {
+ llvm::Regex R(Regex);
+#ifndef NDEBUG
+ std::string Error;
+ if (!R.isValid(Error)) {
+ llvm::errs() << Error;
+ llvm_unreachable("Invalid regex!");
+ }
+#endif
+ llvm::erase_if(Multilibs, [&R](const MultilibBuilder &M) {
+ return R.match(M.gccSuffix());
+ });
+ return *this;
+}
+
+MultilibSet MultilibSetBuilder::makeMultilibSet() const {
+ MultilibSet Result;
+ for (const auto &M : Multilibs) {
+ Result.push_back(M.makeMultilib());
+ }
+ return Result;
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp b/contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp
index cdacceb0a86a..0ddfb07fdad5 100644
--- a/contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp
@@ -23,7 +23,6 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Object/Archive.h"
#include "llvm/Object/ArchiveWriter.h"
#include "llvm/Object/Binary.h"
@@ -35,7 +34,6 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
@@ -43,6 +41,8 @@
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -72,12 +72,22 @@ OffloadTargetInfo::OffloadTargetInfo(const StringRef Target,
if (clang::StringToCudaArch(TripleOrGPU.second) != clang::CudaArch::UNKNOWN) {
auto KindTriple = TripleOrGPU.first.split('-');
this->OffloadKind = KindTriple.first;
- this->Triple = llvm::Triple(KindTriple.second);
+
+ // Enforce optional env field to standardize bundles
+ llvm::Triple t = llvm::Triple(KindTriple.second);
+ this->Triple = llvm::Triple(t.getArchName(), t.getVendorName(),
+ t.getOSName(), t.getEnvironmentName());
+
this->TargetID = Target.substr(Target.find(TripleOrGPU.second));
} else {
auto KindTriple = TargetFeatures.first.split('-');
this->OffloadKind = KindTriple.first;
- this->Triple = llvm::Triple(KindTriple.second);
+
+ // Enforce optional env field to standardize bundles
+ llvm::Triple t = llvm::Triple(KindTriple.second);
+ this->Triple = llvm::Triple(t.getArchName(), t.getVendorName(),
+ t.getOSName(), t.getEnvironmentName());
+
this->TargetID = "";
}
}
@@ -96,11 +106,11 @@ bool OffloadTargetInfo::isOffloadKindCompatible(
if (OffloadKind == TargetOffloadKind)
return true;
if (BundlerConfig.HipOpenmpCompatible) {
- bool HIPCompatibleWithOpenMP = OffloadKind.startswith_insensitive("hip") &&
+ bool HIPCompatibleWithOpenMP = OffloadKind.starts_with_insensitive("hip") &&
TargetOffloadKind == "openmp";
bool OpenMPCompatibleWithHIP =
OffloadKind == "openmp" &&
- TargetOffloadKind.startswith_insensitive("hip");
+ TargetOffloadKind.starts_with_insensitive("hip");
return HIPCompatibleWithOpenMP || OpenMPCompatibleWithHIP;
}
return false;
@@ -395,8 +405,7 @@ public:
if (!Offset || Offset + Size > FC.size())
return Error::success();
- assert(BundlesInfo.find(Triple) == BundlesInfo.end() &&
- "Triple is duplicated??");
+ assert(!BundlesInfo.contains(Triple) && "Triple is duplicated??");
BundlesInfo[Triple] = BinaryBundleInfo(Size, Offset);
}
// Set the iterator to where we will start to read.
@@ -1222,8 +1231,7 @@ Error OffloadBundler::UnbundleArchive() {
// For inserting <CompatibleTarget, list<CodeObject>> entry in
// OutputArchivesMap.
- if (OutputArchivesMap.find(CompatibleTarget) ==
- OutputArchivesMap.end()) {
+ if (!OutputArchivesMap.contains(CompatibleTarget)) {
std::vector<NewArchiveMember> ArchiveMembers;
ArchiveMembers.push_back(NewArchiveMember(MemBufRef));
diff --git a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
index 068a34a54a92..c3ce13f93464 100644
--- a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
@@ -13,13 +13,14 @@
#include "clang/Driver/Options.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/AArch64TargetParser.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SpecialCaseList.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/RISCVTargetParser.h"
+#include "llvm/TargetParser/TargetParser.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
#include <memory>
@@ -35,11 +36,9 @@ static const SanitizerMask NeedsUbsanRt =
static const SanitizerMask NeedsUbsanCxxRt =
SanitizerKind::Vptr | SanitizerKind::CFI;
static const SanitizerMask NotAllowedWithTrap = SanitizerKind::Vptr;
-static const SanitizerMask NotAllowedWithMinimalRuntime =
- SanitizerKind::Function | SanitizerKind::Vptr;
+static const SanitizerMask NotAllowedWithMinimalRuntime = SanitizerKind::Vptr;
static const SanitizerMask RequiresPIE =
- SanitizerKind::DataFlow | SanitizerKind::HWAddress | SanitizerKind::Scudo |
- SanitizerKind::KCFI;
+ SanitizerKind::DataFlow | SanitizerKind::Scudo;
static const SanitizerMask NeedsUnwindTables =
SanitizerKind::Address | SanitizerKind::HWAddress | SanitizerKind::Thread |
SanitizerKind::Memory | SanitizerKind::DataFlow;
@@ -78,7 +77,7 @@ static const SanitizerMask CFIClasses =
static const SanitizerMask CompatibleWithMinimalRuntime =
TrappingSupported | SanitizerKind::Scudo | SanitizerKind::ShadowCallStack |
SanitizerKind::MemtagStack | SanitizerKind::MemtagHeap |
- SanitizerKind::MemtagGlobals;
+ SanitizerKind::MemtagGlobals | SanitizerKind::KCFI;
enum CoverageFeature {
CoverageFunc = 1 << 0,
@@ -517,7 +516,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
std::make_pair(SanitizerKind::MemTag,
SanitizerKind::Address | SanitizerKind::KernelAddress |
SanitizerKind::HWAddress |
- SanitizerKind::KernelHWAddress)};
+ SanitizerKind::KernelHWAddress),
+ std::make_pair(SanitizerKind::KCFI, SanitizerKind::Function)};
// Enable toolchain specific default sanitizers if not explicitly disabled.
SanitizerMask Default = TC.getDefaultSanitizers() & ~AllRemove;
@@ -543,11 +543,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
<< lastArgumentForMask(D, Args, Kinds & NeedsLTO) << "-flto";
}
- if ((Kinds & SanitizerKind::ShadowCallStack) &&
- ((TC.getTriple().isAArch64() &&
- !llvm::AArch64::isX18ReservedByDefault(TC.getTriple())) ||
- (TC.getTriple().isRISCV() &&
- !llvm::RISCV::isX18ReservedByDefault(TC.getTriple()))) &&
+ if ((Kinds & SanitizerKind::ShadowCallStack) && TC.getTriple().isAArch64() &&
+ !llvm::AArch64::isX18ReservedByDefault(TC.getTriple()) &&
!Args.hasArg(options::OPT_ffixed_x18) && DiagnoseErrors) {
D.Diag(diag::err_drv_argument_only_allowed_with)
<< lastArgumentForMask(D, Args, Kinds & SanitizerKind::ShadowCallStack)
@@ -713,6 +710,9 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
CfiICallGeneralizePointers =
Args.hasArg(options::OPT_fsanitize_cfi_icall_generalize_pointers);
+ CfiICallNormalizeIntegers =
+ Args.hasArg(options::OPT_fsanitize_cfi_icall_normalize_integers);
+
if (CfiCrossDso && CfiICallGeneralizePointers && DiagnoseErrors)
D.Diag(diag::err_drv_argument_not_allowed_with)
<< "-fsanitize-cfi-cross-dso"
@@ -723,8 +723,11 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
options::OPT_fno_sanitize_cfi_canonical_jump_tables, true);
}
- if (AllAddedKinds & SanitizerKind::KCFI && DiagnoseErrors) {
- if (AllAddedKinds & SanitizerKind::CFI)
+ if (AllAddedKinds & SanitizerKind::KCFI) {
+ CfiICallNormalizeIntegers =
+ Args.hasArg(options::OPT_fsanitize_cfi_icall_normalize_integers);
+
+ if (AllAddedKinds & SanitizerKind::CFI && DiagnoseErrors)
D.Diag(diag::err_drv_argument_not_allowed_with)
<< "-fsanitize=kcfi"
<< lastArgumentForMask(D, Args, SanitizerKind::CFI);
@@ -861,6 +864,16 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
}
+ // Parse -fsanitize-metadata-ignorelist option if enabled.
+ if (BinaryMetadataFeatures) {
+ parseSpecialCaseListArg(
+ D, Args, BinaryMetadataIgnorelistFiles,
+ options::OPT_fexperimental_sanitize_metadata_ignorelist_EQ,
+ OptSpecifier(), // Cannot clear ignore list, only append.
+ clang::diag::err_drv_malformed_sanitizer_metadata_ignorelist,
+ DiagnoseErrors);
+ }
+
SharedRuntime =
Args.hasFlag(options::OPT_shared_libsan, options::OPT_static_libsan,
TC.getTriple().isAndroid() || TC.getTriple().isOSFuchsia() ||
@@ -898,6 +911,9 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
}
+ StableABI = Args.hasFlag(options::OPT_fsanitize_stable_abi,
+ options::OPT_fno_sanitize_stable_abi, false);
+
AsanUseAfterScope = Args.hasFlag(
options::OPT_fsanitize_address_use_after_scope,
options::OPT_fno_sanitize_address_use_after_scope, AsanUseAfterScope);
@@ -912,14 +928,9 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
options::OPT_fno_sanitize_address_outline_instrumentation,
AsanOutlineInstrumentation);
- // As a workaround for a bug in gold 2.26 and earlier, dead stripping of
- // globals in ASan is disabled by default on most ELF targets.
- // See https://sourceware.org/bugzilla/show_bug.cgi?id=19002
AsanGlobalsDeadStripping = Args.hasFlag(
options::OPT_fsanitize_address_globals_dead_stripping,
- options::OPT_fno_sanitize_address_globals_dead_stripping,
- !TC.getTriple().isOSBinFormatELF() || TC.getTriple().isOSFuchsia() ||
- TC.getTriple().isPS());
+ options::OPT_fno_sanitize_address_globals_dead_stripping, true);
// Enable ODR indicators which allow better handling of mixed instrumented
// and uninstrumented globals. Disable them for Windows where weak odr
@@ -1084,13 +1095,16 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
// NVPTX doesn't currently support sanitizers. Bailing out here means
// that e.g. -fsanitize=address applies only to host code, which is what we
// want for now.
- //
- // AMDGPU sanitizer support is experimental and controlled by -fgpu-sanitize.
- if (TC.getTriple().isNVPTX() ||
- (TC.getTriple().isAMDGPU() &&
- !Args.hasFlag(options::OPT_fgpu_sanitize, options::OPT_fno_gpu_sanitize,
- true)))
+ if (TC.getTriple().isNVPTX())
return;
+ // AMDGPU sanitizer support is experimental and controlled by -fgpu-sanitize.
+ bool GPUSanitize = false;
+ if (TC.getTriple().isAMDGPU()) {
+ if (!Args.hasFlag(options::OPT_fgpu_sanitize, options::OPT_fno_gpu_sanitize,
+ true))
+ return;
+ GPUSanitize = true;
+ }
// Translate available CoverageFeatures to corresponding clang-cc1 flags.
// Do it even if Sanitizers.empty() since some forms of coverage don't require
@@ -1127,16 +1141,21 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
addSpecialCaseListOpt(Args, CmdArgs, "-fsanitize-coverage-ignorelist=",
CoverageIgnorelistFiles);
- // Translate available BinaryMetadataFeatures to corresponding clang-cc1
- // flags. Does not depend on any other sanitizers.
- const std::pair<int, std::string> BinaryMetadataFlags[] = {
- std::make_pair(BinaryMetadataCovered, "covered"),
- std::make_pair(BinaryMetadataAtomics, "atomics"),
- std::make_pair(BinaryMetadataUAR, "uar")};
- for (const auto &F : BinaryMetadataFlags) {
- if (BinaryMetadataFeatures & F.first)
- CmdArgs.push_back(
- Args.MakeArgString("-fexperimental-sanitize-metadata=" + F.second));
+ if (!GPUSanitize) {
+ // Translate available BinaryMetadataFeatures to corresponding clang-cc1
+ // flags. Does not depend on any other sanitizers. Unsupported on GPUs.
+ const std::pair<int, std::string> BinaryMetadataFlags[] = {
+ std::make_pair(BinaryMetadataCovered, "covered"),
+ std::make_pair(BinaryMetadataAtomics, "atomics"),
+ std::make_pair(BinaryMetadataUAR, "uar")};
+ for (const auto &F : BinaryMetadataFlags) {
+ if (BinaryMetadataFeatures & F.first)
+ CmdArgs.push_back(
+ Args.MakeArgString("-fexperimental-sanitize-metadata=" + F.second));
+ }
+ addSpecialCaseListOpt(Args, CmdArgs,
+ "-fexperimental-sanitize-metadata-ignorelist=",
+ BinaryMetadataIgnorelistFiles);
}
if (TC.getTriple().isOSWindows() && needsUbsanRt()) {
@@ -1217,6 +1236,9 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
if (CfiICallGeneralizePointers)
CmdArgs.push_back("-fsanitize-cfi-icall-generalize-pointers");
+ if (CfiICallNormalizeIntegers)
+ CmdArgs.push_back("-fsanitize-cfi-icall-experimental-normalize-integers");
+
if (CfiCanonicalJumpTables)
CmdArgs.push_back("-fsanitize-cfi-canonical-jump-tables");
@@ -1257,6 +1279,16 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
CmdArgs.push_back("-asan-instrumentation-with-call-threshold=0");
}
+ // When emitting Stable ABI instrumentation, force outlining calls and avoid
+ // inlining shadow memory poisoning. While this is a big performance burden
+ // for now it allows full abstraction from implementation details.
+ if (StableABI) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-asan-instrumentation-with-call-threshold=0");
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-asan-max-inline-poisoning-size=0");
+ }
+
// Only pass the option to the frontend if the user requested,
// otherwise the frontend will just use the codegen default.
if (AsanDtorKind != llvm::AsanDtorKind::Invalid) {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
index bc70205a6c01..d60fdbc17968 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
@@ -7,8 +7,10 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/ToolChain.h"
+#include "ToolChains/Arch/AArch64.h"
#include "ToolChains/Arch/ARM.h"
#include "ToolChains/Clang.h"
+#include "ToolChains/CommonArgs.h"
#include "ToolChains/Flang.h"
#include "ToolChains/InterfaceStubs.h"
#include "clang/Basic/ObjCRuntime.h"
@@ -24,8 +26,8 @@
#include "clang/Driver/XRayArgs.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCTargetOptions.h"
@@ -38,9 +40,11 @@
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
+#include "llvm/TargetParser/TargetParser.h"
+#include "llvm/TargetParser/Triple.h"
#include <cassert>
#include <cstddef>
#include <cstring>
@@ -86,7 +90,8 @@ ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
addIfExists(getLibraryPaths(), Path);
for (const auto &Path : getStdlibPaths())
addIfExists(getFilePaths(), Path);
- addIfExists(getFilePaths(), getArchSpecificLibPath());
+ for (const auto &Path : getArchSpecificLibPaths())
+ addIfExists(getFilePaths(), Path);
}
llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
@@ -170,6 +175,101 @@ bool ToolChain::defaultToIEEELongDouble() const {
return PPC_LINUX_DEFAULT_IEEELONGDOUBLE && getTriple().isOSLinux();
}
+static void getAArch64MultilibFlags(const Driver &D,
+ const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ Multilib::flags_list &Result) {
+ std::vector<StringRef> Features;
+ tools::aarch64::getAArch64TargetFeatures(D, Triple, Args, Features, false);
+ const auto UnifiedFeatures = tools::unifyTargetFeatures(Features);
+ llvm::DenseSet<StringRef> FeatureSet(UnifiedFeatures.begin(),
+ UnifiedFeatures.end());
+ std::vector<std::string> MArch;
+ for (const auto &Ext : AArch64::Extensions)
+ if (FeatureSet.contains(Ext.Feature))
+ MArch.push_back(Ext.Name.str());
+ for (const auto &Ext : AArch64::Extensions)
+ if (FeatureSet.contains(Ext.NegFeature))
+ MArch.push_back(("no" + Ext.Name).str());
+ MArch.insert(MArch.begin(), ("-march=" + Triple.getArchName()).str());
+ Result.push_back(llvm::join(MArch, "+"));
+}
+
+static void getARMMultilibFlags(const Driver &D,
+ const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ Multilib::flags_list &Result) {
+ std::vector<StringRef> Features;
+ llvm::ARM::FPUKind FPUKind = tools::arm::getARMTargetFeatures(
+ D, Triple, Args, Features, false /*ForAs*/, true /*ForMultilib*/);
+ const auto UnifiedFeatures = tools::unifyTargetFeatures(Features);
+ llvm::DenseSet<StringRef> FeatureSet(UnifiedFeatures.begin(),
+ UnifiedFeatures.end());
+ std::vector<std::string> MArch;
+ for (const auto &Ext : ARM::ARCHExtNames)
+ if (FeatureSet.contains(Ext.Feature))
+ MArch.push_back(Ext.Name.str());
+ for (const auto &Ext : ARM::ARCHExtNames)
+ if (FeatureSet.contains(Ext.NegFeature))
+ MArch.push_back(("no" + Ext.Name).str());
+ MArch.insert(MArch.begin(), ("-march=" + Triple.getArchName()).str());
+ Result.push_back(llvm::join(MArch, "+"));
+
+ switch (FPUKind) {
+#define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION) \
+ case llvm::ARM::KIND: \
+ Result.push_back("-mfpu=" NAME); \
+ break;
+#include "llvm/TargetParser/ARMTargetParser.def"
+ default:
+ llvm_unreachable("Invalid FPUKind");
+ }
+
+ switch (arm::getARMFloatABI(D, Triple, Args)) {
+ case arm::FloatABI::Soft:
+ Result.push_back("-mfloat-abi=soft");
+ break;
+ case arm::FloatABI::SoftFP:
+ Result.push_back("-mfloat-abi=softfp");
+ break;
+ case arm::FloatABI::Hard:
+ Result.push_back("-mfloat-abi=hard");
+ break;
+ case arm::FloatABI::Invalid:
+ llvm_unreachable("Invalid float ABI");
+ }
+}
+
+Multilib::flags_list
+ToolChain::getMultilibFlags(const llvm::opt::ArgList &Args) const {
+ using namespace clang::driver::options;
+
+ std::vector<std::string> Result;
+ const llvm::Triple Triple(ComputeEffectiveClangTriple(Args));
+ Result.push_back("--target=" + Triple.str());
+
+ switch (Triple.getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_32:
+ case llvm::Triple::aarch64_be:
+ getAArch64MultilibFlags(D, Triple, Args, Result);
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ getARMMultilibFlags(D, Triple, Args, Result);
+ break;
+ default:
+ break;
+ }
+
+ // Sort and remove duplicates.
+ std::sort(Result.begin(), Result.end());
+ Result.erase(std::unique(Result.begin(), Result.end()), Result.end());
+ return Result;
+}
+
SanitizerArgs
ToolChain::getSanitizerArgs(const llvm::opt::ArgList &JobArgs) const {
SanitizerArgs SanArgs(*this, JobArgs, !SanitizerArgsChecked);
@@ -419,6 +519,7 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
case Action::LipoJobClass:
case Action::DsymutilJobClass:
case Action::VerifyDebugInfoJobClass:
+ case Action::BinaryAnalyzeJobClass:
llvm_unreachable("Invalid tool kind.");
case Action::CompileJobClass:
@@ -491,7 +592,9 @@ std::string ToolChain::getCompilerRTPath() const {
SmallString<128> Path(getDriver().ResourceDir);
if (isBareMetal()) {
llvm::sys::path::append(Path, "lib", getOSLibName());
- Path += SelectedMultilib.gccSuffix();
+ if (!SelectedMultilibs.empty()) {
+ Path += SelectedMultilibs.back().gccSuffix();
+ }
} else if (Triple.isOSUnknown()) {
llvm::sys::path::append(Path, "lib");
} else {
@@ -578,6 +681,27 @@ ToolChain::path_list ToolChain::getRuntimePaths() const {
addPathForTriple(getTriple());
+ // When building with per target runtime directories, various ways of naming
+ // the Arm architecture may have been normalised to simply "arm".
+ // For example "armv8l" (Armv8 AArch32 little endian) is replaced with "arm".
+ // Since an armv8l system can use libraries built for earlier architecture
+ // versions assuming endian and float ABI match.
+ //
+ // Original triple: armv8l-unknown-linux-gnueabihf
+ // Runtime triple: arm-unknown-linux-gnueabihf
+ //
+ // We do not do this for armeb (big endian) because doing so could make us
+ // select little endian libraries. In addition, all known armeb triples only
+ // use the "armeb" architecture name.
+ //
+ // M profile Arm is bare metal and we know they will not be using the per
+ // target runtime directory layout.
+ if (getTriple().getArch() == Triple::arm && !getTriple().isArmMClass()) {
+ llvm::Triple ArmTriple = getTriple();
+ ArmTriple.setArch(Triple::arm);
+ addPathForTriple(ArmTriple);
+ }
+
// Android targets may include an API level at the end. We still want to fall
// back on a path without the API level.
if (getTriple().isAndroid() &&
@@ -599,11 +723,20 @@ ToolChain::path_list ToolChain::getStdlibPaths() const {
return Paths;
}
-std::string ToolChain::getArchSpecificLibPath() const {
- SmallString<128> Path(getDriver().ResourceDir);
- llvm::sys::path::append(Path, "lib", getOSLibName(),
- llvm::Triple::getArchTypeName(getArch()));
- return std::string(Path.str());
+ToolChain::path_list ToolChain::getArchSpecificLibPaths() const {
+ path_list Paths;
+
+ auto AddPath = [&](const ArrayRef<StringRef> &SS) {
+ SmallString<128> Path(getDriver().ResourceDir);
+ llvm::sys::path::append(Path, "lib");
+ for (auto &S : SS)
+ llvm::sys::path::append(Path, S);
+ Paths.push_back(std::string(Path.str()));
+ };
+
+ AddPath({getTriple().str()});
+ AddPath({getOSLibName(), llvm::Triple::getArchTypeName(getArch())});
+ return Paths;
}
bool ToolChain::needsProfileRT(const ArgList &Args) {
@@ -630,7 +763,8 @@ Tool *ToolChain::SelectTool(const JobAction &JA) const {
if (D.IsFlangMode() && getDriver().ShouldUseFlangCompiler(JA)) return getFlang();
if (getDriver().ShouldUseClangCompiler(JA)) return getClang();
Action::ActionClass AC = JA.getKind();
- if (AC == Action::AssembleJobClass && useIntegratedAs())
+ if (AC == Action::AssembleJobClass && useIntegratedAs() &&
+ !getTriple().isOSAIX())
return getClangAs();
return getTool(AC);
}
@@ -1124,8 +1258,7 @@ SanitizerMask ToolChain::getSupportedSanitizers() const {
// platform dependent.
SanitizerMask Res =
- (SanitizerKind::Undefined & ~SanitizerKind::Vptr &
- ~SanitizerKind::Function) |
+ (SanitizerKind::Undefined & ~SanitizerKind::Vptr) |
(SanitizerKind::CFI & ~SanitizerKind::CFIICall) |
SanitizerKind::CFICastStrict | SanitizerKind::FloatDivideByZero |
SanitizerKind::KCFI | SanitizerKind::UnsignedIntegerOverflow |
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
index abbd3ef6c68f..97217eba9ca0 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
@@ -12,6 +12,7 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Option/ArgList.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Path.h"
@@ -112,6 +113,11 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!(IsArch32Bit || IsArch64Bit))
llvm_unreachable("Unsupported bit width value.");
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_G)) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << D.getTargetTriple();
+ }
+
// Force static linking when "-static" is present.
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("-bnso");
@@ -122,6 +128,17 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-bnoentry");
}
+ if (Args.hasFlag(options::OPT_mxcoff_roptr, options::OPT_mno_xcoff_roptr,
+ false)) {
+ if (Args.hasArg(options::OPT_shared))
+ D.Diag(diag::err_roptr_cannot_build_shared);
+
+ // The `-mxcoff-roptr` option places constants in RO sections as much as
+ // possible. Then `-bforceimprw` changes such sections to RW if they contain
+ // imported symbols that need to be resolved.
+ CmdArgs.push_back("-bforceimprw");
+ }
+
// PGO instrumentation generates symbols belonging to special sections, and
// the linker needs to place all symbols in a particular section together in
// memory; the AIX linker does that under an option.
@@ -143,6 +160,22 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.hasArg(options::OPT_coverage))
CmdArgs.push_back("-bdbg:namedsects:ss");
+ if (Arg *A =
+ Args.getLastArg(clang::driver::options::OPT_mxcoff_build_id_EQ)) {
+ StringRef BuildId = A->getValue();
+ if (BuildId[0] != '0' || BuildId[1] != 'x' ||
+ BuildId.find_if_not(llvm::isHexDigit, 2) != StringRef::npos)
+ ToolChain.getDriver().Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << BuildId;
+ else {
+ std::string LinkerFlag = "-bdbg:ldrinfo:xcoff_binary_id:0x";
+ if (BuildId.size() % 2) // Prepend a 0 if odd number of digits.
+ LinkerFlag += "0";
+ LinkerFlag += BuildId.drop_front(2).lower();
+ CmdArgs.push_back(Args.MakeArgString(LinkerFlag));
+ }
+ }
+
// Specify linker output file.
assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
@@ -163,19 +196,19 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-bpD:0x110000000");
}
- auto getCrt0Basename = [&Args, IsArch32Bit] {
- // Enable gprofiling when "-pg" is specified.
- if (Args.hasArg(options::OPT_pg))
- return IsArch32Bit ? "gcrt0.o" : "gcrt0_64.o";
- // Enable profiling when "-p" is specified.
- else if (Args.hasArg(options::OPT_p))
- return IsArch32Bit ? "mcrt0.o" : "mcrt0_64.o";
- else
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_shared, options::OPT_r)) {
+ auto getCrt0Basename = [&Args, IsArch32Bit] {
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_p, options::OPT_pg)) {
+ // Enable gprofiling when "-pg" is specified.
+ if (A->getOption().matches(options::OPT_pg))
+ return IsArch32Bit ? "gcrt0.o" : "gcrt0_64.o";
+ // Enable profiling when "-p" is specified.
+ return IsArch32Bit ? "mcrt0.o" : "mcrt0_64.o";
+ }
return IsArch32Bit ? "crt0.o" : "crt0_64.o";
- };
+ };
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
- options::OPT_shared)) {
CmdArgs.push_back(
Args.MakeArgString(ToolChain.GetFilePath(getCrt0Basename())));
@@ -234,48 +267,50 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Add directory to library search path.
Args.AddAllArgs(CmdArgs, options::OPT_L);
- ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- ToolChain.addProfileRTLibs(Args, CmdArgs);
-
- if (getToolChain().ShouldLinkCXXStdlib(Args))
- getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
- AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
-
- // Add OpenMP runtime if -fopenmp is specified.
- if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
- options::OPT_fno_openmp, false)) {
- switch (ToolChain.getDriver().getOpenMPRuntime(Args)) {
- case Driver::OMPRT_OMP:
- CmdArgs.push_back("-lomp");
- break;
- case Driver::OMPRT_IOMP5:
- CmdArgs.push_back("-liomp5");
- break;
- case Driver::OMPRT_GOMP:
- CmdArgs.push_back("-lgomp");
- break;
- case Driver::OMPRT_Unknown:
- // Already diagnosed.
- break;
+ if (!Args.hasArg(options::OPT_r)) {
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ ToolChain.addProfileRTLibs(Args, CmdArgs);
+
+ if (getToolChain().ShouldLinkCXXStdlib(Args))
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
+
+ // Add OpenMP runtime if -fopenmp is specified.
+ if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false)) {
+ switch (ToolChain.getDriver().getOpenMPRuntime(Args)) {
+ case Driver::OMPRT_OMP:
+ CmdArgs.push_back("-lomp");
+ break;
+ case Driver::OMPRT_IOMP5:
+ CmdArgs.push_back("-liomp5");
+ break;
+ case Driver::OMPRT_GOMP:
+ CmdArgs.push_back("-lgomp");
+ break;
+ case Driver::OMPRT_Unknown:
+ // Already diagnosed.
+ break;
+ }
}
- }
- // Support POSIX threads if "-pthreads" or "-pthread" is present.
- if (Args.hasArg(options::OPT_pthreads, options::OPT_pthread))
- CmdArgs.push_back("-lpthreads");
+ // Support POSIX threads if "-pthreads" or "-pthread" is present.
+ if (Args.hasArg(options::OPT_pthreads, options::OPT_pthread))
+ CmdArgs.push_back("-lpthreads");
- if (D.CCCIsCXX())
- CmdArgs.push_back("-lm");
+ if (D.CCCIsCXX())
+ CmdArgs.push_back("-lm");
- CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lc");
- if (Args.hasArg(options::OPT_pg)) {
- CmdArgs.push_back(Args.MakeArgString((llvm::Twine("-L") + D.SysRoot) +
- "/lib/profiled"));
- CmdArgs.push_back(Args.MakeArgString((llvm::Twine("-L") + D.SysRoot) +
- "/usr/lib/profiled"));
+ if (Args.hasArgNoClaim(options::OPT_p, options::OPT_pg)) {
+ CmdArgs.push_back(Args.MakeArgString((llvm::Twine("-L") + D.SysRoot) +
+ "/lib/profiled"));
+ CmdArgs.push_back(Args.MakeArgString((llvm::Twine("-L") + D.SysRoot) +
+ "/usr/lib/profiled"));
+ }
}
}
@@ -287,6 +322,10 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
/// AIX - AIX tool chain which can call as(1) and ld(1) directly.
AIX::AIX(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: ToolChain(D, Triple, Args) {
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+
ParseInlineAsmUsingAsmParser = Args.hasFlag(
options::OPT_fintegrated_as, options::OPT_fno_integrated_as, true);
getLibraryPaths().push_back(getDriver().SysRoot + "/usr/lib");
@@ -376,6 +415,18 @@ void AIX::AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm_unreachable("Unexpected C++ library type; only libc++ is supported.");
}
+void AIX::addClangTargetOptions(
+ const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ Args.AddLastArg(CC1Args, options::OPT_mignore_xcoff_visibility);
+ Args.AddLastArg(CC1Args, options::OPT_mdefault_visibility_export_mapping_EQ);
+ Args.addOptInFlag(CC1Args, options::OPT_mxcoff_roptr, options::OPT_mno_xcoff_roptr);
+
+ if (Args.hasFlag(options::OPT_fxl_pragma_pack,
+ options::OPT_fno_xl_pragma_pack, true))
+ CC1Args.push_back("-fxl-pragma-pack");
+}
+
void AIX::addProfileRTLibs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {
// Add linker option -u__llvm_profile_runtime to cause runtime
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h
index e03aebcc3e7f..cc74e5ea85ef 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h
@@ -80,6 +80,10 @@ public:
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
+ void addClangTargetOptions(
+ const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const override;
+
void addProfileRTLibs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
index 5849e9cfdb9b..d0223322b56b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
@@ -11,17 +11,17 @@
#include "clang/Basic/TargetID.h"
#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
-#include "clang/Driver/Distro.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Error.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/Host.h"
#include <optional>
#include <system_error>
@@ -309,13 +309,10 @@ RocmInstallationDetector::getInstallationPathCandidates() {
ROCmSearchDirs.emplace_back(D.SysRoot + "/opt/" + LatestROCm,
/*StrictChecking=*/true);
- Distro Dist(D.getVFS(), llvm::Triple(llvm::sys::getProcessTriple()));
- if (Dist.IsDebian() || Dist.IsRedhat()) {
- ROCmSearchDirs.emplace_back(D.SysRoot + "/usr/local",
- /*StrictChecking=*/true);
- ROCmSearchDirs.emplace_back(D.SysRoot + "/usr",
- /*StrictChecking=*/true);
- }
+ ROCmSearchDirs.emplace_back(D.SysRoot + "/usr/local",
+ /*StrictChecking=*/true);
+ ROCmSearchDirs.emplace_back(D.SysRoot + "/usr",
+ /*StrictChecking=*/true);
DoPrintROCmSearchDirs();
return ROCmSearchDirs;
@@ -436,8 +433,13 @@ void RocmInstallationDetector::detectDeviceLibrary() {
void RocmInstallationDetector::detectHIPRuntime() {
SmallVector<Candidate, 4> HIPSearchDirs;
if (!HIPPathArg.empty())
- HIPSearchDirs.emplace_back(HIPPathArg.str(), /*StrictChecking=*/true);
- else
+ HIPSearchDirs.emplace_back(HIPPathArg.str());
+ else if (std::optional<std::string> HIPPathEnv =
+ llvm::sys::Process::GetEnv("HIP_PATH")) {
+ if (!HIPPathEnv->empty())
+ HIPSearchDirs.emplace_back(std::move(*HIPPathEnv));
+ }
+ if (HIPSearchDirs.empty())
HIPSearchDirs.append(getInstallationPathCandidates());
auto &FS = D.getVFS();
@@ -459,10 +461,21 @@ void RocmInstallationDetector::detectHIPRuntime() {
SharePath = InstallPath;
llvm::sys::path::append(SharePath, "share");
+ // Get parent of InstallPath and append "share"
+ SmallString<0> ParentSharePath = llvm::sys::path::parent_path(InstallPath);
+ llvm::sys::path::append(ParentSharePath, "share");
+
+ auto Append = [](SmallString<0> &path, const Twine &a, const Twine &b = "",
+ const Twine &c = "", const Twine &d = "") {
+ SmallString<0> newpath = path;
+ llvm::sys::path::append(newpath, a, b, c, d);
+ return newpath;
+ };
// If HIP version file can be found and parsed, use HIP version from there.
for (const auto &VersionFilePath :
- {std::string(SharePath) + "/hip/version",
- std::string(BinPath) + "/.hipVersion"}) {
+ {Append(SharePath, "hip", "version"),
+ Append(ParentSharePath, "hip", "version"),
+ Append(BinPath, ".hipVersion")}) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> VersionFile =
FS.getBufferForFile(VersionFilePath);
if (!VersionFile)
@@ -539,7 +552,15 @@ void amdgpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
std::string Linker = getToolChain().GetProgramPath(getShortName());
ArgStringList CmdArgs;
addLinkerCompressDebugSectionsOption(getToolChain(), Args, CmdArgs);
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+ if (C.getDriver().isUsingLTO())
+ addLTOOptions(getToolChain(), Args, CmdArgs, Output, Inputs[0],
+ C.getDriver().getLTOMode() == LTOK_Thin);
+ else if (Args.hasArg(options::OPT_mcpu_EQ))
+ CmdArgs.push_back(Args.MakeArgString(
+ "-plugin-opt=mcpu=" + Args.getLastArgValue(options::OPT_mcpu_EQ)));
+ CmdArgs.push_back("--no-undefined");
CmdArgs.push_back("-shared");
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
@@ -578,8 +599,8 @@ void amdgpu::getAMDGPUTargetFeatures(const Driver &D,
options::OPT_mno_wavefrontsize64, false))
Features.push_back("+wavefrontsize64");
- handleTargetFeaturesGroup(
- Args, Features, options::OPT_m_amdgpu_Features_Group);
+ handleTargetFeaturesGroup(D, Triple, Args, Features,
+ options::OPT_m_amdgpu_Features_Group);
}
/// AMDGPU Toolchain
@@ -611,9 +632,28 @@ AMDGPUToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
if (!DAL)
DAL = new DerivedArgList(Args.getBaseArgs());
- for (Arg *A : Args) {
- if (!shouldSkipArgument(A))
- DAL->append(A);
+ for (Arg *A : Args)
+ DAL->append(A);
+
+ // Replace -mcpu=native with detected GPU.
+ Arg *LastMCPUArg = DAL->getLastArg(options::OPT_mcpu_EQ);
+ if (LastMCPUArg && StringRef(LastMCPUArg->getValue()) == "native") {
+ DAL->eraseArg(options::OPT_mcpu_EQ);
+ auto GPUsOrErr = getSystemGPUArchs(Args);
+ if (!GPUsOrErr) {
+ getDriver().Diag(diag::err_drv_undetermined_gpu_arch)
+ << llvm::Triple::getArchTypeName(getArch())
+ << llvm::toString(GPUsOrErr.takeError()) << "-mcpu";
+ } else {
+ auto &GPUs = *GPUsOrErr;
+ if (GPUs.size() > 1) {
+ getDriver().Diag(diag::warn_drv_multi_gpu_arch)
+ << llvm::Triple::getArchTypeName(getArch())
+ << llvm::join(GPUs, ", ") << "-mcpu";
+ }
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_mcpu_EQ),
+ Args.MakeArgString(GPUs.front()));
+ }
}
checkTargetID(*DAL);
@@ -703,7 +743,7 @@ bool AMDGPUToolChain::isWave64(const llvm::opt::ArgList &DriverArgs,
ROCMToolChain::ROCMToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: AMDGPUToolChain(D, Triple, Args) {
- RocmInstallation.detectDeviceLibrary();
+ RocmInstallation->detectDeviceLibrary();
}
void AMDGPUToolChain::addClangTargetOptions(
@@ -792,11 +832,11 @@ void ROCMToolChain::addClangTargetOptions(
const StringRef GpuArch = getGPUArch(DriverArgs);
auto Kind = llvm::AMDGPU::parseArchAMDGCN(GpuArch);
const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind);
- StringRef LibDeviceFile = RocmInstallation.getLibDeviceFile(CanonArch);
+ StringRef LibDeviceFile = RocmInstallation->getLibDeviceFile(CanonArch);
auto ABIVer = DeviceLibABIVersion::fromCodeObjectVersion(
getAMDGPUCodeObjectVersion(getDriver(), DriverArgs));
- if (!RocmInstallation.checkCommonBitcodeLibs(CanonArch, LibDeviceFile,
- ABIVer))
+ if (!RocmInstallation->checkCommonBitcodeLibs(CanonArch, LibDeviceFile,
+ ABIVer))
return;
bool Wave64 = isWave64(DriverArgs, Kind);
@@ -815,10 +855,10 @@ void ROCMToolChain::addClangTargetOptions(
// Add the OpenCL specific bitcode library.
llvm::SmallVector<std::string, 12> BCLibs;
- BCLibs.push_back(RocmInstallation.getOpenCLPath().str());
+ BCLibs.push_back(RocmInstallation->getOpenCLPath().str());
// Add the generic set of libraries.
- BCLibs.append(RocmInstallation.getCommonBitcodeLibs(
+ BCLibs.append(RocmInstallation->getCommonBitcodeLibs(
DriverArgs, LibDeviceFile, Wave64, DAZ, FiniteOnly, UnsafeMathOpt,
FastRelaxedMath, CorrectSqrt, ABIVer, false));
@@ -870,13 +910,6 @@ RocmInstallationDetector::getCommonBitcodeLibs(
return BCLibs;
}
-bool AMDGPUToolChain::shouldSkipArgument(const llvm::opt::Arg *A) const {
- Option O = A->getOption();
- if (O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie))
- return true;
- return false;
-}
-
llvm::SmallVector<std::string, 12>
ROCMToolChain::getCommonDeviceLibNames(const llvm::opt::ArgList &DriverArgs,
const std::string &GPUArch,
@@ -884,11 +917,11 @@ ROCMToolChain::getCommonDeviceLibNames(const llvm::opt::ArgList &DriverArgs,
auto Kind = llvm::AMDGPU::parseArchAMDGCN(GPUArch);
const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind);
- StringRef LibDeviceFile = RocmInstallation.getLibDeviceFile(CanonArch);
+ StringRef LibDeviceFile = RocmInstallation->getLibDeviceFile(CanonArch);
auto ABIVer = DeviceLibABIVersion::fromCodeObjectVersion(
getAMDGPUCodeObjectVersion(getDriver(), DriverArgs));
- if (!RocmInstallation.checkCommonBitcodeLibs(CanonArch, LibDeviceFile,
- ABIVer))
+ if (!RocmInstallation->checkCommonBitcodeLibs(CanonArch, LibDeviceFile,
+ ABIVer))
return {};
// If --hip-device-lib is not set, add the default bitcode libraries.
@@ -909,7 +942,7 @@ ROCMToolChain::getCommonDeviceLibNames(const llvm::opt::ArgList &DriverArgs,
options::OPT_fno_hip_fp32_correctly_rounded_divide_sqrt, true);
bool Wave64 = isWave64(DriverArgs, Kind);
- return RocmInstallation.getCommonBitcodeLibs(
+ return RocmInstallation->getCommonBitcodeLibs(
DriverArgs, LibDeviceFile, Wave64, DAZ, FiniteOnly, UnsafeMathOpt,
FastRelaxedMath, CorrectSqrt, ABIVer, isOpenMP);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
index cce70da6612b..d10d487dd44c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
@@ -16,7 +16,7 @@
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/TargetParser/TargetParser.h"
#include <map>
@@ -64,11 +64,11 @@ public:
bool IsMathErrnoDefault() const override { return false; }
bool isCrossCompiling() const override { return true; }
- bool isPICDefault() const override { return false; }
+ bool isPICDefault() const override { return true; }
bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
return false;
}
- bool isPICDefaultForced() const override { return false; }
+ bool isPICDefaultForced() const override { return true; }
bool SupportsProfiling() const override { return false; }
llvm::opt::DerivedArgList *
@@ -97,9 +97,6 @@ public:
/// Needed for translating LTO options.
const char *getDefaultLinker() const override { return "ld.lld"; }
- /// Should skip argument.
- bool shouldSkipArgument(const llvm::opt::Arg *Arg) const;
-
/// Uses amdgpu-arch tool to get arch of the system GPU. Will return error
/// if unable to find one.
virtual Expected<SmallVector<std::string>>
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
index 1a8e4294713c..b012b7cb7293 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
@@ -35,7 +35,7 @@ AMDGPUOpenMPToolChain::AMDGPUOpenMPToolChain(const Driver &D,
const ArgList &Args)
: ROCMToolChain(D, Triple, Args), HostTC(HostTC) {
// Lookup binaries into the driver directory, this is used to
- // discover the clang-offload-bundler executable.
+ // discover the 'amdgpu-arch' executable.
getProgramPaths().push_back(getDriver().Dir);
}
@@ -160,7 +160,7 @@ AMDGPUOpenMPToolChain::getDeviceLibs(const llvm::opt::ArgList &Args) const {
if (Args.hasArg(options::OPT_nogpulib))
return {};
- if (!RocmInstallation.hasDeviceLibrary()) {
+ if (!RocmInstallation->hasDeviceLibrary()) {
getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 0;
return {};
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp
index 91d89f5441a2..81f501d41734 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp
@@ -14,10 +14,10 @@
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/TargetParser/SubtargetFeature.h"
using namespace clang::driver;
using namespace clang::driver::toolchains;
@@ -498,13 +498,20 @@ void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
if (SectionAddressData) {
- CmdArgs.push_back(Args.MakeArgString(
- "-Tdata=0x" + Twine::utohexstr(*SectionAddressData)));
+ CmdArgs.push_back(
+ Args.MakeArgString("--defsym=__DATA_REGION_ORIGIN__=0x" +
+ Twine::utohexstr(*SectionAddressData)));
} else {
// We do not have an entry for this CPU in the address mapping table yet.
D.Diag(diag::warn_drv_avr_linker_section_addresses_not_implemented) << CPU;
}
+ if (D.isUsingLTO()) {
+ assert(!Inputs.empty() && "Must have at least one input.");
+ addLTOOptions(getToolChain(), Args, CmdArgs, Output, Inputs[0],
+ D.getLTOMode() == LTOK_Thin);
+ }
+
// If the family name is known, we can link with the device-specific libgcc.
// Without it, libgcc will simply not be linked. This matches avr-gcc
// behavior.
@@ -545,6 +552,9 @@ void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Add user specified linker script.
Args.AddAllArgs(CmdArgs, options::OPT_T);
+ if (Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, true))
+ CmdArgs.push_back("--relax");
+
// Specify the family name as the emulation mode to use.
// This is almost always required because otherwise avr-ld
// will assume 'avr2' and warn about the program being larger
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h
index ea161fe28f33..d432d81744b9 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h
@@ -36,6 +36,8 @@ public:
std::string getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
FileType Type) const override;
+ bool HasNativeLLVMSupport() const override { return true; }
+
protected:
Tool *buildLinker() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp
index a9c13464a0d6..bc4eff348687 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp
@@ -101,8 +101,8 @@ void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
Args.AddAllArgs(CmdArgs,
- {options::OPT_T_Group, options::OPT_e, options::OPT_s,
- options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
+ {options::OPT_T_Group, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_r});
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index 2c559cc8b3b9..507ad9247704 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -12,9 +12,8 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/AArch64TargetParser.h"
-#include "llvm/Support/TargetParser.h"
-#include "llvm/Support/Host.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -83,6 +82,25 @@ static bool DecodeAArch64Features(const Driver &D, StringRef text,
else
return false;
+ // +sme implies +bf16.
+ // +sme-f64f64 and +sme-i16i64 both imply +sme.
+ if (Feature == "sme") {
+ Features.push_back("+bf16");
+ } else if (Feature == "nosme") {
+ Features.push_back("-sme-f64f64");
+ Features.push_back("-sme-i16i64");
+ } else if (Feature == "sme-f64f64") {
+ Features.push_back("+sme");
+ Features.push_back("+bf16");
+ } else if (Feature == "sme-i16i64") {
+ Features.push_back("+sme");
+ Features.push_back("+bf16");
+ } else if (Feature == "nobf16") {
+ Features.push_back("-sme");
+ Features.push_back("-sme-f64f64");
+ Features.push_back("-sme-i16i64");
+ }
+
if (Feature == "sve2")
Features.push_back("+sve");
else if (Feature == "sve2-bitperm" || Feature == "sve2-sha3" ||
@@ -123,8 +141,8 @@ static bool DecodeAArch64Features(const Driver &D, StringRef text,
static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
std::vector<StringRef> &Features) {
std::pair<StringRef, StringRef> Split = Mcpu.split("+");
+ CPU = Split.first;
const llvm::AArch64::ArchInfo *ArchInfo = &llvm::AArch64::ARMV8A;
- CPU = llvm::AArch64::resolveCPUAlias(Split.first);
if (CPU == "native")
CPU = llvm::sys::getHostCPUName();
@@ -132,12 +150,15 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
if (CPU == "generic") {
Features.push_back("+neon");
} else {
- ArchInfo = &llvm::AArch64::parseCpu(CPU).Arch;
- if (*ArchInfo == llvm::AArch64::INVALID)
+ const std::optional<llvm::AArch64::CpuInfo> CpuInfo =
+ llvm::AArch64::parseCpu(CPU);
+ if (!CpuInfo)
return false;
+ ArchInfo = &CpuInfo->Arch;
+
Features.push_back(ArchInfo->ArchFeature);
- uint64_t Extension = llvm::AArch64::getDefaultExtensions(CPU, *ArchInfo);
+ uint64_t Extension = CpuInfo->getImpliedExtensions();
if (!llvm::AArch64::getExtensionFeatures(Extension, Features))
return false;
}
@@ -156,11 +177,11 @@ getAArch64ArchFeaturesFromMarch(const Driver &D, StringRef March,
std::string MarchLowerCase = March.lower();
std::pair<StringRef, StringRef> Split = StringRef(MarchLowerCase).split("+");
- const llvm::AArch64::ArchInfo *ArchInfo =
- &llvm::AArch64::parseArch(Split.first);
+ std::optional <llvm::AArch64::ArchInfo> ArchInfo =
+ llvm::AArch64::parseArch(Split.first);
if (Split.first == "native")
- ArchInfo = &llvm::AArch64::getArchForCpu(llvm::sys::getHostCPUName().str());
- if (*ArchInfo == llvm::AArch64::INVALID)
+ ArchInfo = llvm::AArch64::getArchForCpu(llvm::sys::getHostCPUName().str());
+ if (!ArchInfo)
return false;
Features.push_back(ArchInfo->ArchFeature);
@@ -289,13 +310,15 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
if (Arg *A = Args.getLastArg(options::OPT_mtp_mode_EQ)) {
StringRef Mtp = A->getValue();
- if (Mtp == "el3")
+ if (Mtp == "el3" || Mtp == "tpidr_el3")
Features.push_back("+tpidr-el3");
- else if (Mtp == "el2")
+ else if (Mtp == "el2" || Mtp == "tpidr_el2")
Features.push_back("+tpidr-el2");
- else if (Mtp == "el1")
+ else if (Mtp == "el1" || Mtp == "tpidr_el1")
Features.push_back("+tpidr-el1");
- else if (Mtp != "el0")
+ else if (Mtp == "tpidrro_el0")
+ Features.push_back("+tpidrro-el0");
+ else if (Mtp != "el0" && Mtp != "tpidr_el0")
D.Diag(diag::err_drv_invalid_mtp) << A->getAsString(Args);
}
@@ -404,9 +427,10 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
else if (*I == "+crypto") {
HasCrypto = true;
HasNoCrypto = false;
- } else if (*I == "-crypto") {
+ } else if (*I == "-crypto" || *I == "-neon") {
HasCrypto = false;
HasNoCrypto = true;
+ HasSM4 = HasSHA2 = HasSHA3 = HasAES = false;
}
// Register the iterator position if this is an architecture feature
if (ArchFeatPos == -1 && (V8Version != -1 || V9Version != -1))
@@ -603,7 +627,7 @@ fp16_fml_fallthrough:
Features.push_back("+fix-cortex-a53-835769");
else
Features.push_back("-fix-cortex-a53-835769");
- } else if (Triple.isAndroid()) {
+ } else if (Triple.isAndroid() || Triple.isOHOSFamily()) {
// Enabled A53 errata (835769) workaround by default on android
Features.push_back("+fix-cortex-a53-835769");
} else if (Triple.isOSFuchsia()) {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index b6a9df28500a..1893bde99cd8 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -12,9 +12,8 @@
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/ARMTargetParser.h"
-#include "llvm/Support/TargetParser.h"
-#include "llvm/Support/Host.h"
+#include "llvm/TargetParser/ARMTargetParser.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -33,6 +32,20 @@ bool arm::isARMMProfile(const llvm::Triple &Triple) {
return llvm::ARM::parseArchProfile(Arch) == llvm::ARM::ProfileKind::M;
}
+// On Arm the endianness of the output file is determined by the target and
+// can be overridden by the pseudo-target flags '-mlittle-endian'/'-EL' and
+// '-mbig-endian'/'-EB'. Unlike other targets the flag does not result in a
+// normalized triple so we must handle the flag here.
+bool arm::isARMBigEndian(const llvm::Triple &Triple, const ArgList &Args) {
+ if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
+ options::OPT_mbig_endian)) {
+ return !A->getOption().matches(options::OPT_mlittle_endian);
+ }
+
+ return Triple.getArch() == llvm::Triple::armeb ||
+ Triple.getArch() == llvm::Triple::thumbeb;
+}
+
// True if A-profile.
bool arm::isARMAProfile(const llvm::Triple &Triple) {
llvm::StringRef Arch = Triple.getArchName();
@@ -73,25 +86,25 @@ static void getARMHWDivFeatures(const Driver &D, const Arg *A,
}
// Handle -mfpu=.
-static unsigned getARMFPUFeatures(const Driver &D, const Arg *A,
- const ArgList &Args, StringRef FPU,
- std::vector<StringRef> &Features) {
- unsigned FPUID = llvm::ARM::parseFPU(FPU);
- if (!llvm::ARM::getFPUFeatures(FPUID, Features))
+static llvm::ARM::FPUKind getARMFPUFeatures(const Driver &D, const Arg *A,
+ const ArgList &Args, StringRef FPU,
+ std::vector<StringRef> &Features) {
+ llvm::ARM::FPUKind FPUKind = llvm::ARM::parseFPU(FPU);
+ if (!llvm::ARM::getFPUFeatures(FPUKind, Features))
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
- return FPUID;
+ return FPUKind;
}
// Decode ARM features from string like +[no]featureA+[no]featureB+...
static bool DecodeARMFeatures(const Driver &D, StringRef text, StringRef CPU,
llvm::ARM::ArchKind ArchKind,
std::vector<StringRef> &Features,
- unsigned &ArgFPUID) {
+ llvm::ARM::FPUKind &ArgFPUKind) {
SmallVector<StringRef, 8> Split;
text.split(Split, StringRef("+"), -1, false);
for (StringRef Feature : Split) {
- if (!appendArchExtFeatures(CPU, ArchKind, Feature, Features, ArgFPUID))
+ if (!appendArchExtFeatures(CPU, ArchKind, Feature, Features, ArgFPUKind))
return false;
}
return true;
@@ -113,14 +126,16 @@ static void DecodeARMFeaturesFromCPU(const Driver &D, StringRef CPU,
static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args,
llvm::StringRef ArchName, llvm::StringRef CPUName,
std::vector<StringRef> &Features,
- const llvm::Triple &Triple, unsigned &ArgFPUID) {
+ const llvm::Triple &Triple,
+ llvm::ARM::FPUKind &ArgFPUKind) {
std::pair<StringRef, StringRef> Split = ArchName.split("+");
std::string MArch = arm::getARMArch(ArchName, Triple);
llvm::ARM::ArchKind ArchKind = llvm::ARM::parseArch(MArch);
if (ArchKind == llvm::ARM::ArchKind::INVALID ||
- (Split.second.size() && !DecodeARMFeatures(D, Split.second, CPUName,
- ArchKind, Features, ArgFPUID)))
+ (Split.second.size() &&
+ !DecodeARMFeatures(D, Split.second, CPUName, ArchKind, Features,
+ ArgFPUKind)))
D.Diag(clang::diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << A->getValue();
}
@@ -129,19 +144,36 @@ static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args,
static void checkARMCPUName(const Driver &D, const Arg *A, const ArgList &Args,
llvm::StringRef CPUName, llvm::StringRef ArchName,
std::vector<StringRef> &Features,
- const llvm::Triple &Triple, unsigned &ArgFPUID) {
+ const llvm::Triple &Triple,
+ llvm::ARM::FPUKind &ArgFPUKind) {
std::pair<StringRef, StringRef> Split = CPUName.split("+");
std::string CPU = arm::getARMTargetCPU(CPUName, ArchName, Triple);
llvm::ARM::ArchKind ArchKind =
arm::getLLVMArchKindForARM(CPU, ArchName, Triple);
if (ArchKind == llvm::ARM::ArchKind::INVALID ||
- (Split.second.size() &&
- !DecodeARMFeatures(D, Split.second, CPU, ArchKind, Features, ArgFPUID)))
+ (Split.second.size() && !DecodeARMFeatures(D, Split.second, CPU, ArchKind,
+ Features, ArgFPUKind)))
D.Diag(clang::diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << A->getValue();
}
+// If -mfloat-abi=hard or -mhard-float are specified explicitly then check that
+// floating point registers are available on the target CPU.
+static void checkARMFloatABI(const Driver &D, const ArgList &Args,
+ bool HasFPRegs) {
+ if (HasFPRegs)
+ return;
+ const Arg *A =
+ Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ);
+ if (A && (A->getOption().matches(options::OPT_mhard_float) ||
+ (A->getOption().matches(options::OPT_mfloat_abi_EQ) &&
+ A->getValue() == StringRef("hard"))))
+ D.Diag(clang::diag::warn_drv_no_floating_point_registers)
+ << A->getAsString(Args);
+}
+
bool arm::useAAPCSForMachO(const llvm::Triple &T) {
// The backend is hardwired to assume AAPCS for M-class processors, ensure
// the frontend matches that.
@@ -166,11 +198,16 @@ arm::ReadTPMode arm::getReadTPMode(const Driver &D, const ArgList &Args,
if (Arg *A = Args.getLastArg(options::OPT_mtp_mode_EQ)) {
arm::ReadTPMode ThreadPointer =
llvm::StringSwitch<arm::ReadTPMode>(A->getValue())
- .Case("cp15", ReadTPMode::Cp15)
+ .Case("cp15", ReadTPMode::TPIDRURO)
+ .Case("tpidrurw", ReadTPMode::TPIDRURW)
+ .Case("tpidruro", ReadTPMode::TPIDRURO)
+ .Case("tpidrprw", ReadTPMode::TPIDRPRW)
.Case("soft", ReadTPMode::Soft)
.Default(ReadTPMode::Invalid);
- if (ThreadPointer == ReadTPMode::Cp15 && !isHardTPSupported(Triple) &&
- !ForAS) {
+ if ((ThreadPointer == ReadTPMode::TPIDRURW ||
+ ThreadPointer == ReadTPMode::TPIDRURO ||
+ ThreadPointer == ReadTPMode::TPIDRPRW) &&
+ !isHardTPSupported(Triple) && !ForAS) {
D.Diag(diag::err_target_unsupported_tp_hard) << Triple.getArchName();
return ReadTPMode::Invalid;
}
@@ -276,6 +313,11 @@ void arm::setArchNameInTriple(const Driver &D, const ArgList &Args,
void arm::setFloatABIInTriple(const Driver &D, const ArgList &Args,
llvm::Triple &Triple) {
+ if (Triple.isOSLiteOS()) {
+ Triple.setEnvironment(llvm::Triple::OpenHOS);
+ return;
+ }
+
bool isHardFloat =
(arm::getARMFloatABI(D, Triple, Args) == arm::FloatABI::Hard);
@@ -295,6 +337,8 @@ void arm::setFloatABIInTriple(const Driver &D, const ArgList &Args,
Triple.setEnvironment(isHardFloat ? llvm::Triple::MuslEABIHF
: llvm::Triple::MuslEABI);
break;
+ case llvm::Triple::OpenHOS:
+ break;
default: {
arm::FloatABI DefaultABI = arm::getDefaultFloatABI(Triple);
if (DefaultABI != arm::FloatABI::Invalid &&
@@ -364,6 +408,8 @@ arm::FloatABI arm::getDefaultFloatABI(const llvm::Triple &Triple) {
return FloatABI::SoftFP;
default:
+ if (Triple.isOHOSFamily())
+ return FloatABI::Soft;
switch (Triple.getEnvironment()) {
case llvm::Triple::GNUEABIHF:
case llvm::Triple::MuslEABIHF:
@@ -436,9 +482,11 @@ static bool hasIntegerMVE(const std::vector<StringRef> &F) {
(NoMVE == F.rend() || std::distance(MVE, NoMVE) > 0);
}
-void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args,
- std::vector<StringRef> &Features, bool ForAS) {
+llvm::ARM::FPUKind arm::getARMTargetFeatures(const Driver &D,
+ const llvm::Triple &Triple,
+ const ArgList &Args,
+ std::vector<StringRef> &Features,
+ bool ForAS, bool ForMultilib) {
bool KernelOrKext =
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
arm::FloatABI ABI = arm::getARMFloatABI(D, Triple, Args);
@@ -490,17 +538,27 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
}
}
}
+
+ // The integrated assembler doesn't implement e_flags setting behavior for
+ // -meabi=gnu (gcc -mabi={apcs-gnu,atpcs} passes -meabi=gnu to gas). For
+ // compatibility we accept but warn.
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_mabi_EQ))
+ A->ignoreTargetSpecific();
}
- if (getReadTPMode(D, Args, Triple, ForAS) == ReadTPMode::Cp15)
- Features.push_back("+read-tp-hard");
+ if (getReadTPMode(D, Args, Triple, ForAS) == ReadTPMode::TPIDRURW)
+ Features.push_back("+read-tp-tpidrurw");
+ if (getReadTPMode(D, Args, Triple, ForAS) == ReadTPMode::TPIDRURO)
+ Features.push_back("+read-tp-tpidruro");
+ if (getReadTPMode(D, Args, Triple, ForAS) == ReadTPMode::TPIDRPRW)
+ Features.push_back("+read-tp-tpidrprw");
const Arg *ArchArg = Args.getLastArg(options::OPT_march_EQ);
const Arg *CPUArg = Args.getLastArg(options::OPT_mcpu_EQ);
StringRef ArchName;
StringRef CPUName;
- unsigned ArchArgFPUID = llvm::ARM::FK_INVALID;
- unsigned CPUArgFPUID = llvm::ARM::FK_INVALID;
+ llvm::ARM::FPUKind ArchArgFPUKind = llvm::ARM::FK_INVALID;
+ llvm::ARM::FPUKind CPUArgFPUKind = llvm::ARM::FK_INVALID;
// Check -mcpu. ClangAs gives preference to -Wa,-mcpu=.
if (WaCPU) {
@@ -520,13 +578,13 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
ArchName = WaArch->second;
// This will set any features after the base architecture.
checkARMArchName(D, WaArch->first, Args, ArchName, CPUName,
- ExtensionFeatures, Triple, ArchArgFPUID);
+ ExtensionFeatures, Triple, ArchArgFPUKind);
// The base architecture was handled in ToolChain::ComputeLLVMTriple because
// triple is read only by this point.
} else if (ArchArg) {
ArchName = ArchArg->getValue();
checkARMArchName(D, ArchArg, Args, ArchName, CPUName, ExtensionFeatures,
- Triple, ArchArgFPUID);
+ Triple, ArchArgFPUKind);
}
// Add CPU features for generic CPUs
@@ -546,14 +604,14 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (CPUArg)
checkARMCPUName(D, CPUArg, Args, CPUName, ArchName, ExtensionFeatures,
- Triple, CPUArgFPUID);
+ Triple, CPUArgFPUKind);
// TODO Handle -mtune=. Suppress -Wunused-command-line-argument as a
// longstanding behavior.
(void)Args.getLastArg(options::OPT_mtune_EQ);
// Honor -mfpu=. ClangAs gives preference to -Wa,-mfpu=.
- unsigned FPUID = llvm::ARM::FK_INVALID;
+ llvm::ARM::FPUKind FPUKind = llvm::ARM::FK_INVALID;
const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
if (WaFPU) {
if (FPUArg)
@@ -561,11 +619,11 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
<< FPUArg->getAsString(Args);
(void)getARMFPUFeatures(D, WaFPU->first, Args, WaFPU->second, Features);
} else if (FPUArg) {
- FPUID = getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
+ FPUKind = getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
} else if (Triple.isAndroid() && getARMSubArchVersionNumber(Triple) >= 7) {
const char *AndroidFPU = "neon";
- FPUID = llvm::ARM::parseFPU(AndroidFPU);
- if (!llvm::ARM::getFPUFeatures(FPUID, Features))
+ FPUKind = llvm::ARM::parseFPU(AndroidFPU);
+ if (!llvm::ARM::getFPUFeatures(FPUKind, Features))
D.Diag(clang::diag::err_drv_clang_unsupported)
<< std::string("-mfpu=") + AndroidFPU;
} else {
@@ -573,8 +631,8 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
std::string CPU = arm::getARMTargetCPU(CPUName, ArchName, Triple);
llvm::ARM::ArchKind ArchKind =
arm::getLLVMArchKindForARM(CPU, ArchName, Triple);
- FPUID = llvm::ARM::getDefaultFPU(CPU, ArchKind);
- (void)llvm::ARM::getFPUFeatures(FPUID, Features);
+ FPUKind = llvm::ARM::getDefaultFPU(CPU, ArchKind);
+ (void)llvm::ARM::getFPUFeatures(FPUKind, Features);
}
}
@@ -625,25 +683,30 @@ fp16_fml_fallthrough:
// -march/-mcpu effectively disables the FPU (GCC ignores the -mfpu options in
// this case). Note that the ABI can also be set implicitly by the target
// selected.
+ bool HasFPRegs = true;
if (ABI == arm::FloatABI::Soft) {
llvm::ARM::getFPUFeatures(llvm::ARM::FK_NONE, Features);
// Disable all features relating to hardware FP, not already disabled by the
// above call.
- Features.insert(Features.end(), {"-dotprod", "-fp16fml", "-bf16", "-mve",
- "-mve.fp", "-fpregs"});
- } else if (FPUID == llvm::ARM::FK_NONE ||
- ArchArgFPUID == llvm::ARM::FK_NONE ||
- CPUArgFPUID == llvm::ARM::FK_NONE) {
+ Features.insert(Features.end(),
+ {"-dotprod", "-fp16fml", "-bf16", "-mve", "-mve.fp"});
+ HasFPRegs = false;
+ FPUKind = llvm::ARM::FK_NONE;
+ } else if (FPUKind == llvm::ARM::FK_NONE ||
+ ArchArgFPUKind == llvm::ARM::FK_NONE ||
+ CPUArgFPUKind == llvm::ARM::FK_NONE) {
// -mfpu=none, -march=armvX+nofp or -mcpu=X+nofp is *very* similar to
// -mfloat-abi=soft, only that it should not disable MVE-I. They disable the
// FPU, but not the FPU registers, thus MVE-I, which depends only on the
// latter, is still supported.
Features.insert(Features.end(),
{"-dotprod", "-fp16fml", "-bf16", "-mve.fp"});
- if (!hasIntegerMVE(Features))
- Features.emplace_back("-fpregs");
+ HasFPRegs = hasIntegerMVE(Features);
+ FPUKind = llvm::ARM::FK_NONE;
}
+ if (!HasFPRegs)
+ Features.emplace_back("-fpregs");
// En/disable crc code generation.
if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) {
@@ -773,7 +836,9 @@ fp16_fml_fallthrough:
// Generate execute-only output (no data access to code sections).
// This only makes sense for the compiler, not for the assembler.
- if (!ForAS) {
+ // It's not needed for multilib selection and may hide an unused
+ // argument diagnostic if the code is always run.
+ if (!ForAS && !ForMultilib) {
// Supported only on ARMv6T2 and ARMv7 and above.
// Cannot be combined with -mno-movt.
if (Arg *A = Args.getLastArg(options::OPT_mexecute_only, options::OPT_mno_execute_only)) {
@@ -899,6 +964,10 @@ fp16_fml_fallthrough:
if (Args.getLastArg(options::OPT_mno_bti_at_return_twice))
Features.push_back("+no-bti-at-return-twice");
+
+ checkARMFloatABI(D, Args, HasFPRegs);
+
+ return FPUKind;
}
std::string arm::getARMArch(StringRef Arch, const llvm::Triple &Triple) {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
index 782bdf3d0202..fa62ac89e3a1 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
@@ -11,10 +11,10 @@
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/Option/ArgList.h"
#include "llvm/Option/Option.h"
-#include "llvm/Support/ARMTargetParser.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/TargetParser/ARMTargetParser.h"
+#include "llvm/TargetParser/Triple.h"
#include <string>
#include <vector>
@@ -38,7 +38,9 @@ void appendBE8LinkFlag(const llvm::opt::ArgList &Args,
enum class ReadTPMode {
Invalid,
Soft,
- Cp15,
+ TPIDRURW,
+ TPIDRURO,
+ TPIDRPRW,
};
enum class FloatABI {
@@ -64,12 +66,15 @@ bool useAAPCSForMachO(const llvm::Triple &T);
void getARMArchCPUFromArgs(const llvm::opt::ArgList &Args,
llvm::StringRef &Arch, llvm::StringRef &CPU,
bool FromAs = false);
-void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args,
- std::vector<llvm::StringRef> &Features, bool ForAS);
+llvm::ARM::FPUKind getARMTargetFeatures(const Driver &D,
+ const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ std::vector<llvm::StringRef> &Features,
+ bool ForAS, bool ForMultilib = false);
int getARMSubArchVersionNumber(const llvm::Triple &Triple);
bool isARMMProfile(const llvm::Triple &Triple);
bool isARMAProfile(const llvm::Triple &Triple);
+bool isARMBigEndian(const llvm::Triple &Triple, const llvm::opt::ArgList &Args);
} // end namespace arm
} // end namespace tools
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.cpp
index ed8128d829e9..e94ea12f46dc 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.cpp
@@ -14,10 +14,10 @@
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/CSKYTargetParser.h"
-#include "llvm/Support/Host.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/CSKYTargetParser.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/TargetParser.h"
using namespace clang::driver;
using namespace clang::driver::tools;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
index 576677a5f38e..856ad58f3bd9 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
@@ -7,11 +7,12 @@
//===----------------------------------------------------------------------===//
#include "LoongArch.h"
+#include "ToolChains/CommonArgs.h"
#include "clang/Basic/DiagnosticDriver.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
-#include "llvm/Support/LoongArchTargetParser.h"
+#include "llvm/TargetParser/LoongArchTargetParser.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -25,36 +26,100 @@ StringRef loongarch::getLoongArchABI(const Driver &D, const ArgList &Args,
"Unexpected triple");
bool IsLA32 = Triple.getArch() == llvm::Triple::loongarch32;
+ // Record -mabi value for later use.
+ const Arg *MABIArg = Args.getLastArg(options::OPT_mabi_EQ);
+ StringRef MABIValue;
+ if (MABIArg) {
+ MABIValue = MABIArg->getValue();
+ }
+
+ // Parse -mfpu value for later use.
+ const Arg *MFPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
+ int FPU = -1;
+ if (MFPUArg) {
+ StringRef V = MFPUArg->getValue();
+ if (V == "64")
+ FPU = 64;
+ else if (V == "32")
+ FPU = 32;
+ else if (V == "0" || V == "none")
+ FPU = 0;
+ else
+ D.Diag(diag::err_drv_loongarch_invalid_mfpu_EQ) << V;
+ }
+
// Check -m*-float firstly since they have highest priority.
if (const Arg *A = Args.getLastArg(options::OPT_mdouble_float,
options::OPT_msingle_float,
options::OPT_msoft_float)) {
- if (A->getOption().matches(options::OPT_mdouble_float))
- return IsLA32 ? "ilp32d" : "lp64d";
- if (A->getOption().matches(options::OPT_msingle_float))
- return IsLA32 ? "ilp32f" : "lp64f";
- if (A->getOption().matches(options::OPT_msoft_float))
- return IsLA32 ? "ilp32s" : "lp64s";
+ StringRef ImpliedABI;
+ int ImpliedFPU = -1;
+ if (A->getOption().matches(options::OPT_mdouble_float)) {
+ ImpliedABI = IsLA32 ? "ilp32d" : "lp64d";
+ ImpliedFPU = 64;
+ }
+ if (A->getOption().matches(options::OPT_msingle_float)) {
+ ImpliedABI = IsLA32 ? "ilp32f" : "lp64f";
+ ImpliedFPU = 32;
+ }
+ if (A->getOption().matches(options::OPT_msoft_float)) {
+ ImpliedABI = IsLA32 ? "ilp32s" : "lp64s";
+ ImpliedFPU = 0;
+ }
+
+ // Check `-mabi=` and `-mfpu=` settings and report if they conflict with
+ // the higher-priority settings implied by -m*-float.
+ //
+ // ImpliedABI and ImpliedFPU are guaranteed to have valid values because
+ // one of the match arms must match if execution can arrive here at all.
+ if (!MABIValue.empty() && ImpliedABI != MABIValue)
+ D.Diag(diag::warn_drv_loongarch_conflicting_implied_val)
+ << MABIArg->getAsString(Args) << A->getAsString(Args) << ImpliedABI;
+
+ if (FPU != -1 && ImpliedFPU != FPU)
+ D.Diag(diag::warn_drv_loongarch_conflicting_implied_val)
+ << MFPUArg->getAsString(Args) << A->getAsString(Args) << ImpliedFPU;
+
+ return ImpliedABI;
}
// If `-mabi=` is specified, use it.
- if (const Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
- return A->getValue();
+ if (!MABIValue.empty())
+ return MABIValue;
// Select abi based on -mfpu=xx.
- if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ)) {
- StringRef FPU = A->getValue();
- if (FPU == "64")
- return IsLA32 ? "ilp32d" : "lp64d";
- if (FPU == "32")
- return IsLA32 ? "ilp32f" : "lp64f";
- if (FPU == "0" || FPU == "none")
- return IsLA32 ? "ilp32s" : "lp64s";
- D.Diag(diag::err_drv_loongarch_invalid_mfpu_EQ) << FPU;
+ switch (FPU) {
+ case 64:
+ return IsLA32 ? "ilp32d" : "lp64d";
+ case 32:
+ return IsLA32 ? "ilp32f" : "lp64f";
+ case 0:
+ return IsLA32 ? "ilp32s" : "lp64s";
}
// Choose a default based on the triple.
- return IsLA32 ? "ilp32d" : "lp64d";
+ // Honor the explicit ABI modifier suffix in triple's environment part if
+ // present, falling back to {ILP32,LP64}D otherwise.
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::GNUSF:
+ return IsLA32 ? "ilp32s" : "lp64s";
+ case llvm::Triple::GNUF32:
+ return IsLA32 ? "ilp32f" : "lp64f";
+ case llvm::Triple::GNUF64:
+ // This was originally permitted (and indeed the canonical way) to
+ // represent the {ILP32,LP64}D ABIs, but in Feb 2023 Loongson decided to
+ // drop the explicit suffix in favor of unmarked `-gnu` for the
+ // "general-purpose" ABIs, among other non-technical reasons.
+ //
+ // The spec change did not mention whether existing usages of "gnuf64"
+ // shall remain valid or not, so we are going to continue recognizing it
+ // for some time, until it is clear that everyone else has migrated away
+ // from it.
+ [[fallthrough]];
+ case llvm::Triple::GNU:
+ default:
+ return IsLA32 ? "ilp32d" : "lp64d";
+ }
}
void loongarch::getLoongArchTargetFeatures(const Driver &D,
@@ -62,10 +127,8 @@ void loongarch::getLoongArchTargetFeatures(const Driver &D,
const ArgList &Args,
std::vector<StringRef> &Features) {
StringRef ArchName;
- llvm::LoongArch::ArchKind ArchKind = llvm::LoongArch::ArchKind::AK_INVALID;
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
- ArchKind = llvm::LoongArch::parseArch(A->getValue());
- if (ArchKind == llvm::LoongArch::ArchKind::AK_INVALID) {
+ if (!llvm::LoongArch::isValidArchName(A->getValue())) {
D.Diag(clang::diag::err_drv_invalid_arch_name) << A->getAsString(Args);
return;
}
@@ -75,7 +138,7 @@ void loongarch::getLoongArchTargetFeatures(const Driver &D,
// TODO: handle -march=native and -mtune=xx.
// Select a default arch name.
- if (ArchName.empty() && Triple.getArch() == llvm::Triple::loongarch64)
+ if (ArchName.empty() && Triple.isLoongArch64())
ArchName = "loongarch64";
if (!ArchName.empty())
@@ -112,4 +175,15 @@ void loongarch::getLoongArchTargetFeatures(const Driver &D,
D.Diag(diag::err_drv_loongarch_invalid_mfpu_EQ) << FPU;
}
}
+
+ // Select the `ual` feature determined by -m[no-]unaligned-access
+ // or the alias -m[no-]strict-align.
+ AddTargetFeature(Args, Features, options::OPT_munaligned_access,
+ options::OPT_mno_unaligned_access, "ual");
+
+ // Accept but warn about these TargetSpecific options.
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_mabi_EQ))
+ A->ignoreTargetSpecific();
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_mfpu_EQ))
+ A->ignoreTargetSpecific();
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.cpp
index 119e24cedbab..963f7a187d63 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.cpp
@@ -14,8 +14,8 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/Regex.h"
+#include "llvm/TargetParser/Host.h"
#include <sstream>
using namespace clang::driver;
@@ -65,13 +65,35 @@ std::string m68k::getM68kTargetCPU(const ArgList &Args) {
return "";
}
+static void addFloatABIFeatures(const llvm::opt::ArgList &Args,
+ std::vector<llvm::StringRef> &Features) {
+ Arg *A = Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+ options::OPT_m68881);
+ // Opt out FPU even for newer CPUs.
+ if (A && A->getOption().matches(options::OPT_msoft_float)) {
+ Features.push_back("-isa-68881");
+ Features.push_back("-isa-68882");
+ return;
+ }
+
+ std::string CPU = m68k::getM68kTargetCPU(Args);
+ // Only enable M68881 for CPU < 68020 if the related flags are present.
+ if ((A && (CPU == "M68000" || CPU == "M68010")) ||
+ // Otherwise, by default we assume newer CPUs have M68881/2.
+ CPU == "M68020")
+ Features.push_back("+isa-68881");
+ else if (CPU == "M68030" || CPU == "M68040" || CPU == "M68060")
+ // Note that although CPU >= M68040 imply M68882, we still add `isa-68882`
+ // anyway so that it's easier to add or not add the corresponding macro
+ // definitions later, in case we want to disable 68881/2 in newer CPUs
+ // (with -msoft-float, for instance).
+ Features.push_back("+isa-68882");
+}
+
void m68k::getM68kTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args,
std::vector<StringRef> &Features) {
-
- m68k::FloatABI FloatABI = m68k::getM68kFloatABI(D, Args);
- if (FloatABI == m68k::FloatABI::Soft)
- Features.push_back("-hard-float");
+ addFloatABIFeatures(Args, Features);
// Handle '-ffixed-<register>' flags
if (Args.hasArg(options::OPT_ffixed_a0))
@@ -105,21 +127,3 @@ void m68k::getM68kTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (Args.hasArg(options::OPT_ffixed_d7))
Features.push_back("+reserve-d7");
}
-
-m68k::FloatABI m68k::getM68kFloatABI(const Driver &D, const ArgList &Args) {
- m68k::FloatABI ABI = m68k::FloatABI::Invalid;
- if (Arg *A =
- Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float)) {
-
- if (A->getOption().matches(options::OPT_msoft_float))
- ABI = m68k::FloatABI::Soft;
- else if (A->getOption().matches(options::OPT_mhard_float))
- ABI = m68k::FloatABI::Hard;
- }
-
- // If unspecified, choose the default based on the platform.
- if (ABI == m68k::FloatABI::Invalid)
- ABI = m68k::FloatABI::Hard;
-
- return ABI;
-}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.h
index 41d53efb940b..051e7e1af103 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.h
@@ -20,14 +20,6 @@ namespace driver {
namespace tools {
namespace m68k {
-enum class FloatABI {
- Invalid,
- Soft,
- Hard,
-};
-
-FloatABI getM68kFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
-
std::string getM68kTargetCPU(const llvm::opt::ArgList &Args);
void getM68kTargetFeatures(const Driver &D, const llvm::Triple &Triple,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp
index 088eecf79adb..f9f14c01b2b9 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp
@@ -39,12 +39,6 @@ void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple,
DefMips64CPU = "mips64r6";
}
- // MIPS64r6 is the default for Android MIPS64 (mips64el-linux-android).
- if (Triple.isAndroid()) {
- DefMips32CPU = "mips32";
- DefMips64CPU = "mips64r6";
- }
-
// MIPS3 is the default for mips64*-unknown-openbsd.
if (Triple.isOSOpenBSD())
DefMips64CPU = "mips3";
@@ -467,11 +461,6 @@ bool mips::isFP64ADefault(const llvm::Triple &Triple, StringRef CPUName) {
bool mips::isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
StringRef ABIName, mips::FloatABI FloatABI) {
- if (Triple.getVendor() != llvm::Triple::ImaginationTechnologies &&
- Triple.getVendor() != llvm::Triple::MipsTechnologies &&
- !Triple.isAndroid())
- return false;
-
if (ABIName != "32")
return false;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h
index f4c11a7e3188..62211c711420 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h
@@ -11,8 +11,8 @@
#include "clang/Driver/Driver.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Option/Option.h"
+#include "llvm/TargetParser/Triple.h"
#include <string>
#include <vector>
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
index e3c025fb2468..ab24d14992cd 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
@@ -13,7 +13,7 @@
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/Host.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -85,7 +85,8 @@ std::string ppc::getPPCTuneCPU(const ArgList &Args, const llvm::Triple &T) {
}
/// Get the (LLVM) name of the PowerPC cpu we are targeting.
-std::string ppc::getPPCTargetCPU(const ArgList &Args, const llvm::Triple &T) {
+std::string ppc::getPPCTargetCPU(const Driver &D, const ArgList &Args,
+ const llvm::Triple &T) {
if (Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ))
return normalizeCPUName(A->getValue(), T);
return getPPCGenericTargetCPU(T);
@@ -111,7 +112,8 @@ void ppc::getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (Triple.getSubArch() == llvm::Triple::PPCSubArch_spe)
Features.push_back("+spe");
- handleTargetFeaturesGroup(Args, Features, options::OPT_m_ppc_Features_Group);
+ handleTargetFeaturesGroup(D, Triple, Args, Features,
+ options::OPT_m_ppc_Features_Group);
ppc::FloatABI FloatABI = ppc::getPPCFloatABI(D, Args);
if (FloatABI == ppc::FloatABI::Soft)
@@ -126,8 +128,7 @@ ppc::ReadGOTPtrMode ppc::getPPCReadGOTPtrMode(const Driver &D, const llvm::Tripl
const ArgList &Args) {
if (Args.getLastArg(options::OPT_msecure_plt))
return ppc::ReadGOTPtrMode::SecurePlt;
- if ((Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 13) ||
- Triple.isOSNetBSD() || Triple.isOSOpenBSD() || Triple.isMusl())
+ if (Triple.isPPC32SecurePlt())
return ppc::ReadGOTPtrMode::SecurePlt;
else
return ppc::ReadGOTPtrMode::Bss;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h
index 97ac45083852..ec5b3c8140b6 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h
@@ -35,7 +35,7 @@ enum class ReadGOTPtrMode {
FloatABI getPPCFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
-std::string getPPCTargetCPU(const llvm::opt::ArgList &Args,
+std::string getPPCTargetCPU(const Driver &D, const llvm::opt::ArgList &Args,
const llvm::Triple &T);
std::string getPPCTuneCPU(const llvm::opt::ArgList &Args,
const llvm::Triple &T);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
index faecb76d70d4..fef568f5b120 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -1,4 +1,4 @@
-//===--- RISCV.cpp - RISCV Helpers for Tools --------------------*- C++ -*-===//
+//===--- RISCV.cpp - RISC-V Helpers for Tools -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -15,9 +15,9 @@
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Error.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
#include "llvm/TargetParser/RISCVTargetParser.h"
using namespace clang::driver;
@@ -49,12 +49,20 @@ static bool getArchFeatures(const Driver &D, StringRef Arch,
}
// Get features except standard extension feature
-static bool getRISCFeaturesFromMcpu(const llvm::Triple &Triple, StringRef Mcpu,
+static void getRISCFeaturesFromMcpu(const Driver &D, const Arg *A,
+ const llvm::Triple &Triple,
+ StringRef Mcpu,
std::vector<StringRef> &Features) {
bool Is64Bit = Triple.isRISCV64();
- llvm::RISCV::CPUKind CPUKind = llvm::RISCV::parseCPUKind(Mcpu);
- return llvm::RISCV::checkCPUKind(CPUKind, Is64Bit) &&
- llvm::RISCV::getCPUFeaturesExceptStdExt(CPUKind, Features);
+ if (!llvm::RISCV::parseCPU(Mcpu, Is64Bit)) {
+ // Try inverting Is64Bit in case the CPU is valid, but for the wrong target.
+ if (llvm::RISCV::parseCPU(Mcpu, !Is64Bit))
+ D.Diag(clang::diag::err_drv_invalid_riscv_cpu_name_for_target)
+ << Mcpu << Is64Bit;
+ else
+ D.Diag(clang::diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Mcpu;
+ }
}
void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
@@ -71,9 +79,8 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
StringRef CPU = A->getValue();
if (CPU == "native")
CPU = llvm::sys::getHostCPUName();
- if (!getRISCFeaturesFromMcpu(Triple, CPU, Features))
- D.Diag(clang::diag::err_drv_unsupported_option_argument)
- << A->getSpelling() << CPU;
+
+ getRISCFeaturesFromMcpu(D, A, Triple, CPU, Features);
}
// Handle features corresponding to "-ffixed-X" options
@@ -163,7 +170,8 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
// Now add any that the user explicitly requested on the command line,
// which may override the defaults.
- handleTargetFeaturesGroup(Args, Features, options::OPT_m_riscv_Features_Group);
+ handleTargetFeaturesGroup(D, Triple, Args, Features,
+ options::OPT_m_riscv_Features_Group);
}
StringRef riscv::getRISCVABI(const ArgList &Args, const llvm::Triple &Triple) {
@@ -283,10 +291,14 @@ StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
if (MABI.equals_insensitive("ilp32e"))
return "rv32e";
- else if (MABI.startswith_insensitive("ilp32"))
+ else if (MABI.starts_with_insensitive("ilp32"))
return "rv32imafdc";
- else if (MABI.startswith_insensitive("lp64"))
+ else if (MABI.starts_with_insensitive("lp64")) {
+ if (Triple.isAndroid())
+ return "rv64imafdc_zba_zbb_zbs";
+
return "rv64imafdc";
+ }
}
// 4. Choose a default based on the triple
@@ -302,6 +314,8 @@ StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
} else {
if (Triple.getOS() == llvm::Triple::UnknownOS)
return "rv64imac";
+ else if (Triple.isAndroid())
+ return "rv64imafdc_zba_zbb_zbs";
else
return "rv64imafdc";
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h
index c30f1098ddda..fcaf9d57ad13 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h
@@ -1,4 +1,4 @@
-//===--- RISCV.h - RISCV-specific Tool Helpers ------------------*- C++ -*-===//
+//===--- RISCV.h - RISC-V-specific Tool Helpers -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
index a2e9c7ab023e..11c9444fde2b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
@@ -12,7 +12,7 @@
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/Host.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -118,12 +118,6 @@ sparc::FloatABI sparc::getSparcFloatABI(const Driver &D,
std::string sparc::getSparcTargetCPU(const Driver &D, const ArgList &Args,
const llvm::Triple &Triple) {
- if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_march_EQ)) {
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << A->getSpelling() << Triple.getTriple();
- return "";
- }
-
if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) {
StringRef CPUName = A->getValue();
if (CPUName == "native") {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
index f81bf68172de..588bc3176d73 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
@@ -11,7 +11,7 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/Host.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang::driver;
using namespace clang::driver::tools;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
index 3c8adf3cbc40..286bac2e7a2b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
@@ -14,7 +14,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/Host.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -119,6 +119,15 @@ std::string x86::getX86TargetCPU(const Driver &D, const ArgList &Args,
void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args,
std::vector<StringRef> &Features) {
+ // Claim and report unsupported -mabi=. Note: we don't support "sysv_abi" or
+ // "ms_abi" as default function attributes.
+ if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mabi_EQ)) {
+ StringRef DefaultAbi = Triple.isOSWindows() ? "ms" : "sysv";
+ if (A->getValue() != DefaultAbi)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << Triple.getTriple();
+ }
+
// If -march=native, autodetect the feature list.
if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_march_EQ)) {
if (StringRef(A->getValue()) == "native") {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.h
index 36a2ab52899d..e07387f3ece3 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.h
@@ -11,8 +11,8 @@
#include "clang/Driver/Driver.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Option/Option.h"
+#include "llvm/TargetParser/Triple.h"
#include <string>
#include <vector>
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp
index ac9c7036ad6e..26a6276ae50a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp
@@ -12,26 +12,27 @@
#include "Gnu.h"
#include "clang/Driver/InputInfo.h"
+#include "Arch/ARM.h"
#include "Arch/RISCV.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/MultilibBuilder.h"
#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
+#include <sstream>
+
using namespace llvm::opt;
using namespace clang;
using namespace clang::driver;
using namespace clang::driver::tools;
using namespace clang::driver::toolchains;
-static Multilib makeMultilib(StringRef commonSuffix) {
- return Multilib(commonSuffix, commonSuffix, commonSuffix);
-}
-
static bool findRISCVMultilibs(const Driver &D,
const llvm::Triple &TargetTriple,
const ArgList &Args, DetectedMultilibs &Result) {
@@ -40,36 +41,40 @@ static bool findRISCVMultilibs(const Driver &D,
StringRef Abi = tools::riscv::getRISCVABI(Args, TargetTriple);
if (TargetTriple.isRISCV64()) {
- Multilib Imac = makeMultilib("").flag("+march=rv64imac").flag("+mabi=lp64");
- Multilib Imafdc = makeMultilib("/rv64imafdc/lp64d")
- .flag("+march=rv64imafdc")
- .flag("+mabi=lp64d");
+ MultilibBuilder Imac =
+ MultilibBuilder().flag("-march=rv64imac").flag("-mabi=lp64");
+ MultilibBuilder Imafdc = MultilibBuilder("/rv64imafdc/lp64d")
+ .flag("-march=rv64imafdc")
+ .flag("-mabi=lp64d");
// Multilib reuse
bool UseImafdc =
(Arch == "rv64imafdc") || (Arch == "rv64gc"); // gc => imafdc
- addMultilibFlag((Arch == "rv64imac"), "march=rv64imac", Flags);
- addMultilibFlag(UseImafdc, "march=rv64imafdc", Flags);
- addMultilibFlag(Abi == "lp64", "mabi=lp64", Flags);
- addMultilibFlag(Abi == "lp64d", "mabi=lp64d", Flags);
+ addMultilibFlag((Arch == "rv64imac"), "-march=rv64imac", Flags);
+ addMultilibFlag(UseImafdc, "-march=rv64imafdc", Flags);
+ addMultilibFlag(Abi == "lp64", "-mabi=lp64", Flags);
+ addMultilibFlag(Abi == "lp64d", "-mabi=lp64d", Flags);
- Result.Multilibs = MultilibSet().Either(Imac, Imafdc);
- return Result.Multilibs.select(Flags, Result.SelectedMultilib);
+ Result.Multilibs =
+ MultilibSetBuilder().Either(Imac, Imafdc).makeMultilibSet();
+ return Result.Multilibs.select(Flags, Result.SelectedMultilibs);
}
if (TargetTriple.isRISCV32()) {
- Multilib Imac =
- makeMultilib("").flag("+march=rv32imac").flag("+mabi=ilp32");
- Multilib I =
- makeMultilib("/rv32i/ilp32").flag("+march=rv32i").flag("+mabi=ilp32");
- Multilib Im =
- makeMultilib("/rv32im/ilp32").flag("+march=rv32im").flag("+mabi=ilp32");
- Multilib Iac = makeMultilib("/rv32iac/ilp32")
- .flag("+march=rv32iac")
- .flag("+mabi=ilp32");
- Multilib Imafc = makeMultilib("/rv32imafc/ilp32f")
- .flag("+march=rv32imafc")
- .flag("+mabi=ilp32f");
+ MultilibBuilder Imac =
+ MultilibBuilder().flag("-march=rv32imac").flag("-mabi=ilp32");
+ MultilibBuilder I = MultilibBuilder("/rv32i/ilp32")
+ .flag("-march=rv32i")
+ .flag("-mabi=ilp32");
+ MultilibBuilder Im = MultilibBuilder("/rv32im/ilp32")
+ .flag("-march=rv32im")
+ .flag("-mabi=ilp32");
+ MultilibBuilder Iac = MultilibBuilder("/rv32iac/ilp32")
+ .flag("-march=rv32iac")
+ .flag("-mabi=ilp32");
+ MultilibBuilder Imafc = MultilibBuilder("/rv32imafc/ilp32f")
+ .flag("-march=rv32imafc")
+ .flag("-mabi=ilp32f");
// Multilib reuse
bool UseI = (Arch == "rv32i") || (Arch == "rv32ic"); // ic => i
@@ -77,16 +82,17 @@ static bool findRISCVMultilibs(const Driver &D,
bool UseImafc = (Arch == "rv32imafc") || (Arch == "rv32imafdc") ||
(Arch == "rv32gc"); // imafdc,gc => imafc
- addMultilibFlag(UseI, "march=rv32i", Flags);
- addMultilibFlag(UseIm, "march=rv32im", Flags);
- addMultilibFlag((Arch == "rv32iac"), "march=rv32iac", Flags);
- addMultilibFlag((Arch == "rv32imac"), "march=rv32imac", Flags);
- addMultilibFlag(UseImafc, "march=rv32imafc", Flags);
- addMultilibFlag(Abi == "ilp32", "mabi=ilp32", Flags);
- addMultilibFlag(Abi == "ilp32f", "mabi=ilp32f", Flags);
-
- Result.Multilibs = MultilibSet().Either(I, Im, Iac, Imac, Imafc);
- return Result.Multilibs.select(Flags, Result.SelectedMultilib);
+ addMultilibFlag(UseI, "-march=rv32i", Flags);
+ addMultilibFlag(UseIm, "-march=rv32im", Flags);
+ addMultilibFlag((Arch == "rv32iac"), "-march=rv32iac", Flags);
+ addMultilibFlag((Arch == "rv32imac"), "-march=rv32imac", Flags);
+ addMultilibFlag(UseImafc, "-march=rv32imafc", Flags);
+ addMultilibFlag(Abi == "ilp32", "-mabi=ilp32", Flags);
+ addMultilibFlag(Abi == "ilp32f", "-mabi=ilp32f", Flags);
+
+ Result.Multilibs =
+ MultilibSetBuilder().Either(I, Im, Iac, Imac, Imafc).makeMultilibSet();
+ return Result.Multilibs.select(Flags, Result.SelectedMultilibs);
}
return false;
}
@@ -101,16 +107,21 @@ BareMetal::BareMetal(const Driver &D, const llvm::Triple &Triple,
findMultilibs(D, Triple, Args);
SmallString<128> SysRoot(computeSysRoot());
if (!SysRoot.empty()) {
- llvm::sys::path::append(SysRoot, "lib");
- getFilePaths().push_back(std::string(SysRoot));
- getLibraryPaths().push_back(std::string(SysRoot));
+ for (const Multilib &M : getOrderedMultilibs()) {
+ SmallString<128> Dir(SysRoot);
+ llvm::sys::path::append(Dir, M.osSuffix(), "lib");
+ getFilePaths().push_back(std::string(Dir));
+ getLibraryPaths().push_back(std::string(Dir));
+ }
}
}
-/// Is the triple {arm,thumb}-none-none-{eabi,eabihf} ?
+/// Is the triple {arm,armeb,thumb,thumbeb}-none-none-{eabi,eabihf} ?
static bool isARMBareMetal(const llvm::Triple &Triple) {
if (Triple.getArch() != llvm::Triple::arm &&
- Triple.getArch() != llvm::Triple::thumb)
+ Triple.getArch() != llvm::Triple::thumb &&
+ Triple.getArch() != llvm::Triple::armeb &&
+ Triple.getArch() != llvm::Triple::thumbeb)
return false;
if (Triple.getVendor() != llvm::Triple::UnknownVendor)
@@ -126,9 +137,10 @@ static bool isARMBareMetal(const llvm::Triple &Triple) {
return true;
}
-/// Is the triple aarch64-none-elf?
+/// Is the triple {aarch64.aarch64_be}-none-elf?
static bool isAArch64BareMetal(const llvm::Triple &Triple) {
- if (Triple.getArch() != llvm::Triple::aarch64)
+ if (Triple.getArch() != llvm::Triple::aarch64 &&
+ Triple.getArch() != llvm::Triple::aarch64_be)
return false;
if (Triple.getVendor() != llvm::Triple::UnknownVendor)
@@ -153,36 +165,100 @@ static bool isRISCVBareMetal(const llvm::Triple &Triple) {
return Triple.getEnvironmentName() == "elf";
}
+/// Is the triple powerpc[64][le]-*-none-eabi?
+static bool isPPCBareMetal(const llvm::Triple &Triple) {
+ return Triple.isPPC() && Triple.getOS() == llvm::Triple::UnknownOS &&
+ Triple.getEnvironment() == llvm::Triple::EABI;
+}
+
+static void findMultilibsFromYAML(const ToolChain &TC, const Driver &D,
+ StringRef MultilibPath, const ArgList &Args,
+ DetectedMultilibs &Result) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> MB =
+ D.getVFS().getBufferForFile(MultilibPath);
+ if (!MB)
+ return;
+ Multilib::flags_list Flags = TC.getMultilibFlags(Args);
+ llvm::ErrorOr<MultilibSet> ErrorOrMultilibSet =
+ MultilibSet::parseYaml(*MB.get());
+ if (ErrorOrMultilibSet.getError())
+ return;
+ Result.Multilibs = ErrorOrMultilibSet.get();
+ if (Result.Multilibs.select(Flags, Result.SelectedMultilibs))
+ return;
+ D.Diag(clang::diag::warn_drv_missing_multilib) << llvm::join(Flags, " ");
+ std::stringstream ss;
+ for (const Multilib &Multilib : Result.Multilibs)
+ ss << "\n" << llvm::join(Multilib.flags(), " ");
+ D.Diag(clang::diag::note_drv_available_multilibs) << ss.str();
+}
+
+static constexpr llvm::StringLiteral MultilibFilename = "multilib.yaml";
+
+// Get the sysroot, before multilib takes effect.
+static std::string computeBaseSysRoot(const Driver &D,
+ const llvm::Triple &Triple) {
+ if (!D.SysRoot.empty())
+ return D.SysRoot;
+
+ SmallString<128> SysRootDir(D.Dir);
+ llvm::sys::path::append(SysRootDir, "..", "lib", "clang-runtimes");
+
+ SmallString<128> MultilibPath(SysRootDir);
+ llvm::sys::path::append(MultilibPath, MultilibFilename);
+
+ // New behaviour: if multilib.yaml is found then use clang-runtimes as the
+ // sysroot.
+ if (D.getVFS().exists(MultilibPath))
+ return std::string(SysRootDir);
+
+ // Otherwise fall back to the old behaviour of appending the target triple.
+ llvm::sys::path::append(SysRootDir, D.getTargetTriple());
+ return std::string(SysRootDir);
+}
+
void BareMetal::findMultilibs(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args) {
DetectedMultilibs Result;
if (isRISCVBareMetal(Triple)) {
if (findRISCVMultilibs(D, Triple, Args, Result)) {
- SelectedMultilib = Result.SelectedMultilib;
+ SelectedMultilibs = Result.SelectedMultilibs;
Multilibs = Result.Multilibs;
}
+ } else {
+ llvm::SmallString<128> MultilibPath(computeBaseSysRoot(D, Triple));
+ llvm::sys::path::append(MultilibPath, MultilibFilename);
+ findMultilibsFromYAML(*this, D, MultilibPath, Args, Result);
+ SelectedMultilibs = Result.SelectedMultilibs;
+ Multilibs = Result.Multilibs;
}
}
bool BareMetal::handlesTarget(const llvm::Triple &Triple) {
return isARMBareMetal(Triple) || isAArch64BareMetal(Triple) ||
- isRISCVBareMetal(Triple);
+ isRISCVBareMetal(Triple) || isPPCBareMetal(Triple);
}
Tool *BareMetal::buildLinker() const {
return new tools::baremetal::Linker(*this);
}
+Tool *BareMetal::buildStaticLibTool() const {
+ return new tools::baremetal::StaticLibTool(*this);
+}
+
std::string BareMetal::computeSysRoot() const {
- if (!getDriver().SysRoot.empty())
- return getDriver().SysRoot + SelectedMultilib.osSuffix();
+ return computeBaseSysRoot(getDriver(), getTriple());
+}
- SmallString<128> SysRootDir;
- llvm::sys::path::append(SysRootDir, getDriver().Dir, "../lib/clang-runtimes",
- getDriver().getTargetTriple());
+BareMetal::OrderedMultilibs BareMetal::getOrderedMultilibs() const {
+ // Get multilibs in reverse order because they're ordered most-specific last.
+ if (!SelectedMultilibs.empty())
+ return llvm::reverse(SelectedMultilibs);
- SysRootDir += SelectedMultilib.osSuffix();
- return std::string(SysRootDir);
+ // No multilibs selected so return a single default multilib.
+ static const llvm::SmallVector<Multilib> Default = {Multilib()};
+ return llvm::reverse(Default);
}
void BareMetal::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
@@ -197,10 +273,14 @@ void BareMetal::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
}
if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) {
- SmallString<128> Dir(computeSysRoot());
- if (!Dir.empty()) {
- llvm::sys::path::append(Dir, "include");
- addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ const SmallString<128> SysRoot(computeSysRoot());
+ if (!SysRoot.empty()) {
+ for (const Multilib &M : getOrderedMultilibs()) {
+ SmallString<128> Dir(SysRoot);
+ llvm::sys::path::append(Dir, M.includeSuffix());
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
}
}
}
@@ -223,44 +303,47 @@ void BareMetal::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
if (SysRoot.empty())
return;
- switch (GetCXXStdlibType(DriverArgs)) {
- case ToolChain::CST_Libcxx: {
- // First check sysroot/usr/include/c++/v1 if it exists.
- SmallString<128> TargetDir(SysRoot);
- llvm::sys::path::append(TargetDir, "usr", "include", "c++", "v1");
- if (D.getVFS().exists(TargetDir)) {
- addSystemInclude(DriverArgs, CC1Args, TargetDir.str());
+ for (const Multilib &M : getOrderedMultilibs()) {
+ SmallString<128> Dir(SysRoot);
+ llvm::sys::path::append(Dir, M.gccSuffix());
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx: {
+ // First check sysroot/usr/include/c++/v1 if it exists.
+ SmallString<128> TargetDir(Dir);
+ llvm::sys::path::append(TargetDir, "usr", "include", "c++", "v1");
+ if (D.getVFS().exists(TargetDir)) {
+ addSystemInclude(DriverArgs, CC1Args, TargetDir.str());
+ break;
+ }
+ // Add generic path if nothing else succeeded so far.
+ llvm::sys::path::append(Dir, "include", "c++", "v1");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ break;
+ }
+ case ToolChain::CST_Libstdcxx: {
+ llvm::sys::path::append(Dir, "include", "c++");
+ std::error_code EC;
+ Generic_GCC::GCCVersion Version = {"", -1, -1, -1, "", "", ""};
+ // Walk the subdirs, and find the one with the newest gcc version:
+ for (llvm::vfs::directory_iterator
+ LI = D.getVFS().dir_begin(Dir.str(), EC),
+ LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
+ auto CandidateVersion = Generic_GCC::GCCVersion::Parse(VersionText);
+ if (CandidateVersion.Major == -1)
+ continue;
+ if (CandidateVersion <= Version)
+ continue;
+ Version = CandidateVersion;
+ }
+ if (Version.Major != -1) {
+ llvm::sys::path::append(Dir, Version.Text);
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
break;
}
- // Add generic path if nothing else succeeded so far.
- SmallString<128> Dir(SysRoot);
- llvm::sys::path::append(Dir, "include", "c++", "v1");
- addSystemInclude(DriverArgs, CC1Args, Dir.str());
- break;
- }
- case ToolChain::CST_Libstdcxx: {
- SmallString<128> Dir(SysRoot);
- llvm::sys::path::append(Dir, "include", "c++");
- std::error_code EC;
- Generic_GCC::GCCVersion Version = {"", -1, -1, -1, "", "", ""};
- // Walk the subdirs, and find the one with the newest gcc version:
- for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin(Dir.str(), EC),
- LE;
- !EC && LI != LE; LI = LI.increment(EC)) {
- StringRef VersionText = llvm::sys::path::filename(LI->path());
- auto CandidateVersion = Generic_GCC::GCCVersion::Parse(VersionText);
- if (CandidateVersion.Major == -1)
- continue;
- if (CandidateVersion <= Version)
- continue;
- Version = CandidateVersion;
}
- if (Version.Major == -1)
- return;
- llvm::sys::path::append(Dir, Version.Text);
- addSystemInclude(DriverArgs, CC1Args, Dir.str());
- break;
- }
}
}
@@ -300,6 +383,51 @@ void BareMetal::AddLinkRuntimeLib(const ArgList &Args,
llvm_unreachable("Unhandled RuntimeLibType.");
}
+void baremetal::StaticLibTool::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+
+ // ar tool command "llvm-ar <options> <output_file> <input_files>".
+ ArgStringList CmdArgs;
+ // Create and insert file members with a deterministic index.
+ CmdArgs.push_back("rcsD");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs) {
+ if (II.isFilename()) {
+ CmdArgs.push_back(II.getFilename());
+ }
+ }
+
+ // Delete old output archive file if it already exists before generating a new
+ // archive file.
+ const char *OutputFileName = Output.getFilename();
+ if (Output.isFilename() && llvm::sys::fs::exists(OutputFileName)) {
+ if (std::error_code EC = llvm::sys::fs::remove(OutputFileName)) {
+ D.Diag(diag::err_drv_unable_to_remove_file) << EC.message();
+ return;
+ }
+ }
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetStaticLibToolPath());
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
+}
+
void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -308,14 +436,25 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ArgStringList CmdArgs;
auto &TC = static_cast<const toolchains::BareMetal &>(getToolChain());
+ const llvm::Triple::ArchType Arch = TC.getArch();
+ const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
AddLinkerInputs(TC, Inputs, Args, CmdArgs, JA);
CmdArgs.push_back("-Bstatic");
- Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
- options::OPT_e, options::OPT_s, options::OPT_t,
- options::OPT_Z_Flag, options::OPT_r});
+ if (Triple.isARM() || Triple.isThumb()) {
+ bool IsBigEndian = arm::isARMBigEndian(Triple, Args);
+ if (IsBigEndian)
+ arm::appendBE8LinkFlag(Args, CmdArgs, Triple);
+ CmdArgs.push_back(IsBigEndian ? "-EB" : "-EL");
+ } else if (Triple.isAArch64()) {
+ CmdArgs.push_back(Arch == llvm::Triple::aarch64_be ? "-EB" : "-EL");
+ }
+
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_L, options::OPT_T_Group, options::OPT_s,
+ options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
TC.AddFilePathLibArgs(Args, CmdArgs);
@@ -337,10 +476,19 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
TC.AddLinkRuntimeLib(Args, CmdArgs);
}
+ if (TC.getTriple().isRISCV())
+ CmdArgs.push_back("-X");
+
+ // The R_ARM_TARGET2 relocation must be treated as R_ARM_REL32 on arm*-*-elf
+ // and arm*-*-eabi (the default is R_ARM_GOT_PREL, used on arm*-*-linux and
+ // arm*-*-*bsd).
+ if (isARMBareMetal(TC.getTriple()))
+ CmdArgs.push_back("--target2=rel");
+
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Args.MakeArgString(TC.GetLinkerPath()),
- CmdArgs, Inputs, Output));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(),
+ Args.MakeArgString(TC.GetLinkerPath()), CmdArgs, Inputs, Output));
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.h
index 2a16a5beb08d..fc39a2a10e01 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.h
@@ -32,11 +32,13 @@ public:
protected:
Tool *buildLinker() const override;
+ Tool *buildStaticLibTool() const override;
public:
bool useIntegratedAs() const override { return true; }
bool isBareMetal() const override { return true; }
bool isCrossCompiling() const override { return true; }
+ bool HasNativeLLVMSupport() const override { return true; }
bool isPICDefault() const override { return false; }
bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
return false;
@@ -70,6 +72,11 @@ public:
void AddLinkRuntimeLib(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
std::string computeSysRoot() const override;
+
+private:
+ using OrderedMultilibs =
+ llvm::iterator_range<llvm::SmallVector<Multilib>::const_reverse_iterator>;
+ OrderedMultilibs getOrderedMultilibs() const;
};
} // namespace toolchains
@@ -77,6 +84,20 @@ public:
namespace tools {
namespace baremetal {
+class LLVM_LIBRARY_VISIBILITY StaticLibTool : public Tool {
+public:
+ StaticLibTool(const ToolChain &TC)
+ : Tool("baremetal::StaticLibTool", "llvm-ar", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
Linker(const ToolChain &TC) : Tool("baremetal::Linker", "ld.lld", TC) {}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.cpp
index de286faaca6d..432d61ae2fdf 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.cpp
@@ -38,13 +38,13 @@ CSKYToolChain::CSKYToolChain(const Driver &D, const llvm::Triple &Triple,
GCCInstallation.init(Triple, Args);
if (GCCInstallation.isValid()) {
Multilibs = GCCInstallation.getMultilibs();
- SelectedMultilib = GCCInstallation.getMultilib();
+ SelectedMultilibs.assign({GCCInstallation.getMultilib()});
path_list &Paths = getFilePaths();
// Add toolchain/multilib specific file paths.
- addMultilibsFilePaths(D, Multilibs, SelectedMultilib,
+ addMultilibsFilePaths(D, Multilibs, SelectedMultilibs.back(),
GCCInstallation.getInstallPath(), Paths);
getFilePaths().push_back(GCCInstallation.getInstallPath().str() +
- SelectedMultilib.osSuffix());
+ SelectedMultilibs.back().osSuffix());
ToolChain::path_list &PPaths = getProgramPaths();
// Multilib cross-compiler GCC installations put ld in a triple-prefixed
// directory off of the parent of the GCC installation.
@@ -52,11 +52,12 @@ CSKYToolChain::CSKYToolChain(const Driver &D, const llvm::Triple &Triple,
GCCInstallation.getTriple().str() + "/bin")
.str());
PPaths.push_back((GCCInstallation.getParentLibPath() + "/../bin").str());
+ getFilePaths().push_back(computeSysRoot() + "/lib" +
+ SelectedMultilibs.back().osSuffix());
} else {
getProgramPaths().push_back(D.Dir);
+ getFilePaths().push_back(computeSysRoot() + "/lib");
}
- getFilePaths().push_back(computeSysRoot() + "/lib" +
- SelectedMultilib.osSuffix());
}
Tool *CSKYToolChain::buildLinker() const {
@@ -169,8 +170,8 @@ void CSKY::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
Args.AddAllArgs(CmdArgs,
- {options::OPT_T_Group, options::OPT_e, options::OPT_s,
- options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
+ {options::OPT_T_Group, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_r});
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
index 77554aa2c462..adb550d9c5da 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
@@ -45,15 +45,18 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/ARMTargetParserCommon.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Compression.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
+#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/Support/YAMLParser.h"
+#include "llvm/TargetParser/ARMTargetParserCommon.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
#include <cctype>
using namespace clang::driver;
@@ -400,22 +403,6 @@ static bool ShouldEnableAutolink(const ArgList &Args, const ToolChain &TC,
Default);
}
-// Convert an arg of the form "-gN" or "-ggdbN" or one of their aliases
-// to the corresponding DebugInfoKind.
-static codegenoptions::DebugInfoKind DebugLevelToInfoKind(const Arg &A) {
- assert(A.getOption().matches(options::OPT_gN_Group) &&
- "Not a -g option that specifies a debug-info level");
- if (A.getOption().matches(options::OPT_g0) ||
- A.getOption().matches(options::OPT_ggdb0))
- return codegenoptions::NoDebugInfo;
- if (A.getOption().matches(options::OPT_gline_tables_only) ||
- A.getOption().matches(options::OPT_ggdb1))
- return codegenoptions::DebugLineTablesOnly;
- if (A.getOption().matches(options::OPT_gline_directives_only))
- return codegenoptions::DebugDirectivesOnly;
- return codegenoptions::DebugInfoConstructor;
-}
-
static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
switch (Triple.getArch()){
default:
@@ -433,6 +420,20 @@ static bool useFramePointerForTargetByDefault(const ArgList &Args,
if (Args.hasArg(options::OPT_pg) && !Args.hasArg(options::OPT_mfentry))
return true;
+ if (Triple.isAndroid()) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::riscv64:
+ return true;
+ default:
+ break;
+ }
+ }
+
switch (Triple.getArch()) {
case llvm::Triple::xcore:
case llvm::Triple::wasm32:
@@ -472,9 +473,6 @@ static bool useFramePointerForTargetByDefault(const ArgList &Args,
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
- if (Triple.isAndroid())
- return true;
- [[fallthrough]];
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
case llvm::Triple::mips:
@@ -528,7 +526,8 @@ getFramePointerKind(const ArgList &Args, const llvm::Triple &Triple) {
bool OmitLeafFP =
Args.hasFlag(options::OPT_momit_leaf_frame_pointer,
options::OPT_mno_omit_leaf_frame_pointer,
- Triple.isAArch64() || Triple.isPS() || Triple.isVE());
+ Triple.isAArch64() || Triple.isPS() || Triple.isVE() ||
+ (Triple.isAndroid() && Triple.isRISCV64()));
if (NoOmitFP || mustUseNonLeafFramePointerForTarget(Triple) ||
(!OmitFP && useFramePointerForTargetByDefault(Args, Triple))) {
if (OmitLeafFP)
@@ -576,6 +575,16 @@ static void addDebugObjectName(const ArgList &Args, ArgStringList &CmdArgs,
// Make the path absolute in the debug infos like MSVC does.
llvm::sys::fs::make_absolute(ObjFileNameForDebug);
}
+ // If the object file name is a relative path, then always use Windows
+ // backslash style as -object-file-name is used for embedding object file path
+ // in codeview and it can only be generated when targeting on Windows.
+ // Otherwise, just use native absolute path.
+ llvm::sys::path::Style Style =
+ llvm::sys::path::is_absolute(ObjFileNameForDebug)
+ ? llvm::sys::path::Style::native
+ : llvm::sys::path::Style::windows_backslash;
+ llvm::sys::path::remove_dots(ObjFileNameForDebug, /*remove_dot_dot=*/true,
+ Style);
CmdArgs.push_back(
Args.MakeArgString(Twine("-object-file-name=") + ObjFileNameForDebug));
}
@@ -696,10 +705,10 @@ static void addDashXForInput(const ArgList &Args, const InputInfo &Input,
}
static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
- const Driver &D, const InputInfo &Output,
+ const JobAction &JA, const InputInfo &Output,
const ArgList &Args, SanitizerArgs &SanArgs,
ArgStringList &CmdArgs) {
-
+ const Driver &D = TC.getDriver();
auto *PGOGenerateArg = Args.getLastArg(options::OPT_fprofile_generate,
options::OPT_fprofile_generate_EQ,
options::OPT_fno_profile_generate);
@@ -707,12 +716,7 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
PGOGenerateArg->getOption().matches(options::OPT_fno_profile_generate))
PGOGenerateArg = nullptr;
- auto *CSPGOGenerateArg = Args.getLastArg(options::OPT_fcs_profile_generate,
- options::OPT_fcs_profile_generate_EQ,
- options::OPT_fno_profile_generate);
- if (CSPGOGenerateArg &&
- CSPGOGenerateArg->getOption().matches(options::OPT_fno_profile_generate))
- CSPGOGenerateArg = nullptr;
+ auto *CSPGOGenerateArg = getLastCSProfileGenerateArg(Args);
auto *ProfileGenerateArg = Args.getLastArg(
options::OPT_fprofile_instr_generate,
@@ -744,9 +748,6 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
}
if (TC.getTriple().isOSAIX()) {
- if (ProfileGenerateArg)
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << ProfileGenerateArg->getSpelling() << TC.getTriple().str();
if (Arg *ProfileSampleUseArg = getLastProfileSampleUseArg(Args))
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< ProfileSampleUseArg->getSpelling() << TC.getTriple().str();
@@ -814,10 +815,6 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
options::OPT_fno_test_coverage, false) ||
Args.hasArg(options::OPT_coverage);
bool EmitCovData = TC.needsGCovInstrumentation(Args);
- if (EmitCovNotes)
- CmdArgs.push_back("-ftest-coverage");
- if (EmitCovData)
- CmdArgs.push_back("-fprofile-arcs");
if (Args.hasFlag(options::OPT_fcoverage_mapping,
options::OPT_fno_coverage_mapping, false)) {
@@ -905,32 +902,41 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
Args.hasArg(options::OPT_coverage))
FProfileDir = Args.getLastArg(options::OPT_fprofile_dir);
- // Put the .gcno and .gcda files (if needed) next to the object file or
- // bitcode file in the case of LTO.
- // FIXME: There should be a simpler way to find the object file for this
- // input, and this code probably does the wrong thing for commands that
- // compile and link all at once.
- if ((Args.hasArg(options::OPT_c) || Args.hasArg(options::OPT_S)) &&
- (EmitCovNotes || EmitCovData) && Output.isFilename()) {
- SmallString<128> OutputFilename;
- if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT__SLASH_Fo))
- OutputFilename = FinalOutput->getValue();
- else if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o))
- OutputFilename = FinalOutput->getValue();
- else
- OutputFilename = llvm::sys::path::filename(Output.getBaseInput());
- SmallString<128> CoverageFilename = OutputFilename;
+ // TODO: Don't claim -c/-S to warn about -fsyntax-only -c/-S, -E -c/-S,
+ // like we warn about -fsyntax-only -E.
+ (void)(Args.hasArg(options::OPT_c) || Args.hasArg(options::OPT_S));
+
+ // Put the .gcno and .gcda files (if needed) next to the primary output file,
+ // or fall back to a file in the current directory for `clang -c --coverage
+ // d/a.c` in the absence of -o.
+ if (EmitCovNotes || EmitCovData) {
+ SmallString<128> CoverageFilename;
+ if (Arg *DumpDir = Args.getLastArgNoClaim(options::OPT_dumpdir)) {
+ // Form ${dumpdir}${basename}.gcno. Note that dumpdir may not end with a
+ // path separator.
+ CoverageFilename = DumpDir->getValue();
+ CoverageFilename += llvm::sys::path::filename(Output.getBaseInput());
+ } else if (Arg *FinalOutput =
+ C.getArgs().getLastArg(options::OPT__SLASH_Fo)) {
+ CoverageFilename = FinalOutput->getValue();
+ } else if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o)) {
+ CoverageFilename = FinalOutput->getValue();
+ } else {
+ CoverageFilename = llvm::sys::path::filename(Output.getBaseInput());
+ }
if (llvm::sys::path::is_relative(CoverageFilename))
(void)D.getVFS().makeAbsolute(CoverageFilename);
llvm::sys::path::replace_extension(CoverageFilename, "gcno");
-
- CmdArgs.push_back("-coverage-notes-file");
- CmdArgs.push_back(Args.MakeArgString(CoverageFilename));
+ if (EmitCovNotes) {
+ CmdArgs.push_back("-coverage-notes-file");
+ CmdArgs.push_back(Args.MakeArgString(CoverageFilename));
+ }
if (EmitCovData) {
if (FProfileDir) {
+ SmallString<128> Gcno = std::move(CoverageFilename);
CoverageFilename = FProfileDir->getValue();
- llvm::sys::path::append(CoverageFilename, OutputFilename);
+ llvm::sys::path::append(CoverageFilename, Gcno);
}
llvm::sys::path::replace_extension(CoverageFilename, "gcda");
CmdArgs.push_back("-coverage-data-file");
@@ -969,32 +975,12 @@ static bool UseRelaxAll(Compilation &C, const ArgList &Args) {
RelaxDefault);
}
-static void RenderDebugEnablingArgs(const ArgList &Args, ArgStringList &CmdArgs,
- codegenoptions::DebugInfoKind DebugInfoKind,
- unsigned DwarfVersion,
- llvm::DebuggerKind DebuggerTuning) {
- switch (DebugInfoKind) {
- case codegenoptions::DebugDirectivesOnly:
- CmdArgs.push_back("-debug-info-kind=line-directives-only");
- break;
- case codegenoptions::DebugLineTablesOnly:
- CmdArgs.push_back("-debug-info-kind=line-tables-only");
- break;
- case codegenoptions::DebugInfoConstructor:
- CmdArgs.push_back("-debug-info-kind=constructor");
- break;
- case codegenoptions::LimitedDebugInfo:
- CmdArgs.push_back("-debug-info-kind=limited");
- break;
- case codegenoptions::FullDebugInfo:
- CmdArgs.push_back("-debug-info-kind=standalone");
- break;
- case codegenoptions::UnusedTypeInfo:
- CmdArgs.push_back("-debug-info-kind=unused-types");
- break;
- default:
- break;
- }
+static void
+RenderDebugEnablingArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ llvm::codegenoptions::DebugInfoKind DebugInfoKind,
+ unsigned DwarfVersion,
+ llvm::DebuggerKind DebuggerTuning) {
+ addDebugInfoKind(CmdArgs, DebugInfoKind);
if (DwarfVersion > 0)
CmdArgs.push_back(
Args.MakeArgString("-dwarf-version=" + Twine(DwarfVersion)));
@@ -1165,6 +1151,9 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
if (ArgM->getOption().matches(options::OPT_M) ||
ArgM->getOption().matches(options::OPT_MD))
CmdArgs.push_back("-sys-header-deps");
+ if (Args.hasFlag(options::OPT_canonical_prefixes,
+ options::OPT_no_canonical_prefixes, true))
+ CmdArgs.push_back("-canonical-system-headers");
if ((isa<PrecompileJobAction>(JA) &&
!Args.hasArg(options::OPT_fno_module_file_deps)) ||
Args.hasArg(options::OPT_fmodule_file_deps))
@@ -1190,6 +1179,34 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
if (JA.isOffloading(Action::OFK_HIP))
getToolChain().AddHIPIncludeArgs(Args, CmdArgs);
+ // If we are compiling for a GPU target we want to override the system headers
+ // with ones created by the 'libc' project if present.
+ if (!Args.hasArg(options::OPT_nostdinc) &&
+ !Args.hasArg(options::OPT_nogpuinc) &&
+ !Args.hasArg(options::OPT_nobuiltininc) &&
+ (getToolChain().getTriple().isNVPTX() ||
+ getToolChain().getTriple().isAMDGCN())) {
+
+ // Without an offloading language we will include these headers directly.
+ // Offloading languages will instead only use the declarations stored in
+ // the resource directory at clang/lib/Headers/llvm_libc_wrappers.
+ if (C.getActiveOffloadKinds() == Action::OFK_None) {
+ SmallString<128> P(llvm::sys::path::parent_path(D.InstalledDir));
+ llvm::sys::path::append(P, "include");
+ llvm::sys::path::append(P, "gpu-none-llvm");
+ CmdArgs.push_back("-c-isystem");
+ CmdArgs.push_back(Args.MakeArgString(P));
+ } else if (C.getActiveOffloadKinds() == Action::OFK_OpenMP) {
+ // TODO: CUDA / HIP include their own headers for some common functions
+ // implemented here. We'll need to clean those up so they do not conflict.
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ llvm::sys::path::append(P, "llvm_libc_wrappers");
+ CmdArgs.push_back("-internal-isystem");
+ CmdArgs.push_back(Args.MakeArgString(P));
+ }
+ }
+
// If we are offloading to a target via OpenMP we need to include the
// openmp_wrappers folder which contains alternative system headers.
if (JA.isDeviceOffloading(Action::OFK_OpenMP) &&
@@ -1295,6 +1312,9 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
} else if (A->getOption().matches(options::OPT_stdlibxx_isystem)) {
// Translated to -internal-isystem by the driver, no need to pass to cc1.
continue;
+ } else if (A->getOption().matches(options::OPT_ibuiltininc)) {
+ // This is used only by the driver. No need to pass to cc1.
+ continue;
}
// Not translated, render as usual.
@@ -1989,8 +2009,9 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args,
void Clang::AddPPCTargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
const llvm::Triple &T = getToolChain().getTriple();
- if (const Arg *A = Args.getLastArg(options::OPT_mtune_EQ)) {
+ if (Args.getLastArg(options::OPT_mtune_EQ)) {
CmdArgs.push_back("-tune-cpu");
std::string CPU = ppc::getPPCTuneCPU(Args, T);
CmdArgs.push_back(Args.MakeArgString(CPU));
@@ -2016,13 +2037,22 @@ void Clang::AddPPCTargetArgs(const ArgList &Args,
}
bool IEEELongDouble = getToolChain().defaultToIEEELongDouble();
+ bool VecExtabi = false;
for (const Arg *A : Args.filtered(options::OPT_mabi_EQ)) {
StringRef V = A->getValue();
- if (V == "ieeelongdouble")
+ if (V == "ieeelongdouble") {
IEEELongDouble = true;
- else if (V == "ibmlongdouble")
+ A->claim();
+ } else if (V == "ibmlongdouble") {
IEEELongDouble = false;
- else if (V != "altivec")
+ A->claim();
+ } else if (V == "vec-default") {
+ VecExtabi = false;
+ A->claim();
+ } else if (V == "vec-extabi") {
+ VecExtabi = true;
+ A->claim();
+ } else if (V != "altivec")
// The ppc64 linux abis are all "altivec" abis by default. Accept and ignore
// the option if given as we don't have backend support for any targets
// that don't use the altivec abi.
@@ -2030,10 +2060,14 @@ void Clang::AddPPCTargetArgs(const ArgList &Args,
}
if (IEEELongDouble)
CmdArgs.push_back("-mabi=ieeelongdouble");
+ if (VecExtabi) {
+ if (!T.isOSAIX())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << "-mabi=vec-extabi" << T.str();
+ CmdArgs.push_back("-mabi=vec-extabi");
+ }
- ppc::FloatABI FloatABI =
- ppc::getPPCFloatABI(getToolChain().getDriver(), Args);
-
+ ppc::FloatABI FloatABI = ppc::getPPCFloatABI(D, Args);
if (FloatABI == ppc::FloatABI::Soft) {
// Floating point operations and argument passing are soft.
CmdArgs.push_back("-msoft-float");
@@ -2074,6 +2108,12 @@ static void SetRISCVSmallDataLimit(const ToolChain &TC, const ArgList &Args,
if (Args.hasArg(options::OPT_G)) {
D.Diag(diag::warn_drv_unsupported_sdata);
}
+ } else if (Triple.isAndroid()) {
+ // GP relaxation is not supported on Android.
+ SmallDataLimit = "0";
+ if (Args.hasArg(options::OPT_G)) {
+ D.Diag(diag::warn_drv_unsupported_sdata);
+ }
} else if (Arg *A = Args.getLastArg(options::OPT_G)) {
SmallDataLimit = A->getValue();
}
@@ -2103,6 +2143,50 @@ void Clang::AddRISCVTargetArgs(const ArgList &Args,
else
CmdArgs.push_back(A->getValue());
}
+
+ // Handle -mrvv-vector-bits=<bits>
+ if (Arg *A = Args.getLastArg(options::OPT_mrvv_vector_bits_EQ)) {
+ StringRef Val = A->getValue();
+ const Driver &D = getToolChain().getDriver();
+
+ // Get minimum VLen from march.
+ unsigned MinVLen = 0;
+ StringRef Arch = riscv::getRISCVArch(Args, Triple);
+ auto ISAInfo = llvm::RISCVISAInfo::parseArchString(
+ Arch, /*EnableExperimentalExtensions*/ true);
+ if (!ISAInfo) {
+ // Ignore parsing error.
+ consumeError(ISAInfo.takeError());
+ } else {
+ MinVLen = (*ISAInfo)->getMinVLen();
+ }
+
+ // If the value is "zvl", use MinVLen from march. Otherwise, try to parse
+ // as integer as long as we have a MinVLen.
+ unsigned Bits = 0;
+ if (Val.equals("zvl") && MinVLen >= llvm::RISCV::RVVBitsPerBlock) {
+ Bits = MinVLen;
+ } else if (!Val.getAsInteger(10, Bits)) {
+ // Only accept power of 2 values beteen RVVBitsPerBlock and 65536 that
+ // at least MinVLen.
+ if (Bits < MinVLen || Bits < llvm::RISCV::RVVBitsPerBlock ||
+ Bits > 65536 || !llvm::isPowerOf2_32(Bits))
+ Bits = 0;
+ }
+
+ // If we got a valid value try to use it.
+ if (Bits != 0) {
+ unsigned VScaleMin = Bits / llvm::RISCV::RVVBitsPerBlock;
+ CmdArgs.push_back(
+ Args.MakeArgString("-mvscale-max=" + llvm::Twine(VScaleMin)));
+ CmdArgs.push_back(
+ Args.MakeArgString("-mvscale-min=" + llvm::Twine(VScaleMin)));
+ } else if (!Val.equals("scalable")) {
+ // Handle the unsupported values passed to mrvv-vector-bits.
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Val;
+ }
+ }
}
void Clang::AddSparcTargetArgs(const ArgList &Args,
@@ -2440,6 +2524,9 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
Args.AddLastArg(CmdArgs, options::OPT_femit_dwarf_unwind_EQ);
+ Args.addOptInFlag(CmdArgs, options::OPT_femit_compact_unwind_non_canonical,
+ options::OPT_fno_emit_compact_unwind_non_canonical);
+
// If you add more args here, also add them to the block below that
// starts with "// If CollectArgsForIntegratedAssembler() isn't called below".
@@ -2592,7 +2679,7 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
CmdArgs.push_back(Value.data());
} else {
RenderDebugEnablingArgs(Args, CmdArgs,
- codegenoptions::DebugInfoConstructor,
+ llvm::codegenoptions::DebugInfoConstructor,
DwarfVersion, llvm::DebuggerKind::Default);
}
} else if (Value.startswith("-mcpu") || Value.startswith("-mfpu") ||
@@ -2703,6 +2790,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
FPContract = "on";
bool StrictFPModel = false;
StringRef Float16ExcessPrecision = "";
+ StringRef BFloat16ExcessPrecision = "";
if (const Arg *A = Args.getLastArg(options::OPT_flimited_precision_EQ)) {
CmdArgs.push_back("-mlimit-float-precision");
@@ -2918,6 +3006,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getSpelling() << Val;
}
+ BFloat16ExcessPrecision = Float16ExcessPrecision;
break;
}
case options::OPT_ffinite_math_only:
@@ -3093,6 +3182,9 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
if (!Float16ExcessPrecision.empty())
CmdArgs.push_back(Args.MakeArgString("-ffloat16-excess-precision=" +
Float16ExcessPrecision));
+ if (!BFloat16ExcessPrecision.empty())
+ CmdArgs.push_back(Args.MakeArgString("-fbfloat16-excess-precision=" +
+ BFloat16ExcessPrecision));
ParseMRecip(D, Args, CmdArgs);
@@ -3301,7 +3393,7 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
}
}
CmdArgs.push_back("-target-feature");
- CmdArgs.push_back("+read-tp-hard");
+ CmdArgs.push_back("+read-tp-tpidruro");
}
if (EffectiveTriple.isAArch64() && Value != "sysreg" && Value != "global") {
D.Diag(diag::err_drv_invalid_value_with_suggestion)
@@ -3618,8 +3710,13 @@ static bool RenderModulesOptions(Compilation &C, const Driver &D,
// modules support by default.
bool HaveStdCXXModules =
IsCXX && Std &&
- (Std->containsValue("c++2a") || Std->containsValue("c++20") ||
- Std->containsValue("c++2b") || Std->containsValue("c++latest"));
+ (Std->containsValue("c++2a") || Std->containsValue("gnu++2a") ||
+ Std->containsValue("c++20") || Std->containsValue("gnu++20") ||
+ Std->containsValue("c++2b") || Std->containsValue("gnu++2b") ||
+ Std->containsValue("c++23") || Std->containsValue("gnu++23") ||
+ Std->containsValue("c++2c") || Std->containsValue("gnu++2c") ||
+ Std->containsValue("c++26") || Std->containsValue("gnu++26") ||
+ Std->containsValue("c++latest") || Std->containsValue("gnu++latest"));
bool HaveModules = HaveStdCXXModules;
// -fmodules enables the use of precompiled modules (off by default).
@@ -3636,11 +3733,6 @@ static bool RenderModulesOptions(Compilation &C, const Driver &D,
}
HaveModules |= HaveClangModules;
- if (Args.hasArg(options::OPT_fmodules_ts)) {
- D.Diag(diag::warn_deprecated_fmodules_ts_flag);
- CmdArgs.push_back("-fmodules-ts");
- HaveModules = true;
- }
// -fmodule-maps enables implicit reading of module map files. By default,
// this is enabled if we are using Clang's flavor of precompiled modules.
@@ -3694,12 +3786,6 @@ static bool RenderModulesOptions(Compilation &C, const Driver &D,
}
if (HaveModules) {
- // -fprebuilt-module-path specifies where to load the prebuilt module files.
- for (const Arg *A : Args.filtered(options::OPT_fprebuilt_module_path)) {
- CmdArgs.push_back(Args.MakeArgString(
- std::string("-fprebuilt-module-path=") + A->getValue()));
- A->claim();
- }
if (Args.hasFlag(options::OPT_fprebuilt_implicit_modules,
options::OPT_fno_prebuilt_implicit_modules, false))
CmdArgs.push_back("-fprebuilt-implicit-modules");
@@ -3732,9 +3818,16 @@ static bool RenderModulesOptions(Compilation &C, const Driver &D,
// names to precompiled module files (the module is loaded only if used).
// The -fmodule-file=<file> form can be used to unconditionally load
// precompiled module files (whether used or not).
- if (HaveModules)
+ if (HaveModules || Input.getType() == clang::driver::types::TY_ModuleFile) {
Args.AddAllArgs(CmdArgs, options::OPT_fmodule_file);
- else
+
+ // -fprebuilt-module-path specifies where to load the prebuilt module files.
+ for (const Arg *A : Args.filtered(options::OPT_fprebuilt_module_path)) {
+ CmdArgs.push_back(Args.MakeArgString(
+ std::string("-fprebuilt-module-path=") + A->getValue()));
+ A->claim();
+ }
+ } else
Args.ClaimAllArgs(options::OPT_fmodule_file);
// When building modules and generating crashdumps, we need to dump a module
@@ -3841,7 +3934,8 @@ static void RenderCharacterOptions(const ArgList &Args, const llvm::Triple &T,
else
CmdArgs.push_back("-fsigned-wchar");
}
- }
+ } else if (T.isOSzOS())
+ CmdArgs.push_back("-fno-signed-wchar");
}
static void RenderObjCOptions(const ToolChain &TC, const Driver &D,
@@ -3987,6 +4081,13 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString(Opt));
}
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fdiagnostics_misexpect_tolerance_EQ)) {
+ std::string Opt =
+ std::string("-fdiagnostics-misexpect-tolerance=") + A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(Opt));
+ }
+
if (const Arg *A = Args.getLastArg(options::OPT_fdiagnostics_format_EQ)) {
CmdArgs.push_back("-fdiagnostics-format");
CmdArgs.push_back(A->getValue());
@@ -4027,6 +4128,9 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
Args.addOptOutFlag(CmdArgs, options::OPT_fshow_source_location,
options::OPT_fno_show_source_location);
+ Args.addOptOutFlag(CmdArgs, options::OPT_fdiagnostics_show_line_numbers,
+ options::OPT_fno_diagnostics_show_line_numbers);
+
if (Args.hasArg(options::OPT_fdiagnostics_absolute_paths))
CmdArgs.push_back("-fdiagnostics-absolute-paths");
@@ -4082,12 +4186,12 @@ static void renderDwarfFormat(const Driver &D, const llvm::Triple &T,
DwarfFormatArg->render(Args, CmdArgs);
}
-static void renderDebugOptions(const ToolChain &TC, const Driver &D,
- const llvm::Triple &T, const ArgList &Args,
- bool EmitCodeView, bool IRInput,
- ArgStringList &CmdArgs,
- codegenoptions::DebugInfoKind &DebugInfoKind,
- DwarfFissionKind &DwarfFission) {
+static void
+renderDebugOptions(const ToolChain &TC, const Driver &D, const llvm::Triple &T,
+ const ArgList &Args, bool EmitCodeView, bool IRInput,
+ ArgStringList &CmdArgs,
+ llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
+ DwarfFissionKind &DwarfFission) {
if (Args.hasFlag(options::OPT_fdebug_info_for_profiling,
options::OPT_fno_debug_info_for_profiling, false) &&
checkDebugInfoOption(
@@ -4122,19 +4226,19 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
}
}
if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
- DebugInfoKind = codegenoptions::DebugInfoConstructor;
+ DebugInfoKind = llvm::codegenoptions::DebugInfoConstructor;
// If the last option explicitly specified a debug-info level, use it.
if (checkDebugInfoOption(A, Args, D, TC) &&
A->getOption().matches(options::OPT_gN_Group)) {
- DebugInfoKind = DebugLevelToInfoKind(*A);
+ DebugInfoKind = debugLevelToInfoKind(*A);
// For -g0 or -gline-tables-only, drop -gsplit-dwarf. This gets a bit more
// complicated if you've disabled inline info in the skeleton CUs
// (SplitDWARFInlining) - then there's value in composing split-dwarf and
// line-tables-only, so let those compose naturally in that case.
- if (DebugInfoKind == codegenoptions::NoDebugInfo ||
- DebugInfoKind == codegenoptions::DebugDirectivesOnly ||
- (DebugInfoKind == codegenoptions::DebugLineTablesOnly &&
+ if (DebugInfoKind == llvm::codegenoptions::NoDebugInfo ||
+ DebugInfoKind == llvm::codegenoptions::DebugDirectivesOnly ||
+ (DebugInfoKind == llvm::codegenoptions::DebugLineTablesOnly &&
SplitDWARFInlining))
DwarfFission = DwarfFissionKind::None;
}
@@ -4168,12 +4272,12 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
// If the user asked for debug info but did not explicitly specify -gcodeview
// or -gdwarf, ask the toolchain for the default format.
if (!EmitCodeView && !EmitDwarf &&
- DebugInfoKind != codegenoptions::NoDebugInfo) {
+ DebugInfoKind != llvm::codegenoptions::NoDebugInfo) {
switch (TC.getDefaultDebugFormat()) {
- case codegenoptions::DIF_CodeView:
+ case llvm::codegenoptions::DIF_CodeView:
EmitCodeView = true;
break;
- case codegenoptions::DIF_DWARF:
+ case llvm::codegenoptions::DIF_DWARF:
EmitDwarf = true;
break;
}
@@ -4193,8 +4297,8 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
// -gline-directives-only supported only for the DWARF debug info.
if (RequestedDWARFVersion == 0 &&
- DebugInfoKind == codegenoptions::DebugDirectivesOnly)
- DebugInfoKind = codegenoptions::NoDebugInfo;
+ DebugInfoKind == llvm::codegenoptions::DebugDirectivesOnly)
+ DebugInfoKind = llvm::codegenoptions::NoDebugInfo;
// strict DWARF is set to false by default. But for DBX, we need it to be set
// as true by default.
@@ -4227,9 +4331,9 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
// wins.
if (checkDebugInfoOption(Args.getLastArg(options::OPT_gmodules), Args, D,
TC)) {
- if (DebugInfoKind != codegenoptions::DebugLineTablesOnly &&
- DebugInfoKind != codegenoptions::DebugDirectivesOnly) {
- DebugInfoKind = codegenoptions::DebugInfoConstructor;
+ if (DebugInfoKind != llvm::codegenoptions::DebugLineTablesOnly &&
+ DebugInfoKind != llvm::codegenoptions::DebugDirectivesOnly) {
+ DebugInfoKind = llvm::codegenoptions::DebugInfoConstructor;
CmdArgs.push_back("-dwarf-ext-refs");
CmdArgs.push_back("-fmodule-format=obj");
}
@@ -4251,13 +4355,13 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
if (const Arg *A = Args.getLastArg(options::OPT_fstandalone_debug))
(void)checkDebugInfoOption(A, Args, D, TC);
- if (DebugInfoKind == codegenoptions::LimitedDebugInfo ||
- DebugInfoKind == codegenoptions::DebugInfoConstructor) {
+ if (DebugInfoKind == llvm::codegenoptions::LimitedDebugInfo ||
+ DebugInfoKind == llvm::codegenoptions::DebugInfoConstructor) {
if (Args.hasFlag(options::OPT_fno_eliminate_unused_debug_types,
options::OPT_feliminate_unused_debug_types, false))
- DebugInfoKind = codegenoptions::UnusedTypeInfo;
+ DebugInfoKind = llvm::codegenoptions::UnusedTypeInfo;
else if (NeedFullDebug)
- DebugInfoKind = codegenoptions::FullDebugInfo;
+ DebugInfoKind = llvm::codegenoptions::FullDebugInfo;
}
if (Args.hasFlag(options::OPT_gembed_source, options::OPT_gno_embed_source,
@@ -4295,8 +4399,8 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
// When emitting remarks, we need at least debug lines in the output.
if (willEmitRemarks(Args) &&
- DebugInfoKind <= codegenoptions::DebugDirectivesOnly)
- DebugInfoKind = codegenoptions::DebugLineTablesOnly;
+ DebugInfoKind <= llvm::codegenoptions::DebugDirectivesOnly)
+ DebugInfoKind = llvm::codegenoptions::DebugLineTablesOnly;
// Adjust the debug info kind for the given toolchain.
TC.adjustDebugInfoKind(DebugInfoKind, Args);
@@ -4598,12 +4702,36 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(
Twine("-target-sdk-version=") +
CudaVersionToString(CTC->CudaInstallation.version())));
+ // Unsized function arguments used for variadics were introduced in
+ // CUDA-9.0. We still do not support generating code that actually uses
+ // variadic arguments yet, but we do need to allow parsing them as
+ // recent CUDA headers rely on that.
+ // https://github.com/llvm/llvm-project/issues/58410
+ if (CTC->CudaInstallation.version() >= CudaVersion::CUDA_90)
+ CmdArgs.push_back("-fcuda-allow-variadic-functions");
}
}
CmdArgs.push_back("-aux-triple");
CmdArgs.push_back(Args.MakeArgString(NormalizedTriple));
+
+ if (JA.isDeviceOffloading(Action::OFK_HIP) &&
+ getToolChain().getTriple().isAMDGPU()) {
+ // Device side compilation printf
+ if (Args.getLastArg(options::OPT_mprintf_kind_EQ)) {
+ CmdArgs.push_back(Args.MakeArgString(
+ "-mprintf-kind=" +
+ Args.getLastArgValue(options::OPT_mprintf_kind_EQ)));
+ // Force compiler error on invalid conversion specifiers
+ CmdArgs.push_back(
+ Args.MakeArgString("-Werror=format-invalid-specifier"));
+ }
+ }
}
+ // Unconditionally claim the printf option now to avoid unused diagnostic.
+ if (const Arg *PF = Args.getLastArg(options::OPT_mprintf_kind_EQ))
+ PF->claim();
+
if (Args.hasFlag(options::OPT_fsycl, options::OPT_fno_sycl, false)) {
CmdArgs.push_back("-fsycl-is-device");
@@ -4648,6 +4776,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Select the appropriate action.
RewriteKind rewriteKind = RK_None;
+ bool UnifiedLTO = false;
+ if (IsUsingLTO) {
+ UnifiedLTO = Args.hasFlag(options::OPT_funified_lto,
+ options::OPT_fno_unified_lto, Triple.isPS());
+ if (UnifiedLTO)
+ CmdArgs.push_back("-funified-lto");
+ }
+
// If CollectArgsForIntegratedAssembler() isn't called below, claim the args
// it claims when not running an assembler. Otherwise, clang would emit
// "argument unused" warnings for assembler flags when e.g. adding "-E" to
@@ -4792,11 +4928,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
assert(LTOMode == LTOK_Full || LTOMode == LTOK_Thin);
CmdArgs.push_back(Args.MakeArgString(
Twine("-flto=") + (LTOMode == LTOK_Thin ? "thin" : "full")));
- CmdArgs.push_back("-flto-unit");
+ // PS4 uses the legacy LTO API, which does not support some of the
+ // features enabled by -flto-unit.
+ if (!RawTriple.isPS4() ||
+ (D.getLTOMode() == LTOK_Full) || !UnifiedLTO)
+ CmdArgs.push_back("-flto-unit");
}
}
}
+ Args.AddLastArg(CmdArgs, options::OPT_dumpdir);
+
if (const Arg *A = Args.getLastArg(options::OPT_fthinlto_index_EQ)) {
if (!types::isLLVMIR(Input.getType()))
D.Diag(diag::err_drv_arg_requires_bitcode_input) << A->getAsString(Args);
@@ -4816,6 +4958,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
!MemProfArg->getOption().matches(options::OPT_fno_memory_profile))
MemProfArg->render(Args, CmdArgs);
+ if (auto *MemProfUseArg =
+ Args.getLastArg(options::OPT_fmemory_profile_use_EQ)) {
+ if (MemProfArg)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << MemProfUseArg->getAsString(Args) << MemProfArg->getAsString(Args);
+ if (auto *PGOInstrArg = Args.getLastArg(options::OPT_fprofile_generate,
+ options::OPT_fprofile_generate_EQ))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << MemProfUseArg->getAsString(Args) << PGOInstrArg->getAsString(Args);
+ MemProfUseArg->render(Args, CmdArgs);
+ }
+
// Embed-bitcode option.
// Only white-listed flags below are allowed to be embedded.
if (C.getDriver().embedBitcodeInObject() && !IsUsingLTO &&
@@ -4929,7 +5083,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(std::make_unique<Command>(
JA, *this, ResponseFileSupport::AtFileUTF8(), D.getClangProgramPath(),
- CmdArgs, Inputs, Output));
+ CmdArgs, Inputs, Output, D.getPrependArg()));
return;
}
@@ -5028,6 +5182,37 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Twine(std::min(Value, 65536u))));
}
+ if (Triple.isOSzOS()) {
+ // On z/OS some of the system header feature macros need to
+ // be defined to enable most cross platform projects to build
+ // successfully. Ths include the libc++ library. A
+ // complicating factor is that users can define these
+ // macros to the same or different values. We need to add
+ // the definition for these macros to the compilation command
+ // if the user hasn't already defined them.
+
+ auto findMacroDefinition = [&](const std::string &Macro) {
+ auto MacroDefs = Args.getAllArgValues(options::OPT_D);
+ return std::find_if(MacroDefs.begin(), MacroDefs.end(),
+ [&](const std::string &M) {
+ return M == Macro ||
+ M.find(Macro + '=') != std::string::npos;
+ }) != MacroDefs.end();
+ };
+
+ // _UNIX03_WITHDRAWN is required for libcxx & porting.
+ if (!findMacroDefinition("_UNIX03_WITHDRAWN"))
+ CmdArgs.push_back("-D_UNIX03_WITHDRAWN");
+ // _OPEN_DEFAULT is required for XL compat
+ if (!findMacroDefinition("_OPEN_DEFAULT"))
+ CmdArgs.push_back("-D_OPEN_DEFAULT");
+ if (D.CCCIsCXX() || types::isCXX(Input.getType())) {
+ // _XOPEN_SOURCE=600 is required for libcxx.
+ if (!findMacroDefinition("_XOPEN_SOURCE"))
+ CmdArgs.push_back("-D_XOPEN_SOURCE=600");
+ }
+ }
+
llvm::Reloc::Model RelocationModel;
unsigned PICLevel;
bool IsPIE;
@@ -5149,7 +5334,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Triple.getArch() != llvm::Triple::x86_64)
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< Name << Triple.getArchName();
- } else if (Name == "SLEEF") {
+ } else if (Name == "SLEEF" || Name == "ArmPL") {
if (Triple.getArch() != llvm::Triple::aarch64 &&
Triple.getArch() != llvm::Triple::aarch64_be)
D.Diag(diag::err_drv_unsupported_opt_for_target)
@@ -5167,27 +5352,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// LLVM Code Generator Options.
- for (const Arg *A : Args.filtered(options::OPT_frewrite_map_file_EQ)) {
- StringRef Map = A->getValue();
- if (!llvm::sys::fs::exists(Map)) {
- D.Diag(diag::err_drv_no_such_file) << Map;
- } else {
- A->render(Args, CmdArgs);
- A->claim();
- }
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ_vec_extabi,
- options::OPT_mabi_EQ_vec_default)) {
- if (!Triple.isOSAIX())
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << A->getSpelling() << RawTriple.str();
- if (A->getOption().getID() == options::OPT_mabi_EQ_vec_extabi)
- CmdArgs.push_back("-mabi=vec-extabi");
- else
- CmdArgs.push_back("-mabi=vec-default");
- }
-
if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ_quadword_atomics)) {
if (!Triple.isOSAIX() || Triple.isPPC32())
D.Diag(diag::err_drv_unsupported_opt_for_target)
@@ -5204,14 +5368,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
if (Arg *A = Args.getLastArg(options::OPT_Wframe_larger_than_EQ)) {
- StringRef v = A->getValue();
- // FIXME: Validate the argument here so we don't produce meaningless errors
- // about -fwarn-stack-size=.
- if (v.empty())
- D.Diag(diag::err_drv_missing_argument) << A->getSpelling() << 1;
+ StringRef V = A->getValue(), V1 = V;
+ unsigned Size;
+ if (V1.consumeInteger(10, Size) || !V1.empty())
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << V << A->getOption().getName();
else
- CmdArgs.push_back(Args.MakeArgString("-fwarn-stack-size=" + v));
- A->claim();
+ CmdArgs.push_back(Args.MakeArgString("-fwarn-stack-size=" + V));
}
Args.addOptOutFlag(CmdArgs, options::OPT_fjump_tables,
@@ -5518,24 +5681,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
RenderTargetOptions(Triple, Args, KernelOrKext, CmdArgs);
- // FIXME: For now we want to demote any errors to warnings, when they have
- // been raised for asking the wrong question of scalable vectors, such as
- // asking for the fixed number of elements. This may happen because code that
- // is not yet ported to work for scalable vectors uses the wrong interfaces,
- // whereas the behaviour is actually correct. Emitting a warning helps bring
- // up scalable vector support in an incremental way. When scalable vector
- // support is stable enough, all uses of wrong interfaces should be considered
- // as errors, but until then, we can live with a warning being emitted by the
- // compiler. This way, Clang can be used to compile code with scalable vectors
- // and identify possible issues.
- if (isa<AssembleJobAction>(JA) || isa<CompileJobAction>(JA) ||
- isa<BackendJobAction>(JA)) {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-treat-scalable-fixed-error-as-warning");
- }
-
// These two are potentially updated by AddClangCLArgs.
- codegenoptions::DebugInfoKind DebugInfoKind = codegenoptions::NoDebugInfo;
+ llvm::codegenoptions::DebugInfoKind DebugInfoKind =
+ llvm::codegenoptions::NoDebugInfo;
bool EmitCodeView = false;
// Add clang-cl arguments.
@@ -5551,7 +5699,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// This controls whether or not we perform JustMyCode instrumentation.
if (Args.hasFlag(options::OPT_fjmc, options::OPT_fno_jmc, false)) {
if (TC.getTriple().isOSBinFormatELF()) {
- if (DebugInfoKind >= codegenoptions::DebugInfoConstructor)
+ if (DebugInfoKind >= llvm::codegenoptions::DebugInfoConstructor)
CmdArgs.push_back("-fjmc");
else
D.Diag(clang::diag::warn_drv_jmc_requires_debuginfo) << "-fjmc"
@@ -5565,7 +5713,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// can propagate it to the backend.
bool SplitDWARF = (DwarfFission != DwarfFissionKind::None) &&
(TC.getTriple().isOSBinFormatELF() ||
- TC.getTriple().isOSBinFormatWasm()) &&
+ TC.getTriple().isOSBinFormatWasm() ||
+ TC.getTriple().isOSBinFormatCOFF()) &&
(isa<AssembleJobAction>(JA) || isa<CompileJobAction>(JA) ||
isa<BackendJobAction>(JA));
if (SplitDWARF) {
@@ -5689,6 +5838,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_unique_internal_linkage_names);
Args.addOptInFlag(CmdArgs, options::OPT_funique_basic_block_section_names,
options::OPT_fno_unique_basic_block_section_names);
+ Args.addOptInFlag(CmdArgs, options::OPT_fconvergent_functions,
+ options::OPT_fno_convergent_functions);
if (Arg *A = Args.getLastArg(options::OPT_fsplit_machine_functions,
options::OPT_fno_split_machine_functions)) {
@@ -5710,7 +5861,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// for sampling, overhead of call arc collection is way too high and there's
// no way to collect the output.
if (!Triple.isNVPTX() && !Triple.isAMDGCN())
- addPGOAndCoverageFlags(TC, C, D, Output, Args, SanitizeArgs, CmdArgs);
+ addPGOAndCoverageFlags(TC, C, JA, Output, Args, SanitizeArgs, CmdArgs);
Args.AddLastArg(CmdArgs, options::OPT_fclang_abi_compat_EQ);
@@ -5913,26 +6064,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
addDebugPrefixMapArg(D, TC, Args, CmdArgs);
- if (Arg *A = Args.getLastArg(options::OPT_ftemplate_depth_,
- options::OPT_ftemplate_depth_EQ)) {
- CmdArgs.push_back("-ftemplate-depth");
- CmdArgs.push_back(A->getValue());
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_foperator_arrow_depth_EQ)) {
- CmdArgs.push_back("-foperator-arrow-depth");
- CmdArgs.push_back(A->getValue());
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_depth_EQ)) {
- CmdArgs.push_back("-fconstexpr-depth");
- CmdArgs.push_back(A->getValue());
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_steps_EQ)) {
- CmdArgs.push_back("-fconstexpr-steps");
- CmdArgs.push_back(A->getValue());
- }
+ Args.AddLastArg(CmdArgs, options::OPT_ftemplate_depth_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_foperator_arrow_depth_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fconstexpr_depth_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fconstexpr_steps_EQ);
Args.AddLastArg(CmdArgs, options::OPT_fexperimental_library);
@@ -5998,25 +6133,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
else
CmdArgs.push_back("19");
- if (Arg *A = Args.getLastArg(options::OPT_fmacro_backtrace_limit_EQ)) {
- CmdArgs.push_back("-fmacro-backtrace-limit");
- CmdArgs.push_back(A->getValue());
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_ftemplate_backtrace_limit_EQ)) {
- CmdArgs.push_back("-ftemplate-backtrace-limit");
- CmdArgs.push_back(A->getValue());
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_backtrace_limit_EQ)) {
- CmdArgs.push_back("-fconstexpr-backtrace-limit");
- CmdArgs.push_back(A->getValue());
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_fspell_checking_limit_EQ)) {
- CmdArgs.push_back("-fspell-checking-limit");
- CmdArgs.push_back(A->getValue());
- }
+ Args.AddLastArg(CmdArgs, options::OPT_fconstexpr_backtrace_limit_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fmacro_backtrace_limit_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_ftemplate_backtrace_limit_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fspell_checking_limit_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fcaret_diagnostics_max_lines_EQ);
// Pass -fmessage-length=.
unsigned MessageLength = 0;
@@ -6076,23 +6197,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (const Arg *A = Args.getLastArg(options::OPT_mignore_xcoff_visibility)) {
- if (Triple.isOSAIX())
- CmdArgs.push_back("-mignore-xcoff-visibility");
- else
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << A->getAsString(Args) << TripleStr;
- }
-
- if (const Arg *A =
- Args.getLastArg(options::OPT_mdefault_visibility_export_mapping_EQ)) {
- if (Triple.isOSAIX())
- A->render(Args, CmdArgs);
- else
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << A->getAsString(Args) << TripleStr;
- }
-
if (Args.hasFlag(options::OPT_fvisibility_inlines_hidden,
options::OPT_fno_visibility_inlines_hidden, false))
CmdArgs.push_back("-fvisibility-inlines-hidden");
@@ -6114,10 +6218,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_femit_all_decls);
Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
Args.AddLastArg(CmdArgs, options::OPT_fdigraphs, options::OPT_fno_digraphs);
- Args.AddLastArg(CmdArgs, options::OPT_femulated_tls,
- options::OPT_fno_emulated_tls);
Args.AddLastArg(CmdArgs, options::OPT_fzero_call_used_regs_EQ);
+ if (Args.hasFlag(options::OPT_femulated_tls, options::OPT_fno_emulated_tls,
+ Triple.hasDefaultEmulatedTLS()))
+ CmdArgs.push_back("-femulated-tls");
+
+ Args.addOptInFlag(CmdArgs, options::OPT_fcheck_new,
+ options::OPT_fno_check_new);
+
if (Arg *A = Args.getLastArg(options::OPT_fzero_call_used_regs_EQ)) {
// FIXME: There's no reason for this to be restricted to X86. The backend
// code needs to be changed to include the appropriate function calls
@@ -6235,7 +6344,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_fpatchable_function_entry_EQ)) {
StringRef S0 = A->getValue(), S = S0;
unsigned Size, Offset = 0;
- if (!Triple.isAArch64() && !Triple.isRISCV() && !Triple.isX86())
+ if (!Triple.isAArch64() && !Triple.isLoongArch() && !Triple.isRISCV() &&
+ !Triple.isX86())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
else if (S.consumeInteger(10, Size) ||
@@ -6280,12 +6390,35 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
<< A->getAsString(Args) << TripleStr;
}
}
+
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_pg)) {
+ if (TC.getTriple().isOSzOS()) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ }
+ }
if (Arg *A = Args.getLastArgNoClaim(options::OPT_p)) {
- if (!TC.getTriple().isOSAIX() && !TC.getTriple().isOSOpenBSD()) {
+ if (!(TC.getTriple().isOSAIX() || TC.getTriple().isOSOpenBSD())) {
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
}
}
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_p, options::OPT_pg)) {
+ if (A->getOption().matches(options::OPT_p)) {
+ A->claim();
+ if (TC.getTriple().isOSAIX() && !Args.hasArgNoClaim(options::OPT_pg))
+ CmdArgs.push_back("-pg");
+ }
+ }
+
+ // Reject AIX-specific link options on other targets.
+ if (!TC.getTriple().isOSAIX()) {
+ for (const Arg *A : Args.filtered(options::OPT_b, options::OPT_K,
+ options::OPT_mxcoff_build_id_EQ)) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << TripleStr;
+ }
+ }
if (Args.getLastArg(options::OPT_fapple_kext) ||
(Args.hasArg(options::OPT_mkernel) && types::isCXX(InputType)))
@@ -6298,13 +6431,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_parseable_fixits);
Args.AddLastArg(CmdArgs, options::OPT_ftime_report);
Args.AddLastArg(CmdArgs, options::OPT_ftime_report_EQ);
- Args.AddLastArg(CmdArgs, options::OPT_ftime_trace);
- Args.AddLastArg(CmdArgs, options::OPT_ftime_trace_granularity_EQ);
- Args.AddLastArg(CmdArgs, options::OPT_ftime_trace_EQ);
Args.AddLastArg(CmdArgs, options::OPT_ftrapv);
Args.AddLastArg(CmdArgs, options::OPT_malign_double);
Args.AddLastArg(CmdArgs, options::OPT_fno_temp_file);
+ if (const char *Name = C.getTimeTraceFile(&JA)) {
+ CmdArgs.push_back(Args.MakeArgString("-ftime-trace=" + Twine(Name)));
+ Args.AddLastArg(CmdArgs, options::OPT_ftime_trace_granularity_EQ);
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_ftrapv_handler_EQ)) {
CmdArgs.push_back("-ftrapv-handler");
CmdArgs.push_back(A->getValue());
@@ -6478,13 +6613,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (TC.IsEncodeExtendedBlockSignatureDefault())
CmdArgs.push_back("-fencode-extended-block-signature");
- if (Args.hasFlag(options::OPT_fcoroutines_ts, options::OPT_fno_coroutines_ts,
- false) &&
- types::isCXX(InputType)) {
- D.Diag(diag::warn_deperecated_fcoroutines_ts_flag);
- CmdArgs.push_back("-fcoroutines-ts");
- }
-
if (Args.hasFlag(options::OPT_fcoro_aligned_allocation,
options::OPT_fno_coro_aligned_allocation, false) &&
types::isCXX(InputType))
@@ -6606,7 +6734,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
.Case("c++14", "-std=c++14")
.Case("c++17", "-std=c++17")
.Case("c++20", "-std=c++20")
- .Case("c++latest", "-std=c++2b")
+ // TODO add c++23 and c++26 when MSVC supports it.
+ .Case("c++latest", "-std=c++26")
.Default("");
if (LanguageStandard.empty())
D.Diag(clang::diag::warn_drv_unused_argument)
@@ -6738,6 +6867,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.addOptOutFlag(CmdArgs, options::OPT_fassume_sane_operator_new,
options::OPT_fno_assume_sane_operator_new);
+ // -fassume-unique-vtables is on by default.
+ Args.addOptOutFlag(CmdArgs, options::OPT_fassume_unique_vtables,
+ options::OPT_fno_assume_unique_vtables);
+
// -frelaxed-template-template-args is off by default, as it is a severe
// breaking change until a corresponding change to template partial ordering
// is provided.
@@ -6883,10 +7016,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.addOptInFlag(CmdArgs, options::OPT_fapple_pragma_pack,
options::OPT_fno_apple_pragma_pack);
- if (Args.hasFlag(options::OPT_fxl_pragma_pack,
- options::OPT_fno_xl_pragma_pack, RawTriple.isOSAIX()))
- CmdArgs.push_back("-fxl-pragma-pack");
-
// Remarks can be enabled with any of the `-f.*optimization-record.*` flags.
if (willEmitRemarks(Args) && checkRemarksOptions(D, Args, Triple))
renderRemarksOptions(Args, CmdArgs, Triple, Input, Output, JA);
@@ -6980,10 +7109,23 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
A->claim();
}
+ // Forward --vfsoverlay to -cc1.
+ for (const Arg *A : Args.filtered(options::OPT_vfsoverlay)) {
+ CmdArgs.push_back("--vfsoverlay");
+ CmdArgs.push_back(A->getValue());
+ A->claim();
+ }
+
+ Args.addOptInFlag(CmdArgs, options::OPT_fsafe_buffer_usage_suggestions,
+ options::OPT_fno_safe_buffer_usage_suggestions);
+
// Setup statistics file output.
SmallString<128> StatsFile = getStatsFileName(Args, Output, Input, D);
- if (!StatsFile.empty())
+ if (!StatsFile.empty()) {
CmdArgs.push_back(Args.MakeArgString(Twine("-stats-file=") + StatsFile));
+ if (D.CCPrintInternalStats)
+ CmdArgs.push_back("-stats-file-append");
+ }
// Forward -Xclang arguments to -cc1, and -mllvm arguments to the LLVM option
// parser.
@@ -7040,7 +7182,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
auto FRecordSwitches =
Args.hasFlag(options::OPT_frecord_command_line,
options::OPT_fno_record_command_line, false);
- if (FRecordSwitches && !Triple.isOSBinFormatELF())
+ if (FRecordSwitches && !Triple.isOSBinFormatELF() &&
+ !Triple.isOSBinFormatXCOFF() && !Triple.isOSBinFormatMachO())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< Args.getLastArg(options::OPT_frecord_command_line)->getAsString(Args)
<< TripleStr;
@@ -7119,13 +7262,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ if (IsHIPDevice)
+ Args.addOptOutFlag(CmdArgs,
+ options::OPT_fhip_fp32_correctly_rounded_divide_sqrt,
+ options::OPT_fno_hip_fp32_correctly_rounded_divide_sqrt);
+
// OpenMP offloading device jobs take the argument -fopenmp-host-ir-file-path
// to specify the result of the compile phase on the host, so the meaningful
- // device declarations can be identified. Also, -fopenmp-is-device is passed
- // along to tell the frontend that it is generating code for a device, so that
- // only the relevant declarations are emitted.
+ // device declarations can be identified. Also, -fopenmp-is-target-device is
+ // passed along to tell the frontend that it is generating code for a device,
+ // so that only the relevant declarations are emitted.
if (IsOpenMPDevice) {
- CmdArgs.push_back("-fopenmp-is-device");
+ CmdArgs.push_back("-fopenmp-is-target-device");
if (OpenMPDeviceInput) {
CmdArgs.push_back("-fopenmp-host-ir-file-path");
CmdArgs.push_back(Args.MakeArgString(OpenMPDeviceInput->getFilename()));
@@ -7137,6 +7285,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.addOptInFlag(CmdArgs, options::OPT_munsafe_fp_atomics,
options::OPT_mno_unsafe_fp_atomics);
+ Args.addOptOutFlag(CmdArgs, options::OPT_mamdgpu_ieee,
+ options::OPT_mno_amdgpu_ieee);
}
// For all the host OpenMP offloading compile jobs we need to pass the targets
@@ -7176,22 +7326,30 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
if (WholeProgramVTables) {
- // Propagate -fwhole-program-vtables if this is an LTO compile.
- if (IsUsingLTO)
- CmdArgs.push_back("-fwhole-program-vtables");
+ // PS4 uses the legacy LTO API, which does not support this feature in
+ // ThinLTO mode.
+ bool IsPS4 = getToolChain().getTriple().isPS4();
+
// Check if we passed LTO options but they were suppressed because this is a
// device offloading action, or we passed device offload LTO options which
// were suppressed because this is not the device offload action.
+ // Check if we are using PS4 in regular LTO mode.
// Otherwise, issue an error.
- else if (!D.isUsingLTO(!IsDeviceOffloadAction))
+ if ((!IsUsingLTO && !D.isUsingLTO(!IsDeviceOffloadAction)) ||
+ (IsPS4 && !UnifiedLTO && (D.getLTOMode() != LTOK_Full)))
D.Diag(diag::err_drv_argument_only_allowed_with)
<< "-fwhole-program-vtables"
- << "-flto";
+ << ((IsPS4 && !UnifiedLTO) ? "-flto=full" : "-flto");
+
+ // Propagate -fwhole-program-vtables if this is an LTO compile.
+ if (IsUsingLTO)
+ CmdArgs.push_back("-fwhole-program-vtables");
}
bool DefaultsSplitLTOUnit =
- (WholeProgramVTables || SanitizeArgs.needsLTO()) &&
- (LTOMode == LTOK_Full || TC.canSplitThinLTOUnit());
+ ((WholeProgramVTables || SanitizeArgs.needsLTO()) &&
+ (LTOMode == LTOK_Full || TC.canSplitThinLTOUnit())) ||
+ (!Triple.isPS4() && UnifiedLTO);
bool SplitLTOUnit =
Args.hasFlag(options::OPT_fsplit_lto_unit,
options::OPT_fno_split_lto_unit, DefaultsSplitLTOUnit);
@@ -7201,6 +7359,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (SplitLTOUnit)
CmdArgs.push_back("-fsplit-lto-unit");
+ if (Arg *A = Args.getLastArg(options::OPT_ffat_lto_objects,
+ options::OPT_fno_fat_lto_objects)) {
+ if (IsUsingLTO && A->getOption().matches(options::OPT_ffat_lto_objects)) {
+ assert(LTOMode == LTOK_Full || LTOMode == LTOK_Thin);
+ if (!Triple.isOSBinFormatELF()) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TC.getTripleString();
+ }
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-flto=") + (LTOMode == LTOK_Thin ? "thin" : "full")));
+ CmdArgs.push_back("-flto-unit");
+ CmdArgs.push_back("-ffat-lto-objects");
+ A->render(Args, CmdArgs);
+ }
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_fglobal_isel,
options::OPT_fno_global_isel)) {
CmdArgs.push_back("-mllvm");
@@ -7251,6 +7425,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.addOptInFlag(CmdArgs, options::OPT_fkeep_static_consts,
options::OPT_fno_keep_static_consts);
+ Args.addOptInFlag(CmdArgs, options::OPT_fkeep_persistent_storage_variables,
+ options::OPT_fno_keep_persistent_storage_variables);
Args.addOptInFlag(CmdArgs, options::OPT_fcomplete_member_pointers,
options::OPT_fno_complete_member_pointers);
Args.addOptOutFlag(CmdArgs, options::OPT_fcxx_static_destructors,
@@ -7298,7 +7474,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if ((Triple.isOSBinFormatELF() || Triple.isOSBinFormatMachO()) &&
(EH || UnwindTables || AsyncUnwindTables ||
- DebugInfoKind != codegenoptions::NoDebugInfo))
+ DebugInfoKind != llvm::codegenoptions::NoDebugInfo))
CmdArgs.push_back("-D__GCC_HAVE_DWARF2_CFI_ASM=1");
if (Arg *A = Args.getLastArg(options::OPT_fsymbol_partition_EQ)) {
@@ -7350,13 +7526,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (D.CC1Main && !D.CCGenDiagnostics) {
// Invoke the CC1 directly in this process
- C.addCommand(std::make_unique<CC1Command>(JA, *this,
- ResponseFileSupport::AtFileUTF8(),
- Exec, CmdArgs, Inputs, Output));
+ C.addCommand(std::make_unique<CC1Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
+ Output, D.getPrependArg()));
} else {
- C.addCommand(std::make_unique<Command>(JA, *this,
- ResponseFileSupport::AtFileUTF8(),
- Exec, CmdArgs, Inputs, Output));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
+ Output, D.getPrependArg()));
}
// Make the compile command echo its inputs for /showFilenames.
@@ -7600,7 +7776,7 @@ static EHFlags parseClangCLEHFlags(const Driver &D, const ArgList &Args) {
void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
ArgStringList &CmdArgs,
- codegenoptions::DebugInfoKind *DebugInfoKind,
+ llvm::codegenoptions::DebugInfoKind *DebugInfoKind,
bool *EmitCodeView) const {
bool isNVPTX = getToolChain().getTriple().isNVPTX();
@@ -7632,9 +7808,9 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
options::OPT_gline_tables_only)) {
*EmitCodeView = true;
if (DebugInfoArg->getOption().matches(options::OPT__SLASH_Z7))
- *DebugInfoKind = codegenoptions::DebugInfoConstructor;
+ *DebugInfoKind = llvm::codegenoptions::DebugInfoConstructor;
else
- *DebugInfoKind = codegenoptions::DebugLineTablesOnly;
+ *DebugInfoKind = llvm::codegenoptions::DebugLineTablesOnly;
} else {
*EmitCodeView = false;
}
@@ -7644,7 +7820,8 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
// This controls whether or not we perform JustMyCode instrumentation.
if (Args.hasFlag(options::OPT__SLASH_JMC, options::OPT__SLASH_JMC_,
/*Default=*/false)) {
- if (*EmitCodeView && *DebugInfoKind >= codegenoptions::DebugInfoConstructor)
+ if (*EmitCodeView &&
+ *DebugInfoKind >= llvm::codegenoptions::DebugInfoConstructor)
CmdArgs.push_back("-fjmc");
else
D.Diag(clang::diag::warn_drv_jmc_requires_debuginfo) << "/JMC"
@@ -7656,6 +7833,8 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
if (types::isCXX(InputType))
CmdArgs.push_back("-fcxx-exceptions");
CmdArgs.push_back("-fexceptions");
+ if (EH.Asynch)
+ CmdArgs.push_back("-fasync-exceptions");
}
if (types::isCXX(InputType) && EH.Synch && EH.NoUnwindC)
CmdArgs.push_back("-fexternc-nounwind");
@@ -7863,6 +8042,14 @@ void ClangAs::AddX86TargetArgs(const ArgList &Args,
}
}
+void ClangAs::AddLoongArchTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(loongarch::getLoongArchABI(getToolChain().getDriver(), Args,
+ getToolChain().getTriple())
+ .data());
+}
+
void ClangAs::AddRISCVTargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
const llvm::Triple &Triple = getToolChain().getTriple();
@@ -7870,6 +8057,12 @@ void ClangAs::AddRISCVTargetArgs(const ArgList &Args,
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName.data());
+
+ if (Args.hasFlag(options::OPT_mdefault_build_attributes,
+ options::OPT_mno_default_build_attributes, true)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-riscv-add-build-attributes");
+ }
}
void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7947,7 +8140,8 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
WantDebug = !A->getOption().matches(options::OPT_g0) &&
!A->getOption().matches(options::OPT_ggdb0);
- codegenoptions::DebugInfoKind DebugInfoKind = codegenoptions::NoDebugInfo;
+ llvm::codegenoptions::DebugInfoKind DebugInfoKind =
+ llvm::codegenoptions::NoDebugInfo;
// Add the -fdebug-compilation-dir flag if needed.
const char *DebugCompilationDir =
@@ -7959,8 +8153,8 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// the guard for source type, however there is a test which asserts
// that some assembler invocation receives no -debug-info-kind,
// and it's not clear whether that test is just overly restrictive.
- DebugInfoKind = (WantDebug ? codegenoptions::DebugInfoConstructor
- : codegenoptions::NoDebugInfo);
+ DebugInfoKind = (WantDebug ? llvm::codegenoptions::DebugInfoConstructor
+ : llvm::codegenoptions::NoDebugInfo);
addDebugPrefixMapArg(getToolChain().getDriver(), getToolChain(), Args,
CmdArgs);
@@ -8054,6 +8248,11 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
}
break;
+ case llvm::Triple::loongarch32:
+ case llvm::Triple::loongarch64:
+ AddLoongArchTargetArgs(Args, CmdArgs);
+ break;
+
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
AddRISCVTargetArgs(Args, CmdArgs);
@@ -8072,7 +8271,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_mllvm);
- if (DebugInfoKind > codegenoptions::NoDebugInfo && Output.isFilename())
+ if (DebugInfoKind > llvm::codegenoptions::NoDebugInfo && Output.isFilename())
addDebugObjectName(Args, CmdArgs, DebugCompilationDir,
Output.getFilename());
@@ -8116,13 +8315,13 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = getToolChain().getDriver().getClangProgramPath();
if (D.CC1Main && !D.CCGenDiagnostics) {
// Invoke cc1as directly in this process.
- C.addCommand(std::make_unique<CC1Command>(JA, *this,
- ResponseFileSupport::AtFileUTF8(),
- Exec, CmdArgs, Inputs, Output));
+ C.addCommand(std::make_unique<CC1Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
+ Output, D.getPrependArg()));
} else {
- C.addCommand(std::make_unique<Command>(JA, *this,
- ResponseFileSupport::AtFileUTF8(),
- Exec, CmdArgs, Inputs, Output));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
+ Output, D.getPrependArg()));
}
}
@@ -8189,7 +8388,7 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
// Extract GPUArch from -march argument in TC argument list.
for (unsigned ArgIndex = 0; ArgIndex < TCArgs.size(); ArgIndex++) {
auto ArchStr = StringRef(TCArgs.getArgString(ArgIndex));
- auto Arch = ArchStr.startswith_insensitive("-march=");
+ auto Arch = ArchStr.starts_with_insensitive("-march=");
if (Arch) {
GPUArchName = ArchStr.substr(7);
Triples += "-";
@@ -8282,7 +8481,7 @@ void OffloadBundler::ConstructJobMultipleOutputs(
// Extract GPUArch from -march argument in TC argument list.
for (unsigned ArgIndex = 0; ArgIndex < TCArgs.size(); ArgIndex++) {
StringRef ArchStr = StringRef(TCArgs.getArgString(ArgIndex));
- auto Arch = ArchStr.startswith_insensitive("-march=");
+ auto Arch = ArchStr.starts_with_insensitive("-march=");
if (Arch) {
GPUArchName = ArchStr.substr(7);
Triples += "-";
@@ -8336,7 +8535,7 @@ void OffloadPackager::ConstructJob(Compilation &C, const JobAction &JA,
C.getArgsForToolChain(TC, OffloadAction->getOffloadingArch(),
OffloadAction->getOffloadingDeviceKind());
StringRef File = C.getArgs().MakeArgString(TC->getInputFilename(Input));
- StringRef Arch = (OffloadAction->getOffloadingArch())
+ StringRef Arch = OffloadAction->getOffloadingArch()
? OffloadAction->getOffloadingArch()
: TCArgs.getLastArgValue(options::OPT_march_EQ);
StringRef Kind =
@@ -8349,14 +8548,24 @@ void OffloadPackager::ConstructJob(Compilation &C, const JobAction &JA,
llvm::copy_if(Features, std::back_inserter(FeatureArgs),
[](StringRef Arg) { return !Arg.startswith("-target"); });
+ if (TC->getTriple().isAMDGPU()) {
+ for (StringRef Feature : llvm::split(Arch.split(':').second, ':')) {
+ FeatureArgs.emplace_back(
+ Args.MakeArgString(Feature.take_back() + Feature.drop_back()));
+ }
+ }
+
+ // TODO: We need to pass in the full target-id and handle it properly in the
+ // linker wrapper.
SmallVector<std::string> Parts{
"file=" + File.str(),
"triple=" + TC->getTripleString(),
- "arch=" + Arch.str(),
+ "arch=" + getProcessorFromTargetID(TC->getTriple(), Arch).str(),
"kind=" + Kind.str(),
};
- if (TC->getDriver().isUsingLTO(/* IsOffload */ true))
+ if (TC->getDriver().isUsingLTO(/* IsOffload */ true) ||
+ TC->getTriple().isAMDGPU())
for (StringRef Feature : FeatureArgs)
Parts.emplace_back("feature=" + Feature.str());
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
index a7625dba6646..64fc86b6b0a7 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
@@ -10,13 +10,13 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CLANG_H
#include "MSVC.h"
-#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/Types.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/Frontend/Debug/Options.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
class ObjCRuntime;
@@ -91,7 +91,7 @@ private:
void AddClangCLArgs(const llvm::opt::ArgList &Args, types::ID InputType,
llvm::opt::ArgStringList &CmdArgs,
- codegenoptions::DebugInfoKind *DebugInfoKind,
+ llvm::codegenoptions::DebugInfoKind *DebugInfoKind,
bool *EmitCodeView) const;
mutable std::unique_ptr<llvm::raw_fd_ostream> CompilationDatabase = nullptr;
@@ -125,6 +125,8 @@ class LLVM_LIBRARY_VISIBILITY ClangAs : public Tool {
public:
ClangAs(const ToolChain &TC)
: Tool("clang::as", "clang integrated assembler", TC) {}
+ void AddLoongArchTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
void AddX86TargetArgs(const llvm::opt::ArgList &Args,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp
index 9fd0529a3297..ee2cb10fde7d 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp
@@ -70,8 +70,8 @@ void cloudabi::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
Args.AddAllArgs(CmdArgs,
- {options::OPT_T_Group, options::OPT_e, options::OPT_s,
- options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
+ {options::OPT_T_Group, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_r});
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 34640b3c450d..358d7565f47c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -54,15 +54,15 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/ScopedPrinter.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLParser.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/TargetParser.h"
#include <optional>
using namespace clang::driver;
@@ -131,15 +131,33 @@ static void renderRemarksHotnessOptions(const ArgList &Args,
"opt-remarks-hotness-threshold=" + A->getValue()));
}
+static bool shouldIgnoreUnsupportedTargetFeature(const Arg &TargetFeatureArg,
+ llvm::Triple T,
+ StringRef Processor) {
+ // Warn no-cumode for AMDGCN processors not supporing WGP mode.
+ if (!T.isAMDGPU())
+ return false;
+ auto GPUKind = T.isAMDGCN() ? llvm::AMDGPU::parseArchAMDGCN(Processor)
+ : llvm::AMDGPU::parseArchR600(Processor);
+ auto GPUFeatures = T.isAMDGCN() ? llvm::AMDGPU::getArchAttrAMDGCN(GPUKind)
+ : llvm::AMDGPU::getArchAttrR600(GPUKind);
+ if (GPUFeatures & llvm::AMDGPU::FEATURE_WGP)
+ return false;
+ return TargetFeatureArg.getOption().matches(options::OPT_mno_cumode);
+}
+
void tools::addPathIfExists(const Driver &D, const Twine &Path,
ToolChain::path_list &Paths) {
if (D.getVFS().exists(Path))
Paths.push_back(Path.str());
}
-void tools::handleTargetFeaturesGroup(const ArgList &Args,
+void tools::handleTargetFeaturesGroup(const Driver &D,
+ const llvm::Triple &Triple,
+ const ArgList &Args,
std::vector<StringRef> &Features,
OptSpecifier Group) {
+ std::set<StringRef> Warned;
for (const Arg *A : Args.filtered(Group)) {
StringRef Name = A->getOption().getName();
A->claim();
@@ -148,9 +166,21 @@ void tools::handleTargetFeaturesGroup(const ArgList &Args,
assert(Name.startswith("m") && "Invalid feature name.");
Name = Name.substr(1);
+ auto Proc = getCPUName(D, Args, Triple);
+ if (shouldIgnoreUnsupportedTargetFeature(*A, Triple, Proc)) {
+ if (Warned.count(Name) == 0) {
+ D.getDiags().Report(
+ clang::diag::warn_drv_unsupported_option_for_processor)
+ << A->getAsString(Args) << Proc;
+ Warned.insert(Name);
+ }
+ continue;
+ }
+
bool IsNegative = Name.startswith("no-");
if (IsNegative)
Name = Name.substr(3);
+
Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
}
}
@@ -267,22 +297,8 @@ void tools::AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
TC.AddCXXStdlibLibArgs(Args, CmdArgs);
else if (A.getOption().matches(options::OPT_Z_reserved_lib_cckext))
TC.AddCCKextLibArgs(Args, CmdArgs);
- else if (A.getOption().matches(options::OPT_z)) {
- // Pass -z prefix for gcc linker compatibility.
- A.claim();
- A.render(Args, CmdArgs);
- } else if (A.getOption().matches(options::OPT_b)) {
- const llvm::Triple &T = TC.getTriple();
- if (!T.isOSAIX()) {
- TC.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
- << A.getSpelling() << T.str();
- }
- // Pass -b prefix for AIX linker.
- A.claim();
- A.render(Args, CmdArgs);
- } else {
+ else
A.renderAsInput(Args, CmdArgs);
- }
}
}
@@ -319,6 +335,7 @@ void tools::AddTargetFeature(const ArgList &Args,
/// Get the (LLVM) name of the AMDGPU gpu we are targeting.
static std::string getAMDGPUTargetGPU(const llvm::Triple &T,
const ArgList &Args) {
+ Arg *MArch = Args.getLastArg(options::OPT_march_EQ);
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
auto GPUName = getProcessorFromTargetID(T, A->getValue());
return llvm::StringSwitch<std::string>(GPUName)
@@ -331,6 +348,8 @@ static std::string getAMDGPUTargetGPU(const llvm::Triple &T,
.Case("aruba", "cayman")
.Default(GPUName.str());
}
+ if (MArch)
+ return getProcessorFromTargetID(T, MArch->getValue()).str();
return "";
}
@@ -410,7 +429,7 @@ std::string tools::getCPUName(const Driver &D, const ArgList &Args,
case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
- return ppc::getPPCTargetCPU(Args, T);
+ return ppc::getPPCTargetCPU(D, Args, T);
case llvm::Triple::csky:
if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
@@ -458,9 +477,12 @@ std::string tools::getCPUName(const Driver &D, const ArgList &Args,
}
}
-static void getWebAssemblyTargetFeatures(const ArgList &Args,
+static void getWebAssemblyTargetFeatures(const Driver &D,
+ const llvm::Triple &Triple,
+ const ArgList &Args,
std::vector<StringRef> &Features) {
- handleTargetFeaturesGroup(Args, Features, options::OPT_m_wasm_Features_Group);
+ handleTargetFeaturesGroup(D, Triple, Args, Features,
+ options::OPT_m_wasm_Features_Group);
}
void tools::getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
@@ -505,11 +527,11 @@ void tools::getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
x86::getX86TargetFeatures(D, Triple, Args, Features);
break;
case llvm::Triple::hexagon:
- hexagon::getHexagonTargetFeatures(D, Args, Features);
+ hexagon::getHexagonTargetFeatures(D, Triple, Args, Features);
break;
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
- getWebAssemblyTargetFeatures(Args, Features);
+ getWebAssemblyTargetFeatures(D, Triple, Args, Features);
break;
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
@@ -567,6 +589,7 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
ArgStringList &CmdArgs, const InputInfo &Output,
const InputInfo &Input, bool IsThinLTO) {
const bool IsOSAIX = ToolChain.getTriple().isOSAIX();
+ const bool IsAMDGCN = ToolChain.getTriple().isAMDGCN();
const char *Linker = Args.MakeArgString(ToolChain.GetLinkerPath());
const Driver &D = ToolChain.getDriver();
if (llvm::sys::path::filename(Linker) != "ld.lld" &&
@@ -594,6 +617,11 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
PluginName + Suffix,
Plugin);
CmdArgs.push_back(Args.MakeArgString(Twine(PluginPrefix) + Plugin));
+ } else {
+ // Tell LLD to find and use .llvm.lto section in regular relocatable object
+ // files
+ if (Args.hasArg(options::OPT_ffat_lto_objects))
+ CmdArgs.push_back("--fat-lto-objects");
}
const char *PluginOptPrefix = IsOSAIX ? "-bplugin_opt:" : "-plugin-opt=";
@@ -631,17 +659,23 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
OOpt = "2";
} else if (A->getOption().matches(options::OPT_O0))
OOpt = "0";
- if (!OOpt.empty())
+ if (!OOpt.empty()) {
CmdArgs.push_back(
Args.MakeArgString(Twine(PluginOptPrefix) + ExtraDash + "O" + OOpt));
+ if (IsAMDGCN)
+ CmdArgs.push_back(Args.MakeArgString(Twine("--lto-CGO") + OOpt));
+ }
}
if (Args.hasArg(options::OPT_gsplit_dwarf))
CmdArgs.push_back(Args.MakeArgString(
Twine(PluginOptPrefix) + "dwo_dir=" + Output.getFilename() + "_dwo"));
- if (IsThinLTO)
+ if (IsThinLTO && !IsOSAIX)
CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) + "thinlto"));
+ else if (IsThinLTO && IsOSAIX)
+ CmdArgs.push_back(Args.MakeArgString(Twine("-bdbg:thinlto")));
+
StringRef Parallelism = getLTOParallelism(Args, D);
if (!Parallelism.empty())
@@ -666,6 +700,10 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
}
if (IsOSAIX) {
+ if (!ToolChain.useIntegratedAs())
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-no-integrated-as=1"));
+
// On AIX, clang assumes strict-dwarf is true if any debug option is
// specified, unless it is told explicitly not to assume so.
Arg *A = Args.getLastArg(options::OPT_g_Group);
@@ -676,9 +714,16 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
CmdArgs.push_back(
Args.MakeArgString(Twine(PluginOptPrefix) + "-strict-dwarf=true"));
- if (Args.getLastArg(options::OPT_mabi_EQ_vec_extabi))
- CmdArgs.push_back(
- Args.MakeArgString(Twine(PluginOptPrefix) + "-vec-extabi"));
+ for (const Arg *A : Args.filtered_reverse(options::OPT_mabi_EQ)) {
+ StringRef V = A->getValue();
+ if (V == "vec-default")
+ break;
+ if (V == "vec-extabi") {
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-vec-extabi"));
+ break;
+ }
+ }
}
bool UseSeparateSections =
@@ -692,13 +737,38 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
CmdArgs.push_back(
Args.MakeArgString(Twine(PluginOptPrefix) + "-function-sections=0"));
+ bool DataSectionsTurnedOff = false;
if (Args.hasFlag(options::OPT_fdata_sections, options::OPT_fno_data_sections,
- UseSeparateSections))
+ UseSeparateSections)) {
CmdArgs.push_back(
Args.MakeArgString(Twine(PluginOptPrefix) + "-data-sections=1"));
- else if (Args.hasArg(options::OPT_fno_data_sections))
+ } else if (Args.hasArg(options::OPT_fno_data_sections)) {
+ DataSectionsTurnedOff = true;
CmdArgs.push_back(
Args.MakeArgString(Twine(PluginOptPrefix) + "-data-sections=0"));
+ }
+
+ if (Args.hasArg(options::OPT_mxcoff_roptr) ||
+ Args.hasArg(options::OPT_mno_xcoff_roptr)) {
+ bool HasRoptr = Args.hasFlag(options::OPT_mxcoff_roptr,
+ options::OPT_mno_xcoff_roptr, false);
+ StringRef OptStr = HasRoptr ? "-mxcoff-roptr" : "-mno-xcoff-roptr";
+
+ if (!IsOSAIX)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << OptStr << ToolChain.getTriple().str();
+
+ if (HasRoptr) {
+ // The data sections option is on by default on AIX. We only need to error
+ // out when -fno-data-sections is specified explicitly to turn off data
+ // sections.
+ if (DataSectionsTurnedOff)
+ D.Diag(diag::err_roptr_requires_data_sections);
+
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-mxcoff-roptr"));
+ }
+ }
// Pass an option to enable split machine functions.
if (auto *A = Args.getLastArg(options::OPT_fsplit_machine_functions,
@@ -717,16 +787,7 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
"sample-profile=" + FName));
}
- auto *CSPGOGenerateArg = Args.getLastArg(options::OPT_fcs_profile_generate,
- options::OPT_fcs_profile_generate_EQ,
- options::OPT_fno_profile_generate);
- if (CSPGOGenerateArg &&
- CSPGOGenerateArg->getOption().matches(options::OPT_fno_profile_generate))
- CSPGOGenerateArg = nullptr;
-
- auto *ProfileUseArg = getLastProfileUseArg(Args);
-
- if (CSPGOGenerateArg) {
+ if (auto *CSPGOGenerateArg = getLastCSProfileGenerateArg(Args)) {
CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) + ExtraDash +
"cs-profile-generate"));
if (CSPGOGenerateArg->getOption().matches(
@@ -739,7 +800,7 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
CmdArgs.push_back(
Args.MakeArgString(Twine(PluginOptPrefix) + ExtraDash +
"cs-profile-path=default_%m.profraw"));
- } else if (ProfileUseArg) {
+ } else if (auto *ProfileUseArg = getLastProfileUseArg(Args)) {
SmallString<128> Path(
ProfileUseArg->getNumValues() == 0 ? "" : ProfileUseArg->getValue());
if (Path.empty() || llvm::sys::fs::is_directory(Path))
@@ -757,11 +818,10 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
D.Diag(clang::diag::warn_drv_fjmc_for_elf_only);
}
- if (Arg *A = Args.getLastArg(options::OPT_femulated_tls,
- options::OPT_fno_emulated_tls)) {
- bool Enable = A->getOption().getID() == options::OPT_femulated_tls;
- CmdArgs.push_back(Args.MakeArgString(
- Twine(PluginOptPrefix) + "-emulated-tls=" + (Enable ? "1" : "0")));
+ if (Args.hasFlag(options::OPT_femulated_tls, options::OPT_fno_emulated_tls,
+ ToolChain.getTriple().hasDefaultEmulatedTLS())) {
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-emulated-tls"));
}
if (Args.hasFlag(options::OPT_fstack_size_section,
@@ -798,22 +858,6 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
/*IsLTO=*/true, PluginOptPrefix);
}
-void tools::addOpenMPRuntimeSpecificRPath(const ToolChain &TC,
- const ArgList &Args,
- ArgStringList &CmdArgs) {
-
- if (Args.hasFlag(options::OPT_fopenmp_implicit_rpath,
- options::OPT_fno_openmp_implicit_rpath, true)) {
- // Default to clang lib / lib64 folder, i.e. the same location as device
- // runtime
- SmallString<256> DefaultLibPath =
- llvm::sys::path::parent_path(TC.getDriver().Dir);
- llvm::sys::path::append(DefaultLibPath, CLANG_INSTALL_LIBDIR_BASENAME);
- CmdArgs.push_back("-rpath");
- CmdArgs.push_back(Args.MakeArgString(DefaultLibPath));
- }
-}
-
void tools::addOpenMPRuntimeLibraryPath(const ToolChain &TC,
const ArgList &Args,
ArgStringList &CmdArgs) {
@@ -834,10 +878,11 @@ void tools::addArchSpecificRPath(const ToolChain &TC, const ArgList &Args,
options::OPT_fno_rtlib_add_rpath, DefaultValue))
return;
- std::string CandidateRPath = TC.getArchSpecificLibPath();
- if (TC.getVFS().exists(CandidateRPath)) {
- CmdArgs.push_back("-rpath");
- CmdArgs.push_back(Args.MakeArgString(CandidateRPath));
+ for (const auto &CandidateRPath : TC.getArchSpecificLibPaths()) {
+ if (TC.getVFS().exists(CandidateRPath)) {
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back(Args.MakeArgString(CandidateRPath));
+ }
}
}
@@ -884,9 +929,6 @@ bool tools::addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC,
CmdArgs.push_back("-lomptarget.devicertl");
addArchSpecificRPath(TC, Args, CmdArgs);
-
- if (RTKind == Driver::OMPRT_OMP)
- addOpenMPRuntimeSpecificRPath(TC, Args, CmdArgs);
addOpenMPRuntimeLibraryPath(TC, Args, CmdArgs);
return true;
@@ -908,12 +950,6 @@ void tools::addFortranRuntimeLibs(const ToolChain &TC,
void tools::addFortranRuntimeLibraryPath(const ToolChain &TC,
const llvm::opt::ArgList &Args,
ArgStringList &CmdArgs) {
- // NOTE: Generating executables by Flang is considered an "experimental"
- // feature and hence this is guarded with a command line option.
- // TODO: Make this work unconditionally once Flang is mature enough.
- if (!Args.hasArg(options::OPT_flang_experimental_exec))
- return;
-
// Default to the <driver-path>/../lib directory. This works fine on the
// platforms that we have tested so far. We will probably have to re-fine
// this in the future. In particular, on some platforms, we may need to use
@@ -979,7 +1015,7 @@ void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
CmdArgs.push_back(getAsNeededOption(TC, false));
// There's no libpthread or librt on RTEMS & Android.
if (TC.getTriple().getOS() != llvm::Triple::RTEMS &&
- !TC.getTriple().isAndroid()) {
+ !TC.getTriple().isAndroid() && !TC.getTriple().isOHOSFamily()) {
CmdArgs.push_back("-lpthread");
if (!TC.getTriple().isOSOpenBSD())
CmdArgs.push_back("-lrt");
@@ -1209,11 +1245,11 @@ bool tools::addXRayRuntime(const ToolChain&TC, const ArgList &Args, ArgStringLis
return false;
if (TC.getXRayArgs().needsXRayRt()) {
- CmdArgs.push_back("-whole-archive");
+ CmdArgs.push_back("--whole-archive");
CmdArgs.push_back(TC.getCompilerRTArgString(Args, "xray"));
for (const auto &Mode : TC.getXRayArgs().modeList())
CmdArgs.push_back(TC.getCompilerRTArgString(Args, Mode));
- CmdArgs.push_back("-no-whole-archive");
+ CmdArgs.push_back("--no-whole-archive");
return true;
}
@@ -1250,26 +1286,27 @@ const char *tools::SplitDebugName(const JobAction &JA, const ArgList &Args,
F += ".dwo";
};
if (Arg *A = Args.getLastArg(options::OPT_gsplit_dwarf_EQ))
- if (StringRef(A->getValue()) == "single")
+ if (StringRef(A->getValue()) == "single" && Output.isFilename())
return Args.MakeArgString(Output.getFilename());
- Arg *FinalOutput = Args.getLastArg(options::OPT_o);
- if (FinalOutput && Args.hasArg(options::OPT_c)) {
- SmallString<128> T(FinalOutput->getValue());
- llvm::sys::path::remove_filename(T);
- llvm::sys::path::append(T, llvm::sys::path::stem(FinalOutput->getValue()));
- AddPostfix(T);
- return Args.MakeArgString(T);
+ SmallString<128> T;
+ if (const Arg *A = Args.getLastArg(options::OPT_dumpdir)) {
+ T = A->getValue();
} else {
- // Use the compilation dir.
- Arg *A = Args.getLastArg(options::OPT_ffile_compilation_dir_EQ,
- options::OPT_fdebug_compilation_dir_EQ);
- SmallString<128> T(A ? A->getValue() : "");
- SmallString<128> F(llvm::sys::path::stem(Input.getBaseInput()));
- AddPostfix(F);
- T += F;
- return Args.MakeArgString(T);
+ Arg *FinalOutput = Args.getLastArg(options::OPT_o, options::OPT__SLASH_o);
+ if (FinalOutput && Args.hasArg(options::OPT_c)) {
+ T = FinalOutput->getValue();
+ llvm::sys::path::remove_filename(T);
+ llvm::sys::path::append(T,
+ llvm::sys::path::stem(FinalOutput->getValue()));
+ AddPostfix(T);
+ return Args.MakeArgString(T);
+ }
}
+
+ T += llvm::sys::path::stem(Input.getBaseInput());
+ AddPostfix(T);
+ return Args.MakeArgString(T);
}
void tools::SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
@@ -1311,6 +1348,17 @@ void tools::claimNoWarnArgs(const ArgList &Args) {
Args.ClaimAllArgs(options::OPT_fno_lto);
}
+Arg *tools::getLastCSProfileGenerateArg(const ArgList &Args) {
+ auto *CSPGOGenerateArg = Args.getLastArg(options::OPT_fcs_profile_generate,
+ options::OPT_fcs_profile_generate_EQ,
+ options::OPT_fno_profile_generate);
+ if (CSPGOGenerateArg &&
+ CSPGOGenerateArg->getOption().matches(options::OPT_fno_profile_generate))
+ CSPGOGenerateArg = nullptr;
+
+ return CSPGOGenerateArg;
+}
+
Arg *tools::getLastProfileUseArg(const ArgList &Args) {
auto *ProfileUseArg = Args.getLastArg(
options::OPT_fprofile_instr_use, options::OPT_fprofile_instr_use_EQ,
@@ -1403,6 +1451,10 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
}
}
+ // OHOS-specific defaults for PIC/PIE
+ if (Triple.isOHOSFamily() && Triple.getArch() == llvm::Triple::aarch64)
+ PIC = true;
+
// OpenBSD-specific defaults for PIE
if (Triple.isOSOpenBSD()) {
switch (ToolChain.getArch()) {
@@ -1425,10 +1477,6 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
}
}
- // AMDGPU-specific defaults for PIC.
- if (Triple.getArch() == llvm::Triple::amdgcn)
- PIC = true;
-
// The last argument relating to either PIC or PIE wins, and no
// other argument is used. If the last argument is any flavor of the
// '-fno-...' arguments, both PIC and PIE are disabled. Any PIE
@@ -1603,6 +1651,48 @@ unsigned tools::ParseFunctionAlignment(const ToolChain &TC,
return Value ? llvm::Log2_32_Ceil(std::min(Value, 65536u)) : Value;
}
+void tools::addDebugInfoKind(
+ ArgStringList &CmdArgs, llvm::codegenoptions::DebugInfoKind DebugInfoKind) {
+ switch (DebugInfoKind) {
+ case llvm::codegenoptions::DebugDirectivesOnly:
+ CmdArgs.push_back("-debug-info-kind=line-directives-only");
+ break;
+ case llvm::codegenoptions::DebugLineTablesOnly:
+ CmdArgs.push_back("-debug-info-kind=line-tables-only");
+ break;
+ case llvm::codegenoptions::DebugInfoConstructor:
+ CmdArgs.push_back("-debug-info-kind=constructor");
+ break;
+ case llvm::codegenoptions::LimitedDebugInfo:
+ CmdArgs.push_back("-debug-info-kind=limited");
+ break;
+ case llvm::codegenoptions::FullDebugInfo:
+ CmdArgs.push_back("-debug-info-kind=standalone");
+ break;
+ case llvm::codegenoptions::UnusedTypeInfo:
+ CmdArgs.push_back("-debug-info-kind=unused-types");
+ break;
+ default:
+ break;
+ }
+}
+
+// Convert an arg of the form "-gN" or "-ggdbN" or one of their aliases
+// to the corresponding DebugInfoKind.
+llvm::codegenoptions::DebugInfoKind tools::debugLevelToInfoKind(const Arg &A) {
+ assert(A.getOption().matches(options::OPT_gN_Group) &&
+ "Not a -g option that specifies a debug-info level");
+ if (A.getOption().matches(options::OPT_g0) ||
+ A.getOption().matches(options::OPT_ggdb0))
+ return llvm::codegenoptions::NoDebugInfo;
+ if (A.getOption().matches(options::OPT_gline_tables_only) ||
+ A.getOption().matches(options::OPT_ggdb1))
+ return llvm::codegenoptions::DebugLineTablesOnly;
+ if (A.getOption().matches(options::OPT_gline_directives_only))
+ return llvm::codegenoptions::DebugDirectivesOnly;
+ return llvm::codegenoptions::DebugInfoConstructor;
+}
+
static unsigned ParseDebugDefaultVersion(const ToolChain &TC,
const ArgList &Args) {
const Arg *A = Args.getLastArg(options::OPT_fdebug_default_version);
@@ -1693,6 +1783,12 @@ static LibGccType getLibGccType(const ToolChain &TC, const Driver &D,
static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
ArgStringList &CmdArgs, const ArgList &Args) {
ToolChain::UnwindLibType UNW = TC.GetUnwindLibType(Args);
+ // By default OHOS binaries are linked statically to libunwind.
+ if (TC.getTriple().isOHOSFamily() && UNW == ToolChain::UNW_CompilerRT) {
+ CmdArgs.push_back("-l:libunwind.a");
+ return;
+ }
+
// Targets that don't use unwind libraries.
if ((TC.getTriple().isAndroid() && UNW == ToolChain::UNW_Libgcc) ||
TC.getTriple().isOSIAMCU() || TC.getTriple().isOSBinFormatWasm() ||
@@ -1792,28 +1888,40 @@ SmallString<128> tools::getStatsFileName(const llvm::opt::ArgList &Args,
const InputInfo &Input,
const Driver &D) {
const Arg *A = Args.getLastArg(options::OPT_save_stats_EQ);
- if (!A)
+ if (!A && !D.CCPrintInternalStats)
return {};
- StringRef SaveStats = A->getValue();
SmallString<128> StatsFile;
- if (SaveStats == "obj" && Output.isFilename()) {
- StatsFile.assign(Output.getFilename());
- llvm::sys::path::remove_filename(StatsFile);
- } else if (SaveStats != "cwd") {
- D.Diag(diag::err_drv_invalid_value) << A->getAsString(Args) << SaveStats;
- return {};
- }
+ if (A) {
+ StringRef SaveStats = A->getValue();
+ if (SaveStats == "obj" && Output.isFilename()) {
+ StatsFile.assign(Output.getFilename());
+ llvm::sys::path::remove_filename(StatsFile);
+ } else if (SaveStats != "cwd") {
+ D.Diag(diag::err_drv_invalid_value) << A->getAsString(Args) << SaveStats;
+ return {};
+ }
- StringRef BaseName = llvm::sys::path::filename(Input.getBaseInput());
- llvm::sys::path::append(StatsFile, BaseName);
- llvm::sys::path::replace_extension(StatsFile, "stats");
+ StringRef BaseName = llvm::sys::path::filename(Input.getBaseInput());
+ llvm::sys::path::append(StatsFile, BaseName);
+ llvm::sys::path::replace_extension(StatsFile, "stats");
+ } else {
+ assert(D.CCPrintInternalStats);
+ StatsFile.assign(D.CCPrintInternalStatReportFilename.empty()
+ ? "-"
+ : D.CCPrintInternalStatReportFilename);
+ }
return StatsFile;
}
-void tools::addMultilibFlag(bool Enabled, const char *const Flag,
+void tools::addMultilibFlag(bool Enabled, const StringRef Flag,
Multilib::flags_list &Flags) {
- Flags.push_back(std::string(Enabled ? "+" : "-") + Flag);
+ assert(Flag.front() == '-');
+ if (Enabled) {
+ Flags.push_back(Flag.str());
+ } else {
+ Flags.push_back(("!" + Flag.substr(1)).str());
+ }
}
void tools::addX86AlignBranchArgs(const Driver &D, const ArgList &Args,
@@ -2186,11 +2294,7 @@ void tools::AddStaticDeviceLibs(Compilation *C, const Tool *T,
static llvm::opt::Arg *
getAMDGPUCodeObjectArgument(const Driver &D, const llvm::opt::ArgList &Args) {
- // The last of -mcode-object-v3, -mno-code-object-v3 and
- // -mcode-object-version=<version> wins.
- return Args.getLastArg(options::OPT_mcode_object_v3_legacy,
- options::OPT_mno_code_object_v3_legacy,
- options::OPT_mcode_object_version_EQ);
+ return Args.getLastArg(options::OPT_mcode_object_version_EQ);
}
void tools::checkAMDGPUCodeObjectVersion(const Driver &D,
@@ -2198,15 +2302,6 @@ void tools::checkAMDGPUCodeObjectVersion(const Driver &D,
const unsigned MinCodeObjVer = 2;
const unsigned MaxCodeObjVer = 5;
- // Emit warnings for legacy options even if they are overridden.
- if (Args.hasArg(options::OPT_mno_code_object_v3_legacy))
- D.Diag(diag::warn_drv_deprecated_arg) << "-mno-code-object-v3"
- << "-mcode-object-version=2";
-
- if (Args.hasArg(options::OPT_mcode_object_v3_legacy))
- D.Diag(diag::warn_drv_deprecated_arg) << "-mcode-object-v3"
- << "-mcode-object-version=3";
-
if (auto *CodeObjArg = getAMDGPUCodeObjectArgument(D, Args)) {
if (CodeObjArg->getOption().getID() ==
options::OPT_mcode_object_version_EQ) {
@@ -2223,17 +2318,8 @@ void tools::checkAMDGPUCodeObjectVersion(const Driver &D,
unsigned tools::getAMDGPUCodeObjectVersion(const Driver &D,
const llvm::opt::ArgList &Args) {
unsigned CodeObjVer = 4; // default
- if (auto *CodeObjArg = getAMDGPUCodeObjectArgument(D, Args)) {
- if (CodeObjArg->getOption().getID() ==
- options::OPT_mno_code_object_v3_legacy) {
- CodeObjVer = 2;
- } else if (CodeObjArg->getOption().getID() ==
- options::OPT_mcode_object_v3_legacy) {
- CodeObjVer = 3;
- } else {
- StringRef(CodeObjArg->getValue()).getAsInteger(0, CodeObjVer);
- }
- }
+ if (auto *CodeObjArg = getAMDGPUCodeObjectArgument(D, Args))
+ StringRef(CodeObjArg->getValue()).getAsInteger(0, CodeObjVer);
return CodeObjVer;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
index d44d9214c08b..6a8de0f1c36d 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
@@ -59,11 +59,6 @@ void AddStaticDeviceLibsLinking(Compilation &C, const Tool &T,
llvm::opt::ArgStringList &CmdArgs,
StringRef Arch, StringRef Target,
bool isBitCodeSDL, bool postClangLink);
-void AddStaticDeviceLibsPostLinking(const Driver &D,
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CmdArgs,
- StringRef Arch, StringRef Target,
- bool isBitCodeSDL, bool postClangLink);
void AddStaticDeviceLibs(Compilation *C, const Tool *T, const JobAction *JA,
const InputInfoList *Inputs, const Driver &D,
const llvm::opt::ArgList &DriverArgs,
@@ -104,6 +99,12 @@ ParsePICArgs(const ToolChain &ToolChain, const llvm::opt::ArgList &Args);
unsigned ParseFunctionAlignment(const ToolChain &TC,
const llvm::opt::ArgList &Args);
+void addDebugInfoKind(llvm::opt::ArgStringList &CmdArgs,
+ llvm::codegenoptions::DebugInfoKind DebugInfoKind);
+
+llvm::codegenoptions::DebugInfoKind
+debugLevelToInfoKind(const llvm::opt::Arg &A);
+
// Extract the integer N from a string spelled "-dwarf-N", returning 0
// on mismatch. The StringRef input (rather than an Arg) allows
// for use by the "-Xassembler" option parser.
@@ -117,9 +118,6 @@ void AddAssemblerKPIC(const ToolChain &ToolChain,
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
-void addOpenMPRuntimeSpecificRPath(const ToolChain &TC,
- const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs);
void addArchSpecificRPath(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
void addOpenMPRuntimeLibraryPath(const ToolChain &TC,
@@ -145,6 +143,7 @@ void addHIPRuntimeLibArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
const char *getAsNeededOption(const ToolChain &TC, bool as_needed);
+llvm::opt::Arg *getLastCSProfileGenerateArg(const llvm::opt::ArgList &Args);
llvm::opt::Arg *getLastProfileUseArg(const llvm::opt::ArgList &Args);
llvm::opt::Arg *getLastProfileSampleUseArg(const llvm::opt::ArgList &Args);
@@ -185,7 +184,8 @@ void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
/// Note: Since \p Features may contain default values before calling
/// this function, or may be appended with entries to override arguments,
/// entries in \p Features are not unique.
-void handleTargetFeaturesGroup(const llvm::opt::ArgList &Args,
+void handleTargetFeaturesGroup(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
std::vector<StringRef> &Features,
llvm::opt::OptSpecifier Group);
@@ -198,9 +198,8 @@ SmallString<128> getStatsFileName(const llvm::opt::ArgList &Args,
const InputInfo &Output,
const InputInfo &Input, const Driver &D);
-/// \p Flag must be a flag accepted by the driver with its leading '-' removed,
-// otherwise '-print-multi-lib' will not emit them correctly.
-void addMultilibFlag(bool Enabled, const char *const Flag,
+/// \p Flag must be a flag accepted by the driver.
+void addMultilibFlag(bool Enabled, const StringRef Flag,
Multilib::flags_list &Flags);
void addX86AlignBranchArgs(const Driver &D, const llvm::opt::ArgList &Args,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp
index bc91449326a5..3c5dfba329cf 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp
@@ -94,7 +94,8 @@ void tools::CrossWindows::Linker::ConstructJob(
CmdArgs.push_back("-m");
switch (TC.getArch()) {
default:
- llvm_unreachable("unsupported architecture");
+ D.Diag(diag::err_target_unknown_triple) << TC.getEffectiveTriple().str();
+ break;
case llvm::Triple::arm:
case llvm::Triple::thumb:
// FIXME: this is incorrect for WinCE
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h
index 165dcdfd5d3a..0ba17bc3e305 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h
@@ -54,7 +54,6 @@ public:
CrossWindowsToolChain(const Driver &D, const llvm::Triple &T,
const llvm::opt::ArgList &Args);
- bool IsIntegratedAssemblerDefault() const override { return true; }
UnwindTableLevel
getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override;
bool isPICDefault() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
index aa125bb308e8..3a577650eb08 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -21,12 +21,12 @@
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatAdapters.h"
#include "llvm/Support/FormatVariadic.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/TargetParser.h"
#include <system_error>
using namespace clang::driver;
@@ -74,6 +74,10 @@ CudaVersion getCudaVersion(uint32_t raw_version) {
return CudaVersion::CUDA_117;
if (raw_version < 11090)
return CudaVersion::CUDA_118;
+ if (raw_version < 12010)
+ return CudaVersion::CUDA_120;
+ if (raw_version < 12020)
+ return CudaVersion::CUDA_121;
return CudaVersion::NEW;
}
@@ -668,6 +672,8 @@ void NVPTX::getNVPTXTargetFeatures(const Driver &D, const llvm::Triple &Triple,
case CudaVersion::CUDA_##CUDA_VER: \
PtxFeature = "+ptx" #PTX_VER; \
break;
+ CASE_CUDA_VERSION(121, 81);
+ CASE_CUDA_VERSION(120, 80);
CASE_CUDA_VERSION(118, 78);
CASE_CUDA_VERSION(117, 77);
CASE_CUDA_VERSION(116, 76);
@@ -695,14 +701,13 @@ void NVPTX::getNVPTXTargetFeatures(const Driver &D, const llvm::Triple &Triple,
/// toolchain.
NVPTXToolChain::NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::Triple &HostTriple,
- const ArgList &Args)
- : ToolChain(D, Triple, Args), CudaInstallation(D, HostTriple, Args) {
- if (CudaInstallation.isValid()) {
- CudaInstallation.WarnIfUnsupportedVersion();
+ const ArgList &Args, bool Freestanding = false)
+ : ToolChain(D, Triple, Args), CudaInstallation(D, HostTriple, Args),
+ Freestanding(Freestanding) {
+ if (CudaInstallation.isValid())
getProgramPaths().push_back(std::string(CudaInstallation.getBinPath()));
- }
// Lookup binaries into the driver directory, this is used to
- // discover the clang-offload-bundler executable.
+ // discover the 'nvptx-arch' executable.
getProgramPaths().push_back(getDriver().Dir);
}
@@ -710,8 +715,8 @@ NVPTXToolChain::NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
/// system's default triple if not provided.
NVPTXToolChain::NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
- : NVPTXToolChain(D, Triple,
- llvm::Triple(llvm::sys::getDefaultTargetTriple()), Args) {}
+ : NVPTXToolChain(D, Triple, llvm::Triple(LLVM_HOST_TRIPLE), Args,
+ /*Freestanding=*/true) {}
llvm::opt::DerivedArgList *
NVPTXToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
@@ -735,6 +740,16 @@ NVPTXToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
return DAL;
}
+void NVPTXToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ // If we are compiling with a standalone NVPTX toolchain we want to try to
+ // mimic a standard environment as much as possible. So we enable lowering
+ // ctor / dtor functions to global symbols that can be registered.
+ if (Freestanding)
+ CC1Args.append({"-mllvm", "--nvptx-lower-global-ctor-dtor"});
+}
+
bool NVPTXToolChain::supportsDebugInfoOption(const llvm::opt::Arg *A) const {
const Option &O = A->getOption();
return (O.matches(options::OPT_gN_Group) &&
@@ -748,13 +763,14 @@ bool NVPTXToolChain::supportsDebugInfoOption(const llvm::opt::Arg *A) const {
}
void NVPTXToolChain::adjustDebugInfoKind(
- codegenoptions::DebugInfoKind &DebugInfoKind, const ArgList &Args) const {
+ llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
+ const ArgList &Args) const {
switch (mustEmitDebugInfo(Args)) {
case DisableDebugInfo:
- DebugInfoKind = codegenoptions::NoDebugInfo;
+ DebugInfoKind = llvm::codegenoptions::NoDebugInfo;
break;
case DebugDirectivesOnly:
- DebugInfoKind = codegenoptions::DebugDirectivesOnly;
+ DebugInfoKind = llvm::codegenoptions::DebugDirectivesOnly;
break;
case EmitSameDebugInfoAsHost:
// Use same debug info level as the host.
@@ -788,6 +804,13 @@ void CudaToolChain::addClangTargetOptions(
if (DriverArgs.hasFlag(options::OPT_fcuda_approx_transcendentals,
options::OPT_fno_cuda_approx_transcendentals, false))
CC1Args.push_back("-fcuda-approx-transcendentals");
+
+ // Unsized function arguments used for variadics were introduced in CUDA-9.0
+ // We still do not support generating code that actually uses variadic
+ // arguments yet, but we do need to allow parsing them as recent CUDA
+ // headers rely on that. https://github.com/llvm/llvm-project/issues/58410
+ if (CudaInstallation.version() >= CudaVersion::CUDA_90)
+ CC1Args.push_back("-fcuda-allow-variadic-functions");
}
if (DriverArgs.hasArg(options::OPT_nogpulib))
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
index bf2f369d405c..39df6e06fb26 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
@@ -132,8 +132,8 @@ namespace toolchains {
class LLVM_LIBRARY_VISIBILITY NVPTXToolChain : public ToolChain {
public:
NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
- const llvm::Triple &HostTriple,
- const llvm::opt::ArgList &Args);
+ const llvm::Triple &HostTriple, const llvm::opt::ArgList &Args,
+ bool Freestanding);
NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
@@ -142,6 +142,11 @@ public:
TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
Action::OffloadKind DeviceOffloadKind) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
+
// Never try to use the integrated assembler with CUDA; always fork out to
// ptxas.
bool useIntegratedAs() const override { return false; }
@@ -156,7 +161,7 @@ public:
bool IsMathErrnoDefault() const override { return false; }
bool supportsDebugInfoOption(const llvm::opt::Arg *A) const override;
- void adjustDebugInfoKind(codegenoptions::DebugInfoKind &DebugInfoKind,
+ void adjustDebugInfoKind(llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
const llvm::opt::ArgList &Args) const override;
// NVPTX supports only DWARF2.
@@ -168,6 +173,9 @@ public:
protected:
Tool *buildAssembler() const override; // ptxas.
Tool *buildLinker() const override; // nvlink.
+
+private:
+ bool Freestanding = false;
};
class LLVM_LIBRARY_VISIBILITY CudaToolChain : public NVPTXToolChain {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
index 9f95c962ee9a..65bd6c6a7eb3 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -23,9 +23,10 @@
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/ScopedPrinter.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/TargetParser.h"
+#include "llvm/TargetParser/Triple.h"
#include <cstdlib> // ::getenv
using namespace clang::driver;
@@ -74,7 +75,8 @@ llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
.Default(llvm::Triple::UnknownArch);
}
-void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str) {
+void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str,
+ const ArgList &Args) {
const llvm::Triple::ArchType Arch = getArchTypeForMachOArchName(Str);
llvm::ARM::ArchKind ArchKind = llvm::ARM::parseArch(Str);
T.setArch(Arch);
@@ -84,6 +86,17 @@ void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str) {
if (ArchKind == llvm::ARM::ArchKind::ARMV6M ||
ArchKind == llvm::ARM::ArchKind::ARMV7M ||
ArchKind == llvm::ARM::ArchKind::ARMV7EM) {
+ // Don't reject these -version-min= if we have the appropriate triple.
+ if (T.getOS() == llvm::Triple::IOS)
+ for (Arg *A : Args.filtered(options::OPT_mios_version_min_EQ))
+ A->ignoreTargetSpecific();
+ if (T.getOS() == llvm::Triple::WatchOS)
+ for (Arg *A : Args.filtered(options::OPT_mwatchos_version_min_EQ))
+ A->ignoreTargetSpecific();
+ if (T.getOS() == llvm::Triple::TvOS)
+ for (Arg *A : Args.filtered(options::OPT_mtvos_version_min_EQ))
+ A->ignoreTargetSpecific();
+
T.setOS(llvm::Triple::UnknownOS);
T.setObjectFormat(llvm::Triple::MachO);
}
@@ -393,6 +406,13 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
}
}
+ if (Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext) ||
+ Args.hasArg(options::OPT_ffreestanding)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-disable-atexit-based-global-dtor-lowering");
+ }
+
Args.AddLastArg(CmdArgs, options::OPT_prebind);
Args.AddLastArg(CmdArgs, options::OPT_noprebind);
Args.AddLastArg(CmdArgs, options::OPT_nofixprebinding);
@@ -442,6 +462,23 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
Args.AddAllArgs(CmdArgs, options::OPT_dylinker__install__name);
Args.AddLastArg(CmdArgs, options::OPT_dylinker);
Args.AddLastArg(CmdArgs, options::OPT_Mach);
+
+ if (LinkerIsLLD) {
+ if (auto *CSPGOGenerateArg = getLastCSProfileGenerateArg(Args)) {
+ SmallString<128> Path(CSPGOGenerateArg->getNumValues() == 0
+ ? ""
+ : CSPGOGenerateArg->getValue());
+ llvm::sys::path::append(Path, "default_%m.profraw");
+ CmdArgs.push_back("--cs-profile-generate");
+ CmdArgs.push_back(Args.MakeArgString(Twine("--cs-profile-path=") + Path));
+ } else if (auto *ProfileUseArg = getLastProfileUseArg(Args)) {
+ SmallString<128> Path(
+ ProfileUseArg->getNumValues() == 0 ? "" : ProfileUseArg->getValue());
+ if (Path.empty() || llvm::sys::fs::is_directory(Path))
+ llvm::sys::path::append(Path, "default.profdata");
+ CmdArgs.push_back(Args.MakeArgString(Twine("--cs-profile-path=") + Path));
+ }
+ }
}
/// Determine whether we are linking the ObjC runtime.
@@ -602,9 +639,9 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// It seems that the 'e' option is completely ignored for dynamic executables
// (the default), and with static executables, the last one wins, as expected.
- Args.AddAllArgs(CmdArgs, {options::OPT_d_Flag, options::OPT_s, options::OPT_t,
- options::OPT_Z_Flag, options::OPT_u_Group,
- options::OPT_e, options::OPT_r});
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_d_Flag, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_u_Group, options::OPT_r});
// Forward -ObjC when either -ObjC or -ObjC++ is used, to force loading
// members of static archive libraries which implement Objective-C classes or
@@ -1197,6 +1234,9 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
P += "macosx";
P += ".a";
+ if (!getVFS().exists(P))
+ getDriver().Diag(clang::diag::err_drv_darwin_sdk_missing_arclite) << P;
+
CmdArgs.push_back(Args.MakeArgString(P));
}
@@ -1426,32 +1466,52 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
const SanitizerArgs &Sanitize = getSanitizerArgs(Args);
- if (!Sanitize.needsSharedRt() && Sanitize.needsUbsanRt()) {
- getDriver().Diag(diag::err_drv_unsupported_static_ubsan_darwin);
- return;
+ if (!Sanitize.needsSharedRt()) {
+ const char *sanitizer = nullptr;
+ if (Sanitize.needsUbsanRt()) {
+ sanitizer = "UndefinedBehaviorSanitizer";
+ } else if (Sanitize.needsAsanRt()) {
+ sanitizer = "AddressSanitizer";
+ } else if (Sanitize.needsTsanRt()) {
+ sanitizer = "ThreadSanitizer";
+ }
+ if (sanitizer) {
+ getDriver().Diag(diag::err_drv_unsupported_static_sanitizer_darwin)
+ << sanitizer;
+ return;
+ }
}
- if (Sanitize.needsAsanRt())
- AddLinkSanitizerLibArgs(Args, CmdArgs, "asan");
- if (Sanitize.needsLsanRt())
- AddLinkSanitizerLibArgs(Args, CmdArgs, "lsan");
- if (Sanitize.needsUbsanRt()) {
- assert(Sanitize.needsSharedRt() && "Static sanitizer runtimes not supported");
- AddLinkSanitizerLibArgs(Args, CmdArgs,
- Sanitize.requiresMinimalRuntime() ? "ubsan_minimal"
- : "ubsan");
- }
- if (Sanitize.needsTsanRt())
- AddLinkSanitizerLibArgs(Args, CmdArgs, "tsan");
- if (Sanitize.needsFuzzer() && !Args.hasArg(options::OPT_dynamiclib)) {
- AddLinkSanitizerLibArgs(Args, CmdArgs, "fuzzer", /*shared=*/false);
+ if (Sanitize.linkRuntimes()) {
+ if (Sanitize.needsAsanRt()) {
+ assert(Sanitize.needsSharedRt() &&
+ "Static sanitizer runtimes not supported");
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "asan");
+ }
+ if (Sanitize.needsLsanRt())
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "lsan");
+ if (Sanitize.needsUbsanRt()) {
+ assert(Sanitize.needsSharedRt() &&
+ "Static sanitizer runtimes not supported");
+ AddLinkSanitizerLibArgs(
+ Args, CmdArgs,
+ Sanitize.requiresMinimalRuntime() ? "ubsan_minimal" : "ubsan");
+ }
+ if (Sanitize.needsTsanRt()) {
+ assert(Sanitize.needsSharedRt() &&
+ "Static sanitizer runtimes not supported");
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "tsan");
+ }
+ if (Sanitize.needsFuzzer() && !Args.hasArg(options::OPT_dynamiclib)) {
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "fuzzer", /*shared=*/false);
- // Libfuzzer is written in C++ and requires libcxx.
- AddCXXStdlibLibArgs(Args, CmdArgs);
- }
- if (Sanitize.needsStatsRt()) {
- AddLinkRuntimeLib(Args, CmdArgs, "stats_client", RLO_AlwaysLink);
- AddLinkSanitizerLibArgs(Args, CmdArgs, "stats");
+ // Libfuzzer is written in C++ and requires libcxx.
+ AddCXXStdlibLibArgs(Args, CmdArgs);
+ }
+ if (Sanitize.needsStatsRt()) {
+ AddLinkRuntimeLib(Args, CmdArgs, "stats_client", RLO_AlwaysLink);
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "stats");
+ }
}
const XRayArgs &XRay = getXRayArgs();
@@ -1772,7 +1832,7 @@ getDeploymentTargetFromOSVersionArg(DerivedArgList &Args,
->getAsString(Args);
}
return DarwinPlatform::createOSVersionArg(Darwin::MacOS, macOSVersion,
- /*IsImulator=*/false);
+ /*IsSimulator=*/false);
} else if (iOSVersion) {
if (TvOSVersion || WatchOSVersion) {
TheDriver.Diag(diag::err_drv_argument_not_allowed_with)
@@ -2455,17 +2515,6 @@ void DarwinClang::AddClangCXXStdlibIncludeArgs(
switch (arch) {
default: break;
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- IsBaseFound = AddGnuCPlusPlusIncludePaths(DriverArgs, CC1Args, UsrIncludeCxx,
- "4.2.1",
- "powerpc-apple-darwin10",
- arch == llvm::Triple::ppc64 ? "ppc64" : "");
- IsBaseFound |= AddGnuCPlusPlusIncludePaths(DriverArgs, CC1Args, UsrIncludeCxx,
- "4.0.0", "powerpc-apple-darwin10",
- arch == llvm::Triple::ppc64 ? "ppc64" : "");
- break;
-
case llvm::Triple::x86:
case llvm::Triple::x86_64:
IsBaseFound = AddGnuCPlusPlusIncludePaths(DriverArgs, CC1Args, UsrIncludeCxx,
@@ -2925,8 +2974,10 @@ ToolChain::UnwindTableLevel MachO::getDefaultUnwindTableLevel(const ArgList &Arg
(GetExceptionModel(Args) != llvm::ExceptionHandling::SjLj &&
Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions,
true)))
- return getArch() == llvm::Triple::aarch64 ? UnwindTableLevel::Synchronous
- : UnwindTableLevel::Asynchronous;
+ return (getArch() == llvm::Triple::aarch64 ||
+ getArch() == llvm::Triple::aarch64_32)
+ ? UnwindTableLevel::Synchronous
+ : UnwindTableLevel::Asynchronous;
return UnwindTableLevel::None;
}
@@ -3253,7 +3304,6 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
Res |= SanitizerKind::Leak;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
- Res |= SanitizerKind::Function;
Res |= SanitizerKind::ObjCCast;
// Prior to 10.9, macOS shipped a version of the C++ standard library without
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
index f64e7180d0af..815449ae8f37 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
@@ -28,7 +28,8 @@ namespace tools {
namespace darwin {
llvm::Triple::ArchType getArchTypeForMachOArchName(StringRef Str);
-void setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str);
+void setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str,
+ const llvm::opt::ArgList &Args);
class LLVM_LIBRARY_VISIBILITY MachOTool : public Tool {
virtual void anchor();
@@ -238,10 +239,6 @@ public:
// expected to use /usr/include/Block.h.
return true;
}
- bool IsIntegratedAssemblerDefault() const override {
- // Default integrated assembler to on for Apple's MachO targets.
- return true;
- }
bool IsMathErrnoDefault() const override { return false; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
index 4ee046be9ea9..bdc75f51b7a2 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
@@ -11,6 +11,7 @@
#include "CommonArgs.h"
#include "clang/Driver/Options.h"
+#include "llvm/Frontend/Debug/Options.h"
#include <cassert>
@@ -29,19 +30,27 @@ static void addDashXForInput(const ArgList &Args, const InputInfo &Input,
void Flang::addFortranDialectOptions(const ArgList &Args,
ArgStringList &CmdArgs) const {
- Args.AddAllArgs(
- CmdArgs, {options::OPT_ffixed_form, options::OPT_ffree_form,
- options::OPT_ffixed_line_length_EQ, options::OPT_fopenmp,
- options::OPT_fopenacc, options::OPT_finput_charset_EQ,
- options::OPT_fimplicit_none, options::OPT_fno_implicit_none,
- options::OPT_fbackslash, options::OPT_fno_backslash,
- options::OPT_flogical_abbreviations,
- options::OPT_fno_logical_abbreviations,
- options::OPT_fxor_operator, options::OPT_fno_xor_operator,
- options::OPT_falternative_parameter_statement,
- options::OPT_fdefault_real_8, options::OPT_fdefault_integer_8,
- options::OPT_fdefault_double_8, options::OPT_flarge_sizes,
- options::OPT_fno_automatic});
+ Args.AddAllArgs(CmdArgs, {options::OPT_ffixed_form,
+ options::OPT_ffree_form,
+ options::OPT_ffixed_line_length_EQ,
+ options::OPT_fopenmp,
+ options::OPT_fopenmp_version_EQ,
+ options::OPT_fopenacc,
+ options::OPT_finput_charset_EQ,
+ options::OPT_fimplicit_none,
+ options::OPT_fno_implicit_none,
+ options::OPT_fbackslash,
+ options::OPT_fno_backslash,
+ options::OPT_flogical_abbreviations,
+ options::OPT_fno_logical_abbreviations,
+ options::OPT_fxor_operator,
+ options::OPT_fno_xor_operator,
+ options::OPT_falternative_parameter_statement,
+ options::OPT_fdefault_real_8,
+ options::OPT_fdefault_integer_8,
+ options::OPT_fdefault_double_8,
+ options::OPT_flarge_sizes,
+ options::OPT_fno_automatic});
}
void Flang::addPreprocessingOptions(const ArgList &Args,
@@ -51,12 +60,90 @@ void Flang::addPreprocessingOptions(const ArgList &Args,
options::OPT_I, options::OPT_cpp, options::OPT_nocpp});
}
+/// @brief shouldLoopVersion
+///
+/// Check if Loop Versioning should be enabled.
+/// We look for the last of one of the following:
+/// -Ofast, -O4, -O<number>, -floop-versioning and -fno-loop-versioning.
+/// Loop versioning is disabled if the last option is
+/// -fno-loop-versioning.
+/// Loop versioning is enabled if the last option is one of:
+/// -floop-versioning
+/// -Ofast
+/// -O4
+/// -O3 (or any -O<number> greater than 2)
+/// For all other cases, loop versioning is disabled.
+///
+/// The gfortran compiler automatically enables the option for -O3 or -Ofast.
+///
+/// @return true if loop versioning should be enabled, otherwise false.
+static bool shouldLoopVersion(const ArgList &Args) {
+ const Arg *LoopVersioningArg = Args.getLastArg(
+ options::OPT_Ofast, options::OPT_O, options::OPT_O4,
+ options::OPT_floop_versioning, options::OPT_fno_loop_versioning);
+ if (!LoopVersioningArg)
+ return false;
+
+ if (LoopVersioningArg->getOption().matches(options::OPT_fno_loop_versioning))
+ return false;
+
+ if (LoopVersioningArg->getOption().matches(options::OPT_floop_versioning))
+ return true;
+
+ if (LoopVersioningArg->getOption().matches(options::OPT_Ofast) ||
+ LoopVersioningArg->getOption().matches(options::OPT_O4))
+ return true;
+
+ if (LoopVersioningArg->getOption().matches(options::OPT_O)) {
+ StringRef S(LoopVersioningArg->getValue());
+ unsigned OptLevel = 0;
+ // Note -Os or -Oz would "fail" here, so return false, which is the
+ // desired behavior.
+ if (S.getAsInteger(10, OptLevel))
+ return false;
+
+ return OptLevel > 2;
+ }
+
+ llvm_unreachable("We should not end up here");
+ return false;
+}
+
void Flang::addOtherOptions(const ArgList &Args, ArgStringList &CmdArgs) const {
Args.AddAllArgs(CmdArgs,
{options::OPT_module_dir, options::OPT_fdebug_module_writer,
options::OPT_fintrinsic_modules_path, options::OPT_pedantic,
options::OPT_std_EQ, options::OPT_W_Joined,
- options::OPT_fconvert_EQ, options::OPT_fpass_plugin_EQ});
+ options::OPT_fconvert_EQ, options::OPT_fpass_plugin_EQ,
+ options::OPT_funderscoring, options::OPT_fno_underscoring});
+
+ llvm::codegenoptions::DebugInfoKind DebugInfoKind;
+ if (Args.hasArg(options::OPT_gN_Group)) {
+ Arg *gNArg = Args.getLastArg(options::OPT_gN_Group);
+ DebugInfoKind = debugLevelToInfoKind(*gNArg);
+ } else if (Args.hasArg(options::OPT_g_Flag)) {
+ DebugInfoKind = llvm::codegenoptions::DebugLineTablesOnly;
+ } else {
+ DebugInfoKind = llvm::codegenoptions::NoDebugInfo;
+ }
+ addDebugInfoKind(CmdArgs, DebugInfoKind);
+}
+
+void Flang::addCodegenOptions(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ Arg *stackArrays =
+ Args.getLastArg(options::OPT_Ofast, options::OPT_fstack_arrays,
+ options::OPT_fno_stack_arrays);
+ if (stackArrays &&
+ !stackArrays->getOption().matches(options::OPT_fno_stack_arrays))
+ CmdArgs.push_back("-fstack-arrays");
+
+ if (Args.hasArg(options::OPT_flang_experimental_hlfir))
+ CmdArgs.push_back("-flang-experimental-hlfir");
+ if (Args.hasArg(options::OPT_flang_experimental_polymorphism))
+ CmdArgs.push_back("-flang-experimental-polymorphism");
+ if (shouldLoopVersion(Args))
+ CmdArgs.push_back("-fversion-loops-for-stride");
}
void Flang::addPicOptions(const ArgList &Args, ArgStringList &CmdArgs) const {
@@ -96,8 +183,10 @@ void Flang::addTargetOptions(const ArgList &Args,
switch (TC.getArch()) {
default:
break;
+ case llvm::Triple::r600:
+ case llvm::Triple::amdgcn:
case llvm::Triple::aarch64:
- [[fallthrough]];
+ case llvm::Triple::riscv64:
case llvm::Triple::x86_64:
getTargetFeatures(D, Triple, Args, CmdArgs, /*ForAs*/ false);
break;
@@ -106,6 +195,69 @@ void Flang::addTargetOptions(const ArgList &Args,
// TODO: Add target specific flags, ABI, mtune option etc.
}
+void Flang::addOffloadOptions(Compilation &C, const InputInfoList &Inputs,
+ const JobAction &JA, const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ bool IsOpenMPDevice = JA.isDeviceOffloading(Action::OFK_OpenMP);
+ bool IsHostOffloadingAction = JA.isHostOffloading(Action::OFK_OpenMP) ||
+ JA.isHostOffloading(C.getActiveOffloadKinds());
+
+ // Skips the primary input file, which is the input file that the compilation
+ // process will be executed upon (e.g. the host bitcode file) and
+ // adds other secondary input (e.g. device bitcode files for embedding to the
+ // -fembed-offload-object argument or the host IR file for processing
+ // during device compilation to the fopenmp-host-ir-file-path argument via
+ // OpenMPDeviceInput). This is condensed logic from the ConstructJob
+ // function inside of the Clang driver for pushing on further input arguments
+ // needed for offloading during various phases of compilation.
+ for (size_t i = 1; i < Inputs.size(); ++i) {
+ if (Inputs[i].getType() == types::TY_Nothing) {
+ // contains nothing, so it's skippable
+ } else if (IsHostOffloadingAction) {
+ CmdArgs.push_back(
+ Args.MakeArgString("-fembed-offload-object=" +
+ getToolChain().getInputFilename(Inputs[i])));
+ } else if (IsOpenMPDevice) {
+ if (Inputs[i].getFilename()) {
+ CmdArgs.push_back("-fopenmp-host-ir-file-path");
+ CmdArgs.push_back(Args.MakeArgString(Inputs[i].getFilename()));
+ } else {
+ llvm_unreachable("missing openmp host-ir file for device offloading");
+ }
+ } else {
+ llvm_unreachable(
+ "unexpectedly given multiple inputs or given unknown input");
+ }
+ }
+
+ if (IsOpenMPDevice) {
+ // -fopenmp-is-target-device is passed along to tell the frontend that it is
+ // generating code for a device, so that only the relevant code is emitted.
+ CmdArgs.push_back("-fopenmp-is-target-device");
+
+ // When in OpenMP offloading mode, enable debugging on the device.
+ Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_target_debug_EQ);
+ if (Args.hasFlag(options::OPT_fopenmp_target_debug,
+ options::OPT_fno_openmp_target_debug, /*Default=*/false))
+ CmdArgs.push_back("-fopenmp-target-debug");
+
+ // When in OpenMP offloading mode, forward assumptions information about
+ // thread and team counts in the device.
+ if (Args.hasFlag(options::OPT_fopenmp_assume_teams_oversubscription,
+ options::OPT_fno_openmp_assume_teams_oversubscription,
+ /*Default=*/false))
+ CmdArgs.push_back("-fopenmp-assume-teams-oversubscription");
+ if (Args.hasFlag(options::OPT_fopenmp_assume_threads_oversubscription,
+ options::OPT_fno_openmp_assume_threads_oversubscription,
+ /*Default=*/false))
+ CmdArgs.push_back("-fopenmp-assume-threads-oversubscription");
+ if (Args.hasArg(options::OPT_fopenmp_assume_no_thread_state))
+ CmdArgs.push_back("-fopenmp-assume-no-thread-state");
+ if (Args.hasArg(options::OPT_fopenmp_assume_no_nested_parallelism))
+ CmdArgs.push_back("-fopenmp-assume-no-nested-parallelism");
+ }
+}
+
static void addFloatingPointOptions(const Driver &D, const ArgList &Args,
ArgStringList &CmdArgs) {
StringRef FPContract;
@@ -244,6 +396,7 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
const Driver &D = TC.getDriver();
ArgStringList CmdArgs;
+ DiagnosticsEngine &Diags = D.getDiags();
// Invoke ourselves in -fc1 mode.
CmdArgs.push_back("-fc1");
@@ -291,9 +444,21 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
// to avoid warn_drv_unused_argument.
Args.getLastArg(options::OPT_fcolor_diagnostics,
options::OPT_fno_color_diagnostics);
- if (D.getDiags().getDiagnosticOptions().ShowColors)
+ if (Diags.getDiagnosticOptions().ShowColors)
CmdArgs.push_back("-fcolor-diagnostics");
+ // LTO mode is parsed by the Clang driver library.
+ LTOKind LTOMode = D.getLTOMode(/* IsOffload */ false);
+ assert(LTOMode != LTOK_Unknown && "Unknown LTO mode.");
+ if (LTOMode == LTOK_Full)
+ CmdArgs.push_back("-flto=full");
+ else if (LTOMode == LTOK_Thin) {
+ Diags.Report(
+ Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ "the option '-flto=thin' is a work in progress"));
+ CmdArgs.push_back("-flto=thin");
+ }
+
// -fPIC and related options.
addPicOptions(Args, CmdArgs);
@@ -303,9 +468,15 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
// Add target args, features, etc.
addTargetOptions(Args, CmdArgs);
+ // Add Codegen options
+ addCodegenOptions(Args, CmdArgs);
+
// Add other compile options
addOtherOptions(Args, CmdArgs);
+ // Offloading related options
+ addOffloadOptions(C, Inputs, JA, Args, CmdArgs);
+
// Forward -Xflang arguments to -fc1
Args.AddAllArgValues(CmdArgs, options::OPT_Xflang);
@@ -321,6 +492,13 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
A->render(Args, CmdArgs);
}
+ // Remove any unsupported gfortran diagnostic options
+ for (const Arg *A : Args.filtered(options::OPT_flang_ignored_w_Group)) {
+ A->claim();
+ D.Diag(diag::warn_drv_unsupported_diag_option_for_flang)
+ << A->getOption().getName();
+ }
+
// Optimization level for CodeGen.
if (const Arg *A = Args.getLastArg(options::OPT_O_Group)) {
if (A->getOption().matches(options::OPT_O4)) {
@@ -342,6 +520,9 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
assert(Input.isFilename() && "Invalid input.");
+ if (Args.getLastArg(options::OPT_save_temps_EQ))
+ Args.AddLastArg(CmdArgs, options::OPT_save_temps_EQ);
+
addDashXForInput(Args, Input, CmdArgs);
CmdArgs.push_back(Input.getFilename());
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h
index 4c85c602e267..962b4ae60172 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h
@@ -56,6 +56,25 @@ private:
void addTargetOptions(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+ /// Extract offload options from the driver arguments and add them to
+ /// the command arguments.
+ /// \param [in] C The current compilation for the driver invocation
+ /// \param [in] Inputs The input information on the current file inputs
+ /// \param [in] JA The job action
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void addOffloadOptions(Compilation &C, const InputInfoList &Inputs,
+ const JobAction &JA, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
+ /// Extract options for code generation from the driver arguments and add them
+ /// to the command arguments.
+ ///
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void addCodegenOptions(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
/// Extract other compilation options from the driver arguments and add them
/// to the command arguments.
///
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
index 80d49c28f497..84e257741702 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
@@ -269,7 +269,6 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
Args.AddAllArgs(CmdArgs, options::OPT_s);
Args.AddAllArgs(CmdArgs, options::OPT_t);
Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
@@ -443,12 +442,12 @@ void FreeBSD::AddCXXStdlibLibArgs(const ArgList &Args,
void FreeBSD::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
+ CudaInstallation->AddCudaIncludeArgs(DriverArgs, CC1Args);
}
void FreeBSD::AddHIPIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+ RocmInstallation->AddHIPIncludeArgs(DriverArgs, CC1Args);
}
Tool *FreeBSD::buildAssembler() const {
@@ -482,9 +481,6 @@ SanitizerMask FreeBSD::getSupportedSanitizers() const {
Res |= SanitizerKind::Leak;
Res |= SanitizerKind::Thread;
}
- if (IsX86 || IsX86_64) {
- Res |= SanitizerKind::Function;
- }
if (IsAArch64 || IsX86 || IsX86_64) {
Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Fuzzer;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
index ea6d7d697770..65692cc7f954 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
@@ -12,6 +12,7 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/MultilibBuilder.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
@@ -186,7 +187,53 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lc");
}
- C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
+}
+
+void fuchsia::StaticLibTool::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+
+ // ar tool command "llvm-ar <options> <output_file> <input_files>".
+ ArgStringList CmdArgs;
+ // Create and insert file members with a deterministic index.
+ CmdArgs.push_back("rcsD");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs) {
+ if (II.isFilename()) {
+ CmdArgs.push_back(II.getFilename());
+ }
+ }
+
+ // Delete old output archive file if it already exists before generating a new
+ // archive file.
+ const char *OutputFileName = Output.getFilename();
+ if (Output.isFilename() && llvm::sys::fs::exists(OutputFileName)) {
+ if (std::error_code EC = llvm::sys::fs::remove(OutputFileName)) {
+ D.Diag(diag::err_drv_unable_to_remove_file) << EC.message();
+ return;
+ }
+ }
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetStaticLibToolPath());
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
Exec, CmdArgs, Inputs, Output));
}
@@ -217,53 +264,35 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
Multilibs.push_back(Multilib());
// Use the noexcept variant with -fno-exceptions to avoid the extra overhead.
- Multilibs.push_back(Multilib("noexcept", {}, {}, 1)
- .flag("-fexceptions")
- .flag("+fno-exceptions"));
+ Multilibs.push_back(MultilibBuilder("noexcept", {}, {})
+ .flag("-fexceptions", /*Disallow=*/true)
+ .flag("-fno-exceptions")
+ .makeMultilib());
// ASan has higher priority because we always want the instrumentated version.
- Multilibs.push_back(Multilib("asan", {}, {}, 2)
- .flag("+fsanitize=address"));
+ Multilibs.push_back(MultilibBuilder("asan", {}, {})
+ .flag("-fsanitize=address")
+ .makeMultilib());
// Use the asan+noexcept variant with ASan and -fno-exceptions.
- Multilibs.push_back(Multilib("asan+noexcept", {}, {}, 3)
- .flag("+fsanitize=address")
- .flag("-fexceptions")
- .flag("+fno-exceptions"));
+ Multilibs.push_back(MultilibBuilder("asan+noexcept", {}, {})
+ .flag("-fsanitize=address")
+ .flag("-fexceptions", /*Disallow=*/true)
+ .flag("-fno-exceptions")
+ .makeMultilib());
// HWASan has higher priority because we always want the instrumentated
// version.
- Multilibs.push_back(
- Multilib("hwasan", {}, {}, 4).flag("+fsanitize=hwaddress"));
+ Multilibs.push_back(MultilibBuilder("hwasan", {}, {})
+ .flag("-fsanitize=hwaddress")
+ .makeMultilib());
// Use the hwasan+noexcept variant with HWASan and -fno-exceptions.
- Multilibs.push_back(Multilib("hwasan+noexcept", {}, {}, 5)
- .flag("+fsanitize=hwaddress")
- .flag("-fexceptions")
- .flag("+fno-exceptions"));
- // Use the relative vtables ABI.
- // TODO: Remove these multilibs once relative vtables are enabled by default
- // for Fuchsia.
- Multilibs.push_back(Multilib("relative-vtables", {}, {}, 6)
- .flag("+fexperimental-relative-c++-abi-vtables"));
- Multilibs.push_back(Multilib("relative-vtables+noexcept", {}, {}, 7)
- .flag("+fexperimental-relative-c++-abi-vtables")
- .flag("-fexceptions")
- .flag("+fno-exceptions"));
- Multilibs.push_back(Multilib("relative-vtables+asan", {}, {}, 8)
- .flag("+fexperimental-relative-c++-abi-vtables")
- .flag("+fsanitize=address"));
- Multilibs.push_back(Multilib("relative-vtables+asan+noexcept", {}, {}, 9)
- .flag("+fexperimental-relative-c++-abi-vtables")
- .flag("+fsanitize=address")
- .flag("-fexceptions")
- .flag("+fno-exceptions"));
- Multilibs.push_back(Multilib("relative-vtables+hwasan", {}, {}, 10)
- .flag("+fexperimental-relative-c++-abi-vtables")
- .flag("+fsanitize=hwaddress"));
- Multilibs.push_back(Multilib("relative-vtables+hwasan+noexcept", {}, {}, 11)
- .flag("+fexperimental-relative-c++-abi-vtables")
- .flag("+fsanitize=hwaddress")
- .flag("-fexceptions")
- .flag("+fno-exceptions"));
+ Multilibs.push_back(MultilibBuilder("hwasan+noexcept", {}, {})
+ .flag("-fsanitize=hwaddress")
+ .flag("-fexceptions", /*Disallow=*/true)
+ .flag("-fno-exceptions")
+ .makeMultilib());
// Use Itanium C++ ABI for the compat multilib.
- Multilibs.push_back(Multilib("compat", {}, {}, 12).flag("+fc++-abi=itanium"));
+ Multilibs.push_back(MultilibBuilder("compat", {}, {})
+ .flag("-fc++-abi=itanium")
+ .makeMultilib());
Multilibs.FilterOut([&](const Multilib &M) {
std::vector<std::string> RD = FilePaths(M);
@@ -271,30 +300,31 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
});
Multilib::flags_list Flags;
- addMultilibFlag(
- Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions, true),
- "fexceptions", Flags);
- addMultilibFlag(getSanitizerArgs(Args).needsAsanRt(), "fsanitize=address",
- Flags);
- addMultilibFlag(getSanitizerArgs(Args).needsHwasanRt(), "fsanitize=hwaddress",
+ bool Exceptions =
+ Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions, true);
+ addMultilibFlag(Exceptions, "-fexceptions", Flags);
+ addMultilibFlag(!Exceptions, "-fno-exceptions", Flags);
+ addMultilibFlag(getSanitizerArgs(Args).needsAsanRt(), "-fsanitize=address",
Flags);
+ addMultilibFlag(getSanitizerArgs(Args).needsHwasanRt(),
+ "-fsanitize=hwaddress", Flags);
- addMultilibFlag(
- Args.hasFlag(options::OPT_fexperimental_relative_cxx_abi_vtables,
- options::OPT_fno_experimental_relative_cxx_abi_vtables,
- /*default=*/false),
- "fexperimental-relative-c++-abi-vtables", Flags);
addMultilibFlag(Args.getLastArgValue(options::OPT_fcxx_abi_EQ) == "itanium",
- "fc++-abi=itanium", Flags);
+ "-fc++-abi=itanium", Flags);
Multilibs.setFilePathsCallback(FilePaths);
- if (Multilibs.select(Flags, SelectedMultilib))
- if (!SelectedMultilib.isDefault())
+ if (Multilibs.select(Flags, SelectedMultilibs)) {
+ // Ensure that -print-multi-directory only outputs one multilib directory.
+ Multilib LastSelected = SelectedMultilibs.back();
+ SelectedMultilibs = {LastSelected};
+
+ if (!SelectedMultilibs.back().isDefault())
if (const auto &PathsCallback = Multilibs.filePathsCallback())
- for (const auto &Path : PathsCallback(SelectedMultilib))
+ for (const auto &Path : PathsCallback(SelectedMultilibs.back()))
// Prepend the multilib path to ensure it takes the precedence.
getFilePaths().insert(getFilePaths().begin(), Path);
+ }
}
std::string Fuchsia::ComputeEffectiveClangTriple(const ArgList &Args,
@@ -307,6 +337,10 @@ Tool *Fuchsia::buildLinker() const {
return new tools::fuchsia::Linker(*this);
}
+Tool *Fuchsia::buildStaticLibTool() const {
+ return new tools::fuchsia::StaticLibTool(*this);
+}
+
ToolChain::RuntimeLibType Fuchsia::GetRuntimeLibType(
const ArgList &Args) const {
if (Arg *A = Args.getLastArg(clang::driver::options::OPT_rtlib_EQ)) {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h
index e43cb3b0dddf..95e1785c9fac 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h
@@ -18,6 +18,20 @@ namespace clang {
namespace driver {
namespace tools {
namespace fuchsia {
+class LLVM_LIBRARY_VISIBILITY StaticLibTool : public Tool {
+public:
+ StaticLibTool(const ToolChain &TC)
+ : Tool("fuchsia::StaticLibTool", "llvm-ar", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
Linker(const ToolChain &TC) : Tool("fuchsia::Linker", "ld.lld", TC) {}
@@ -41,9 +55,7 @@ public:
const llvm::opt::ArgList &Args);
bool HasNativeLLVMSupport() const override { return true; }
- bool IsIntegratedAssemblerDefault() const override { return true; }
bool IsMathErrnoDefault() const override { return false; }
- bool useRelaxRelocations() const override { return true; };
RuntimeLibType GetDefaultRuntimeLibType() const override {
return ToolChain::RLT_CompilerRT;
}
@@ -100,6 +112,7 @@ public:
protected:
Tool *buildLinker() const override;
+ Tool *buildStaticLibTool() const override;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
index 4f2340316654..b64fff8b14be 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -9,6 +9,7 @@
#include "Gnu.h"
#include "Arch/ARM.h"
#include "Arch/CSKY.h"
+#include "Arch/LoongArch.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
#include "Arch/RISCV.h"
@@ -20,6 +21,7 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/MultilibBuilder.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -28,8 +30,8 @@
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/TargetParser.h"
#include <system_error>
using namespace clang::driver;
@@ -218,30 +220,6 @@ void tools::gcc::Linker::RenderExtraToolArgs(const JobAction &JA,
// The types are (hopefully) good enough.
}
-// On Arm the endianness of the output file is determined by the target and
-// can be overridden by the pseudo-target flags '-mlittle-endian'/'-EL' and
-// '-mbig-endian'/'-EB'. Unlike other targets the flag does not result in a
-// normalized triple so we must handle the flag here.
-static bool isArmBigEndian(const llvm::Triple &Triple,
- const ArgList &Args) {
- bool IsBigEndian = false;
- switch (Triple.getArch()) {
- case llvm::Triple::armeb:
- case llvm::Triple::thumbeb:
- IsBigEndian = true;
- [[fallthrough]];
- case llvm::Triple::arm:
- case llvm::Triple::thumb:
- if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
- options::OPT_mbig_endian))
- IsBigEndian = !A->getOption().matches(options::OPT_mlittle_endian);
- break;
- default:
- break;
- }
- return IsBigEndian;
-}
-
static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
switch (T.getArch()) {
case llvm::Triple::x86:
@@ -256,7 +234,8 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
case llvm::Triple::thumb:
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
- return isArmBigEndian(T, Args) ? "armelfb_linux_eabi" : "armelf_linux_eabi";
+ return tools::arm::isARMBigEndian(T, Args) ? "armelfb_linux_eabi"
+ : "armelf_linux_eabi";
case llvm::Triple::m68k:
return "m68kelf";
case llvm::Triple::ppc:
@@ -404,6 +383,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
const llvm::Triple::ArchType Arch = ToolChain.getArch();
+ const bool isOHOSFamily = ToolChain.getTriple().isOHOSFamily();
const bool isAndroid = ToolChain.getTriple().isAndroid();
const bool IsIAMCU = ToolChain.getTriple().isOSIAMCU();
const bool IsVE = ToolChain.getTriple().isVE();
@@ -444,17 +424,18 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_s))
CmdArgs.push_back("-s");
- if (Triple.isARM() || Triple.isThumb() || Triple.isAArch64()) {
- bool IsBigEndian = isArmBigEndian(Triple, Args);
+ if (Triple.isARM() || Triple.isThumb()) {
+ bool IsBigEndian = arm::isARMBigEndian(Triple, Args);
if (IsBigEndian)
arm::appendBE8LinkFlag(Args, CmdArgs, Triple);
- IsBigEndian = IsBigEndian || Arch == llvm::Triple::aarch64_be;
CmdArgs.push_back(IsBigEndian ? "-EB" : "-EL");
+ } else if (Triple.isAArch64()) {
+ CmdArgs.push_back(Arch == llvm::Triple::aarch64_be ? "-EB" : "-EL");
}
// Most Android ARM64 targets should enable the linker fix for erratum
// 843419. Only non-Cortex-A53 devices are allowed to skip this flag.
- if (Arch == llvm::Triple::aarch64 && isAndroid) {
+ if (Arch == llvm::Triple::aarch64 && (isAndroid || isOHOSFamily)) {
std::string CPU = getCPUName(D, Args, Triple);
if (CPU.empty() || CPU == "generic" || CPU == "cortex-a53")
CmdArgs.push_back("--fix-cortex-a53-843419");
@@ -640,7 +621,9 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--pop-state");
}
- if (WantPthread && !isAndroid)
+ // We don't need libpthread neither for bionic (Android) nor for musl,
+ // (used by OHOS as runtime library).
+ if (WantPthread && !isAndroid && !isOHOSFamily)
CmdArgs.push_back("-lpthread");
if (Args.hasArg(options::OPT_fsplit_stack))
@@ -815,7 +798,7 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
case llvm::Triple::thumb:
case llvm::Triple::thumbeb: {
const llvm::Triple &Triple2 = getToolChain().getTriple();
- CmdArgs.push_back(isArmBigEndian(Triple2, Args) ? "-EB" : "-EL");
+ CmdArgs.push_back(arm::isARMBigEndian(Triple2, Args) ? "-EB" : "-EL");
switch (Triple2.getSubArch()) {
case llvm::Triple::ARMSubArch_v7:
CmdArgs.push_back("-mfpu=neon");
@@ -844,6 +827,11 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
normalizeCPUNamesForAssembler(Args, CmdArgs);
Args.AddLastArg(CmdArgs, options::OPT_mfpu_EQ);
+ // The integrated assembler doesn't implement e_flags setting behavior for
+ // -meabi=gnu (gcc -mabi={apcs-gnu,atpcs} passes -meabi=gnu to gas). For
+ // compatibility we accept but warn.
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_mabi_EQ))
+ A->ignoreTargetSpecific();
break;
}
case llvm::Triple::aarch64:
@@ -855,6 +843,13 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
break;
}
+ // TODO: handle loongarch32.
+ case llvm::Triple::loongarch64: {
+ StringRef ABIName =
+ loongarch::getLoongArchABI(D, Args, getToolChain().getTriple());
+ CmdArgs.push_back(Args.MakeArgString("-mabi=" + ABIName));
+ break;
+ }
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
@@ -1045,46 +1040,47 @@ static bool isMSP430(llvm::Triple::ArchType Arch) {
return Arch == llvm::Triple::msp430;
}
-static Multilib makeMultilib(StringRef commonSuffix) {
- return Multilib(commonSuffix, commonSuffix, commonSuffix);
-}
-
static bool findMipsCsMultilibs(const Multilib::flags_list &Flags,
FilterNonExistent &NonExistent,
DetectedMultilibs &Result) {
// Check for Code Sourcery toolchain multilibs
MultilibSet CSMipsMultilibs;
{
- auto MArchMips16 = makeMultilib("/mips16").flag("+m32").flag("+mips16");
+ auto MArchMips16 = MultilibBuilder("/mips16").flag("-m32").flag("-mips16");
auto MArchMicroMips =
- makeMultilib("/micromips").flag("+m32").flag("+mmicromips");
+ MultilibBuilder("/micromips").flag("-m32").flag("-mmicromips");
- auto MArchDefault = makeMultilib("").flag("-mips16").flag("-mmicromips");
+ auto MArchDefault = MultilibBuilder("")
+ .flag("-mips16", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true);
- auto UCLibc = makeMultilib("/uclibc").flag("+muclibc");
+ auto UCLibc = MultilibBuilder("/uclibc").flag("-muclibc");
- auto SoftFloat = makeMultilib("/soft-float").flag("+msoft-float");
+ auto SoftFloat = MultilibBuilder("/soft-float").flag("-msoft-float");
- auto Nan2008 = makeMultilib("/nan2008").flag("+mnan=2008");
+ auto Nan2008 = MultilibBuilder("/nan2008").flag("-mnan=2008");
- auto DefaultFloat =
- makeMultilib("").flag("-msoft-float").flag("-mnan=2008");
+ auto DefaultFloat = MultilibBuilder("")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008", /*Disallow=*/true);
- auto BigEndian = makeMultilib("").flag("+EB").flag("-EL");
+ auto BigEndian =
+ MultilibBuilder("").flag("-EB").flag("-EL", /*Disallow=*/true);
- auto LittleEndian = makeMultilib("/el").flag("+EL").flag("-EB");
+ auto LittleEndian =
+ MultilibBuilder("/el").flag("-EL").flag("-EB", /*Disallow=*/true);
// Note that this one's osSuffix is ""
- auto MAbi64 = makeMultilib("")
+ auto MAbi64 = MultilibBuilder("")
.gccSuffix("/64")
.includeSuffix("/64")
- .flag("+mabi=n64")
- .flag("-mabi=n32")
- .flag("-m32");
+ .flag("-mabi=n64")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-m32", /*Disallow=*/true);
CSMipsMultilibs =
- MultilibSet()
+ MultilibSetBuilder()
.Either(MArchMips16, MArchMicroMips, MArchDefault)
.Maybe(UCLibc)
.Either(SoftFloat, Nan2008, DefaultFloat)
@@ -1094,6 +1090,7 @@ static bool findMipsCsMultilibs(const Multilib::flags_list &Flags,
.Maybe(MAbi64)
.FilterOut("/mips16.*/64")
.FilterOut("/micromips.*/64")
+ .makeMultilibSet()
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
std::vector<std::string> Dirs({"/include"});
@@ -1108,21 +1105,27 @@ static bool findMipsCsMultilibs(const Multilib::flags_list &Flags,
MultilibSet DebianMipsMultilibs;
{
- Multilib MAbiN32 =
- Multilib().gccSuffix("/n32").includeSuffix("/n32").flag("+mabi=n32");
+ MultilibBuilder MAbiN32 =
+ MultilibBuilder().gccSuffix("/n32").includeSuffix("/n32").flag(
+ "-mabi=n32");
- Multilib M64 = Multilib()
- .gccSuffix("/64")
- .includeSuffix("/64")
- .flag("+m64")
- .flag("-m32")
- .flag("-mabi=n32");
+ MultilibBuilder M64 = MultilibBuilder()
+ .gccSuffix("/64")
+ .includeSuffix("/64")
+ .flag("-m64")
+ .flag("-m32", /*Disallow=*/true)
+ .flag("-mabi=n32", /*Disallow=*/true);
- Multilib M32 =
- Multilib().gccSuffix("/32").flag("-m64").flag("+m32").flag("-mabi=n32");
+ MultilibBuilder M32 = MultilibBuilder()
+ .gccSuffix("/32")
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-m32")
+ .flag("-mabi=n32", /*Disallow=*/true);
- DebianMipsMultilibs =
- MultilibSet().Either(M32, M64, MAbiN32).FilterOut(NonExistent);
+ DebianMipsMultilibs = MultilibSetBuilder()
+ .Either(M32, M64, MAbiN32)
+ .makeMultilibSet()
+ .FilterOut(NonExistent);
}
// Sort candidates. Toolchain that best meets the directories tree goes first.
@@ -1131,7 +1134,7 @@ static bool findMipsCsMultilibs(const Multilib::flags_list &Flags,
if (CSMipsMultilibs.size() < DebianMipsMultilibs.size())
std::iter_swap(Candidates, Candidates + 1);
for (const MultilibSet *Candidate : Candidates) {
- if (Candidate->select(Flags, Result.SelectedMultilib)) {
+ if (Candidate->select(Flags, Result.SelectedMultilibs)) {
if (Candidate == &DebianMipsMultilibs)
Result.BiarchSibling = Multilib();
Result.Multilibs = *Candidate;
@@ -1147,25 +1150,32 @@ static bool findMipsAndroidMultilibs(llvm::vfs::FileSystem &VFS, StringRef Path,
DetectedMultilibs &Result) {
MultilibSet AndroidMipsMultilibs =
- MultilibSet()
- .Maybe(Multilib("/mips-r2").flag("+march=mips32r2"))
- .Maybe(Multilib("/mips-r6").flag("+march=mips32r6"))
+ MultilibSetBuilder()
+ .Maybe(MultilibBuilder("/mips-r2", {}, {}).flag("-march=mips32r2"))
+ .Maybe(MultilibBuilder("/mips-r6", {}, {}).flag("-march=mips32r6"))
+ .makeMultilibSet()
.FilterOut(NonExistent);
MultilibSet AndroidMipselMultilibs =
- MultilibSet()
- .Either(Multilib().flag("+march=mips32"),
- Multilib("/mips-r2", "", "/mips-r2").flag("+march=mips32r2"),
- Multilib("/mips-r6", "", "/mips-r6").flag("+march=mips32r6"))
+ MultilibSetBuilder()
+ .Either(MultilibBuilder().flag("-march=mips32"),
+ MultilibBuilder("/mips-r2", "", "/mips-r2")
+ .flag("-march=mips32r2"),
+ MultilibBuilder("/mips-r6", "", "/mips-r6")
+ .flag("-march=mips32r6"))
+ .makeMultilibSet()
.FilterOut(NonExistent);
MultilibSet AndroidMips64elMultilibs =
- MultilibSet()
- .Either(
- Multilib().flag("+march=mips64r6"),
- Multilib("/32/mips-r1", "", "/mips-r1").flag("+march=mips32"),
- Multilib("/32/mips-r2", "", "/mips-r2").flag("+march=mips32r2"),
- Multilib("/32/mips-r6", "", "/mips-r6").flag("+march=mips32r6"))
+ MultilibSetBuilder()
+ .Either(MultilibBuilder().flag("-march=mips64r6"),
+ MultilibBuilder("/32/mips-r1", "", "/mips-r1")
+ .flag("-march=mips32"),
+ MultilibBuilder("/32/mips-r2", "", "/mips-r2")
+ .flag("-march=mips32r2"),
+ MultilibBuilder("/32/mips-r6", "", "/mips-r6")
+ .flag("-march=mips32r6"))
+ .makeMultilibSet()
.FilterOut(NonExistent);
MultilibSet *MS = &AndroidMipsMultilibs;
@@ -1173,7 +1183,7 @@ static bool findMipsAndroidMultilibs(llvm::vfs::FileSystem &VFS, StringRef Path,
MS = &AndroidMipselMultilibs;
else if (VFS.exists(Path + "/32"))
MS = &AndroidMips64elMultilibs;
- if (MS->select(Flags, Result.SelectedMultilib)) {
+ if (MS->select(Flags, Result.SelectedMultilibs)) {
Result.Multilibs = *MS;
return true;
}
@@ -1186,18 +1196,20 @@ static bool findMipsMuslMultilibs(const Multilib::flags_list &Flags,
// Musl toolchain multilibs
MultilibSet MuslMipsMultilibs;
{
- auto MArchMipsR2 = makeMultilib("")
+ auto MArchMipsR2 = MultilibBuilder("")
.osSuffix("/mips-r2-hard-musl")
- .flag("+EB")
- .flag("-EL")
- .flag("+march=mips32r2");
+ .flag("-EB")
+ .flag("-EL", /*Disallow=*/true)
+ .flag("-march=mips32r2");
- auto MArchMipselR2 = makeMultilib("/mipsel-r2-hard-musl")
- .flag("-EB")
- .flag("+EL")
- .flag("+march=mips32r2");
+ auto MArchMipselR2 = MultilibBuilder("/mipsel-r2-hard-musl")
+ .flag("-EB", /*Disallow=*/true)
+ .flag("-EL")
+ .flag("-march=mips32r2");
- MuslMipsMultilibs = MultilibSet().Either(MArchMipsR2, MArchMipselR2);
+ MuslMipsMultilibs = MultilibSetBuilder()
+ .Either(MArchMipsR2, MArchMipselR2)
+ .makeMultilibSet();
// Specify the callback that computes the include directories.
MuslMipsMultilibs.setIncludeDirsCallback([](const Multilib &M) {
@@ -1205,7 +1217,7 @@ static bool findMipsMuslMultilibs(const Multilib::flags_list &Flags,
{"/../sysroot" + M.osSuffix() + "/usr/include"});
});
}
- if (MuslMipsMultilibs.select(Flags, Result.SelectedMultilib)) {
+ if (MuslMipsMultilibs.select(Flags, Result.SelectedMultilibs)) {
Result.Multilibs = MuslMipsMultilibs;
return true;
}
@@ -1218,48 +1230,54 @@ static bool findMipsMtiMultilibs(const Multilib::flags_list &Flags,
// CodeScape MTI toolchain v1.2 and early.
MultilibSet MtiMipsMultilibsV1;
{
- auto MArchMips32 = makeMultilib("/mips32")
- .flag("+m32")
+ auto MArchMips32 = MultilibBuilder("/mips32")
+ .flag("-m32")
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true)
+ .flag("-march=mips32");
+
+ auto MArchMicroMips = MultilibBuilder("/micromips")
+ .flag("-m32")
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mmicromips");
+
+ auto MArchMips64r2 = MultilibBuilder("/mips64r2")
+ .flag("-m32", /*Disallow=*/true)
+ .flag("-m64")
+ .flag("-march=mips64r2");
+
+ auto MArchMips64 = MultilibBuilder("/mips64")
+ .flag("-m32", /*Disallow=*/true)
.flag("-m64")
- .flag("-mmicromips")
- .flag("+march=mips32");
-
- auto MArchMicroMips = makeMultilib("/micromips")
- .flag("+m32")
- .flag("-m64")
- .flag("+mmicromips");
-
- auto MArchMips64r2 = makeMultilib("/mips64r2")
- .flag("-m32")
- .flag("+m64")
- .flag("+march=mips64r2");
-
- auto MArchMips64 = makeMultilib("/mips64").flag("-m32").flag("+m64").flag(
- "-march=mips64r2");
+ .flag("-march=mips64r2", /*Disallow=*/true);
- auto MArchDefault = makeMultilib("")
- .flag("+m32")
- .flag("-m64")
- .flag("-mmicromips")
- .flag("+march=mips32r2");
+ auto MArchDefault = MultilibBuilder("")
+ .flag("-m32")
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true)
+ .flag("-march=mips32r2");
- auto Mips16 = makeMultilib("/mips16").flag("+mips16");
+ auto Mips16 = MultilibBuilder("/mips16").flag("-mips16");
- auto UCLibc = makeMultilib("/uclibc").flag("+muclibc");
+ auto UCLibc = MultilibBuilder("/uclibc").flag("-muclibc");
- auto MAbi64 =
- makeMultilib("/64").flag("+mabi=n64").flag("-mabi=n32").flag("-m32");
+ auto MAbi64 = MultilibBuilder("/64")
+ .flag("-mabi=n64")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-m32", /*Disallow=*/true);
- auto BigEndian = makeMultilib("").flag("+EB").flag("-EL");
+ auto BigEndian =
+ MultilibBuilder("").flag("-EB").flag("-EL", /*Disallow=*/true);
- auto LittleEndian = makeMultilib("/el").flag("+EL").flag("-EB");
+ auto LittleEndian =
+ MultilibBuilder("/el").flag("-EL").flag("-EB", /*Disallow=*/true);
- auto SoftFloat = makeMultilib("/sof").flag("+msoft-float");
+ auto SoftFloat = MultilibBuilder("/sof").flag("-msoft-float");
- auto Nan2008 = makeMultilib("/nan2008").flag("+mnan=2008");
+ auto Nan2008 = MultilibBuilder("/nan2008").flag("-mnan=2008");
MtiMipsMultilibsV1 =
- MultilibSet()
+ MultilibSetBuilder()
.Either(MArchMips32, MArchMicroMips, MArchMips64r2, MArchMips64,
MArchDefault)
.Maybe(UCLibc)
@@ -1276,6 +1294,7 @@ static bool findMipsMtiMultilibs(const Multilib::flags_list &Flags,
.Maybe(SoftFloat)
.Maybe(Nan2008)
.FilterOut(".*sof/nan2008")
+ .makeMultilibSet()
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
std::vector<std::string> Dirs({"/include"});
@@ -1290,80 +1309,87 @@ static bool findMipsMtiMultilibs(const Multilib::flags_list &Flags,
// CodeScape IMG toolchain starting from v1.3.
MultilibSet MtiMipsMultilibsV2;
{
- auto BeHard = makeMultilib("/mips-r2-hard")
- .flag("+EB")
+ auto BeHard = MultilibBuilder("/mips-r2-hard")
+ .flag("-EB")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008", /*Disallow=*/true)
+ .flag("-muclibc", /*Disallow=*/true);
+ auto BeSoft = MultilibBuilder("/mips-r2-soft")
+ .flag("-EB")
.flag("-msoft-float")
- .flag("-mnan=2008")
- .flag("-muclibc");
- auto BeSoft = makeMultilib("/mips-r2-soft")
- .flag("+EB")
- .flag("+msoft-float")
- .flag("-mnan=2008");
- auto ElHard = makeMultilib("/mipsel-r2-hard")
- .flag("+EL")
+ .flag("-mnan=2008", /*Disallow=*/true);
+ auto ElHard = MultilibBuilder("/mipsel-r2-hard")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008", /*Disallow=*/true)
+ .flag("-muclibc", /*Disallow=*/true);
+ auto ElSoft = MultilibBuilder("/mipsel-r2-soft")
+ .flag("-EL")
.flag("-msoft-float")
- .flag("-mnan=2008")
- .flag("-muclibc");
- auto ElSoft = makeMultilib("/mipsel-r2-soft")
- .flag("+EL")
- .flag("+msoft-float")
- .flag("-mnan=2008")
- .flag("-mmicromips");
- auto BeHardNan = makeMultilib("/mips-r2-hard-nan2008")
- .flag("+EB")
- .flag("-msoft-float")
- .flag("+mnan=2008")
- .flag("-muclibc");
- auto ElHardNan = makeMultilib("/mipsel-r2-hard-nan2008")
- .flag("+EL")
- .flag("-msoft-float")
- .flag("+mnan=2008")
- .flag("-muclibc")
- .flag("-mmicromips");
- auto BeHardNanUclibc = makeMultilib("/mips-r2-hard-nan2008-uclibc")
- .flag("+EB")
- .flag("-msoft-float")
- .flag("+mnan=2008")
- .flag("+muclibc");
- auto ElHardNanUclibc = makeMultilib("/mipsel-r2-hard-nan2008-uclibc")
- .flag("+EL")
- .flag("-msoft-float")
- .flag("+mnan=2008")
- .flag("+muclibc");
- auto BeHardUclibc = makeMultilib("/mips-r2-hard-uclibc")
- .flag("+EB")
- .flag("-msoft-float")
- .flag("-mnan=2008")
- .flag("+muclibc");
- auto ElHardUclibc = makeMultilib("/mipsel-r2-hard-uclibc")
- .flag("+EL")
- .flag("-msoft-float")
- .flag("-mnan=2008")
- .flag("+muclibc");
- auto ElMicroHardNan = makeMultilib("/micromipsel-r2-hard-nan2008")
- .flag("+EL")
- .flag("-msoft-float")
- .flag("+mnan=2008")
- .flag("+mmicromips");
- auto ElMicroSoft = makeMultilib("/micromipsel-r2-soft")
- .flag("+EL")
- .flag("+msoft-float")
- .flag("-mnan=2008")
- .flag("+mmicromips");
-
- auto O32 =
- makeMultilib("/lib").osSuffix("").flag("-mabi=n32").flag("-mabi=n64");
- auto N32 =
- makeMultilib("/lib32").osSuffix("").flag("+mabi=n32").flag("-mabi=n64");
- auto N64 =
- makeMultilib("/lib64").osSuffix("").flag("-mabi=n32").flag("+mabi=n64");
+ .flag("-mnan=2008", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true);
+ auto BeHardNan = MultilibBuilder("/mips-r2-hard-nan2008")
+ .flag("-EB")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008")
+ .flag("-muclibc", /*Disallow=*/true);
+ auto ElHardNan = MultilibBuilder("/mipsel-r2-hard-nan2008")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008")
+ .flag("-muclibc", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true);
+ auto BeHardNanUclibc = MultilibBuilder("/mips-r2-hard-nan2008-uclibc")
+ .flag("-EB")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008")
+ .flag("-muclibc");
+ auto ElHardNanUclibc = MultilibBuilder("/mipsel-r2-hard-nan2008-uclibc")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008")
+ .flag("-muclibc");
+ auto BeHardUclibc = MultilibBuilder("/mips-r2-hard-uclibc")
+ .flag("-EB")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008", /*Disallow=*/true)
+ .flag("-muclibc");
+ auto ElHardUclibc = MultilibBuilder("/mipsel-r2-hard-uclibc")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008", /*Disallow=*/true)
+ .flag("-muclibc");
+ auto ElMicroHardNan = MultilibBuilder("/micromipsel-r2-hard-nan2008")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008")
+ .flag("-mmicromips");
+ auto ElMicroSoft = MultilibBuilder("/micromipsel-r2-soft")
+ .flag("-EL")
+ .flag("-msoft-float")
+ .flag("-mnan=2008", /*Disallow=*/true)
+ .flag("-mmicromips");
+
+ auto O32 = MultilibBuilder("/lib")
+ .osSuffix("")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-mabi=n64", /*Disallow=*/true);
+ auto N32 = MultilibBuilder("/lib32")
+ .osSuffix("")
+ .flag("-mabi=n32")
+ .flag("-mabi=n64", /*Disallow=*/true);
+ auto N64 = MultilibBuilder("/lib64")
+ .osSuffix("")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-mabi=n64");
MtiMipsMultilibsV2 =
- MultilibSet()
+ MultilibSetBuilder()
.Either({BeHard, BeSoft, ElHard, ElSoft, BeHardNan, ElHardNan,
BeHardNanUclibc, ElHardNanUclibc, BeHardUclibc,
ElHardUclibc, ElMicroHardNan, ElMicroSoft})
.Either(O32, N32, N64)
+ .makeMultilibSet()
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
return std::vector<std::string>({"/../../../../sysroot" +
@@ -1376,7 +1402,7 @@ static bool findMipsMtiMultilibs(const Multilib::flags_list &Flags,
});
}
for (auto *Candidate : {&MtiMipsMultilibsV1, &MtiMipsMultilibsV2}) {
- if (Candidate->select(Flags, Result.SelectedMultilib)) {
+ if (Candidate->select(Flags, Result.SelectedMultilibs)) {
Result.Multilibs = *Candidate;
return true;
}
@@ -1390,18 +1416,24 @@ static bool findMipsImgMultilibs(const Multilib::flags_list &Flags,
// CodeScape IMG toolchain v1.2 and early.
MultilibSet ImgMultilibsV1;
{
- auto Mips64r6 = makeMultilib("/mips64r6").flag("+m64").flag("-m32");
+ auto Mips64r6 = MultilibBuilder("/mips64r6")
+ .flag("-m64")
+ .flag("-m32", /*Disallow=*/true);
- auto LittleEndian = makeMultilib("/el").flag("+EL").flag("-EB");
+ auto LittleEndian =
+ MultilibBuilder("/el").flag("-EL").flag("-EB", /*Disallow=*/true);
- auto MAbi64 =
- makeMultilib("/64").flag("+mabi=n64").flag("-mabi=n32").flag("-m32");
+ auto MAbi64 = MultilibBuilder("/64")
+ .flag("-mabi=n64")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-m32", /*Disallow=*/true);
ImgMultilibsV1 =
- MultilibSet()
+ MultilibSetBuilder()
.Maybe(Mips64r6)
.Maybe(MAbi64)
.Maybe(LittleEndian)
+ .makeMultilibSet()
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
return std::vector<std::string>(
@@ -1412,51 +1444,58 @@ static bool findMipsImgMultilibs(const Multilib::flags_list &Flags,
// CodeScape IMG toolchain starting from v1.3.
MultilibSet ImgMultilibsV2;
{
- auto BeHard = makeMultilib("/mips-r6-hard")
- .flag("+EB")
+ auto BeHard = MultilibBuilder("/mips-r6-hard")
+ .flag("-EB")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true);
+ auto BeSoft = MultilibBuilder("/mips-r6-soft")
+ .flag("-EB")
.flag("-msoft-float")
- .flag("-mmicromips");
- auto BeSoft = makeMultilib("/mips-r6-soft")
- .flag("+EB")
- .flag("+msoft-float")
- .flag("-mmicromips");
- auto ElHard = makeMultilib("/mipsel-r6-hard")
- .flag("+EL")
+ .flag("-mmicromips", /*Disallow=*/true);
+ auto ElHard = MultilibBuilder("/mipsel-r6-hard")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true);
+ auto ElSoft = MultilibBuilder("/mipsel-r6-soft")
+ .flag("-EL")
.flag("-msoft-float")
- .flag("-mmicromips");
- auto ElSoft = makeMultilib("/mipsel-r6-soft")
- .flag("+EL")
- .flag("+msoft-float")
- .flag("-mmicromips");
- auto BeMicroHard = makeMultilib("/micromips-r6-hard")
- .flag("+EB")
+ .flag("-mmicromips", /*Disallow=*/true);
+ auto BeMicroHard = MultilibBuilder("/micromips-r6-hard")
+ .flag("-EB")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mmicromips");
+ auto BeMicroSoft = MultilibBuilder("/micromips-r6-soft")
+ .flag("-EB")
.flag("-msoft-float")
- .flag("+mmicromips");
- auto BeMicroSoft = makeMultilib("/micromips-r6-soft")
- .flag("+EB")
- .flag("+msoft-float")
- .flag("+mmicromips");
- auto ElMicroHard = makeMultilib("/micromipsel-r6-hard")
- .flag("+EL")
+ .flag("-mmicromips");
+ auto ElMicroHard = MultilibBuilder("/micromipsel-r6-hard")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mmicromips");
+ auto ElMicroSoft = MultilibBuilder("/micromipsel-r6-soft")
+ .flag("-EL")
.flag("-msoft-float")
- .flag("+mmicromips");
- auto ElMicroSoft = makeMultilib("/micromipsel-r6-soft")
- .flag("+EL")
- .flag("+msoft-float")
- .flag("+mmicromips");
-
- auto O32 =
- makeMultilib("/lib").osSuffix("").flag("-mabi=n32").flag("-mabi=n64");
- auto N32 =
- makeMultilib("/lib32").osSuffix("").flag("+mabi=n32").flag("-mabi=n64");
- auto N64 =
- makeMultilib("/lib64").osSuffix("").flag("-mabi=n32").flag("+mabi=n64");
+ .flag("-mmicromips");
+
+ auto O32 = MultilibBuilder("/lib")
+ .osSuffix("")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-mabi=n64", /*Disallow=*/true);
+ auto N32 = MultilibBuilder("/lib32")
+ .osSuffix("")
+ .flag("-mabi=n32")
+ .flag("-mabi=n64", /*Disallow=*/true);
+ auto N64 = MultilibBuilder("/lib64")
+ .osSuffix("")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-mabi=n64");
ImgMultilibsV2 =
- MultilibSet()
+ MultilibSetBuilder()
.Either({BeHard, BeSoft, ElHard, ElSoft, BeMicroHard, BeMicroSoft,
ElMicroHard, ElMicroSoft})
.Either(O32, N32, N64)
+ .makeMultilibSet()
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
return std::vector<std::string>({"/../../../../sysroot" +
@@ -1469,7 +1508,7 @@ static bool findMipsImgMultilibs(const Multilib::flags_list &Flags,
});
}
for (auto *Candidate : {&ImgMultilibsV1, &ImgMultilibsV2}) {
- if (Candidate->select(Flags, Result.SelectedMultilib)) {
+ if (Candidate->select(Flags, Result.SelectedMultilibs)) {
Result.Multilibs = *Candidate;
return true;
}
@@ -1490,30 +1529,30 @@ bool clang::driver::findMIPSMultilibs(const Driver &D,
llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
Multilib::flags_list Flags;
- addMultilibFlag(TargetTriple.isMIPS32(), "m32", Flags);
- addMultilibFlag(TargetTriple.isMIPS64(), "m64", Flags);
- addMultilibFlag(isMips16(Args), "mips16", Flags);
- addMultilibFlag(CPUName == "mips32", "march=mips32", Flags);
+ addMultilibFlag(TargetTriple.isMIPS32(), "-m32", Flags);
+ addMultilibFlag(TargetTriple.isMIPS64(), "-m64", Flags);
+ addMultilibFlag(isMips16(Args), "-mips16", Flags);
+ addMultilibFlag(CPUName == "mips32", "-march=mips32", Flags);
addMultilibFlag(CPUName == "mips32r2" || CPUName == "mips32r3" ||
CPUName == "mips32r5" || CPUName == "p5600",
- "march=mips32r2", Flags);
- addMultilibFlag(CPUName == "mips32r6", "march=mips32r6", Flags);
- addMultilibFlag(CPUName == "mips64", "march=mips64", Flags);
+ "-march=mips32r2", Flags);
+ addMultilibFlag(CPUName == "mips32r6", "-march=mips32r6", Flags);
+ addMultilibFlag(CPUName == "mips64", "-march=mips64", Flags);
addMultilibFlag(CPUName == "mips64r2" || CPUName == "mips64r3" ||
CPUName == "mips64r5" || CPUName == "octeon" ||
CPUName == "octeon+",
- "march=mips64r2", Flags);
- addMultilibFlag(CPUName == "mips64r6", "march=mips64r6", Flags);
- addMultilibFlag(isMicroMips(Args), "mmicromips", Flags);
- addMultilibFlag(tools::mips::isUCLibc(Args), "muclibc", Flags);
- addMultilibFlag(tools::mips::isNaN2008(D, Args, TargetTriple), "mnan=2008",
+ "-march=mips64r2", Flags);
+ addMultilibFlag(CPUName == "mips64r6", "-march=mips64r6", Flags);
+ addMultilibFlag(isMicroMips(Args), "-mmicromips", Flags);
+ addMultilibFlag(tools::mips::isUCLibc(Args), "-muclibc", Flags);
+ addMultilibFlag(tools::mips::isNaN2008(D, Args, TargetTriple), "-mnan=2008",
Flags);
- addMultilibFlag(ABIName == "n32", "mabi=n32", Flags);
- addMultilibFlag(ABIName == "n64", "mabi=n64", Flags);
- addMultilibFlag(isSoftFloatABI(Args), "msoft-float", Flags);
- addMultilibFlag(!isSoftFloatABI(Args), "mhard-float", Flags);
- addMultilibFlag(isMipsEL(TargetArch), "EL", Flags);
- addMultilibFlag(!isMipsEL(TargetArch), "EB", Flags);
+ addMultilibFlag(ABIName == "n32", "-mabi=n32", Flags);
+ addMultilibFlag(ABIName == "n64", "-mabi=n64", Flags);
+ addMultilibFlag(isSoftFloatABI(Args), "-msoft-float", Flags);
+ addMultilibFlag(!isSoftFloatABI(Args), "-mhard-float", Flags);
+ addMultilibFlag(isMipsEL(TargetArch), "-EL", Flags);
+ addMultilibFlag(!isMipsEL(TargetArch), "-EB", Flags);
if (TargetTriple.isAndroid())
return findMipsAndroidMultilibs(D.getVFS(), Path, Flags, NonExistent,
@@ -1542,7 +1581,7 @@ bool clang::driver::findMIPSMultilibs(const Driver &D,
Result.Multilibs.push_back(Default);
Result.Multilibs.FilterOut(NonExistent);
- if (Result.Multilibs.select(Flags, Result.SelectedMultilib)) {
+ if (Result.Multilibs.select(Flags, Result.SelectedMultilibs)) {
Result.BiarchSibling = Multilib();
return true;
}
@@ -1556,22 +1595,23 @@ static void findAndroidArmMultilibs(const Driver &D,
DetectedMultilibs &Result) {
// Find multilibs with subdirectories like armv7-a, thumb, armv7-a/thumb.
FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
- Multilib ArmV7Multilib = makeMultilib("/armv7-a")
- .flag("+march=armv7-a")
- .flag("-mthumb");
- Multilib ThumbMultilib = makeMultilib("/thumb")
- .flag("-march=armv7-a")
- .flag("+mthumb");
- Multilib ArmV7ThumbMultilib = makeMultilib("/armv7-a/thumb")
- .flag("+march=armv7-a")
- .flag("+mthumb");
- Multilib DefaultMultilib = makeMultilib("")
- .flag("-march=armv7-a")
- .flag("-mthumb");
+ MultilibBuilder ArmV7Multilib = MultilibBuilder("/armv7-a")
+ .flag("-march=armv7-a")
+ .flag("-mthumb", /*Disallow=*/true);
+ MultilibBuilder ThumbMultilib = MultilibBuilder("/thumb")
+ .flag("-march=armv7-a", /*Disallow=*/true)
+ .flag("-mthumb");
+ MultilibBuilder ArmV7ThumbMultilib =
+ MultilibBuilder("/armv7-a/thumb").flag("-march=armv7-a").flag("-mthumb");
+ MultilibBuilder DefaultMultilib =
+ MultilibBuilder("")
+ .flag("-march=armv7-a", /*Disallow=*/true)
+ .flag("-mthumb", /*Disallow=*/true);
MultilibSet AndroidArmMultilibs =
- MultilibSet()
- .Either(ThumbMultilib, ArmV7Multilib,
- ArmV7ThumbMultilib, DefaultMultilib)
+ MultilibSetBuilder()
+ .Either(ThumbMultilib, ArmV7Multilib, ArmV7ThumbMultilib,
+ DefaultMultilib)
+ .makeMultilibSet()
.FilterOut(NonExistent);
Multilib::flags_list Flags;
@@ -1585,10 +1625,10 @@ static void findAndroidArmMultilibs(const Driver &D,
bool IsArmV7Mode = (IsArmArch || IsThumbArch) &&
(llvm::ARM::parseArchVersion(Arch) == 7 ||
(IsArmArch && Arch == "" && IsV7SubArch));
- addMultilibFlag(IsArmV7Mode, "march=armv7-a", Flags);
- addMultilibFlag(IsThumbMode, "mthumb", Flags);
+ addMultilibFlag(IsArmV7Mode, "-march=armv7-a", Flags);
+ addMultilibFlag(IsThumbMode, "-mthumb", Flags);
- if (AndroidArmMultilibs.select(Flags, Result.SelectedMultilib))
+ if (AndroidArmMultilibs.select(Flags, Result.SelectedMultilibs))
Result.Multilibs = AndroidArmMultilibs;
}
@@ -1597,22 +1637,24 @@ static bool findMSP430Multilibs(const Driver &D,
StringRef Path, const ArgList &Args,
DetectedMultilibs &Result) {
FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
- Multilib WithoutExceptions = makeMultilib("/430").flag("-exceptions");
- Multilib WithExceptions = makeMultilib("/430/exceptions").flag("+exceptions");
+ MultilibBuilder WithoutExceptions =
+ MultilibBuilder("/430").flag("-exceptions", /*Disallow=*/true);
+ MultilibBuilder WithExceptions =
+ MultilibBuilder("/430/exceptions").flag("-exceptions");
// FIXME: when clang starts to support msp430x ISA additional logic
// to select between multilib must be implemented
- // Multilib MSP430xMultilib = makeMultilib("/large");
+ // MultilibBuilder MSP430xMultilib = MultilibBuilder("/large");
- Result.Multilibs.push_back(WithoutExceptions);
- Result.Multilibs.push_back(WithExceptions);
+ Result.Multilibs.push_back(WithoutExceptions.makeMultilib());
+ Result.Multilibs.push_back(WithExceptions.makeMultilib());
Result.Multilibs.FilterOut(NonExistent);
Multilib::flags_list Flags;
addMultilibFlag(Args.hasFlag(options::OPT_fexceptions,
options::OPT_fno_exceptions, false),
- "exceptions", Flags);
- if (Result.Multilibs.select(Flags, Result.SelectedMultilib))
+ "-exceptions", Flags);
+ if (Result.Multilibs.select(Flags, Result.SelectedMultilibs))
return true;
return false;
@@ -1632,52 +1674,54 @@ static void findCSKYMultilibs(const Driver &D, const llvm::Triple &TargetTriple,
auto ARCHName = *Res;
Multilib::flags_list Flags;
- addMultilibFlag(TheFloatABI == tools::csky::FloatABI::Hard, "hard-fp", Flags);
- addMultilibFlag(TheFloatABI == tools::csky::FloatABI::SoftFP, "soft-fp",
+ addMultilibFlag(TheFloatABI == tools::csky::FloatABI::Hard, "-hard-fp",
Flags);
- addMultilibFlag(TheFloatABI == tools::csky::FloatABI::Soft, "soft", Flags);
- addMultilibFlag(ARCHName == "ck801", "march=ck801", Flags);
- addMultilibFlag(ARCHName == "ck802", "march=ck802", Flags);
- addMultilibFlag(ARCHName == "ck803", "march=ck803", Flags);
- addMultilibFlag(ARCHName == "ck804", "march=ck804", Flags);
- addMultilibFlag(ARCHName == "ck805", "march=ck805", Flags);
- addMultilibFlag(ARCHName == "ck807", "march=ck807", Flags);
- addMultilibFlag(ARCHName == "ck810", "march=ck810", Flags);
- addMultilibFlag(ARCHName == "ck810v", "march=ck810v", Flags);
- addMultilibFlag(ARCHName == "ck860", "march=ck860", Flags);
- addMultilibFlag(ARCHName == "ck860v", "march=ck860v", Flags);
+ addMultilibFlag(TheFloatABI == tools::csky::FloatABI::SoftFP, "-soft-fp",
+ Flags);
+ addMultilibFlag(TheFloatABI == tools::csky::FloatABI::Soft, "-soft", Flags);
+ addMultilibFlag(ARCHName == "ck801", "-march=ck801", Flags);
+ addMultilibFlag(ARCHName == "ck802", "-march=ck802", Flags);
+ addMultilibFlag(ARCHName == "ck803", "-march=ck803", Flags);
+ addMultilibFlag(ARCHName == "ck804", "-march=ck804", Flags);
+ addMultilibFlag(ARCHName == "ck805", "-march=ck805", Flags);
+ addMultilibFlag(ARCHName == "ck807", "-march=ck807", Flags);
+ addMultilibFlag(ARCHName == "ck810", "-march=ck810", Flags);
+ addMultilibFlag(ARCHName == "ck810v", "-march=ck810v", Flags);
+ addMultilibFlag(ARCHName == "ck860", "-march=ck860", Flags);
+ addMultilibFlag(ARCHName == "ck860v", "-march=ck860v", Flags);
bool isBigEndian = false;
if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
options::OPT_mbig_endian))
isBigEndian = !A->getOption().matches(options::OPT_mlittle_endian);
- addMultilibFlag(isBigEndian, "EB", Flags);
-
- auto HardFloat = makeMultilib("/hard-fp").flag("+hard-fp");
- auto SoftFpFloat = makeMultilib("/soft-fp").flag("+soft-fp");
- auto SoftFloat = makeMultilib("").flag("+soft");
- auto Arch801 = makeMultilib("/ck801").flag("+march=ck801");
- auto Arch802 = makeMultilib("/ck802").flag("+march=ck802");
- auto Arch803 = makeMultilib("/ck803").flag("+march=ck803");
+ addMultilibFlag(isBigEndian, "-EB", Flags);
+
+ auto HardFloat = MultilibBuilder("/hard-fp").flag("-hard-fp");
+ auto SoftFpFloat = MultilibBuilder("/soft-fp").flag("-soft-fp");
+ auto SoftFloat = MultilibBuilder("").flag("-soft");
+ auto Arch801 = MultilibBuilder("/ck801").flag("-march=ck801");
+ auto Arch802 = MultilibBuilder("/ck802").flag("-march=ck802");
+ auto Arch803 = MultilibBuilder("/ck803").flag("-march=ck803");
// CK804 use the same library as CK803
- auto Arch804 = makeMultilib("/ck803").flag("+march=ck804");
- auto Arch805 = makeMultilib("/ck805").flag("+march=ck805");
- auto Arch807 = makeMultilib("/ck807").flag("+march=ck807");
- auto Arch810 = makeMultilib("").flag("+march=ck810");
- auto Arch810v = makeMultilib("/ck810v").flag("+march=ck810v");
- auto Arch860 = makeMultilib("/ck860").flag("+march=ck860");
- auto Arch860v = makeMultilib("/ck860v").flag("+march=ck860v");
- auto BigEndian = makeMultilib("/big").flag("+EB");
+ auto Arch804 = MultilibBuilder("/ck803").flag("-march=ck804");
+ auto Arch805 = MultilibBuilder("/ck805").flag("-march=ck805");
+ auto Arch807 = MultilibBuilder("/ck807").flag("-march=ck807");
+ auto Arch810 = MultilibBuilder("").flag("-march=ck810");
+ auto Arch810v = MultilibBuilder("/ck810v").flag("-march=ck810v");
+ auto Arch860 = MultilibBuilder("/ck860").flag("-march=ck860");
+ auto Arch860v = MultilibBuilder("/ck860v").flag("-march=ck860v");
+ auto BigEndian = MultilibBuilder("/big").flag("-EB");
MultilibSet CSKYMultilibs =
- MultilibSet()
+ MultilibSetBuilder()
.Maybe(BigEndian)
.Either({Arch801, Arch802, Arch803, Arch804, Arch805, Arch807,
Arch810, Arch810v, Arch860, Arch860v})
.Either(HardFloat, SoftFpFloat, SoftFloat)
+ .makeMultilibSet()
.FilterOut(NonExistent);
- if (CSKYMultilibs.select(Flags, Result.SelectedMultilib))
+ if (CSKYMultilibs.select(Flags, Result.SelectedMultilibs))
Result.Multilibs = CSKYMultilibs;
}
@@ -1697,17 +1741,19 @@ static void findRISCVBareMetalMultilibs(const Driver &D,
{"rv32imac", "ilp32"}, {"rv32imafc", "ilp32f"}, {"rv64imac", "lp64"},
{"rv64imafdc", "lp64d"}};
- std::vector<Multilib> Ms;
+ std::vector<MultilibBuilder> Ms;
for (auto Element : RISCVMultilibSet) {
// multilib path rule is ${march}/${mabi}
Ms.emplace_back(
- makeMultilib((Twine(Element.march) + "/" + Twine(Element.mabi)).str())
- .flag(Twine("+march=", Element.march).str())
- .flag(Twine("+mabi=", Element.mabi).str()));
+ MultilibBuilder(
+ (Twine(Element.march) + "/" + Twine(Element.mabi)).str())
+ .flag(Twine("-march=", Element.march).str())
+ .flag(Twine("-mabi=", Element.mabi).str()));
}
MultilibSet RISCVMultilibs =
- MultilibSet()
- .Either(ArrayRef<Multilib>(Ms))
+ MultilibSetBuilder()
+ .Either(Ms)
+ .makeMultilibSet()
.FilterOut(NonExistent)
.setFilePathsCallback([](const Multilib &M) {
return std::vector<std::string>(
@@ -1716,22 +1762,21 @@ static void findRISCVBareMetalMultilibs(const Driver &D,
"/../../../../riscv32-unknown-elf/lib" + M.gccSuffix()});
});
-
Multilib::flags_list Flags;
llvm::StringSet<> Added_ABIs;
StringRef ABIName = tools::riscv::getRISCVABI(Args, TargetTriple);
StringRef MArch = tools::riscv::getRISCVArch(Args, TargetTriple);
for (auto Element : RISCVMultilibSet) {
addMultilibFlag(MArch == Element.march,
- Twine("march=", Element.march).str().c_str(), Flags);
+ Twine("-march=", Element.march).str().c_str(), Flags);
if (!Added_ABIs.count(Element.mabi)) {
Added_ABIs.insert(Element.mabi);
addMultilibFlag(ABIName == Element.mabi,
- Twine("mabi=", Element.mabi).str().c_str(), Flags);
+ Twine("-mabi=", Element.mabi).str().c_str(), Flags);
}
}
- if (RISCVMultilibs.select(Flags, Result.SelectedMultilib))
+ if (RISCVMultilibs.select(Flags, Result.SelectedMultilibs))
Result.Multilibs = RISCVMultilibs;
}
@@ -1742,33 +1787,38 @@ static void findRISCVMultilibs(const Driver &D,
return findRISCVBareMetalMultilibs(D, TargetTriple, Path, Args, Result);
FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
- Multilib Ilp32 = makeMultilib("lib32/ilp32").flag("+m32").flag("+mabi=ilp32");
- Multilib Ilp32f =
- makeMultilib("lib32/ilp32f").flag("+m32").flag("+mabi=ilp32f");
- Multilib Ilp32d =
- makeMultilib("lib32/ilp32d").flag("+m32").flag("+mabi=ilp32d");
- Multilib Lp64 = makeMultilib("lib64/lp64").flag("+m64").flag("+mabi=lp64");
- Multilib Lp64f = makeMultilib("lib64/lp64f").flag("+m64").flag("+mabi=lp64f");
- Multilib Lp64d = makeMultilib("lib64/lp64d").flag("+m64").flag("+mabi=lp64d");
+ MultilibBuilder Ilp32 =
+ MultilibBuilder("lib32/ilp32").flag("-m32").flag("-mabi=ilp32");
+ MultilibBuilder Ilp32f =
+ MultilibBuilder("lib32/ilp32f").flag("-m32").flag("-mabi=ilp32f");
+ MultilibBuilder Ilp32d =
+ MultilibBuilder("lib32/ilp32d").flag("-m32").flag("-mabi=ilp32d");
+ MultilibBuilder Lp64 =
+ MultilibBuilder("lib64/lp64").flag("-m64").flag("-mabi=lp64");
+ MultilibBuilder Lp64f =
+ MultilibBuilder("lib64/lp64f").flag("-m64").flag("-mabi=lp64f");
+ MultilibBuilder Lp64d =
+ MultilibBuilder("lib64/lp64d").flag("-m64").flag("-mabi=lp64d");
MultilibSet RISCVMultilibs =
- MultilibSet()
+ MultilibSetBuilder()
.Either({Ilp32, Ilp32f, Ilp32d, Lp64, Lp64f, Lp64d})
+ .makeMultilibSet()
.FilterOut(NonExistent);
Multilib::flags_list Flags;
bool IsRV64 = TargetTriple.getArch() == llvm::Triple::riscv64;
StringRef ABIName = tools::riscv::getRISCVABI(Args, TargetTriple);
- addMultilibFlag(!IsRV64, "m32", Flags);
- addMultilibFlag(IsRV64, "m64", Flags);
- addMultilibFlag(ABIName == "ilp32", "mabi=ilp32", Flags);
- addMultilibFlag(ABIName == "ilp32f", "mabi=ilp32f", Flags);
- addMultilibFlag(ABIName == "ilp32d", "mabi=ilp32d", Flags);
- addMultilibFlag(ABIName == "lp64", "mabi=lp64", Flags);
- addMultilibFlag(ABIName == "lp64f", "mabi=lp64f", Flags);
- addMultilibFlag(ABIName == "lp64d", "mabi=lp64d", Flags);
+ addMultilibFlag(!IsRV64, "-m32", Flags);
+ addMultilibFlag(IsRV64, "-m64", Flags);
+ addMultilibFlag(ABIName == "ilp32", "-mabi=ilp32", Flags);
+ addMultilibFlag(ABIName == "ilp32f", "-mabi=ilp32f", Flags);
+ addMultilibFlag(ABIName == "ilp32d", "-mabi=ilp32d", Flags);
+ addMultilibFlag(ABIName == "lp64", "-mabi=lp64", Flags);
+ addMultilibFlag(ABIName == "lp64f", "-mabi=lp64f", Flags);
+ addMultilibFlag(ABIName == "lp64d", "-mabi=lp64d", Flags);
- if (RISCVMultilibs.select(Flags, Result.SelectedMultilib))
+ if (RISCVMultilibs.select(Flags, Result.SelectedMultilibs))
Result.Multilibs = RISCVMultilibs;
}
@@ -1777,7 +1827,7 @@ static bool findBiarchMultilibs(const Driver &D,
StringRef Path, const ArgList &Args,
bool NeedsBiarchSuffix,
DetectedMultilibs &Result) {
- Multilib Default;
+ MultilibBuilder DefaultBuilder;
// Some versions of SUSE and Fedora on ppc64 put 32-bit libs
// in what would normally be GCCInstallPath and put the 64-bit
@@ -1803,24 +1853,27 @@ static bool findBiarchMultilibs(const Driver &D,
}
}
- Multilib Alt64 = Multilib()
+ Multilib Alt64 = MultilibBuilder()
.gccSuffix(Suff64)
.includeSuffix(Suff64)
- .flag("-m32")
- .flag("+m64")
- .flag("-mx32");
- Multilib Alt32 = Multilib()
+ .flag("-m32", /*Disallow=*/true)
+ .flag("-m64")
+ .flag("-mx32", /*Disallow=*/true)
+ .makeMultilib();
+ Multilib Alt32 = MultilibBuilder()
.gccSuffix("/32")
.includeSuffix("/32")
- .flag("+m32")
- .flag("-m64")
- .flag("-mx32");
- Multilib Altx32 = Multilib()
+ .flag("-m32")
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mx32", /*Disallow=*/true)
+ .makeMultilib();
+ Multilib Altx32 = MultilibBuilder()
.gccSuffix("/x32")
.includeSuffix("/x32")
- .flag("-m32")
- .flag("-m64")
- .flag("+mx32");
+ .flag("-m32", /*Disallow=*/true)
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mx32")
+ .makeMultilib();
// GCC toolchain for IAMCU doesn't have crtbegin.o, so look for libgcc.a.
FilterNonExistent NonExistent(
@@ -1846,14 +1899,22 @@ static bool findBiarchMultilibs(const Driver &D,
}
if (Want == WANT32)
- Default.flag("+m32").flag("-m64").flag("-mx32");
+ DefaultBuilder.flag("-m32")
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mx32", /*Disallow=*/true);
else if (Want == WANT64)
- Default.flag("-m32").flag("+m64").flag("-mx32");
+ DefaultBuilder.flag("-m32", /*Disallow=*/true)
+ .flag("-m64")
+ .flag("-mx32", /*Disallow=*/true);
else if (Want == WANTX32)
- Default.flag("-m32").flag("-m64").flag("+mx32");
+ DefaultBuilder.flag("-m32", /*Disallow=*/true)
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mx32");
else
return false;
+ Multilib Default = DefaultBuilder.makeMultilib();
+
Result.Multilibs.push_back(Default);
Result.Multilibs.push_back(Alt64);
Result.Multilibs.push_back(Alt32);
@@ -1862,15 +1923,16 @@ static bool findBiarchMultilibs(const Driver &D,
Result.Multilibs.FilterOut(NonExistent);
Multilib::flags_list Flags;
- addMultilibFlag(TargetTriple.isArch64Bit() && !IsX32, "m64", Flags);
- addMultilibFlag(TargetTriple.isArch32Bit(), "m32", Flags);
- addMultilibFlag(TargetTriple.isArch64Bit() && IsX32, "mx32", Flags);
+ addMultilibFlag(TargetTriple.isArch64Bit() && !IsX32, "-m64", Flags);
+ addMultilibFlag(TargetTriple.isArch32Bit(), "-m32", Flags);
+ addMultilibFlag(TargetTriple.isArch64Bit() && IsX32, "-mx32", Flags);
- if (!Result.Multilibs.select(Flags, Result.SelectedMultilib))
+ if (!Result.Multilibs.select(Flags, Result.SelectedMultilibs))
return false;
- if (Result.SelectedMultilib == Alt64 || Result.SelectedMultilib == Alt32 ||
- Result.SelectedMultilib == Altx32)
+ if (Result.SelectedMultilibs.back() == Alt64 ||
+ Result.SelectedMultilibs.back() == Alt32 ||
+ Result.SelectedMultilibs.back() == Altx32)
Result.BiarchSibling = Default;
return true;
@@ -2389,9 +2451,6 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const AArch64AndroidTriples[] = {
"aarch64-linux-android"};
static const char *const ARMAndroidTriples[] = {"arm-linux-androideabi"};
- static const char *const MIPSELAndroidTriples[] = {"mipsel-linux-android"};
- static const char *const MIPS64ELAndroidTriples[] = {
- "mips64el-linux-android"};
static const char *const X86AndroidTriples[] = {"i686-linux-android"};
static const char *const X86_64AndroidTriples[] = {"x86_64-linux-android"};
@@ -2406,22 +2465,6 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
LibDirs.append(begin(ARMLibDirs), end(ARMLibDirs));
TripleAliases.append(begin(ARMAndroidTriples), end(ARMAndroidTriples));
break;
- case llvm::Triple::mipsel:
- LibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
- TripleAliases.append(begin(MIPSELAndroidTriples),
- end(MIPSELAndroidTriples));
- BiarchLibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
- BiarchTripleAliases.append(begin(MIPS64ELAndroidTriples),
- end(MIPS64ELAndroidTriples));
- break;
- case llvm::Triple::mips64el:
- LibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
- TripleAliases.append(begin(MIPS64ELAndroidTriples),
- end(MIPS64ELAndroidTriples));
- BiarchLibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
- BiarchTripleAliases.append(begin(MIPSELAndroidTriples),
- end(MIPSELAndroidTriples));
- break;
case llvm::Triple::x86_64:
LibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
TripleAliases.append(begin(X86_64AndroidTriples),
@@ -2654,7 +2697,9 @@ bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs(
}
Multilibs = Detected.Multilibs;
- SelectedMultilib = Detected.SelectedMultilib;
+ SelectedMultilib = Detected.SelectedMultilibs.empty()
+ ? Multilib()
+ : Detected.SelectedMultilibs.back();
BiarchSibling = Detected.BiarchSibling;
return true;
@@ -2849,8 +2894,8 @@ Tool *Generic_GCC::buildLinker() const { return new tools::gcc::Linker(*this); }
void Generic_GCC::printVerboseInfo(raw_ostream &OS) const {
// Print the information about how we detected the GCC installation.
GCCInstallation.print(OS);
- CudaInstallation.print(OS);
- RocmInstallation.print(OS);
+ CudaInstallation->print(OS);
+ RocmInstallation->print(OS);
}
ToolChain::UnwindTableLevel
@@ -2861,6 +2906,8 @@ Generic_GCC::getDefaultUnwindTableLevel(const ArgList &Args) const {
case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
case llvm::Triple::x86:
case llvm::Triple::x86_64:
return UnwindTableLevel::Asynchronous;
@@ -2891,44 +2938,12 @@ bool Generic_GCC::isPICDefaultForced() const {
bool Generic_GCC::IsIntegratedAssemblerDefault() const {
switch (getTriple().getArch()) {
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_be:
- case llvm::Triple::amdgcn:
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::avr:
- case llvm::Triple::bpfel:
- case llvm::Triple::bpfeb:
- case llvm::Triple::csky:
- case llvm::Triple::hexagon:
- case llvm::Triple::lanai:
- case llvm::Triple::loongarch32:
- case llvm::Triple::loongarch64:
- case llvm::Triple::m68k:
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- case llvm::Triple::msp430:
- case llvm::Triple::ppc:
- case llvm::Triple::ppcle:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
- case llvm::Triple::r600:
- case llvm::Triple::riscv32:
- case llvm::Triple::riscv64:
- case llvm::Triple::sparc:
- case llvm::Triple::sparcel:
- case llvm::Triple::sparcv9:
- case llvm::Triple::systemz:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- case llvm::Triple::ve:
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- return true;
- default:
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ case llvm::Triple::xcore:
return false;
+ default:
+ return getTriple().getVendor() != llvm::Triple::Myriad;
}
}
@@ -2954,6 +2969,7 @@ void Generic_GCC::AddMultilibPaths(const Driver &D,
path_list &Paths) {
// Add the multilib suffixed paths where they are available.
if (GCCInstallation.isValid()) {
+ assert(!SelectedMultilibs.empty());
const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
const std::string &LibPath =
std::string(GCCInstallation.getParentLibPath());
@@ -2961,13 +2977,14 @@ void Generic_GCC::AddMultilibPaths(const Driver &D,
// Sourcery CodeBench MIPS toolchain holds some libraries under
// a biarch-like suffix of the GCC installation.
if (const auto &PathsCallback = Multilibs.filePathsCallback())
- for (const auto &Path : PathsCallback(SelectedMultilib))
+ for (const auto &Path : PathsCallback(SelectedMultilibs.back()))
addPathIfExists(D, GCCInstallation.getInstallPath() + Path, Paths);
// Add lib/gcc/$triple/$version, with an optional /multilib suffix.
- addPathIfExists(
- D, GCCInstallation.getInstallPath() + SelectedMultilib.gccSuffix(),
- Paths);
+ addPathIfExists(D,
+ GCCInstallation.getInstallPath() +
+ SelectedMultilibs.back().gccSuffix(),
+ Paths);
// Add lib/gcc/$triple/$libdir
// For GCC built with --enable-version-specific-runtime-libs.
@@ -2994,7 +3011,7 @@ void Generic_GCC::AddMultilibPaths(const Driver &D,
// Clang diverges from GCC's behavior.
addPathIfExists(D,
LibPath + "/../" + GCCTriple.str() + "/lib/../" + OSLibDir +
- SelectedMultilib.osSuffix(),
+ SelectedMultilibs.back().osSuffix(),
Paths);
// If the GCC installation we found is inside of the sysroot, we want to
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h
index b8610724103b..6d335c9edb22 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_GNU_H
#include "Cuda.h"
+#include "LazyDetector.h"
#include "ROCm.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -22,8 +23,8 @@ struct DetectedMultilibs {
/// The set of multilibs that the detected installation supports.
MultilibSet Multilibs;
- /// The primary multilib appropriate for the given flags.
- Multilib SelectedMultilib;
+ /// The multilibs appropriate for the given flags.
+ llvm::SmallVector<Multilib> SelectedMultilibs;
/// On Biarch systems, this corresponds to the default multilib when
/// targeting the non-default multilib. Otherwise, it is empty.
@@ -286,8 +287,8 @@ public:
protected:
GCCInstallationDetector GCCInstallation;
- CudaInstallationDetector CudaInstallation;
- RocmInstallationDetector RocmInstallation;
+ LazyDetector<CudaInstallationDetector> CudaInstallation;
+ LazyDetector<RocmInstallationDetector> RocmInstallation;
public:
Generic_GCC(const Driver &D, const llvm::Triple &Triple,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp
index a555fe5830e0..e509a01f2f97 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp
@@ -21,7 +21,7 @@
#include "llvm/Support/Alignment.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/TargetParser/TargetParser.h"
using namespace clang::driver;
using namespace clang::driver::toolchains;
@@ -142,18 +142,34 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
if (IsThinLTO)
LldArgs.push_back(Args.MakeArgString("-plugin-opt=-force-import-all"));
- for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
- LldArgs.push_back(
- Args.MakeArgString(Twine("-plugin-opt=") + A->getValue(0)));
- }
-
if (C.getDriver().isSaveTempsEnabled())
LldArgs.push_back("-save-temps");
addLinkerCompressDebugSectionsOption(TC, Args, LldArgs);
- for (auto *Arg : Args.filtered(options::OPT_Xoffload_linker))
- LldArgs.push_back(Arg->getValue(1));
+ // Given that host and device linking happen in separate processes, the device
+ // linker doesn't always have the visibility as to which device symbols are
+ // needed by a program, especially for the device symbol dependencies that are
+ // introduced through the host symbol resolution.
+ // For example: host_A() (A.obj) --> host_B(B.obj) --> device_kernel_B()
+ // (B.obj) In this case, the device linker doesn't know that A.obj actually
+ // depends on the kernel functions in B.obj. When linking to static device
+ // library, the device linker may drop some of the device global symbols if
+ // they aren't referenced. As a workaround, we are adding to the
+ // --whole-archive flag such that all global symbols would be linked in.
+ LldArgs.push_back("--whole-archive");
+
+ for (auto *Arg : Args.filtered(options::OPT_Xoffload_linker)) {
+ StringRef ArgVal = Arg->getValue(1);
+ auto SplitArg = ArgVal.split("-mllvm=");
+ if (!SplitArg.second.empty()) {
+ LldArgs.push_back(
+ Args.MakeArgString(Twine("-plugin-opt=") + SplitArg.second));
+ } else {
+ LldArgs.push_back(Args.MakeArgString(ArgVal));
+ }
+ Arg->claim();
+ }
LldArgs.append({"-o", Output.getFilename()});
for (auto Input : Inputs)
@@ -167,6 +183,8 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
/*IsBitCodeSDL=*/true,
/*PostClangLink=*/false);
+ LldArgs.push_back("--no-whole-archive");
+
const char *Lld = Args.MakeArgString(getToolChain().GetProgramPath("lld"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
Lld, LldArgs, Inputs, Output));
@@ -270,8 +288,7 @@ HIPAMDToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
const OptTable &Opts = getDriver().getOpts();
for (Arg *A : Args) {
- if (!shouldSkipArgument(A) &&
- !shouldSkipSanitizeOption(*this, Args, BoundArch, A))
+ if (!shouldSkipSanitizeOption(*this, Args, BoundArch, A))
DAL->append(A);
}
@@ -315,7 +332,7 @@ void HIPAMDToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
void HIPAMDToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+ RocmInstallation->AddHIPIncludeArgs(DriverArgs, CC1Args);
}
SanitizerMask HIPAMDToolChain::getSupportedSanitizers() const {
@@ -344,7 +361,7 @@ HIPAMDToolChain::getDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
ArgStringList LibraryPaths;
// Find in --hip-device-lib-path and HIP_LIBRARY_PATH.
- for (StringRef Path : RocmInstallation.getRocmDeviceLibPathArg())
+ for (StringRef Path : RocmInstallation->getRocmDeviceLibPathArg())
LibraryPaths.push_back(DriverArgs.MakeArgString(Path));
addDirectoryList(DriverArgs, LibraryPaths, "", "HIP_DEVICE_LIB_PATH");
@@ -366,7 +383,7 @@ HIPAMDToolChain::getDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
getDriver().Diag(diag::err_drv_no_such_file) << BCName;
});
} else {
- if (!RocmInstallation.hasDeviceLibrary()) {
+ if (!RocmInstallation->hasDeviceLibrary()) {
getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 0;
return {};
}
@@ -377,7 +394,7 @@ HIPAMDToolChain::getDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
if (DriverArgs.hasFlag(options::OPT_fgpu_sanitize,
options::OPT_fno_gpu_sanitize, true) &&
getSanitizerArgs(DriverArgs).needsAsanRt()) {
- auto AsanRTL = RocmInstallation.getAsanRTLPath();
+ auto AsanRTL = RocmInstallation->getAsanRTLPath();
if (AsanRTL.empty()) {
unsigned DiagID = getDriver().getDiags().getCustomDiagID(
DiagnosticsEngine::Error,
@@ -391,7 +408,7 @@ HIPAMDToolChain::getDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
}
// Add the HIP specific bitcode library.
- BCLibs.push_back(RocmInstallation.getHIPPath());
+ BCLibs.push_back(RocmInstallation->getHIPPath());
// Add common device libraries like ocml etc.
for (StringRef N : getCommonDeviceLibNames(DriverArgs, GpuArch.str()))
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp
index 78566ca9a652..ea6a16029130 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp
@@ -283,10 +283,10 @@ VersionTuple HIPSPVToolChain::computeMSVCVersion(const Driver *D,
}
void HIPSPVToolChain::adjustDebugInfoKind(
- codegenoptions::DebugInfoKind &DebugInfoKind,
+ llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
const llvm::opt::ArgList &Args) const {
// Debug info generation is disabled for SPIRV-LLVM-Translator
// which currently aborts on the presence of DW_OP_LLVM_convert.
// TODO: Enable debug info when the SPIR-V backend arrives.
- DebugInfoKind = codegenoptions::NoDebugInfo;
+ DebugInfoKind = llvm::codegenoptions::NoDebugInfo;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.h
index 1c4c474cdf69..1eaef432171e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.h
@@ -77,9 +77,8 @@ public:
computeMSVCVersion(const Driver *D,
const llvm::opt::ArgList &Args) const override;
- void adjustDebugInfoKind(codegenoptions::DebugInfoKind &DebugInfoKind,
+ void adjustDebugInfoKind(llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
const llvm::opt::ArgList &Args) const override;
- bool IsIntegratedAssemblerDefault() const override { return true; }
bool IsMathErrnoDefault() const override { return false; }
bool useIntegratedAs() const override { return true; }
bool isCrossCompiling() const override { return true; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp
index 6f8c563c22aa..8b9d8db90ffa 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp
@@ -10,8 +10,8 @@
#include "CommonArgs.h"
#include "clang/Driver/Compilation.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Path.h"
+#include "llvm/TargetParser/Triple.h"
using namespace clang::driver;
using namespace clang::driver::tools;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp
index 174146145777..33f3ed1c638b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp
@@ -8,9 +8,11 @@
#include "HLSL.h"
#include "CommonArgs.h"
+#include "clang/Driver/Compilation.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Job.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/TargetParser/Triple.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -133,10 +135,49 @@ bool isLegalValidatorVersion(StringRef ValVersionStr, const Driver &D) {
} // namespace
+void tools::hlsl::Validator::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ std::string DxvPath = getToolChain().GetProgramPath("dxv");
+ assert(DxvPath != "dxv" && "cannot find dxv");
+
+ ArgStringList CmdArgs;
+ assert(Inputs.size() == 1 && "Unable to handle multiple inputs.");
+ const InputInfo &Input = Inputs[0];
+ assert(Input.isFilename() && "Unexpected verify input");
+ // Grabbing the output of the earlier cc1 run.
+ CmdArgs.push_back(Input.getFilename());
+ // Use the same name as output.
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Input.getFilename());
+
+ const char *Exec = Args.MakeArgString(DxvPath);
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs, Input));
+}
+
/// DirectX Toolchain
HLSLToolChain::HLSLToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
- : ToolChain(D, Triple, Args) {}
+ : ToolChain(D, Triple, Args) {
+ if (Args.hasArg(options::OPT_dxc_validator_path_EQ))
+ getProgramPaths().push_back(
+ Args.getLastArgValue(options::OPT_dxc_validator_path_EQ).str());
+}
+
+Tool *clang::driver::toolchains::HLSLToolChain::getTool(
+ Action::ActionClass AC) const {
+ switch (AC) {
+ case Action::BinaryAnalyzeJobClass:
+ if (!Validator)
+ Validator.reset(new tools::hlsl::Validator(*this));
+ return Validator.get();
+ default:
+ return ToolChain::getTool(AC);
+ }
+}
std::optional<std::string>
clang::driver::toolchains::HLSLToolChain::parseTargetProfile(
@@ -212,3 +253,15 @@ HLSLToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
// See: https://github.com/llvm/llvm-project/issues/57876
return DAL;
}
+
+bool HLSLToolChain::requiresValidation(DerivedArgList &Args) const {
+ if (Args.getLastArg(options::OPT_dxc_disable_validation))
+ return false;
+
+ std::string DxvPath = GetProgramPath("dxv");
+ if (DxvPath != "dxv")
+ return true;
+
+ getDriver().Diag(diag::warn_drv_dxc_missing_dxv);
+ return false;
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h
index 47eefdc24238..7b775b897431 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h
@@ -9,17 +9,37 @@
#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HLSL_H
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HLSL_H
+#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
namespace clang {
namespace driver {
+namespace tools {
+
+namespace hlsl {
+class LLVM_LIBRARY_VISIBILITY Validator : public Tool {
+public:
+ Validator(const ToolChain &TC) : Tool("hlsl::Validator", "dxv", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // namespace hlsl
+} // namespace tools
+
namespace toolchains {
class LLVM_LIBRARY_VISIBILITY HLSLToolChain : public ToolChain {
public:
HLSLToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
+ Tool *getTool(Action::ActionClass AC) const override;
+
bool isPICDefault() const override { return false; }
bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
return false;
@@ -30,6 +50,10 @@ public:
TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
Action::OffloadKind DeviceOffloadKind) const override;
static std::optional<std::string> parseTargetProfile(StringRef TargetProfile);
+ bool requiresValidation(llvm::opt::DerivedArgList &Args) const;
+
+private:
+ mutable std::unique_ptr<tools::hlsl::Validator> Validator;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
index 09d2f41ab066..7acc600a6aa4 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
@@ -159,9 +159,11 @@ static void handleHVXTargetFeatures(const Driver &D, const ArgList &Args,
}
// Hexagon target features.
-void hexagon::getHexagonTargetFeatures(const Driver &D, const ArgList &Args,
+void hexagon::getHexagonTargetFeatures(const Driver &D,
+ const llvm::Triple &Triple,
+ const ArgList &Args,
std::vector<StringRef> &Features) {
- handleTargetFeaturesGroup(Args, Features,
+ handleTargetFeaturesGroup(D, Triple, Args, Features,
options::OPT_m_hexagon_Features_Group);
bool UseLongCalls = false;
@@ -361,9 +363,8 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
CmdArgs.push_back(
Args.MakeArgString(StringRef("-L") + D.SysRoot + "/usr/lib"));
- Args.AddAllArgs(CmdArgs,
- {options::OPT_T_Group, options::OPT_e, options::OPT_s,
- options::OPT_t, options::OPT_u_Group});
+ Args.AddAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_s,
+ options::OPT_t, options::OPT_u_Group});
AddLinkerInputs(HTC, Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
@@ -444,9 +445,8 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
//----------------------------------------------------------------------------
//
//----------------------------------------------------------------------------
- Args.AddAllArgs(CmdArgs,
- {options::OPT_T_Group, options::OPT_e, options::OPT_s,
- options::OPT_t, options::OPT_u_Group});
+ Args.AddAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_s,
+ options::OPT_t, options::OPT_u_Group});
AddLinkerInputs(HTC, Inputs, Args, CmdArgs, JA);
@@ -540,7 +540,9 @@ HexagonToolChain::getSmallDataThreshold(const ArgList &Args) {
std::string HexagonToolChain::getCompilerRTPath() const {
SmallString<128> Dir(getDriver().SysRoot);
llvm::sys::path::append(Dir, "usr", "lib");
- Dir += SelectedMultilib.gccSuffix();
+ if (!SelectedMultilibs.empty()) {
+ Dir += SelectedMultilibs.back().gccSuffix();
+ }
return std::string(Dir.str());
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h
index 47a3304c46ae..4799c3028ff9 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h
@@ -50,7 +50,8 @@ public:
const char *LinkingOutput) const override;
};
-void getHexagonTargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
+void getHexagonTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
std::vector<StringRef> &Features);
} // end namespace hexagon.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp
index 48b9ccadf36f..2dfc90ef37f7 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp
@@ -65,7 +65,7 @@ Hurd::Hurd(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
Multilibs = GCCInstallation.getMultilibs();
- SelectedMultilib = GCCInstallation.getMultilib();
+ SelectedMultilibs.assign({GCCInstallation.getMultilib()});
std::string SysRoot = computeSysRoot();
ToolChain::path_list &PPaths = getProgramPaths();
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/LazyDetector.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/LazyDetector.h
new file mode 100644
index 000000000000..813d00a87bb8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/LazyDetector.h
@@ -0,0 +1,45 @@
+//===--- LazyDetector.h - Lazy ToolChain Detection --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_LAZYDETECTOR_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_LAZYDETECTOR_H
+
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+#include <optional>
+
+namespace clang {
+
+/// Simple wrapper for toolchain detector with costly initialization. This
+/// delays the creation of the actual detector until its first usage.
+
+template <class T> class LazyDetector {
+ const driver::Driver &D;
+ llvm::Triple Triple;
+ const llvm::opt::ArgList &Args;
+
+ std::optional<T> Detector;
+
+public:
+ LazyDetector(const driver::Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : D(D), Triple(Triple), Args(Args) {}
+ T *operator->() {
+ if (!Detector)
+ Detector.emplace(D, Triple, Args);
+ return &*Detector;
+ }
+ const T *operator->() const {
+ return const_cast<T const *>(
+ const_cast<LazyDetector &>(*this).operator->());
+ }
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_LAZYDETECTOR_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
index c6fb290ffdb4..1ba222bf83b1 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
@@ -86,14 +86,45 @@ std::string Linux::getMultiarchTriple(const Driver &D,
case llvm::Triple::aarch64_be:
return "aarch64_be-linux-gnu";
+ case llvm::Triple::loongarch64: {
+ const char *Libc;
+ const char *FPFlavor;
+
+ if (TargetTriple.isGNUEnvironment()) {
+ Libc = "gnu";
+ } else if (TargetTriple.isMusl()) {
+ Libc = "musl";
+ } else {
+ return TargetTriple.str();
+ }
+
+ switch (TargetEnvironment) {
+ default:
+ return TargetTriple.str();
+ case llvm::Triple::GNUSF:
+ FPFlavor = "sf";
+ break;
+ case llvm::Triple::GNUF32:
+ FPFlavor = "f32";
+ break;
+ case llvm::Triple::GNU:
+ case llvm::Triple::GNUF64:
+ // This was going to be "f64" in an earlier Toolchain Conventions
+ // revision, but starting from Feb 2023 the F64 ABI variants are
+ // unmarked in their canonical forms.
+ FPFlavor = "";
+ break;
+ }
+
+ return (Twine("loongarch64-linux-") + Libc + FPFlavor).str();
+ }
+
case llvm::Triple::m68k:
return "m68k-linux-gnu";
case llvm::Triple::mips:
return IsMipsR6 ? "mipsisa32r6-linux-gnu" : "mips-linux-gnu";
case llvm::Triple::mipsel:
- if (IsAndroid)
- return "mipsel-linux-android";
return IsMipsR6 ? "mipsisa32r6el-linux-gnu" : "mipsel-linux-gnu";
case llvm::Triple::mips64: {
std::string MT = std::string(IsMipsR6 ? "mipsisa64r6" : "mips64") +
@@ -105,8 +136,6 @@ std::string Linux::getMultiarchTriple(const Driver &D,
break;
}
case llvm::Triple::mips64el: {
- if (IsAndroid)
- return "mips64el-linux-android";
std::string MT = std::string(IsMipsR6 ? "mipsisa64r6el" : "mips64el") +
"-linux-" + (IsMipsN32Abi ? "gnuabin32" : "gnuabi64");
if (D.getVFS().exists(concat(SysRoot, "/lib", MT)))
@@ -126,6 +155,8 @@ std::string Linux::getMultiarchTriple(const Driver &D,
case llvm::Triple::ppc64le:
return "powerpc64le-linux-gnu";
case llvm::Triple::riscv64:
+ if (IsAndroid)
+ return "riscv64-linux-android";
return "riscv64-linux-gnu";
case llvm::Triple::sparc:
return "sparc-linux-gnu";
@@ -182,7 +213,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
Multilibs = GCCInstallation.getMultilibs();
- SelectedMultilib = GCCInstallation.getMultilib();
+ SelectedMultilibs.assign({GCCInstallation.getMultilib()});
llvm::Triple::ArchType Arch = Triple.getArch();
std::string SysRoot = computeSysRoot();
ToolChain::path_list &PPaths = getProgramPaths();
@@ -226,8 +257,8 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
const bool IsRISCV = Triple.isRISCV();
const bool IsCSKY = Triple.isCSKY();
- if (IsCSKY)
- SysRoot = SysRoot + SelectedMultilib.osSuffix();
+ if (IsCSKY && !SelectedMultilibs.empty())
+ SysRoot = SysRoot + SelectedMultilibs.back().osSuffix();
if ((IsMips || IsCSKY) && !SysRoot.empty())
ExtraOpts.push_back("--sysroot=" + SysRoot);
@@ -301,13 +332,6 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
Generic_GCC::AddMultiarchPaths(D, SysRoot, OSLibDir, Paths);
- // The deprecated -DLLVM_ENABLE_PROJECTS=libcxx configuration installs
- // libc++.so in D.Dir+"/../lib/". Detect this path.
- // TODO Remove once LLVM_ENABLE_PROJECTS=libcxx is unsupported.
- if (StringRef(D.Dir).startswith(SysRoot) &&
- D.getVFS().exists(D.Dir + "/../lib/libc++.so"))
- addPathIfExists(D, D.Dir + "/../lib", Paths);
-
addPathIfExists(D, concat(SysRoot, "/lib"), Paths);
addPathIfExists(D, concat(SysRoot, "/usr/lib"), Paths);
}
@@ -402,9 +426,17 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
const Distro Distro(getDriver().getVFS(), Triple);
- if (Triple.isAndroid())
+ if (Triple.isAndroid()) {
+ if (getSanitizerArgs(Args).needsHwasanRt() &&
+ !Triple.isAndroidVersionLT(34) && Triple.isArch64Bit()) {
+ // On Android 14 and newer, there is a special linker_hwasan64 that
+ // allows to run HWASan binaries on non-HWASan system images. This
+ // is also available on HWASan system images, so we can just always
+ // use that instead.
+ return "/system/bin/linker_hwasan64";
+ }
return Triple.isArch64Bit() ? "/system/bin/linker64" : "/system/bin/linker";
-
+ }
if (Triple.isMusl()) {
std::string ArchName;
bool IsArm = false;
@@ -681,23 +713,23 @@ void Linux::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
void Linux::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
+ CudaInstallation->AddCudaIncludeArgs(DriverArgs, CC1Args);
}
void Linux::AddHIPIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+ RocmInstallation->AddHIPIncludeArgs(DriverArgs, CC1Args);
}
void Linux::AddHIPRuntimeLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
CmdArgs.push_back(
- Args.MakeArgString(StringRef("-L") + RocmInstallation.getLibPath()));
+ Args.MakeArgString(StringRef("-L") + RocmInstallation->getLibPath()));
- if (Args.hasFlag(options::OPT_offload_add_rpath,
- options::OPT_no_offload_add_rpath, false))
+ if (Args.hasFlag(options::OPT_frtlib_add_rpath,
+ options::OPT_fno_rtlib_add_rpath, false))
CmdArgs.append(
- {"-rpath", Args.MakeArgString(RocmInstallation.getLibPath())});
+ {"-rpath", Args.MakeArgString(RocmInstallation->getLibPath())});
CmdArgs.push_back("-lamdhip64");
}
@@ -763,7 +795,7 @@ SanitizerMask Linux::getSupportedSanitizers() const {
Res |= SanitizerKind::Memory;
Res |= SanitizerKind::Vptr;
Res |= SanitizerKind::SafeStack;
- if (IsX86_64 || IsMIPS64 || IsAArch64)
+ if (IsX86_64 || IsMIPS64 || IsAArch64 || IsLoongArch64)
Res |= SanitizerKind::DataFlow;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsArmArch || IsPowerPC64 ||
IsRISCV64 || IsSystemZ || IsHexagon || IsLoongArch64)
@@ -771,12 +803,10 @@ SanitizerMask Linux::getSupportedSanitizers() const {
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64 || IsSystemZ ||
IsLoongArch64)
Res |= SanitizerKind::Thread;
- if (IsX86_64)
+ if (IsX86_64 || IsSystemZ)
Res |= SanitizerKind::KernelMemory;
- if (IsX86 || IsX86_64)
- Res |= SanitizerKind::Function;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsMIPS || IsArmArch ||
- IsPowerPC64 || IsHexagon || IsLoongArch64)
+ IsPowerPC64 || IsHexagon || IsLoongArch64 || IsRISCV64)
Res |= SanitizerKind::Scudo;
if (IsX86_64 || IsAArch64 || IsRISCV64) {
Res |= SanitizerKind::HWAddress;
@@ -784,6 +814,9 @@ SanitizerMask Linux::getSupportedSanitizers() const {
if (IsX86_64 || IsAArch64) {
Res |= SanitizerKind::KernelHWAddress;
}
+ // Work around "Cannot represent a difference across sections".
+ if (getTriple().getArch() == llvm::Triple::ppc64)
+ Res &= ~SanitizerKind::Function;
return Res;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp
index 4edc1d2f0a1f..2805c28f0029 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp
@@ -280,7 +280,6 @@ void msp430::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--gc-sections");
Args.AddAllArgs(CmdArgs, {
- options::OPT_e,
options::OPT_n,
options::OPT_s,
options::OPT_t,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
index b8aa21b7a766..a9fe9da4620f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
@@ -23,11 +23,11 @@
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/Host.h"
#include <cstdio>
#ifdef _WIN32
@@ -269,6 +269,26 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddRunTimeLibs(TC, TC.getDriver(), CmdArgs, Args);
}
+ StringRef Linker =
+ Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER);
+ if (Linker.empty())
+ Linker = "link";
+ // We need to translate 'lld' into 'lld-link'.
+ else if (Linker.equals_insensitive("lld"))
+ Linker = "lld-link";
+
+ if (Linker == "lld-link") {
+ for (Arg *A : Args.filtered(options::OPT_vfsoverlay))
+ CmdArgs.push_back(
+ Args.MakeArgString(std::string("/vfsoverlay:") + A->getValue()));
+
+ if (C.getDriver().isUsingLTO() &&
+ Args.hasFlag(options::OPT_gsplit_dwarf, options::OPT_gno_split_dwarf,
+ false))
+ CmdArgs.push_back(Args.MakeArgString(Twine("/dwodir:") +
+ Output.getFilename() + "_dwo"));
+ }
+
// Add filenames, libraries, and other linker inputs.
for (const auto &Input : Inputs) {
if (Input.isFilename()) {
@@ -301,17 +321,9 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
std::vector<const char *> Environment;
- // We need to special case some linker paths. In the case of lld, we need to
- // translate 'lld' into 'lld-link', and in the case of the regular msvc
+ // We need to special case some linker paths. In the case of the regular msvc
// linker, we need to use a special search algorithm.
llvm::SmallString<128> linkPath;
- StringRef Linker
- = Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER);
- if (Linker.empty())
- Linker = "link";
- if (Linker.equals_insensitive("lld"))
- Linker = "lld-link";
-
if (Linker.equals_insensitive("link")) {
// If we're using the MSVC linker, it's not sufficient to just use link
// from the program PATH, because other environments like GnuWin32 install
@@ -331,6 +343,11 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ // Clang handles passing the proper asan libs to the linker, which goes
+ // against link.exe's /INFERASANLIBS which automatically finds asan libs.
+ if (TC.getSanitizerArgs(Args).needsAsanRt())
+ CmdArgs.push_back("/INFERASANLIBS:NO");
+
#ifdef _WIN32
// When cross-compiling with VS2017 or newer, link.exe expects to have
// its containing bin directory at the top of PATH, followed by the
@@ -371,7 +388,7 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// find it.
for (const char *Cursor = EnvBlock.data(); *Cursor != '\0';) {
llvm::StringRef EnvVar(Cursor);
- if (EnvVar.startswith_insensitive("path=")) {
+ if (EnvVar.starts_with_insensitive("path=")) {
constexpr size_t PrefixLen = 5; // strlen("path=")
Environment.push_back(Args.MakeArgString(
EnvVar.substr(0, PrefixLen) +
@@ -430,8 +447,8 @@ MSVCToolChain::MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
WinSysRoot, VCToolChainPath, VSLayout) ||
llvm::findVCToolChainViaEnvironment(getVFS(), VCToolChainPath,
VSLayout) ||
- llvm::findVCToolChainViaSetupConfig(getVFS(), VCToolChainPath,
- VSLayout) ||
+ llvm::findVCToolChainViaSetupConfig(getVFS(), VCToolsVersion,
+ VCToolChainPath, VSLayout) ||
llvm::findVCToolChainViaRegistry(VCToolChainPath, VSLayout);
}
@@ -446,10 +463,6 @@ Tool *MSVCToolChain::buildAssembler() const {
return nullptr;
}
-bool MSVCToolChain::IsIntegratedAssemblerDefault() const {
- return true;
-}
-
ToolChain::UnwindTableLevel
MSVCToolChain::getDefaultUnwindTableLevel(const ArgList &Args) const {
// Don't emit unwind tables by default for MachO targets.
@@ -536,6 +549,10 @@ bool MSVCToolChain::getWindowsSDKLibraryPath(const ArgList &Args,
llvm::SmallString<128> libPath(sdkPath);
llvm::sys::path::append(libPath, "Lib");
+ if (sdkMajor >= 10)
+ if (!(WinSdkDir.has_value() || WinSysRoot.has_value()) &&
+ WinSdkVersion.has_value())
+ windowsSDKLibVersion = *WinSdkVersion;
if (sdkMajor >= 8)
llvm::sys::path::append(libPath, windowsSDKLibVersion, "um");
return llvm::appendArchToWindowsSDKLibPath(sdkMajor, libPath, getArch(),
@@ -557,6 +574,10 @@ bool MSVCToolChain::getUniversalCRTLibraryPath(const ArgList &Args,
UCRTVersion))
return false;
+ if (!(WinSdkDir.has_value() || WinSysRoot.has_value()) &&
+ WinSdkVersion.has_value())
+ UCRTVersion = *WinSdkVersion;
+
StringRef ArchName = llvm::archToWindowsSDKArch(getArch());
if (ArchName.empty())
return false;
@@ -686,6 +707,9 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
if (llvm::getUniversalCRTSdkDir(getVFS(), WinSdkDir, WinSdkVersion,
WinSysRoot, UniversalCRTSdkPath,
UCRTVersion)) {
+ if (!(WinSdkDir.has_value() || WinSysRoot.has_value()) &&
+ WinSdkVersion.has_value())
+ UCRTVersion = *WinSdkVersion;
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, UniversalCRTSdkPath,
"Include", UCRTVersion, "ucrt");
}
@@ -698,6 +722,10 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
if (llvm::getWindowsSDKDir(getVFS(), WinSdkDir, WinSdkVersion, WinSysRoot,
WindowsSDKDir, major, windowsSDKIncludeVersion,
windowsSDKLibVersion)) {
+ if (major >= 10)
+ if (!(WinSdkDir.has_value() || WinSysRoot.has_value()) &&
+ WinSdkVersion.has_value())
+ windowsSDKIncludeVersion = windowsSDKLibVersion = *WinSdkVersion;
if (major >= 8) {
// Note: windowsSDKIncludeVersion is empty for SDKs prior to v10.
// Anyway, llvm::sys::path::append is able to manage it.
@@ -760,6 +788,9 @@ VersionTuple MSVCToolChain::computeMSVCVersion(const Driver *D,
Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
IsWindowsMSVC)) {
// -fms-compatibility-version=19.20 is default, aka 2019, 16.x
+ // NOTE: when changing this value, also update
+ // clang/docs/CommandGuide/clang.rst and clang/docs/UsersManual.rst
+ // accordingly.
MSVT = VersionTuple(19, 20);
}
return MSVT;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
index 2826ee6aee28..0f687bc70ae4 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
@@ -11,10 +11,10 @@
#include "AMDGPU.h"
#include "Cuda.h"
-#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
+#include "llvm/Frontend/Debug/Options.h"
#include "llvm/WindowsDriver/MSVCPaths.h"
namespace clang {
@@ -50,7 +50,6 @@ public:
TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
Action::OffloadKind DeviceOffloadKind) const override;
- bool IsIntegratedAssemblerDefault() const override;
UnwindTableLevel
getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override;
bool isPICDefault() const override;
@@ -60,9 +59,10 @@ public:
/// Set CodeView as the default debug info format for non-MachO binary
/// formats, and to DWARF otherwise. Users can use -gcodeview and -gdwarf to
/// override the default.
- codegenoptions::DebugInfoFormat getDefaultDebugFormat() const override {
- return getTriple().isOSBinFormatMachO() ? codegenoptions::DIF_DWARF
- : codegenoptions::DIF_CodeView;
+ llvm::codegenoptions::DebugInfoFormat getDefaultDebugFormat() const override {
+ return getTriple().isOSBinFormatMachO()
+ ? llvm::codegenoptions::DIF_DWARF
+ : llvm::codegenoptions::DIF_CodeView;
}
/// Set the debugger tuning to "default", since we're definitely not tuning
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
index bac486bab885..b47041dcca70 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -135,7 +135,7 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("arm64pe");
break;
default:
- llvm_unreachable("Unsupported target architecture.");
+ D.Diag(diag::err_target_unknown_triple) << TC.getEffectiveTriple().str();
}
Arg *SubsysArg =
@@ -192,7 +192,6 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
} else
CmdArgs.push_back(OutputFile);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
// FIXME: add -N, -n flags
Args.AddLastArg(CmdArgs, options::OPT_r);
Args.AddLastArg(CmdArgs, options::OPT_s);
@@ -519,8 +518,6 @@ toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
.equals_insensitive("lld");
}
-bool toolchains::MinGW::IsIntegratedAssemblerDefault() const { return true; }
-
Tool *toolchains::MinGW::getTool(Action::ActionClass AC) const {
switch (AC) {
case Action::PreprocessJobClass:
@@ -701,6 +698,9 @@ void toolchains::MinGW::addClangTargetOptions(
<< A->getSpelling() << GuardArgs;
}
}
+
+ if (Arg *A = DriverArgs.getLastArgNoClaim(options::OPT_mthreads))
+ A->ignoreTargetSpecific();
}
void toolchains::MinGW::AddClangCXXStdlibIncludeArgs(
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h
index 2919d57e8957..6d5feeacdadd 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h
@@ -65,7 +65,6 @@ public:
bool HasNativeLLVMSupport() const override;
- bool IsIntegratedAssemblerDefault() const override;
UnwindTableLevel
getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override;
bool isPICDefault() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp
index 9c58583bca77..eacdcbf730b6 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp
@@ -30,7 +30,7 @@ MipsLLVMToolChain::MipsLLVMToolChain(const Driver &D,
DetectedMultilibs Result;
findMIPSMultilibs(D, Triple, "", Args, Result);
Multilibs = Result.Multilibs;
- SelectedMultilib = Result.SelectedMultilib;
+ SelectedMultilibs = Result.SelectedMultilibs;
// Find out the library suffix based on the ABI.
LibSuffix = tools::mips::getMipsABILibSuffix(Args, Triple);
@@ -56,7 +56,7 @@ void MipsLLVMToolChain::AddClangSystemIncludeArgs(
const auto &Callback = Multilibs.includeDirsCallback();
if (Callback) {
- for (const auto &Path : Callback(SelectedMultilib))
+ for (const auto &Path : Callback(SelectedMultilibs.back()))
addExternCSystemIncludeIfExists(DriverArgs, CC1Args,
D.getInstalledDir() + Path);
}
@@ -68,11 +68,11 @@ Tool *MipsLLVMToolChain::buildLinker() const {
std::string MipsLLVMToolChain::computeSysRoot() const {
if (!getDriver().SysRoot.empty())
- return getDriver().SysRoot + SelectedMultilib.osSuffix();
+ return getDriver().SysRoot + SelectedMultilibs.back().osSuffix();
const std::string InstalledDir(getDriver().getInstalledDir());
std::string SysRootPath =
- InstalledDir + "/../sysroot" + SelectedMultilib.osSuffix();
+ InstalledDir + "/../sysroot" + SelectedMultilibs.back().osSuffix();
if (llvm::sys::fs::exists(SysRootPath))
return SysRootPath;
@@ -96,7 +96,7 @@ void MipsLLVMToolChain::addLibCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
if (const auto &Callback = Multilibs.includeDirsCallback()) {
- for (std::string Path : Callback(SelectedMultilib)) {
+ for (std::string Path : Callback(SelectedMultilibs.back())) {
Path = getDriver().getInstalledDir() + Path + "/c++/v1";
if (llvm::sys::fs::exists(Path)) {
addSystemInclude(DriverArgs, CC1Args, Path);
@@ -122,7 +122,7 @@ std::string MipsLLVMToolChain::getCompilerRT(const ArgList &Args,
StringRef Component,
FileType Type) const {
SmallString<128> Path(getDriver().ResourceDir);
- llvm::sys::path::append(Path, SelectedMultilib.osSuffix(), "lib" + LibSuffix,
+ llvm::sys::path::append(Path, SelectedMultilibs.back().osSuffix(), "lib" + LibSuffix,
getOS());
const char *Suffix;
switch (Type) {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.h
index 31b547c0063c..a968804f2a6e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.h
@@ -53,7 +53,6 @@ public:
}
private:
- Multilib SelectedMultilib;
std::string LibSuffix;
};
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp
index eebf6dfc7d10..30424ff49e64 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp
@@ -161,9 +161,9 @@ void tools::Myriad::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtbegin.o")));
}
- Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
- options::OPT_e, options::OPT_s, options::OPT_t,
- options::OPT_Z_Flag, options::OPT_r});
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_L, options::OPT_T_Group, options::OPT_s,
+ options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
TC.AddFilePathLibArgs(Args, CmdArgs);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
index 7a7c905e3e7a..ab028f59deaa 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
@@ -256,7 +256,6 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
Args.AddAllArgs(CmdArgs, options::OPT_s);
Args.AddAllArgs(CmdArgs, options::OPT_t);
Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
@@ -272,28 +271,25 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ToolChain.getCompilerRTPath()));
}
- VersionTuple OsVersion = Triple.getOSVersion();
bool useLibgcc = true;
- if (OsVersion >= VersionTuple(7) || OsVersion.getMajor() == 0) {
- switch (ToolChain.getArch()) {
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_be:
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
- case llvm::Triple::sparc:
- case llvm::Triple::sparcv9:
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- useLibgcc = false;
- break;
- default:
- break;
- }
+ switch (ToolChain.getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcv9:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ useLibgcc = false;
+ break;
+ default:
+ break;
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
@@ -412,26 +408,23 @@ Tool *NetBSD::buildAssembler() const {
Tool *NetBSD::buildLinker() const { return new tools::netbsd::Linker(*this); }
ToolChain::CXXStdlibType NetBSD::GetDefaultCXXStdlibType() const {
- VersionTuple OsVersion = getTriple().getOSVersion();
- if (OsVersion >= VersionTuple(7) || OsVersion.getMajor() == 0) {
- switch (getArch()) {
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_be:
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
- case llvm::Triple::sparc:
- case llvm::Triple::sparcv9:
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- return ToolChain::CST_Libcxx;
- default:
- break;
- }
+ switch (getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcv9:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return ToolChain::CST_Libcxx;
+ default:
+ break;
}
return ToolChain::CST_Libstdcxx;
}
@@ -514,7 +507,6 @@ SanitizerMask NetBSD::getSupportedSanitizers() const {
Res |= SanitizerKind::Address;
Res |= SanitizerKind::PointerCompare;
Res |= SanitizerKind::PointerSubtract;
- Res |= SanitizerKind::Function;
Res |= SanitizerKind::Leak;
Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Scudo;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.cpp
new file mode 100644
index 000000000000..1e50c9d71d59
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.cpp
@@ -0,0 +1,419 @@
+//===--- OHOS.cpp - OHOS ToolChain Implementations --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "OHOS.h"
+#include "Arch/ARM.h"
+#include "CommonArgs.h"
+#include "clang/Config/config.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/SanitizerArgs.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/Support/ScopedPrinter.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+using namespace clang::driver::tools::arm;
+
+using tools::addMultilibFlag;
+using tools::addPathIfExists;
+
+static bool findOHOSMuslMultilibs(const Multilib::flags_list &Flags,
+ DetectedMultilibs &Result) {
+ MultilibSet Multilibs;
+ Multilibs.push_back(Multilib());
+ // -mcpu=cortex-a7
+ // -mfloat-abi=soft -mfloat-abi=softfp -mfloat-abi=hard
+ // -mfpu=neon-vfpv4
+ Multilibs.push_back(
+ Multilib("/a7_soft", {}, {}, {"-mcpu=cortex-a7", "-mfloat-abi=soft"}));
+
+ Multilibs.push_back(
+ Multilib("/a7_softfp_neon-vfpv4", {}, {},
+ {"-mcpu=cortex-a7", "-mfloat-abi=softfp", "-mfpu=neon-vfpv4"}));
+
+ Multilibs.push_back(
+ Multilib("/a7_hard_neon-vfpv4", {}, {},
+ {"-mcpu=cortex-a7", "-mfloat-abi=hard", "-mfpu=neon-vfpv4"}));
+
+ if (Multilibs.select(Flags, Result.SelectedMultilibs)) {
+ Result.Multilibs = Multilibs;
+ return true;
+ }
+ return false;
+}
+
+static bool findOHOSMultilibs(const Driver &D,
+ const ToolChain &TC,
+ const llvm::Triple &TargetTriple,
+ StringRef Path, const ArgList &Args,
+ DetectedMultilibs &Result) {
+ Multilib::flags_list Flags;
+ bool IsA7 = false;
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ IsA7 = A->getValue() == StringRef("cortex-a7");
+ addMultilibFlag(IsA7, "-mcpu=cortex-a7", Flags);
+
+ bool IsMFPU = false;
+ if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ))
+ IsMFPU = A->getValue() == StringRef("neon-vfpv4");
+ addMultilibFlag(IsMFPU, "-mfpu=neon-vfpv4", Flags);
+
+ tools::arm::FloatABI ARMFloatABI = getARMFloatABI(D, TargetTriple, Args);
+ addMultilibFlag((ARMFloatABI == tools::arm::FloatABI::Soft),
+ "-mfloat-abi=soft", Flags);
+ addMultilibFlag((ARMFloatABI == tools::arm::FloatABI::SoftFP),
+ "-mfloat-abi=softfp", Flags);
+ addMultilibFlag((ARMFloatABI == tools::arm::FloatABI::Hard),
+ "-mfloat-abi=hard", Flags);
+
+ return findOHOSMuslMultilibs(Flags, Result);
+}
+
+std::string OHOS::getMultiarchTriple(const llvm::Triple &T) const {
+ // For most architectures, just use whatever we have rather than trying to be
+ // clever.
+ switch (T.getArch()) {
+ default:
+ break;
+
+ // We use the existence of '/lib/<triple>' as a directory to detect some
+ // common linux triples that don't quite match the Clang triple for both
+ // 32-bit and 64-bit targets. Multiarch fixes its install triples to these
+ // regardless of what the actual target triple is.
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ return T.isOSLiteOS() ? "arm-liteos-ohos" : "arm-linux-ohos";
+ case llvm::Triple::riscv32:
+ return "riscv32-linux-ohos";
+ case llvm::Triple::riscv64:
+ return "riscv64-linux-ohos";
+ case llvm::Triple::mipsel:
+ return "mipsel-linux-ohos";
+ case llvm::Triple::x86:
+ return "i686-linux-ohos";
+ case llvm::Triple::x86_64:
+ return "x86_64-linux-ohos";
+ case llvm::Triple::aarch64:
+ return "aarch64-linux-ohos";
+ }
+ return T.str();
+}
+
+std::string OHOS::getMultiarchTriple(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef SysRoot) const {
+ return getMultiarchTriple(TargetTriple);
+}
+
+static std::string makePath(const std::initializer_list<std::string> &IL) {
+ SmallString<128> P;
+ for (const auto &S : IL)
+ llvm::sys::path::append(P, S);
+ return static_cast<std::string>(P.str());
+}
+
+/// OHOS Toolchain
+OHOS::OHOS(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ std::string SysRoot = computeSysRoot();
+
+ // Select the correct multilib according to the given arguments.
+ DetectedMultilibs Result;
+ findOHOSMultilibs(D, *this, Triple, "", Args, Result);
+ Multilibs = Result.Multilibs;
+ SelectedMultilibs = Result.SelectedMultilibs;
+ if (!SelectedMultilibs.empty()) {
+ SelectedMultilib = SelectedMultilibs.back();
+ }
+
+ getFilePaths().clear();
+ for (const auto &CandidateLibPath : getArchSpecificLibPaths())
+ if (getVFS().exists(CandidateLibPath))
+ getFilePaths().push_back(CandidateLibPath);
+
+ getLibraryPaths().clear();
+ for (auto &Path : getRuntimePaths())
+ if (getVFS().exists(Path))
+ getLibraryPaths().push_back(Path);
+
+ // OHOS sysroots contain a library directory for each supported OS
+ // version as well as some unversioned libraries in the usual multiarch
+ // directory. Support --target=aarch64-linux-ohosX.Y.Z or
+ // --target=aarch64-linux-ohosX.Y or --target=aarch64-linux-ohosX
+ path_list &Paths = getFilePaths();
+ std::string SysRootLibPath = makePath({SysRoot, "usr", "lib"});
+ std::string MultiarchTriple = getMultiarchTriple(getTriple());
+ addPathIfExists(D, makePath({SysRootLibPath, SelectedMultilib.gccSuffix()}),
+ Paths);
+ addPathIfExists(D,
+ makePath({D.Dir, "..", "lib", MultiarchTriple,
+ SelectedMultilib.gccSuffix()}),
+ Paths);
+
+ addPathIfExists(
+ D,
+ makePath({SysRootLibPath, MultiarchTriple, SelectedMultilib.gccSuffix()}),
+ Paths);
+}
+
+ToolChain::RuntimeLibType OHOS::GetRuntimeLibType(
+ const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(clang::driver::options::OPT_rtlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value != "compiler-rt")
+ getDriver().Diag(clang::diag::err_drv_invalid_rtlib_name)
+ << A->getAsString(Args);
+ }
+
+ return ToolChain::RLT_CompilerRT;
+}
+
+ToolChain::CXXStdlibType
+OHOS::GetCXXStdlibType(const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value != "libc++")
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name)
+ << A->getAsString(Args);
+ }
+
+ return ToolChain::CST_Libcxx;
+}
+
+void OHOS::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ const llvm::Triple &Triple = getTriple();
+ std::string SysRoot = computeSysRoot();
+
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Check for configure-time C include directories.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (StringRef dir : dirs) {
+ StringRef Prefix =
+ llvm::sys::path::is_absolute(dir) ? StringRef(SysRoot) : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
+ }
+ return;
+ }
+
+ addExternCSystemInclude(DriverArgs, CC1Args,
+ SysRoot + "/usr/include/" +
+ getMultiarchTriple(Triple));
+ addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/include");
+ addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
+}
+
+void OHOS::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx: {
+ std::string IncPath = makePath({getDriver().Dir, "..", "include"});
+ std::string IncTargetPath =
+ makePath({IncPath, getMultiarchTriple(getTriple()), "c++", "v1"});
+ if (getVFS().exists(IncTargetPath)) {
+ addSystemInclude(DriverArgs, CC1Args, makePath({IncPath, "c++", "v1"}));
+ addSystemInclude(DriverArgs, CC1Args, IncTargetPath);
+ }
+ break;
+ }
+
+ default:
+ llvm_unreachable("invalid stdlib name");
+ }
+}
+
+void OHOS::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ switch (GetCXXStdlibType(Args)) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
+ CmdArgs.push_back("-lunwind");
+ break;
+
+ case ToolChain::CST_Libstdcxx:
+ llvm_unreachable("invalid stdlib name");
+ }
+}
+
+std::string OHOS::computeSysRoot() const {
+ std::string SysRoot =
+ !getDriver().SysRoot.empty()
+ ? getDriver().SysRoot
+ : makePath({getDriver().getInstalledDir(), "..", "..", "sysroot"});
+ if (!llvm::sys::fs::exists(SysRoot))
+ return std::string();
+
+ std::string ArchRoot = makePath({SysRoot, getMultiarchTriple(getTriple())});
+ return llvm::sys::fs::exists(ArchRoot) ? ArchRoot : SysRoot;
+}
+
+ToolChain::path_list OHOS::getRuntimePaths() const {
+ SmallString<128> P;
+ path_list Paths;
+ const Driver &D = getDriver();
+ const llvm::Triple &Triple = getTriple();
+
+ // First try the triple passed to driver as --target=<triple>.
+ P.assign(D.ResourceDir);
+ llvm::sys::path::append(P, "lib", D.getTargetTriple(), SelectedMultilib.gccSuffix());
+ Paths.push_back(P.c_str());
+
+ // Second try the normalized triple.
+ P.assign(D.ResourceDir);
+ llvm::sys::path::append(P, "lib", Triple.str(), SelectedMultilib.gccSuffix());
+ Paths.push_back(P.c_str());
+
+ // Third try the effective triple.
+ P.assign(D.ResourceDir);
+ std::string SysRoot = computeSysRoot();
+ llvm::sys::path::append(P, "lib", getMultiarchTriple(Triple),
+ SelectedMultilib.gccSuffix());
+ Paths.push_back(P.c_str());
+
+ return Paths;
+}
+
+std::string OHOS::getDynamicLinker(const ArgList &Args) const {
+ const llvm::Triple &Triple = getTriple();
+ const llvm::Triple::ArchType Arch = getArch();
+
+ assert(Triple.isMusl());
+ std::string ArchName;
+ bool IsArm = false;
+
+ switch (Arch) {
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ ArchName = "arm";
+ IsArm = true;
+ break;
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ ArchName = "armeb";
+ IsArm = true;
+ break;
+ default:
+ ArchName = Triple.getArchName().str();
+ }
+ if (IsArm &&
+ (tools::arm::getARMFloatABI(*this, Args) == tools::arm::FloatABI::Hard))
+ ArchName += "hf";
+
+ return "/lib/ld-musl-" + ArchName + ".so.1";
+}
+
+std::string OHOS::getCompilerRT(const ArgList &Args, StringRef Component,
+ FileType Type) const {
+ SmallString<128> Path(getDriver().ResourceDir);
+ llvm::sys::path::append(Path, "lib", getMultiarchTriple(getTriple()),
+ SelectedMultilib.gccSuffix());
+ const char *Prefix =
+ Type == ToolChain::FT_Object ? "" : "lib";
+ const char *Suffix;
+ switch (Type) {
+ case ToolChain::FT_Object:
+ Suffix = ".o";
+ break;
+ case ToolChain::FT_Static:
+ Suffix = ".a";
+ break;
+ case ToolChain::FT_Shared:
+ Suffix = ".so";
+ break;
+ }
+ llvm::sys::path::append(
+ Path, Prefix + Twine("clang_rt.") + Component + Suffix);
+ return static_cast<std::string>(Path.str());
+}
+
+void OHOS::addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("now");
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("relro");
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("max-page-size=4096");
+ // .gnu.hash section is not compatible with the MIPS target
+ if (getArch() != llvm::Triple::mipsel)
+ CmdArgs.push_back("--hash-style=both");
+#ifdef ENABLE_LINKER_BUILD_ID
+ CmdArgs.push_back("--build-id");
+#endif
+ CmdArgs.push_back("--enable-new-dtags");
+}
+
+SanitizerMask OHOS::getSupportedSanitizers() const {
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::PointerCompare;
+ Res |= SanitizerKind::PointerSubtract;
+ Res |= SanitizerKind::Fuzzer;
+ Res |= SanitizerKind::FuzzerNoLink;
+ Res |= SanitizerKind::Memory;
+ Res |= SanitizerKind::Vptr;
+ Res |= SanitizerKind::SafeStack;
+ Res |= SanitizerKind::Scudo;
+ // TODO: kASAN for liteos ??
+ // TODO: Support TSAN and HWASAN and update mask.
+ return Res;
+}
+
+// TODO: Make a base class for Linux and OHOS and move this there.
+void OHOS::addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ // Add linker option -u__llvm_profile_runtime to cause runtime
+ // initialization module to be linked in.
+ if (needsProfileRT(Args))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
+ ToolChain::addProfileRTLibs(Args, CmdArgs);
+}
+
+ToolChain::path_list OHOS::getArchSpecificLibPaths() const {
+ ToolChain::path_list Paths;
+ llvm::Triple Triple = getTriple();
+ Paths.push_back(
+ makePath({getDriver().ResourceDir, "lib", getMultiarchTriple(Triple)}));
+ return Paths;
+}
+
+ToolChain::UnwindLibType OHOS::GetUnwindLibType(const llvm::opt::ArgList &Args) const {
+ if (Args.getLastArg(options::OPT_unwindlib_EQ))
+ return Generic_ELF::GetUnwindLibType(Args);
+ return GetDefaultUnwindLibType();
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.h
new file mode 100644
index 000000000000..2a380420922d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.h
@@ -0,0 +1,95 @@
+//===--- OHOS.h - OHOS ToolChain Implementations ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_OHOS_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_OHOS_H
+
+#include "Linux.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY OHOS : public Generic_ELF {
+public:
+ OHOS(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool HasNativeLLVMSupport() const override { return true; }
+
+ bool IsMathErrnoDefault() const override { return false; }
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override {
+ return ToolChain::RLT_CompilerRT;
+ }
+ CXXStdlibType GetDefaultCXXStdlibType() const override {
+ return ToolChain::CST_Libcxx;
+ }
+ // Not add -funwind-tables by default
+ bool isPICDefault() const override { return false; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override { return true; }
+ bool isPICDefaultForced() const override { return false; }
+ UnwindLibType GetUnwindLibType(const llvm::opt::ArgList &Args) const override;
+ UnwindLibType GetDefaultUnwindLibType() const override { return UNW_CompilerRT; }
+
+ RuntimeLibType
+ GetRuntimeLibType(const llvm::opt::ArgList &Args) const override;
+ CXXStdlibType
+ GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ std::string computeSysRoot() const override;
+ std::string getDynamicLinker(const llvm::opt::ArgList &Args) const override;
+
+ std::string
+ getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
+ FileType Type = ToolChain::FT_Static) const override;
+
+ const char *getDefaultLinker() const override {
+ return "ld.lld";
+ }
+
+ Tool *buildLinker() const override {
+ return new tools::gnutools::Linker(*this);
+ }
+ Tool *buildAssembler() const override {
+ return new tools::gnutools::Assembler(*this);
+ }
+
+ path_list getRuntimePaths() const;
+
+protected:
+ std::string getMultiarchTriple(const llvm::Triple &T) const;
+ std::string getMultiarchTriple(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef SysRoot) const override;
+ void addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const override;
+ SanitizerMask getSupportedSanitizers() const override;
+ void addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+ path_list getArchSpecificLibPaths() const override;
+
+private:
+ Multilib SelectedMultilib;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_OHOS_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
index c80c650e18fb..061f1c53f6b5 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
@@ -116,6 +116,11 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const Driver &D = ToolChain.getDriver();
const llvm::Triple::ArchType Arch = ToolChain.getArch();
ArgStringList CmdArgs;
+ bool Static = Args.hasArg(options::OPT_static);
+ bool Shared = Args.hasArg(options::OPT_shared);
+ bool Profiling = Args.hasArg(options::OPT_pg);
+ bool Pie = Args.hasArg(options::OPT_pie);
+ bool Nopie = Args.hasArg(options::OPT_nopie);
// Silence warning for "clang -g foo.o -o foo"
Args.ClaimAllArgs(options::OPT_g_Group);
@@ -133,19 +138,19 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
else if (Arch == llvm::Triple::mips64el)
CmdArgs.push_back("-EL");
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_shared)) {
+ if (!Args.hasArg(options::OPT_nostdlib) && !Shared) {
CmdArgs.push_back("-e");
CmdArgs.push_back("__start");
}
CmdArgs.push_back("--eh-frame-hdr");
- if (Args.hasArg(options::OPT_static)) {
+ if (Static) {
CmdArgs.push_back("-Bstatic");
} else {
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
CmdArgs.push_back("-Bdynamic");
- if (Args.hasArg(options::OPT_shared)) {
+ if (Shared) {
CmdArgs.push_back("-shared");
} else if (!Args.hasArg(options::OPT_r)) {
CmdArgs.push_back("-dynamic-linker");
@@ -153,9 +158,9 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (Args.hasArg(options::OPT_pie))
+ if (Pie)
CmdArgs.push_back("-pie");
- if (Args.hasArg(options::OPT_nopie) || Args.hasArg(options::OPT_pg))
+ if (Nopie || Profiling)
CmdArgs.push_back("-nopie");
if (Arch == llvm::Triple::riscv64)
@@ -172,11 +177,10 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_r)) {
const char *crt0 = nullptr;
const char *crtbegin = nullptr;
- if (!Args.hasArg(options::OPT_shared)) {
- if (Args.hasArg(options::OPT_pg))
+ if (!Shared) {
+ if (Profiling)
crt0 = "gcrt0.o";
- else if (Args.hasArg(options::OPT_static) &&
- !Args.hasArg(options::OPT_nopie))
+ else if (Static && !Nopie)
crt0 = "rcrt0.o";
else
crt0 = "crt0.o";
@@ -192,9 +196,9 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- Args.AddAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_e,
- options::OPT_s, options::OPT_t,
- options::OPT_Z_Flag, options::OPT_r});
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_T_Group, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_r});
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
@@ -203,14 +207,13 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
options::OPT_r)) {
// Use the static OpenMP runtime with -static-openmp
- bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) &&
- !Args.hasArg(options::OPT_static);
+ bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) && !Static;
addOpenMPRuntime(CmdArgs, ToolChain, Args, StaticOpenMP);
if (D.CCCIsCXX()) {
if (ToolChain.ShouldLinkCXXStdlib(Args))
ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
- if (Args.hasArg(options::OPT_pg))
+ if (Profiling)
CmdArgs.push_back("-lm_p");
else
CmdArgs.push_back("-lm");
@@ -228,14 +231,14 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lcompiler_rt");
if (Args.hasArg(options::OPT_pthread)) {
- if (!Args.hasArg(options::OPT_shared) && Args.hasArg(options::OPT_pg))
+ if (!Shared && Profiling)
CmdArgs.push_back("-lpthread_p");
else
CmdArgs.push_back("-lpthread");
}
- if (!Args.hasArg(options::OPT_shared)) {
- if (Args.hasArg(options::OPT_pg))
+ if (!Shared) {
+ if (Profiling)
CmdArgs.push_back("-lc_p");
else
CmdArgs.push_back("-lc");
@@ -247,7 +250,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
options::OPT_r)) {
const char *crtend = nullptr;
- if (!Args.hasArg(options::OPT_shared))
+ if (!Shared)
crtend = "crtend.o";
else
crtend = "crtendS.o";
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
index 643f815c5835..2f43d33bf0f1 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
@@ -8,6 +8,7 @@
#include "PS4CPU.h"
#include "CommonArgs.h"
+#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
@@ -159,18 +160,12 @@ void tools::PScpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const bool IsPS5 = TC.getTriple().isPS5();
assert(IsPS4 || IsPS5);
+ const char *PS4LTOArgs = "";
auto AddCodeGenFlag = [&](Twine Flag) {
- const char *Prefix = nullptr;
- if (IsPS4 && D.getLTOMode() == LTOK_Thin)
- Prefix = "-lto-thin-debug-options=";
- else if (IsPS4 && D.getLTOMode() == LTOK_Full)
- Prefix = "-lto-debug-options=";
+ if (IsPS4)
+ PS4LTOArgs = Args.MakeArgString(Twine(PS4LTOArgs) + " " + Flag);
else if (IsPS5)
- Prefix = "-plugin-opt=";
- else
- llvm_unreachable("new LTO mode?");
-
- CmdArgs.push_back(Args.MakeArgString(Twine(Prefix) + Flag));
+ CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=") + Flag));
};
if (UseLTO) {
@@ -184,14 +179,40 @@ void tools::PScpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_fcrash_diagnostics_dir))
AddCodeGenFlag(Twine("-crash-diagnostics-dir=") + A->getValue());
+
+ StringRef Parallelism = getLTOParallelism(Args, D);
+ if (!Parallelism.empty()) {
+ if (IsPS4)
+ AddCodeGenFlag(Twine("-threads=") + Parallelism);
+ else
+ CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=jobs=") + Parallelism));
+ }
+
+ if (IsPS4) {
+ const char *Prefix = nullptr;
+ if (D.getLTOMode() == LTOK_Thin)
+ Prefix = "-lto-thin-debug-options=";
+ else if (D.getLTOMode() == LTOK_Full)
+ Prefix = "-lto-debug-options=";
+ else
+ llvm_unreachable("new LTO mode?");
+
+ CmdArgs.push_back(Args.MakeArgString(Twine(Prefix) + PS4LTOArgs));
+ }
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
TC.addSanitizerArgs(Args, CmdArgs, "-l", "");
+ if (D.isUsingLTO() && Args.hasArg(options::OPT_funified_lto)) {
+ if (D.getLTOMode() == LTOK_Thin)
+ CmdArgs.push_back("--lto=thin");
+ else if (D.getLTOMode() == LTOK_Full)
+ CmdArgs.push_back("--lto=full");
+ }
+
Args.AddAllArgs(CmdArgs, options::OPT_L);
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
Args.AddAllArgs(CmdArgs, options::OPT_s);
Args.AddAllArgs(CmdArgs, options::OPT_t);
Args.AddAllArgs(CmdArgs, options::OPT_r);
@@ -235,33 +256,25 @@ toolchains::PS4PS5Base::PS4PS5Base(const Driver &D, const llvm::Triple &Triple,
D.Diag(clang::diag::err_drv_unsupported_opt_for_target)
<< "-static" << Platform;
- // Determine where to find the PS4/PS5 libraries. We use the EnvVar
- // if it exists; otherwise use the driver's installation path, which
- // should be <SDK_DIR>/host_tools/bin.
-
- SmallString<512> SDKDir;
- if (const char *EnvValue = getenv(EnvVar)) {
- if (!llvm::sys::fs::exists(EnvValue))
- D.Diag(clang::diag::warn_drv_ps_sdk_dir) << EnvVar << EnvValue;
- SDKDir = EnvValue;
- } else {
- SDKDir = D.Dir;
- llvm::sys::path::append(SDKDir, "/../../");
- }
-
- // By default, the driver won't report a warning if it can't find the
- // SDK include or lib directories. This behavior could be changed if
- // -Weverything or -Winvalid-or-nonexistent-directory options are passed.
+ // Determine where to find the PS4/PS5 libraries.
// If -isysroot was passed, use that as the SDK base path.
- std::string PrefixDir;
+ // If not, we use the EnvVar if it exists; otherwise use the driver's
+ // installation path, which should be <SDK_DIR>/host_tools/bin.
+ SmallString<80> Whence;
if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
- PrefixDir = A->getValue();
- if (!llvm::sys::fs::exists(PrefixDir))
- D.Diag(clang::diag::warn_missing_sysroot) << PrefixDir;
- } else
- PrefixDir = std::string(SDKDir.str());
+ SDKRootDir = A->getValue();
+ if (!llvm::sys::fs::exists(SDKRootDir))
+ D.Diag(clang::diag::warn_missing_sysroot) << SDKRootDir;
+ Whence = A->getSpelling();
+ } else if (const char *EnvValue = getenv(EnvVar)) {
+ SDKRootDir = EnvValue;
+ Whence = { "environment variable '", EnvVar, "'" };
+ } else {
+ SDKRootDir = D.Dir + "/../../";
+ Whence = "compiler's location";
+ }
- SmallString<512> SDKIncludeDir(PrefixDir);
+ SmallString<512> SDKIncludeDir(SDKRootDir);
llvm::sys::path::append(SDKIncludeDir, "target/include");
if (!Args.hasArg(options::OPT_nostdinc) &&
!Args.hasArg(options::OPT_nostdlibinc) &&
@@ -269,10 +282,10 @@ toolchains::PS4PS5Base::PS4PS5Base(const Driver &D, const llvm::Triple &Triple,
!Args.hasArg(options::OPT__sysroot_EQ) &&
!llvm::sys::fs::exists(SDKIncludeDir)) {
D.Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
- << Twine(Platform, " system headers").str() << SDKIncludeDir;
+ << Twine(Platform, " system headers").str() << SDKIncludeDir << Whence;
}
- SmallString<512> SDKLibDir(SDKDir);
+ SmallString<512> SDKLibDir(SDKRootDir);
llvm::sys::path::append(SDKLibDir, "target/lib");
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs) &&
@@ -281,12 +294,35 @@ toolchains::PS4PS5Base::PS4PS5Base(const Driver &D, const llvm::Triple &Triple,
!Args.hasArg(options::OPT_emit_ast) &&
!llvm::sys::fs::exists(SDKLibDir)) {
D.Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
- << Twine(Platform, " system libraries").str() << SDKLibDir;
+ << Twine(Platform, " system libraries").str() << SDKLibDir << Whence;
return;
}
getFilePaths().push_back(std::string(SDKLibDir.str()));
}
+void toolchains::PS4PS5Base::AddClangSystemIncludeArgs(
+ const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> Dir(D.ResourceDir);
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ addExternCSystemInclude(DriverArgs, CC1Args,
+ SDKRootDir + "/target/include");
+ addExternCSystemInclude(DriverArgs, CC1Args,
+ SDKRootDir + "/target/include_common");
+}
+
Tool *toolchains::PS4CPU::buildAssembler() const {
return new tools::PScpu::Assembler(*this);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
index 954e7d8d8d68..a51351d367be 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
@@ -63,6 +63,9 @@ public:
const llvm::opt::ArgList &Args, StringRef Platform,
const char *EnvVar);
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
// No support for finding a C++ standard library yet.
void addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override {
@@ -98,8 +101,6 @@ public:
return llvm::DenormalMode::getPreserveSign();
}
- bool useRelaxRelocations() const override { return true; }
-
// Helper methods for PS4/PS5.
virtual const char *getLinkerBaseName() const = 0;
virtual std::string qualifyPSCmdName(StringRef CmdName) const = 0;
@@ -111,6 +112,10 @@ public:
protected:
Tool *buildLinker() const override;
+
+private:
+ // We compute the SDK root dir in the ctor, and use it later.
+ std::string SDKRootDir;
};
// PS4-specific Toolchain class.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
index 3491de22d371..eb990eb80966 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
@@ -1,4 +1,4 @@
-//===--- RISCVToolchain.cpp - RISCV ToolChain Implementations ---*- C++ -*-===//
+//===--- RISCVToolchain.cpp - RISC-V ToolChain Implementations --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -46,17 +46,17 @@ bool RISCVToolChain::hasGCCToolchain(const Driver &D,
return llvm::sys::fs::exists(GCCDir);
}
-/// RISCV Toolchain
+/// RISC-V Toolchain
RISCVToolChain::RISCVToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
if (GCCInstallation.isValid()) {
Multilibs = GCCInstallation.getMultilibs();
- SelectedMultilib = GCCInstallation.getMultilib();
+ SelectedMultilibs.assign({GCCInstallation.getMultilib()});
path_list &Paths = getFilePaths();
// Add toolchain/multilib specific file paths.
- addMultilibsFilePaths(D, Multilibs, SelectedMultilib,
+ addMultilibsFilePaths(D, Multilibs, SelectedMultilibs.back(),
GCCInstallation.getInstallPath(), Paths);
getFilePaths().push_back(GCCInstallation.getInstallPath().str());
ToolChain::path_list &PPaths = getProgramPaths();
@@ -194,8 +194,8 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_u);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
Args.AddAllArgs(CmdArgs,
- {options::OPT_T_Group, options::OPT_e, options::OPT_s,
- options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
+ {options::OPT_T_Group, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_r});
// TODO: add C++ includes and libs if compiling C++.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
index 46b94bdb54e0..de6960726f1c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
@@ -1,4 +1,4 @@
-//===--- RISCVToolchain.h - RISCV ToolChain Implementations -----*- C++ -*-===//
+//===--- RISCVToolchain.h - RISC-V ToolChain Implementations ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h
index 5c1431f3270c..554d8a6929ac 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h
@@ -15,9 +15,9 @@
#include "clang/Driver/Options.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/VersionTuple.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace driver {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
index bb2904f76128..3cd613d9aac2 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
@@ -17,9 +17,6 @@ namespace driver {
namespace tools {
namespace SPIRV {
-void addTranslatorArgs(const llvm::opt::ArgList &InArgs,
- llvm::opt::ArgStringList &OutArgs);
-
void constructTranslateCommand(Compilation &C, const Tool &T,
const JobAction &JA, const InputInfo &Output,
const InputInfo &Input,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
index 7cc872c71775..335a5a88cdfa 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
@@ -121,8 +121,8 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
getToolChain().AddFilePathLibArgs(Args, CmdArgs);
- Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
- options::OPT_e, options::OPT_r});
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_L, options::OPT_T_Group, options::OPT_r});
bool NeedsSanitizerDeps = addSanitizerRuntimes(getToolChain(), Args, CmdArgs);
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
@@ -225,7 +225,6 @@ Solaris::Solaris(const Driver &D, const llvm::Triple &Triple,
SanitizerMask Solaris::getSupportedSanitizers() const {
const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
- const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
// FIXME: Omit X86_64 until 64-bit support is figured out.
if (IsX86) {
@@ -233,8 +232,6 @@ SanitizerMask Solaris::getSupportedSanitizers() const {
Res |= SanitizerKind::PointerCompare;
Res |= SanitizerKind::PointerSubtract;
}
- if (IsX86 || IsX86_64)
- Res |= SanitizerKind::Function;
Res |= SanitizerKind::Vptr;
return Res;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
index 9be239262db8..e72db208f587 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
@@ -33,7 +33,7 @@ VEToolChain::VEToolChain(const Driver &D, const llvm::Triple &Triple,
// These are OK.
// Default file paths are following:
- // ${RESOURCEDIR}/lib/linux/ve, (== getArchSpecificLibPath)
+ // ${RESOURCEDIR}/lib/linux/ve, (== getArchSpecificLibPaths)
// /lib/../lib64,
// /usr/lib/../lib64,
// ${BINPATH}/../lib,
@@ -45,12 +45,13 @@ VEToolChain::VEToolChain(const Driver &D, const llvm::Triple &Triple,
getFilePaths().clear();
// Add library directories:
- // ${BINPATH}/../lib/ve-unknown-linux-gnu, (== getStdlibPath)
- // ${RESOURCEDIR}/lib/linux/ve, (== getArchSpecificLibPath)
+ // ${BINPATH}/../lib/ve-unknown-linux-gnu, (== getStdlibPaths)
+ // ${RESOURCEDIR}/lib/linux/ve, (== getArchSpecificLibPaths)
// ${SYSROOT}/opt/nec/ve/lib,
for (auto &Path : getStdlibPaths())
getFilePaths().push_back(std::move(Path));
- getFilePaths().push_back(getArchSpecificLibPath());
+ for (const auto &Path : getArchSpecificLibPaths())
+ getFilePaths().push_back(Path);
getFilePaths().push_back(computeSysRoot() + "/opt/nec/ve/lib");
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
index a1c4cd9ef9c7..36bed3166ff3 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
@@ -101,13 +101,16 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
<< CM << A->getOption().getName();
}
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles))
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles, options::OPT_shared))
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(Crt1)));
if (Entry) {
CmdArgs.push_back(Args.MakeArgString("--entry"));
CmdArgs.push_back(Args.MakeArgString(Entry));
}
+ if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(Args.MakeArgString("-shared"));
+
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
@@ -209,8 +212,6 @@ bool WebAssembly::isPIEDefault(const llvm::opt::ArgList &Args) const {
bool WebAssembly::isPICDefaultForced() const { return false; }
-bool WebAssembly::IsIntegratedAssemblerDefault() const { return true; }
-
bool WebAssembly::hasBlocksRuntime() const { return false; }
// TODO: Support profiling.
@@ -459,6 +460,9 @@ SanitizerMask WebAssembly::getSupportedSanitizers() const {
if (getTriple().isOSEmscripten()) {
Res |= SanitizerKind::Vptr | SanitizerKind::Leak | SanitizerKind::Address;
}
+ // -fsanitize=function places two words before the function label, which are
+ // -unsupported.
+ Res &= ~SanitizerKind::Function;
return Res;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
index 5b9b8a0fe4e6..39589ffc1e3b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
@@ -47,7 +47,6 @@ private:
bool isPICDefault() const override;
bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefaultForced() const override;
- bool IsIntegratedAssemblerDefault() const override;
bool hasBlocksRuntime() const override;
bool SupportsProfiling() const override;
bool HasNativeLLVMSupport() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.h
index d9a05da3c678..f2e66350243e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.h
@@ -57,6 +57,7 @@ protected:
Tool *buildLinker() const override;
public:
+ bool IsIntegratedAssemblerDefault() const override { return false; }
bool isPICDefault() const override;
bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefaultForced() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp
index f921227076a5..db10567ca28e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp
@@ -11,11 +11,17 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/Support/WithColor.h"
+using namespace clang;
using namespace clang::driver;
+using namespace clang::driver::tools;
using namespace clang::driver::toolchains;
+using namespace llvm;
using namespace llvm::opt;
-using namespace clang;
+using namespace llvm::sys;
ZOS::ZOS(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: ToolChain(D, Triple, Args) {}
@@ -31,3 +37,305 @@ void ZOS::addClangTargetOptions(const ArgList &DriverArgs,
options::OPT_fno_aligned_allocation))
CC1Args.push_back("-faligned-alloc-unavailable");
}
+
+void zos::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ // Specify assembler output file.
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ }
+
+ // Specify assembler input file.
+ // The system assembler on z/OS takes exactly one input file. The driver is
+ // expected to invoke as(1) separately for each assembler source input file.
+ if (Inputs.size() != 1)
+ llvm_unreachable("Invalid number of input files.");
+ const InputInfo &II = Inputs[0];
+ assert((II.isFilename() || II.isNothing()) && "Invalid input.");
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
+}
+
+static std::string getLEHLQ(const ArgList &Args) {
+ if (Args.hasArg(options::OPT_mzos_hlq_le_EQ)) {
+ Arg *LEHLQArg = Args.getLastArg(options::OPT_mzos_hlq_le_EQ);
+ StringRef HLQ = LEHLQArg->getValue();
+ if (!HLQ.empty())
+ return HLQ.str();
+ }
+ return "CEE";
+}
+
+static std::string getClangHLQ(const ArgList &Args) {
+ if (Args.hasArg(options::OPT_mzos_hlq_clang_EQ)) {
+ Arg *ClangHLQArg = Args.getLastArg(options::OPT_mzos_hlq_clang_EQ);
+ StringRef HLQ = ClangHLQArg->getValue();
+ if (!HLQ.empty())
+ return HLQ.str();
+ }
+ return getLEHLQ(Args);
+}
+
+static std::string getCSSHLQ(const ArgList &Args) {
+ if (Args.hasArg(options::OPT_mzos_hlq_csslib_EQ)) {
+ Arg *CsslibHLQArg = Args.getLastArg(options::OPT_mzos_hlq_csslib_EQ);
+ StringRef HLQ = CsslibHLQArg->getValue();
+ if (!HLQ.empty())
+ return HLQ.str();
+ }
+ return "SYS1";
+}
+
+void zos::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs, const ArgList &Args,
+ const char *LinkingOutput) const {
+ const ZOS &ToolChain = static_cast<const ZOS &>(getToolChain());
+ ArgStringList CmdArgs;
+
+ const bool IsSharedLib =
+ Args.hasFlag(options::OPT_shared, options::OPT_static, false);
+
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ }
+
+ SmallString<128> LinkerOptions;
+ LinkerOptions = "AMODE=";
+ LinkerOptions += "64";
+ LinkerOptions += ",LIST";
+ LinkerOptions += ",DYNAM=DLL";
+ LinkerOptions += ",MSGLEVEL=4";
+ LinkerOptions += ",CASE=MIXED";
+ LinkerOptions += ",REUS=RENT";
+
+ CmdArgs.push_back("-b");
+ CmdArgs.push_back(Args.MakeArgString(LinkerOptions));
+
+ if (!IsSharedLib) {
+ CmdArgs.push_back("-e");
+ CmdArgs.push_back("CELQSTRT");
+
+ CmdArgs.push_back("-O");
+ CmdArgs.push_back("CELQSTRT");
+
+ CmdArgs.push_back("-u");
+ CmdArgs.push_back("CELQMAIN");
+ }
+
+ // Generate side file if -shared option is present.
+ if (IsSharedLib) {
+ StringRef OutputName = Output.getFilename();
+ // Strip away the last file suffix in presence from output name and add
+ // a new .x suffix.
+ size_t Suffix = OutputName.find_last_of(".");
+ const char *SideDeckName =
+ Args.MakeArgString(OutputName.substr(0, Suffix) + ".x");
+ CmdArgs.push_back("-x");
+ CmdArgs.push_back(SideDeckName);
+ } else {
+ // We need to direct side file to /dev/null to suppress linker warning when
+ // the object file contains exported symbols, and -shared or
+ // -Wl,-x<sidedeck>.x is not specified.
+ CmdArgs.push_back("-x");
+ CmdArgs.push_back("/dev/null");
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_u);
+
+ // Add archive library search paths.
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+
+ // Specify linker input file(s)
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
+
+ // z/OS tool chain depends on LE data sets and the CSSLIB data set.
+ // These data sets can have different high level qualifiers (HLQs)
+ // as each installation can define them differently.
+
+ std::string LEHLQ = getLEHLQ(Args);
+ std::string CsslibHLQ = getCSSHLQ(Args);
+
+ StringRef ld_env_var = StringRef(getenv("_LD_SYSLIB")).trim();
+ if (ld_env_var.empty()) {
+ CmdArgs.push_back("-S");
+ CmdArgs.push_back(Args.MakeArgString("//'" + LEHLQ + ".SCEEBND2'"));
+ CmdArgs.push_back("-S");
+ CmdArgs.push_back(Args.MakeArgString("//'" + CsslibHLQ + ".CSSLIB'"));
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ ld_env_var = StringRef(getenv("_LD_SIDE_DECKS")).trim();
+ if (ld_env_var.empty()) {
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + LEHLQ + ".SCEELIB(CELQS001)'"));
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + LEHLQ + ".SCEELIB(CELQS003)'"));
+ } else {
+ char *ld_side_deck = strdup(ld_env_var.str().c_str());
+ ld_side_deck = strtok(ld_side_deck, ":");
+ while (ld_side_deck != nullptr) {
+ CmdArgs.push_back(ld_side_deck);
+ ld_side_deck = strtok(nullptr, ":");
+ }
+ }
+ }
+ // Link libc++ library
+ if (ToolChain.ShouldLinkCXXStdlib(Args)) {
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ }
+
+ // Specify compiler-rt library path for linker
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
+ AddRunTimeLibs(ToolChain, ToolChain.getDriver(), CmdArgs, Args);
+
+ const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
+}
+
+ToolChain::RuntimeLibType ZOS::GetDefaultRuntimeLibType() const {
+ return ToolChain::RLT_CompilerRT;
+}
+
+ToolChain::CXXStdlibType ZOS::GetDefaultCXXStdlibType() const {
+ return ToolChain::CST_Libcxx;
+}
+
+void ZOS::AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ switch (GetCXXStdlibType(Args)) {
+ case ToolChain::CST_Libstdcxx:
+ llvm::report_fatal_error("linking libstdc++ is unimplemented on z/OS");
+ break;
+ case ToolChain::CST_Libcxx: {
+ std::string ClangHLQ = getClangHLQ(Args);
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + ClangHLQ + ".SCEELIB(CRTDQCXE)'"));
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + ClangHLQ + ".SCEELIB(CRTDQCXS)'"));
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + ClangHLQ + ".SCEELIB(CRTDQCXP)'"));
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + ClangHLQ + ".SCEELIB(CRTDQCXA)'"));
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + ClangHLQ + ".SCEELIB(CRTDQXLA)'"));
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + ClangHLQ + ".SCEELIB(CRTDQUNW)'"));
+ } break;
+ }
+}
+
+auto ZOS::buildAssembler() const -> Tool * { return new zos::Assembler(*this); }
+
+auto ZOS::buildLinker() const -> Tool * { return new zos::Linker(*this); }
+
+void ZOS::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ const Driver &D = getDriver();
+
+ // resolve ResourceDir
+ std::string ResourceDir(D.ResourceDir);
+
+ // zos_wrappers must take highest precedence
+
+ // - <clang>/lib/clang/<ver>/include/zos_wrappers
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(ResourceDir);
+ path::append(P, "include", "zos_wrappers");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+
+ // - <clang>/lib/clang/<ver>/include
+ SmallString<128> P2(ResourceDir);
+ path::append(P2, "include");
+ addSystemInclude(DriverArgs, CC1Args, P2.str());
+ }
+
+ // - /usr/include
+ if (Arg *SysIncludeArg =
+ DriverArgs.getLastArg(options::OPT_mzos_sys_include_EQ)) {
+ StringRef SysInclude = SysIncludeArg->getValue();
+
+ // fall back to the default include path
+ if (!SysInclude.empty()) {
+
+ // -mzos-sys-include opton can have colon separated
+ // list of paths, so we need to parse the value.
+ StringRef PathLE(SysInclude);
+ size_t Colon = PathLE.find(':');
+ if (Colon == StringRef::npos) {
+ addSystemInclude(DriverArgs, CC1Args, PathLE.str());
+ return;
+ }
+
+ while (Colon != StringRef::npos) {
+ SmallString<128> P = PathLE.substr(0, Colon);
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ PathLE = PathLE.substr(Colon + 1);
+ Colon = PathLE.find(':');
+ }
+ if (PathLE.size())
+ addSystemInclude(DriverArgs, CC1Args, PathLE.str());
+
+ return;
+ }
+ }
+
+ addSystemInclude(DriverArgs, CC1Args, "/usr/include");
+}
+
+void ZOS::TryAddIncludeFromPath(llvm::SmallString<128> Path,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ if (!getVFS().exists(Path)) {
+ if (DriverArgs.hasArg(options::OPT_v))
+ WithColor::warning(errs(), "Clang")
+ << "ignoring nonexistent directory \"" << Path << "\"\n";
+ if (!DriverArgs.hasArg(options::OPT__HASH_HASH_HASH))
+ return;
+ }
+ addSystemInclude(DriverArgs, CC1Args, Path);
+}
+
+void ZOS::AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx) ||
+ DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx: {
+ // <install>/bin/../include/c++/v1
+ llvm::SmallString<128> InstallBin =
+ llvm::StringRef(getDriver().getInstalledDir());
+ llvm::sys::path::append(InstallBin, "..", "include", "c++", "v1");
+ TryAddIncludeFromPath(InstallBin, DriverArgs, CC1Args);
+ break;
+ }
+ case ToolChain::CST_Libstdcxx:
+ llvm::report_fatal_error(
+ "picking up libstdc++ headers is unimplemented on z/OS");
+ break;
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h
index 53138306fd41..548b432ade76 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h
@@ -14,6 +14,39 @@
namespace clang {
namespace driver {
+namespace tools {
+
+/// zos -- Directly call system default assembler and linker.
+namespace zos {
+
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+public:
+ Assembler(const ToolChain &TC) : Tool("zos::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("zos::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+} // end namespace zos
+} // end namespace tools
+
namespace toolchains {
class LLVM_LIBRARY_VISIBILITY ZOS : public ToolChain {
@@ -28,13 +61,34 @@ public:
}
bool isPICDefaultForced() const override { return false; }
- bool IsIntegratedAssemblerDefault() const override { return true; }
+ void TryAddIncludeFromPath(llvm::SmallString<128> Path,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
unsigned GetDefaultDwarfVersion() const override { return 4; }
+ CXXStdlibType GetDefaultCXXStdlibType() const override;
+
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override;
void addClangTargetOptions(
const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadingKind) const override;
+
+ const char *getDefaultLinker() const override { return "/bin/ld"; }
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/Types.cpp b/contrib/llvm-project/clang/lib/Driver/Types.cpp
index a890cc58ee42..7d6308d757bc 100644
--- a/contrib/llvm-project/clang/lib/Driver/Types.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Types.cpp
@@ -331,6 +331,10 @@ types::ID types::lookupTypeForExtension(llvm::StringRef Ext) {
.Case("cui", TY_PP_CUDA)
.Case("cxx", TY_CXX)
.Case("CXX", TY_CXX)
+ .Case("F03", TY_Fortran)
+ .Case("f03", TY_PP_Fortran)
+ .Case("F08", TY_Fortran)
+ .Case("f08", TY_PP_Fortran)
.Case("F90", TY_Fortran)
.Case("f90", TY_PP_Fortran)
.Case("F95", TY_Fortran)
diff --git a/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp b/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp
index cf9b5780c455..8c5134e25013 100644
--- a/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp
@@ -22,12 +22,7 @@ using namespace clang;
using namespace clang::driver;
using namespace llvm::opt;
-namespace {
-constexpr char XRayInstrumentOption[] = "-fxray-instrument";
-constexpr char XRayInstructionThresholdOption[] =
- "-fxray-instruction-threshold=";
-constexpr const char *const XRaySupportedModes[] = {"xray-fdr", "xray-basic"};
-} // namespace
+constexpr const char *XRaySupportedModes[] = {"xray-fdr", "xray-basic"};
XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
const Driver &D = TC.getDriver();
@@ -35,79 +30,49 @@ XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
if (!Args.hasFlag(options::OPT_fxray_instrument,
options::OPT_fno_xray_instrument, false))
return;
- if (Triple.getOS() == llvm::Triple::Linux) {
+ XRayInstrument = Args.getLastArg(options::OPT_fxray_instrument);
+ if (Triple.isMacOSX()) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::x86_64:
+ break;
+ default:
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << XRayInstrument->getSpelling() << Triple.str();
+ break;
+ }
+ } else if (Triple.isOSBinFormatELF()) {
switch (Triple.getArch()) {
case llvm::Triple::x86_64:
case llvm::Triple::arm:
case llvm::Triple::aarch64:
case llvm::Triple::hexagon:
case llvm::Triple::ppc64le:
+ case llvm::Triple::loongarch64:
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
break;
default:
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
- }
- } else if (Triple.isOSFreeBSD() || Triple.isOSOpenBSD() ||
- Triple.isOSNetBSD() || Triple.isMacOSX()) {
- if (Triple.getArch() != llvm::Triple::x86_64) {
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
- }
- } else if (Triple.getOS() == llvm::Triple::Fuchsia) {
- switch (Triple.getArch()) {
- case llvm::Triple::x86_64:
- case llvm::Triple::aarch64:
- break;
- default:
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << XRayInstrument->getSpelling() << Triple.str();
}
} else {
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << XRayInstrument->getSpelling() << Triple.str();
}
// Both XRay and -fpatchable-function-entry use
// TargetOpcode::PATCHABLE_FUNCTION_ENTER.
if (Arg *A = Args.getLastArg(options::OPT_fpatchable_function_entry_EQ))
D.Diag(diag::err_drv_argument_not_allowed_with)
- << "-fxray-instrument" << A->getSpelling();
-
- XRayInstrument = true;
- if (const Arg *A =
- Args.getLastArg(options::OPT_fxray_instruction_threshold_EQ)) {
- StringRef S = A->getValue();
- if (S.getAsInteger(0, InstructionThreshold) || InstructionThreshold < 0)
- D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
- }
-
- // By default, the back-end will not emit the lowering for XRay customevent
- // calls if the function is not instrumented. In the future we will change
- // this default to be the reverse, but in the meantime we're going to
- // introduce the new functionality behind a flag.
- if (Args.hasFlag(options::OPT_fxray_always_emit_customevents,
- options::OPT_fno_xray_always_emit_customevents, false))
- XRayAlwaysEmitCustomEvents = true;
-
- if (Args.hasFlag(options::OPT_fxray_always_emit_typedevents,
- options::OPT_fno_xray_always_emit_typedevents, false))
- XRayAlwaysEmitTypedEvents = true;
+ << XRayInstrument->getSpelling() << A->getSpelling();
if (!Args.hasFlag(options::OPT_fxray_link_deps,
- options::OPT_fnoxray_link_deps, true))
+ options::OPT_fno_xray_link_deps, true))
XRayRT = false;
- if (Args.hasFlag(options::OPT_fxray_ignore_loops,
- options::OPT_fno_xray_ignore_loops, false))
- XRayIgnoreLoops = true;
-
- XRayFunctionIndex = Args.hasFlag(options::OPT_fxray_function_index,
- options::OPT_fno_xray_function_index, true);
-
auto Bundles =
Args.getAllArgValues(options::OPT_fxray_instrumentation_bundle);
if (Bundles.empty())
@@ -186,21 +151,6 @@ XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
Modes.push_back(std::string(M));
}
- if (const Arg *A = Args.getLastArg(options::OPT_fxray_function_groups)) {
- StringRef S = A->getValue();
- if (S.getAsInteger(0, XRayFunctionGroups) || XRayFunctionGroups < 1)
- D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
- }
-
- if (const Arg *A =
- Args.getLastArg(options::OPT_fxray_selected_function_group)) {
- StringRef S = A->getValue();
- if (S.getAsInteger(0, XRaySelectedFunctionGroup) ||
- XRaySelectedFunctionGroup < 0 ||
- XRaySelectedFunctionGroup >= XRayFunctionGroups)
- D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
- }
-
// Then we want to sort and unique the modes we've collected.
llvm::sort(Modes);
Modes.erase(std::unique(Modes.begin(), Modes.end()), Modes.end());
@@ -210,34 +160,52 @@ void XRayArgs::addArgs(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs, types::ID InputType) const {
if (!XRayInstrument)
return;
+ const Driver &D = TC.getDriver();
+ XRayInstrument->render(Args, CmdArgs);
- CmdArgs.push_back(XRayInstrumentOption);
-
- if (XRayAlwaysEmitCustomEvents)
- CmdArgs.push_back("-fxray-always-emit-customevents");
-
- if (XRayAlwaysEmitTypedEvents)
- CmdArgs.push_back("-fxray-always-emit-typedevents");
-
- if (XRayIgnoreLoops)
- CmdArgs.push_back("-fxray-ignore-loops");
+ // By default, the back-end will not emit the lowering for XRay customevent
+ // calls if the function is not instrumented. In the future we will change
+ // this default to be the reverse, but in the meantime we're going to
+ // introduce the new functionality behind a flag.
+ Args.addOptInFlag(CmdArgs, options::OPT_fxray_always_emit_customevents,
+ options::OPT_fno_xray_always_emit_customevents);
- if (!XRayFunctionIndex)
- CmdArgs.push_back("-fno-xray-function-index");
+ Args.addOptInFlag(CmdArgs, options::OPT_fxray_always_emit_typedevents,
+ options::OPT_fno_xray_always_emit_typedevents);
+ Args.addOptInFlag(CmdArgs, options::OPT_fxray_ignore_loops,
+ options::OPT_fno_xray_ignore_loops);
+ Args.addOptOutFlag(CmdArgs, options::OPT_fxray_function_index,
+ options::OPT_fno_xray_function_index);
- if (XRayFunctionGroups > 1) {
- CmdArgs.push_back(Args.MakeArgString(Twine("-fxray-function-groups=") +
- Twine(XRayFunctionGroups)));
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fxray_instruction_threshold_EQ)) {
+ int Value;
+ StringRef S = A->getValue();
+ if (S.getAsInteger(0, Value) || Value < 0)
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ else
+ A->render(Args, CmdArgs);
}
- if (XRaySelectedFunctionGroup != 0) {
- CmdArgs.push_back(
- Args.MakeArgString(Twine("-fxray-selected-function-group=") +
- Twine(XRaySelectedFunctionGroup)));
+ int XRayFunctionGroups = 1;
+ int XRaySelectedFunctionGroup = 0;
+ if (const Arg *A = Args.getLastArg(options::OPT_fxray_function_groups)) {
+ StringRef S = A->getValue();
+ if (S.getAsInteger(0, XRayFunctionGroups) || XRayFunctionGroups < 1)
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ if (XRayFunctionGroups > 1)
+ A->render(Args, CmdArgs);
+ }
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fxray_selected_function_group)) {
+ StringRef S = A->getValue();
+ if (S.getAsInteger(0, XRaySelectedFunctionGroup) ||
+ XRaySelectedFunctionGroup < 0 ||
+ XRaySelectedFunctionGroup >= XRayFunctionGroups)
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ if (XRaySelectedFunctionGroup != 0)
+ A->render(Args, CmdArgs);
}
-
- CmdArgs.push_back(Args.MakeArgString(Twine(XRayInstructionThresholdOption) +
- Twine(InstructionThreshold)));
for (const auto &Always : AlwaysInstrumentFiles) {
SmallString<64> AlwaysInstrumentOpt("-fxray-always-instrument=");
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp
index 553b7bbe710f..10e79b37de73 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp
@@ -249,10 +249,7 @@ APIRecord *APISet::findRecordForUSR(StringRef USR) const {
if (USR.empty())
return nullptr;
- auto It = USRBasedLookupTable.find(USR);
- if (It != USRBasedLookupTable.end())
- return It->second;
- return nullptr;
+ return USRBasedLookupTable.lookup(USR);
}
StringRef APISet::recordUSR(const Decl *D) {
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/APIIgnoresList.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/APIIgnoresList.cpp
index 1d65ae2b8e31..d6bbc6692d2b 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/APIIgnoresList.cpp
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/APIIgnoresList.cpp
@@ -31,20 +31,29 @@ std::error_code IgnoresFileNotFound::convertToErrorCode() const {
return llvm::inconvertibleErrorCode();
}
-Expected<APIIgnoresList> APIIgnoresList::create(StringRef IgnoresFilePath,
- FileManager &FM) {
- auto BufferOrErr = FM.getBufferForFile(IgnoresFilePath);
- if (!BufferOrErr)
- return make_error<IgnoresFileNotFound>(IgnoresFilePath);
-
- auto Buffer = std::move(BufferOrErr.get());
+Expected<APIIgnoresList>
+APIIgnoresList::create(const FilePathList &IgnoresFilePathList,
+ FileManager &FM) {
SmallVector<StringRef, 32> Lines;
- Buffer->getBuffer().split(Lines, '\n', /*MaxSplit*/ -1, /*KeepEmpty*/ false);
- // Symbol names don't have spaces in them, let's just remove these in case the
- // input is slighlty malformed.
+ BufferList symbolBufferList;
+
+ for (const auto &CurrentIgnoresFilePath : IgnoresFilePathList) {
+ auto BufferOrErr = FM.getBufferForFile(CurrentIgnoresFilePath);
+
+ if (!BufferOrErr)
+ return make_error<IgnoresFileNotFound>(CurrentIgnoresFilePath);
+
+ auto Buffer = std::move(BufferOrErr.get());
+ Buffer->getBuffer().split(Lines, '\n', /*MaxSplit*/ -1,
+ /*KeepEmpty*/ false);
+ symbolBufferList.push_back(std::move(Buffer));
+ }
+
+ // Symbol names don't have spaces in them, let's just remove these in case
+ // the input is slightly malformed.
transform(Lines, Lines.begin(), [](StringRef Line) { return Line.trim(); });
sort(Lines);
- return APIIgnoresList(std::move(Lines), std::move(Buffer));
+ return APIIgnoresList(std::move(Lines), std::move(symbolBufferList));
}
bool APIIgnoresList::shouldIgnore(StringRef SymbolName) const {
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/AvailabilityInfo.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/AvailabilityInfo.cpp
index ada64cfb92e6..1df852fdbf93 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/AvailabilityInfo.cpp
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/AvailabilityInfo.cpp
@@ -42,8 +42,8 @@ AvailabilitySet::AvailabilitySet(const Decl *Decl) {
Availability->Obsoleted = Attr->getObsoleted();
} else {
Availabilities.emplace_back(Domain, Attr->getIntroduced(),
- Attr->getDeprecated(),
- Attr->getObsoleted());
+ Attr->getDeprecated(), Attr->getObsoleted(),
+ Attr->getUnavailable());
}
}
}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp
index 12c91c582aa9..1e52f221c798 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/ExtractAPI/DeclarationFragments.h"
-#include "TypedefUnderlyingTypeResolver.h"
+#include "clang/ExtractAPI/TypedefUnderlyingTypeResolver.h"
#include "clang/Index/USRGeneration.h"
#include "llvm/ADT/StringSwitch.h"
@@ -160,14 +160,26 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
DeclarationFragments Fragments;
// Declaration fragments of a pointer type is the declaration fragments of
- // the pointee type followed by a `*`, except for Objective-C `id` and `Class`
- // pointers, where we do not spell out the `*`.
- if (T->isPointerType() ||
- (T->isObjCObjectPointerType() &&
- !T->getAs<ObjCObjectPointerType>()->isObjCIdOrClassType())) {
+ // the pointee type followed by a `*`.
+ if (T->isPointerType())
return Fragments
.append(getFragmentsForType(T->getPointeeType(), Context, After))
.append(" *", DeclarationFragments::FragmentKind::Text);
+
+ // For Objective-C `id` and `Class` pointers
+ // we do not spell out the `*`.
+ if (T->isObjCObjectPointerType() &&
+ !T->getAs<ObjCObjectPointerType>()->isObjCIdOrClassType()) {
+
+ Fragments.append(getFragmentsForType(T->getPointeeType(), Context, After));
+
+ // id<protocol> is a qualified id type
+ // id<protocol>* is not a qualified id type
+ if (!T->getAs<ObjCObjectPointerType>()->isObjCQualifiedIdType()) {
+ Fragments.append(" *", DeclarationFragments::FragmentKind::Text);
+ }
+
+ return Fragments;
}
// Declaration fragments of a lvalue reference type is the declaration
@@ -243,26 +255,30 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
return Fragments.append(getFragmentsForType(ET->desugar(), Context, After));
}
- // Everything we care about has been handled now, reduce to the canonical
- // unqualified base type.
- QualType Base = T->getCanonicalTypeUnqualified();
-
- // Render Objective-C `id`/`instancetype` as keywords.
- if (T->isObjCIdType())
- return Fragments.append(Base.getAsString(),
- DeclarationFragments::FragmentKind::Keyword);
-
// If the type is a typedefed type, get the underlying TypedefNameDecl for a
// direct reference to the typedef instead of the wrapped type.
+
+ // 'id' type is a typedef for an ObjCObjectPointerType
+ // we treat it as a typedef
if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(T)) {
const TypedefNameDecl *Decl = TypedefTy->getDecl();
TypedefUnderlyingTypeResolver TypedefResolver(Context);
std::string USR = TypedefResolver.getUSRForType(QualType(T, 0));
+
+ if (T->isObjCIdType()) {
+ return Fragments.append(Decl->getName(),
+ DeclarationFragments::FragmentKind::Keyword);
+ }
+
return Fragments.append(
Decl->getName(), DeclarationFragments::FragmentKind::TypeIdentifier,
USR, TypedefResolver.getUnderlyingTypeDecl(QualType(T, 0)));
}
+ // Everything we care about has been handled now, reduce to the canonical
+ // unqualified base type.
+ QualType Base = T->getCanonicalTypeUnqualified();
+
// If the base type is a TagType (struct/interface/union/class/enum), let's
// get the underlying Decl for better names and USRs.
if (const TagType *TagTy = dyn_cast<TagType>(Base)) {
@@ -441,7 +457,7 @@ DeclarationFragmentsBuilder::getFragmentsForFunction(const FunctionDecl *Func) {
Fragments.append(")", DeclarationFragments::FragmentKind::Text);
// FIXME: Handle exception specifiers: throw, noexcept
- return Fragments;
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
}
DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForEnumConstant(
@@ -470,7 +486,7 @@ DeclarationFragmentsBuilder::getFragmentsForEnum(const EnumDecl *EnumDecl) {
getFragmentsForType(IntegerType, EnumDecl->getASTContext(), After))
.append(std::move(After));
- return Fragments;
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
}
DeclarationFragments
@@ -493,7 +509,8 @@ DeclarationFragmentsBuilder::getFragmentsForStruct(const RecordDecl *Record) {
if (!Record->getName().empty())
Fragments.appendSpace().append(
Record->getName(), DeclarationFragments::FragmentKind::Identifier);
- return Fragments;
+
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
}
DeclarationFragments
@@ -621,7 +638,7 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCProperty(
// Build the Objective-C property keyword.
Fragments.append("@property", DeclarationFragments::FragmentKind::Keyword);
- const auto Attributes = Property->getPropertyAttributes();
+ const auto Attributes = Property->getPropertyAttributesAsWritten();
// Build the attributes if there is any associated with the property.
if (Attributes != ObjCPropertyAttribute::kind_noattr) {
// No leading comma for the first attribute.
@@ -743,7 +760,7 @@ DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForTypedef(
.appendSpace()
.append(Decl->getName(), DeclarationFragments::FragmentKind::Identifier);
- return Fragments;
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
}
template <typename FunctionT>
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
index 644845efb819..eb533a934367 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
@@ -12,8 +12,10 @@
///
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
@@ -26,13 +28,16 @@
#include "clang/Frontend/ASTConsumers.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -219,11 +224,48 @@ private:
llvm::DenseSet<const FileEntry *> ExternalFileEntries;
};
+struct BatchExtractAPIVisitor : ExtractAPIVisitor<BatchExtractAPIVisitor> {
+ bool shouldDeclBeIncluded(const Decl *D) const {
+ bool ShouldBeIncluded = true;
+ // Check that we have the definition for redeclarable types.
+ if (auto *TD = llvm::dyn_cast<TagDecl>(D))
+ ShouldBeIncluded = TD->isThisDeclarationADefinition();
+ else if (auto *Interface = llvm::dyn_cast<ObjCInterfaceDecl>(D))
+ ShouldBeIncluded = Interface->isThisDeclarationADefinition();
+ else if (auto *Protocol = llvm::dyn_cast<ObjCProtocolDecl>(D))
+ ShouldBeIncluded = Protocol->isThisDeclarationADefinition();
+
+ ShouldBeIncluded = ShouldBeIncluded && LCF(D->getLocation());
+ return ShouldBeIncluded;
+ }
+
+ BatchExtractAPIVisitor(LocationFileChecker &LCF, ASTContext &Context,
+ APISet &API)
+ : ExtractAPIVisitor<BatchExtractAPIVisitor>(Context, API), LCF(LCF) {}
+
+private:
+ LocationFileChecker &LCF;
+};
+
+class WrappingExtractAPIConsumer : public ASTConsumer {
+public:
+ WrappingExtractAPIConsumer(ASTContext &Context, APISet &API)
+ : Visitor(Context, API) {}
+
+ void HandleTranslationUnit(ASTContext &Context) override {
+ // Use ExtractAPIVisitor to traverse symbol declarations in the context.
+ Visitor.TraverseDecl(Context.getTranslationUnitDecl());
+ }
+
+private:
+ ExtractAPIVisitor<> Visitor;
+};
+
class ExtractAPIConsumer : public ASTConsumer {
public:
ExtractAPIConsumer(ASTContext &Context,
std::unique_ptr<LocationFileChecker> LCF, APISet &API)
- : Visitor(Context, *LCF, API), LCF(std::move(LCF)) {}
+ : Visitor(*LCF, Context, API), LCF(std::move(LCF)) {}
void HandleTranslationUnit(ASTContext &Context) override {
// Use ExtractAPIVisitor to traverse symbol declarations in the context.
@@ -231,15 +273,14 @@ public:
}
private:
- ExtractAPIVisitor Visitor;
+ BatchExtractAPIVisitor Visitor;
std::unique_ptr<LocationFileChecker> LCF;
};
class MacroCallback : public PPCallbacks {
public:
- MacroCallback(const SourceManager &SM, LocationFileChecker &LCF, APISet &API,
- Preprocessor &PP)
- : SM(SM), LCF(LCF), API(API), PP(PP) {}
+ MacroCallback(const SourceManager &SM, APISet &API, Preprocessor &PP)
+ : SM(SM), API(API), PP(PP) {}
void MacroDefined(const Token &MacroNameToken,
const MacroDirective *MD) override {
@@ -279,7 +320,7 @@ public:
if (PM.MD->getMacroInfo()->isUsedForHeaderGuard())
continue;
- if (!LCF(PM.MacroNameToken.getLocation()))
+ if (!shouldMacroBeIncluded(PM))
continue;
StringRef Name = PM.MacroNameToken.getIdentifierInfo()->getName();
@@ -297,7 +338,7 @@ public:
PendingMacros.clear();
}
-private:
+protected:
struct PendingMacro {
Token MacroNameToken;
const MacroDirective *MD;
@@ -306,18 +347,58 @@ private:
: MacroNameToken(MacroNameToken), MD(MD) {}
};
+ virtual bool shouldMacroBeIncluded(const PendingMacro &PM) { return true; }
+
const SourceManager &SM;
- LocationFileChecker &LCF;
APISet &API;
Preprocessor &PP;
llvm::SmallVector<PendingMacro> PendingMacros;
};
+class APIMacroCallback : public MacroCallback {
+public:
+ APIMacroCallback(const SourceManager &SM, APISet &API, Preprocessor &PP,
+ LocationFileChecker &LCF)
+ : MacroCallback(SM, API, PP), LCF(LCF) {}
+
+ bool shouldMacroBeIncluded(const PendingMacro &PM) override {
+ // Do not include macros from external files
+ return LCF(PM.MacroNameToken.getLocation());
+ }
+
+private:
+ LocationFileChecker &LCF;
+};
+
} // namespace
+void ExtractAPIActionBase::ImplEndSourceFileAction() {
+ if (!OS)
+ return;
+
+ // Setup a SymbolGraphSerializer to write out collected API information in
+ // the Symbol Graph format.
+ // FIXME: Make the kind of APISerializer configurable.
+ SymbolGraphSerializer SGSerializer(*API, IgnoresList);
+ SGSerializer.serialize(*OS);
+ OS.reset();
+}
+
+std::unique_ptr<raw_pwrite_stream>
+ExtractAPIAction::CreateOutputFile(CompilerInstance &CI, StringRef InFile) {
+ std::unique_ptr<raw_pwrite_stream> OS;
+ OS = CI.createDefaultOutputFile(/*Binary=*/false, InFile,
+ /*Extension=*/"json",
+ /*RemoveFileOnSignal=*/false);
+ if (!OS)
+ return nullptr;
+ return OS;
+}
+
std::unique_ptr<ASTConsumer>
ExtractAPIAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
OS = CreateOutputFile(CI, InFile);
+
if (!OS)
return nullptr;
@@ -331,17 +412,17 @@ ExtractAPIAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
auto LCF = std::make_unique<LocationFileChecker>(CI, KnownInputFiles);
- CI.getPreprocessor().addPPCallbacks(std::make_unique<MacroCallback>(
- CI.getSourceManager(), *LCF, *API, CI.getPreprocessor()));
+ CI.getPreprocessor().addPPCallbacks(std::make_unique<APIMacroCallback>(
+ CI.getSourceManager(), *API, CI.getPreprocessor(), *LCF));
// Do not include location in anonymous decls.
PrintingPolicy Policy = CI.getASTContext().getPrintingPolicy();
Policy.AnonymousTagLocations = false;
CI.getASTContext().setPrintingPolicy(Policy);
- if (!CI.getFrontendOpts().ExtractAPIIgnoresFile.empty()) {
+ if (!CI.getFrontendOpts().ExtractAPIIgnoresFileList.empty()) {
llvm::handleAllErrors(
- APIIgnoresList::create(CI.getFrontendOpts().ExtractAPIIgnoresFile,
+ APIIgnoresList::create(CI.getFrontendOpts().ExtractAPIIgnoresFileList,
CI.getFileManager())
.moveInto(IgnoresList),
[&CI](const IgnoresFileNotFound &Err) {
@@ -412,23 +493,88 @@ bool ExtractAPIAction::PrepareToExecuteAction(CompilerInstance &CI) {
return true;
}
-void ExtractAPIAction::EndSourceFileAction() {
+void ExtractAPIAction::EndSourceFileAction() { ImplEndSourceFileAction(); }
+
+std::unique_ptr<ASTConsumer>
+WrappingExtractAPIAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ auto OtherConsumer = WrapperFrontendAction::CreateASTConsumer(CI, InFile);
+ if (!OtherConsumer)
+ return nullptr;
+
+ CreatedASTConsumer = true;
+
+ OS = CreateOutputFile(CI, InFile);
if (!OS)
- return;
+ return nullptr;
- // Setup a SymbolGraphSerializer to write out collected API information in
- // the Symbol Graph format.
- // FIXME: Make the kind of APISerializer configurable.
- SymbolGraphSerializer SGSerializer(*API, IgnoresList);
- SGSerializer.serialize(*OS);
- OS.reset();
+ auto ProductName = CI.getFrontendOpts().ProductName;
+
+ // Now that we have enough information about the language options and the
+ // target triple, let's create the APISet before anyone uses it.
+ API = std::make_unique<APISet>(
+ CI.getTarget().getTriple(),
+ CI.getFrontendOpts().Inputs.back().getKind().getLanguage(), ProductName);
+
+ CI.getPreprocessor().addPPCallbacks(std::make_unique<MacroCallback>(
+ CI.getSourceManager(), *API, CI.getPreprocessor()));
+
+ // Do not include location in anonymous decls.
+ PrintingPolicy Policy = CI.getASTContext().getPrintingPolicy();
+ Policy.AnonymousTagLocations = false;
+ CI.getASTContext().setPrintingPolicy(Policy);
+
+ if (!CI.getFrontendOpts().ExtractAPIIgnoresFileList.empty()) {
+ llvm::handleAllErrors(
+ APIIgnoresList::create(CI.getFrontendOpts().ExtractAPIIgnoresFileList,
+ CI.getFileManager())
+ .moveInto(IgnoresList),
+ [&CI](const IgnoresFileNotFound &Err) {
+ CI.getDiagnostics().Report(
+ diag::err_extract_api_ignores_file_not_found)
+ << Err.Path;
+ });
+ }
+
+ auto WrappingConsumer =
+ std::make_unique<WrappingExtractAPIConsumer>(CI.getASTContext(), *API);
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers;
+ Consumers.push_back(std::move(OtherConsumer));
+ Consumers.push_back(std::move(WrappingConsumer));
+
+ return std::make_unique<MultiplexConsumer>(std::move(Consumers));
+}
+
+void WrappingExtractAPIAction::EndSourceFileAction() {
+ // Invoke wrapped action's method.
+ WrapperFrontendAction::EndSourceFileAction();
+
+ if (CreatedASTConsumer) {
+ ImplEndSourceFileAction();
+ }
}
std::unique_ptr<raw_pwrite_stream>
-ExtractAPIAction::CreateOutputFile(CompilerInstance &CI, StringRef InFile) {
- std::unique_ptr<raw_pwrite_stream> OS =
- CI.createDefaultOutputFile(/*Binary=*/false, InFile, /*Extension=*/"json",
- /*RemoveFileOnSignal=*/false);
+WrappingExtractAPIAction::CreateOutputFile(CompilerInstance &CI,
+ StringRef InFile) {
+ std::unique_ptr<raw_pwrite_stream> OS;
+ std::string OutputDir = CI.getFrontendOpts().SymbolGraphOutputDir;
+
+ // The symbol graphs need to be generated as a side effect of regular
+ // compilation so the output should be dumped in the directory provided with
+ // the command line option.
+ llvm::SmallString<128> OutFilePath(OutputDir);
+ auto Seperator = llvm::sys::path::get_separator();
+ auto Infilename = llvm::sys::path::filename(InFile);
+ OutFilePath.append({Seperator, Infilename});
+ llvm::sys::path::replace_extension(OutFilePath, "json");
+ // StringRef outputFilePathref = *OutFilePath;
+
+ // don't use the default output file
+ OS = CI.createOutputFile(/*OutputPath=*/OutFilePath, /*Binary=*/false,
+ /*RemoveFileOnSignal=*/true,
+ /*UseTemporary=*/true,
+ /*CreateMissingDirectories=*/true);
if (!OS)
return nullptr;
return OS;
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIVisitor.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIVisitor.cpp
deleted file mode 100644
index 24260cf89383..000000000000
--- a/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIVisitor.cpp
+++ /dev/null
@@ -1,560 +0,0 @@
-//===- ExtractAPI/ExtractAPIVisitor.cpp -------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// \file
-/// This file implements the ExtractAPIVisitor an ASTVisitor to collect API
-/// information.
-///
-//===----------------------------------------------------------------------===//
-
-#include "clang/ExtractAPI/ExtractAPIVisitor.h"
-
-#include "TypedefUnderlyingTypeResolver.h"
-#include "clang/AST/ASTConsumer.h"
-#include "clang/AST/ASTContext.h"
-#include "clang/AST/Decl.h"
-#include "clang/AST/DeclCXX.h"
-#include "clang/AST/ParentMapContext.h"
-#include "clang/AST/RawCommentList.h"
-#include "clang/Basic/SourceLocation.h"
-#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/TargetInfo.h"
-#include "clang/ExtractAPI/API.h"
-#include "clang/ExtractAPI/AvailabilityInfo.h"
-#include "clang/ExtractAPI/DeclarationFragments.h"
-#include "clang/Frontend/ASTConsumers.h"
-#include "clang/Frontend/FrontendOptions.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace clang;
-using namespace extractapi;
-
-namespace {
-
-StringRef getTypedefName(const TagDecl *Decl) {
- if (const auto *TypedefDecl = Decl->getTypedefNameForAnonDecl())
- return TypedefDecl->getName();
-
- return {};
-}
-
-template <class DeclTy>
-bool isInSystemHeader(const ASTContext &Context, const DeclTy *D) {
- return Context.getSourceManager().isInSystemHeader(D->getLocation());
-}
-
-} // namespace
-
-bool ExtractAPIVisitor::VisitVarDecl(const VarDecl *Decl) {
- // skip function parameters.
- if (isa<ParmVarDecl>(Decl))
- return true;
-
- // Skip non-global variables in records (struct/union/class).
- if (Decl->getDeclContext()->isRecord())
- return true;
-
- // Skip local variables inside function or method.
- if (!Decl->isDefinedOutsideFunctionOrMethod())
- return true;
-
- // If this is a template but not specialization or instantiation, skip.
- if (Decl->getASTContext().getTemplateOrSpecializationInfo(Decl) &&
- Decl->getTemplateSpecializationKind() == TSK_Undeclared)
- return true;
-
- if (!LocationChecker(Decl->getLocation()))
- return true;
-
- // Collect symbol information.
- StringRef Name = Decl->getName();
- StringRef USR = API.recordUSR(Decl);
- PresumedLoc Loc =
- Context.getSourceManager().getPresumedLoc(Decl->getLocation());
- LinkageInfo Linkage = Decl->getLinkageAndVisibility();
- DocComment Comment;
- if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
- Comment = RawComment->getFormattedLines(Context.getSourceManager(),
- Context.getDiagnostics());
-
- // Build declaration fragments and sub-heading for the variable.
- DeclarationFragments Declaration =
- DeclarationFragmentsBuilder::getFragmentsForVar(Decl);
- DeclarationFragments SubHeading =
- DeclarationFragmentsBuilder::getSubHeading(Decl);
-
- // Add the global variable record to the API set.
- API.addGlobalVar(Name, USR, Loc, AvailabilitySet(Decl), Linkage, Comment,
- Declaration, SubHeading, isInSystemHeader(Context, Decl));
- return true;
-}
-
-bool ExtractAPIVisitor::VisitFunctionDecl(const FunctionDecl *Decl) {
- if (const auto *Method = dyn_cast<CXXMethodDecl>(Decl)) {
- // Skip member function in class templates.
- if (Method->getParent()->getDescribedClassTemplate() != nullptr)
- return true;
-
- // Skip methods in records.
- for (auto P : Context.getParents(*Method)) {
- if (P.get<CXXRecordDecl>())
- return true;
- }
-
- // Skip ConstructorDecl and DestructorDecl.
- if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
- return true;
- }
-
- // Skip templated functions.
- switch (Decl->getTemplatedKind()) {
- case FunctionDecl::TK_NonTemplate:
- case FunctionDecl::TK_DependentNonTemplate:
- break;
- case FunctionDecl::TK_MemberSpecialization:
- case FunctionDecl::TK_FunctionTemplateSpecialization:
- if (auto *TemplateInfo = Decl->getTemplateSpecializationInfo()) {
- if (!TemplateInfo->isExplicitInstantiationOrSpecialization())
- return true;
- }
- break;
- case FunctionDecl::TK_FunctionTemplate:
- case FunctionDecl::TK_DependentFunctionTemplateSpecialization:
- return true;
- }
-
- if (!LocationChecker(Decl->getLocation()))
- return true;
-
- // Collect symbol information.
- StringRef Name = Decl->getName();
- StringRef USR = API.recordUSR(Decl);
- PresumedLoc Loc =
- Context.getSourceManager().getPresumedLoc(Decl->getLocation());
- LinkageInfo Linkage = Decl->getLinkageAndVisibility();
- DocComment Comment;
- if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
- Comment = RawComment->getFormattedLines(Context.getSourceManager(),
- Context.getDiagnostics());
-
- // Build declaration fragments, sub-heading, and signature of the function.
- DeclarationFragments Declaration =
- DeclarationFragmentsBuilder::getFragmentsForFunction(Decl);
- DeclarationFragments SubHeading =
- DeclarationFragmentsBuilder::getSubHeading(Decl);
- FunctionSignature Signature =
- DeclarationFragmentsBuilder::getFunctionSignature(Decl);
-
- // Add the function record to the API set.
- API.addGlobalFunction(Name, USR, Loc, AvailabilitySet(Decl), Linkage, Comment,
- Declaration, SubHeading, Signature,
- isInSystemHeader(Context, Decl));
- return true;
-}
-
-bool ExtractAPIVisitor::VisitEnumDecl(const EnumDecl *Decl) {
- if (!Decl->isComplete())
- return true;
-
- // Skip forward declaration.
- if (!Decl->isThisDeclarationADefinition())
- return true;
-
- if (!LocationChecker(Decl->getLocation()))
- return true;
-
- SmallString<128> QualifiedNameBuffer;
- // Collect symbol information.
- StringRef Name = Decl->getName();
- if (Name.empty())
- Name = getTypedefName(Decl);
- if (Name.empty()) {
- llvm::raw_svector_ostream OS(QualifiedNameBuffer);
- Decl->printQualifiedName(OS);
- Name = QualifiedNameBuffer.str();
- }
-
- StringRef USR = API.recordUSR(Decl);
- PresumedLoc Loc =
- Context.getSourceManager().getPresumedLoc(Decl->getLocation());
- DocComment Comment;
- if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
- Comment = RawComment->getFormattedLines(Context.getSourceManager(),
- Context.getDiagnostics());
-
- // Build declaration fragments and sub-heading for the enum.
- DeclarationFragments Declaration =
- DeclarationFragmentsBuilder::getFragmentsForEnum(Decl);
- DeclarationFragments SubHeading =
- DeclarationFragmentsBuilder::getSubHeading(Decl);
-
- EnumRecord *EnumRecord = API.addEnum(
- API.copyString(Name), USR, Loc, AvailabilitySet(Decl), Comment,
- Declaration, SubHeading, isInSystemHeader(Context, Decl));
-
- // Now collect information about the enumerators in this enum.
- recordEnumConstants(EnumRecord, Decl->enumerators());
-
- return true;
-}
-
-bool ExtractAPIVisitor::VisitRecordDecl(const RecordDecl *Decl) {
- if (!Decl->isCompleteDefinition())
- return true;
-
- // Skip C++ structs/classes/unions
- // TODO: support C++ records
- if (isa<CXXRecordDecl>(Decl))
- return true;
-
- if (!LocationChecker(Decl->getLocation()))
- return true;
-
- // Collect symbol information.
- StringRef Name = Decl->getName();
- if (Name.empty())
- Name = getTypedefName(Decl);
- if (Name.empty())
- return true;
-
- StringRef USR = API.recordUSR(Decl);
- PresumedLoc Loc =
- Context.getSourceManager().getPresumedLoc(Decl->getLocation());
- DocComment Comment;
- if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
- Comment = RawComment->getFormattedLines(Context.getSourceManager(),
- Context.getDiagnostics());
-
- // Build declaration fragments and sub-heading for the struct.
- DeclarationFragments Declaration =
- DeclarationFragmentsBuilder::getFragmentsForStruct(Decl);
- DeclarationFragments SubHeading =
- DeclarationFragmentsBuilder::getSubHeading(Decl);
-
- StructRecord *StructRecord =
- API.addStruct(Name, USR, Loc, AvailabilitySet(Decl), Comment, Declaration,
- SubHeading, isInSystemHeader(Context, Decl));
-
- // Now collect information about the fields in this struct.
- recordStructFields(StructRecord, Decl->fields());
-
- return true;
-}
-
-bool ExtractAPIVisitor::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *Decl) {
- // Skip forward declaration for classes (@class)
- if (!Decl->isThisDeclarationADefinition())
- return true;
-
- if (!LocationChecker(Decl->getLocation()))
- return true;
-
- // Collect symbol information.
- StringRef Name = Decl->getName();
- StringRef USR = API.recordUSR(Decl);
- PresumedLoc Loc =
- Context.getSourceManager().getPresumedLoc(Decl->getLocation());
- LinkageInfo Linkage = Decl->getLinkageAndVisibility();
- DocComment Comment;
- if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
- Comment = RawComment->getFormattedLines(Context.getSourceManager(),
- Context.getDiagnostics());
-
- // Build declaration fragments and sub-heading for the interface.
- DeclarationFragments Declaration =
- DeclarationFragmentsBuilder::getFragmentsForObjCInterface(Decl);
- DeclarationFragments SubHeading =
- DeclarationFragmentsBuilder::getSubHeading(Decl);
-
- // Collect super class information.
- SymbolReference SuperClass;
- if (const auto *SuperClassDecl = Decl->getSuperClass()) {
- SuperClass.Name = SuperClassDecl->getObjCRuntimeNameAsString();
- SuperClass.USR = API.recordUSR(SuperClassDecl);
- }
-
- ObjCInterfaceRecord *ObjCInterfaceRecord = API.addObjCInterface(
- Name, USR, Loc, AvailabilitySet(Decl), Linkage, Comment, Declaration,
- SubHeading, SuperClass, isInSystemHeader(Context, Decl));
-
- // Record all methods (selectors). This doesn't include automatically
- // synthesized property methods.
- recordObjCMethods(ObjCInterfaceRecord, Decl->methods());
- recordObjCProperties(ObjCInterfaceRecord, Decl->properties());
- recordObjCInstanceVariables(ObjCInterfaceRecord, Decl->ivars());
- recordObjCProtocols(ObjCInterfaceRecord, Decl->protocols());
-
- return true;
-}
-
-bool ExtractAPIVisitor::VisitObjCProtocolDecl(const ObjCProtocolDecl *Decl) {
- // Skip forward declaration for protocols (@protocol).
- if (!Decl->isThisDeclarationADefinition())
- return true;
-
- if (!LocationChecker(Decl->getLocation()))
- return true;
-
- // Collect symbol information.
- StringRef Name = Decl->getName();
- StringRef USR = API.recordUSR(Decl);
- PresumedLoc Loc =
- Context.getSourceManager().getPresumedLoc(Decl->getLocation());
- DocComment Comment;
- if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
- Comment = RawComment->getFormattedLines(Context.getSourceManager(),
- Context.getDiagnostics());
-
- // Build declaration fragments and sub-heading for the protocol.
- DeclarationFragments Declaration =
- DeclarationFragmentsBuilder::getFragmentsForObjCProtocol(Decl);
- DeclarationFragments SubHeading =
- DeclarationFragmentsBuilder::getSubHeading(Decl);
-
- ObjCProtocolRecord *ObjCProtocolRecord = API.addObjCProtocol(
- Name, USR, Loc, AvailabilitySet(Decl), Comment, Declaration, SubHeading,
- isInSystemHeader(Context, Decl));
-
- recordObjCMethods(ObjCProtocolRecord, Decl->methods());
- recordObjCProperties(ObjCProtocolRecord, Decl->properties());
- recordObjCProtocols(ObjCProtocolRecord, Decl->protocols());
-
- return true;
-}
-
-bool ExtractAPIVisitor::VisitTypedefNameDecl(const TypedefNameDecl *Decl) {
- // Skip ObjC Type Parameter for now.
- if (isa<ObjCTypeParamDecl>(Decl))
- return true;
-
- if (!Decl->isDefinedOutsideFunctionOrMethod())
- return true;
-
- if (!LocationChecker(Decl->getLocation()))
- return true;
-
- PresumedLoc Loc =
- Context.getSourceManager().getPresumedLoc(Decl->getLocation());
- StringRef Name = Decl->getName();
- StringRef USR = API.recordUSR(Decl);
- DocComment Comment;
- if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
- Comment = RawComment->getFormattedLines(Context.getSourceManager(),
- Context.getDiagnostics());
-
- QualType Type = Decl->getUnderlyingType();
- SymbolReference SymRef =
- TypedefUnderlyingTypeResolver(Context).getSymbolReferenceForType(Type,
- API);
-
- API.addTypedef(Name, USR, Loc, AvailabilitySet(Decl), Comment,
- DeclarationFragmentsBuilder::getFragmentsForTypedef(Decl),
- DeclarationFragmentsBuilder::getSubHeading(Decl), SymRef,
- isInSystemHeader(Context, Decl));
-
- return true;
-}
-
-bool ExtractAPIVisitor::VisitObjCCategoryDecl(const ObjCCategoryDecl *Decl) {
- // Collect symbol information.
- StringRef Name = Decl->getName();
- StringRef USR = API.recordUSR(Decl);
- PresumedLoc Loc =
- Context.getSourceManager().getPresumedLoc(Decl->getLocation());
- DocComment Comment;
- if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
- Comment = RawComment->getFormattedLines(Context.getSourceManager(),
- Context.getDiagnostics());
- // Build declaration fragments and sub-heading for the category.
- DeclarationFragments Declaration =
- DeclarationFragmentsBuilder::getFragmentsForObjCCategory(Decl);
- DeclarationFragments SubHeading =
- DeclarationFragmentsBuilder::getSubHeading(Decl);
-
- const ObjCInterfaceDecl *InterfaceDecl = Decl->getClassInterface();
- SymbolReference Interface(InterfaceDecl->getName(),
- API.recordUSR(InterfaceDecl));
-
- ObjCCategoryRecord *ObjCCategoryRecord = API.addObjCCategory(
- Name, USR, Loc, AvailabilitySet(Decl), Comment, Declaration, SubHeading,
- Interface, isInSystemHeader(Context, Decl));
-
- recordObjCMethods(ObjCCategoryRecord, Decl->methods());
- recordObjCProperties(ObjCCategoryRecord, Decl->properties());
- recordObjCInstanceVariables(ObjCCategoryRecord, Decl->ivars());
- recordObjCProtocols(ObjCCategoryRecord, Decl->protocols());
-
- return true;
-}
-
-/// Collect API information for the enum constants and associate with the
-/// parent enum.
-void ExtractAPIVisitor::recordEnumConstants(
- EnumRecord *EnumRecord, const EnumDecl::enumerator_range Constants) {
- for (const auto *Constant : Constants) {
- // Collect symbol information.
- StringRef Name = Constant->getName();
- StringRef USR = API.recordUSR(Constant);
- PresumedLoc Loc =
- Context.getSourceManager().getPresumedLoc(Constant->getLocation());
- DocComment Comment;
- if (auto *RawComment = Context.getRawCommentForDeclNoCache(Constant))
- Comment = RawComment->getFormattedLines(Context.getSourceManager(),
- Context.getDiagnostics());
-
- // Build declaration fragments and sub-heading for the enum constant.
- DeclarationFragments Declaration =
- DeclarationFragmentsBuilder::getFragmentsForEnumConstant(Constant);
- DeclarationFragments SubHeading =
- DeclarationFragmentsBuilder::getSubHeading(Constant);
-
- API.addEnumConstant(EnumRecord, Name, USR, Loc, AvailabilitySet(Constant),
- Comment, Declaration, SubHeading,
- isInSystemHeader(Context, Constant));
- }
-}
-
-/// Collect API information for the struct fields and associate with the
-/// parent struct.
-void ExtractAPIVisitor::recordStructFields(
- StructRecord *StructRecord, const RecordDecl::field_range Fields) {
- for (const auto *Field : Fields) {
- // Collect symbol information.
- StringRef Name = Field->getName();
- StringRef USR = API.recordUSR(Field);
- PresumedLoc Loc =
- Context.getSourceManager().getPresumedLoc(Field->getLocation());
- DocComment Comment;
- if (auto *RawComment = Context.getRawCommentForDeclNoCache(Field))
- Comment = RawComment->getFormattedLines(Context.getSourceManager(),
- Context.getDiagnostics());
-
- // Build declaration fragments and sub-heading for the struct field.
- DeclarationFragments Declaration =
- DeclarationFragmentsBuilder::getFragmentsForField(Field);
- DeclarationFragments SubHeading =
- DeclarationFragmentsBuilder::getSubHeading(Field);
-
- API.addStructField(StructRecord, Name, USR, Loc, AvailabilitySet(Field),
- Comment, Declaration, SubHeading,
- isInSystemHeader(Context, Field));
- }
-}
-
-/// Collect API information for the Objective-C methods and associate with the
-/// parent container.
-void ExtractAPIVisitor::recordObjCMethods(
- ObjCContainerRecord *Container,
- const ObjCContainerDecl::method_range Methods) {
- for (const auto *Method : Methods) {
- // Don't record selectors for properties.
- if (Method->isPropertyAccessor())
- continue;
-
- StringRef Name = API.copyString(Method->getSelector().getAsString());
- StringRef USR = API.recordUSR(Method);
- PresumedLoc Loc =
- Context.getSourceManager().getPresumedLoc(Method->getLocation());
- DocComment Comment;
- if (auto *RawComment = Context.getRawCommentForDeclNoCache(Method))
- Comment = RawComment->getFormattedLines(Context.getSourceManager(),
- Context.getDiagnostics());
-
- // Build declaration fragments, sub-heading, and signature for the method.
- DeclarationFragments Declaration =
- DeclarationFragmentsBuilder::getFragmentsForObjCMethod(Method);
- DeclarationFragments SubHeading =
- DeclarationFragmentsBuilder::getSubHeading(Method);
- FunctionSignature Signature =
- DeclarationFragmentsBuilder::getFunctionSignature(Method);
-
- API.addObjCMethod(Container, Name, USR, Loc, AvailabilitySet(Method),
- Comment, Declaration, SubHeading, Signature,
- Method->isInstanceMethod(),
- isInSystemHeader(Context, Method));
- }
-}
-
-void ExtractAPIVisitor::recordObjCProperties(
- ObjCContainerRecord *Container,
- const ObjCContainerDecl::prop_range Properties) {
- for (const auto *Property : Properties) {
- StringRef Name = Property->getName();
- StringRef USR = API.recordUSR(Property);
- PresumedLoc Loc =
- Context.getSourceManager().getPresumedLoc(Property->getLocation());
- DocComment Comment;
- if (auto *RawComment = Context.getRawCommentForDeclNoCache(Property))
- Comment = RawComment->getFormattedLines(Context.getSourceManager(),
- Context.getDiagnostics());
-
- // Build declaration fragments and sub-heading for the property.
- DeclarationFragments Declaration =
- DeclarationFragmentsBuilder::getFragmentsForObjCProperty(Property);
- DeclarationFragments SubHeading =
- DeclarationFragmentsBuilder::getSubHeading(Property);
-
- StringRef GetterName =
- API.copyString(Property->getGetterName().getAsString());
- StringRef SetterName =
- API.copyString(Property->getSetterName().getAsString());
-
- // Get the attributes for property.
- unsigned Attributes = ObjCPropertyRecord::NoAttr;
- if (Property->getPropertyAttributes() &
- ObjCPropertyAttribute::kind_readonly)
- Attributes |= ObjCPropertyRecord::ReadOnly;
-
- API.addObjCProperty(
- Container, Name, USR, Loc, AvailabilitySet(Property), Comment,
- Declaration, SubHeading,
- static_cast<ObjCPropertyRecord::AttributeKind>(Attributes), GetterName,
- SetterName, Property->isOptional(),
- !(Property->getPropertyAttributes() &
- ObjCPropertyAttribute::kind_class),
- isInSystemHeader(Context, Property));
- }
-}
-
-void ExtractAPIVisitor::recordObjCInstanceVariables(
- ObjCContainerRecord *Container,
- const llvm::iterator_range<
- DeclContext::specific_decl_iterator<ObjCIvarDecl>>
- Ivars) {
- for (const auto *Ivar : Ivars) {
- StringRef Name = Ivar->getName();
- StringRef USR = API.recordUSR(Ivar);
- PresumedLoc Loc =
- Context.getSourceManager().getPresumedLoc(Ivar->getLocation());
- DocComment Comment;
- if (auto *RawComment = Context.getRawCommentForDeclNoCache(Ivar))
- Comment = RawComment->getFormattedLines(Context.getSourceManager(),
- Context.getDiagnostics());
-
- // Build declaration fragments and sub-heading for the instance variable.
- DeclarationFragments Declaration =
- DeclarationFragmentsBuilder::getFragmentsForField(Ivar);
- DeclarationFragments SubHeading =
- DeclarationFragmentsBuilder::getSubHeading(Ivar);
-
- ObjCInstanceVariableRecord::AccessControl Access =
- Ivar->getCanonicalAccessControl();
-
- API.addObjCInstanceVariable(
- Container, Name, USR, Loc, AvailabilitySet(Ivar), Comment, Declaration,
- SubHeading, Access, isInSystemHeader(Context, Ivar));
- }
-}
-
-void ExtractAPIVisitor::recordObjCProtocols(
- ObjCContainerRecord *Container,
- ObjCInterfaceDecl::protocol_range Protocols) {
- for (const auto *Protocol : Protocols)
- Container->Protocols.emplace_back(Protocol->getName(),
- API.recordUSR(Protocol));
-}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SerializerBase.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SerializerBase.cpp
deleted file mode 100644
index 71fd25b2b2ab..000000000000
--- a/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SerializerBase.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-//===- ExtractAPI/Serialization/SerializerBase.cpp --------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// \file
-/// This file implements the APISerializer interface.
-///
-//===----------------------------------------------------------------------===//
-
-#include "clang/ExtractAPI/Serialization/SerializerBase.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace clang::extractapi;
-
-void APISerializer::serialize(llvm::raw_ostream &os) {}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
index 01e9b37d2680..534e9288cc71 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
@@ -14,16 +14,11 @@
#include "clang/ExtractAPI/Serialization/SymbolGraphSerializer.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Version.h"
-#include "clang/ExtractAPI/API.h"
-#include "clang/ExtractAPI/APIIgnoresList.h"
#include "clang/ExtractAPI/DeclarationFragments.h"
-#include "clang/ExtractAPI/Serialization/SerializerBase.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/STLFunctionalExtras.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/JSON.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/VersionTuple.h"
#include <optional>
@@ -171,12 +166,16 @@ serializeAvailability(const AvailabilitySet &Availabilities) {
for (const auto &AvailInfo : Availabilities) {
Object Availability;
Availability["domain"] = AvailInfo.Domain;
- serializeObject(Availability, "introducedVersion",
- serializeSemanticVersion(AvailInfo.Introduced));
- serializeObject(Availability, "deprecatedVersion",
- serializeSemanticVersion(AvailInfo.Deprecated));
- serializeObject(Availability, "obsoletedVersion",
- serializeSemanticVersion(AvailInfo.Obsoleted));
+ if (AvailInfo.Unavailable)
+ Availability["isUnconditionallyUnavailable"] = true;
+ else {
+ serializeObject(Availability, "introducedVersion",
+ serializeSemanticVersion(AvailInfo.Introduced));
+ serializeObject(Availability, "deprecatedVersion",
+ serializeSemanticVersion(AvailInfo.Deprecated));
+ serializeObject(Availability, "obsoletedVersion",
+ serializeSemanticVersion(AvailInfo.Obsoleted));
+ }
AvailabilityArray.emplace_back(std::move(Availability));
}
@@ -487,6 +486,7 @@ bool generatePathComponents(
SmallVector<PathComponent, 4> ReverseComponenents;
ReverseComponenents.emplace_back(Record.USR, Record.Name, Record.getKind());
const auto *CurrentParent = &Record.ParentInformation;
+ bool FailedToFindParent = false;
while (CurrentParent && !CurrentParent->empty()) {
PathComponent CurrentParentComponent(CurrentParent->ParentUSR,
CurrentParent->ParentName,
@@ -509,8 +509,10 @@ bool generatePathComponents(
// The parent record doesn't exist which means the symbol shouldn't be
// treated as part of the current product.
- if (!ParentRecord)
- return true;
+ if (!ParentRecord) {
+ FailedToFindParent = true;
+ break;
+ }
ReverseComponenents.push_back(std::move(CurrentParentComponent));
CurrentParent = &ParentRecord->ParentInformation;
@@ -519,8 +521,9 @@ bool generatePathComponents(
for (const auto &PC : reverse(ReverseComponenents))
ComponentTransformer(PC);
- return false;
+ return FailedToFindParent;
}
+
Object serializeParentContext(const PathComponent &PC, Language Lang) {
Object ParentContextElem;
ParentContextElem["usr"] = PC.USR;
@@ -533,20 +536,16 @@ template <typename RecordTy>
Array generateParentContexts(const RecordTy &Record, const APISet &API,
Language Lang) {
Array ParentContexts;
- if (generatePathComponents(
- Record, API, [Lang, &ParentContexts](const PathComponent &PC) {
- ParentContexts.push_back(serializeParentContext(PC, Lang));
- }))
- ParentContexts.clear();
- ParentContexts.pop_back();
+ generatePathComponents(
+ Record, API, [Lang, &ParentContexts](const PathComponent &PC) {
+ ParentContexts.push_back(serializeParentContext(PC, Lang));
+ });
return ParentContexts;
}
} // namespace
-void SymbolGraphSerializer::anchor() {}
-
/// Defines the format version emitted by SymbolGraphSerializer.
const VersionTuple SymbolGraphSerializer::FormatVersion{0, 5, 3};
@@ -663,7 +662,7 @@ void SymbolGraphSerializer::serializeRelationship(RelationshipKind Kind,
Relationships.emplace_back(std::move(Relationship));
}
-void SymbolGraphSerializer::serializeGlobalFunctionRecord(
+void SymbolGraphSerializer::visitGlobalFunctionRecord(
const GlobalFunctionRecord &Record) {
auto Obj = serializeAPIRecord(Record);
if (!Obj)
@@ -672,7 +671,7 @@ void SymbolGraphSerializer::serializeGlobalFunctionRecord(
Symbols.emplace_back(std::move(*Obj));
}
-void SymbolGraphSerializer::serializeGlobalVariableRecord(
+void SymbolGraphSerializer::visitGlobalVariableRecord(
const GlobalVariableRecord &Record) {
auto Obj = serializeAPIRecord(Record);
if (!Obj)
@@ -681,7 +680,7 @@ void SymbolGraphSerializer::serializeGlobalVariableRecord(
Symbols.emplace_back(std::move(*Obj));
}
-void SymbolGraphSerializer::serializeEnumRecord(const EnumRecord &Record) {
+void SymbolGraphSerializer::visitEnumRecord(const EnumRecord &Record) {
auto Enum = serializeAPIRecord(Record);
if (!Enum)
return;
@@ -690,7 +689,7 @@ void SymbolGraphSerializer::serializeEnumRecord(const EnumRecord &Record) {
serializeMembers(Record, Record.Constants);
}
-void SymbolGraphSerializer::serializeStructRecord(const StructRecord &Record) {
+void SymbolGraphSerializer::visitStructRecord(const StructRecord &Record) {
auto Struct = serializeAPIRecord(Record);
if (!Struct)
return;
@@ -699,7 +698,7 @@ void SymbolGraphSerializer::serializeStructRecord(const StructRecord &Record) {
serializeMembers(Record, Record.Fields);
}
-void SymbolGraphSerializer::serializeObjCContainerRecord(
+void SymbolGraphSerializer::visitObjCContainerRecord(
const ObjCContainerRecord &Record) {
auto ObjCContainer = serializeAPIRecord(Record);
if (!ObjCContainer)
@@ -736,7 +735,7 @@ void SymbolGraphSerializer::serializeObjCContainerRecord(
}
}
-void SymbolGraphSerializer::serializeMacroDefinitionRecord(
+void SymbolGraphSerializer::visitMacroDefinitionRecord(
const MacroDefinitionRecord &Record) {
auto Macro = serializeAPIRecord(Record);
@@ -751,28 +750,28 @@ void SymbolGraphSerializer::serializeSingleRecord(const APIRecord *Record) {
case APIRecord::RK_Unknown:
llvm_unreachable("Records should have a known kind!");
case APIRecord::RK_GlobalFunction:
- serializeGlobalFunctionRecord(*cast<GlobalFunctionRecord>(Record));
+ visitGlobalFunctionRecord(*cast<GlobalFunctionRecord>(Record));
break;
case APIRecord::RK_GlobalVariable:
- serializeGlobalVariableRecord(*cast<GlobalVariableRecord>(Record));
+ visitGlobalVariableRecord(*cast<GlobalVariableRecord>(Record));
break;
case APIRecord::RK_Enum:
- serializeEnumRecord(*cast<EnumRecord>(Record));
+ visitEnumRecord(*cast<EnumRecord>(Record));
break;
case APIRecord::RK_Struct:
- serializeStructRecord(*cast<StructRecord>(Record));
+ visitStructRecord(*cast<StructRecord>(Record));
break;
case APIRecord::RK_ObjCInterface:
- serializeObjCContainerRecord(*cast<ObjCInterfaceRecord>(Record));
+ visitObjCContainerRecord(*cast<ObjCInterfaceRecord>(Record));
break;
case APIRecord::RK_ObjCProtocol:
- serializeObjCContainerRecord(*cast<ObjCProtocolRecord>(Record));
+ visitObjCContainerRecord(*cast<ObjCProtocolRecord>(Record));
break;
case APIRecord::RK_MacroDefinition:
- serializeMacroDefinitionRecord(*cast<MacroDefinitionRecord>(Record));
+ visitMacroDefinitionRecord(*cast<MacroDefinitionRecord>(Record));
break;
case APIRecord::RK_Typedef:
- serializeTypedefRecord(*cast<TypedefRecord>(Record));
+ visitTypedefRecord(*cast<TypedefRecord>(Record));
break;
default:
if (auto Obj = serializeAPIRecord(*Record)) {
@@ -786,8 +785,7 @@ void SymbolGraphSerializer::serializeSingleRecord(const APIRecord *Record) {
}
}
-void SymbolGraphSerializer::serializeTypedefRecord(
- const TypedefRecord &Record) {
+void SymbolGraphSerializer::visitTypedefRecord(const TypedefRecord &Record) {
// Typedefs of anonymous types have their entries unified with the underlying
// type.
bool ShouldDrop = Record.UnderlyingType.Name.empty();
@@ -807,35 +805,7 @@ void SymbolGraphSerializer::serializeTypedefRecord(
}
Object SymbolGraphSerializer::serialize() {
- // Serialize global variables in the API set.
- for (const auto &GlobalVar : API.getGlobalVariables())
- serializeGlobalVariableRecord(*GlobalVar.second);
-
- for (const auto &GlobalFunction : API.getGlobalFunctions())
- serializeGlobalFunctionRecord(*GlobalFunction.second);
-
- // Serialize enum records in the API set.
- for (const auto &Enum : API.getEnums())
- serializeEnumRecord(*Enum.second);
-
- // Serialize struct records in the API set.
- for (const auto &Struct : API.getStructs())
- serializeStructRecord(*Struct.second);
-
- // Serialize Objective-C interface records in the API set.
- for (const auto &ObjCInterface : API.getObjCInterfaces())
- serializeObjCContainerRecord(*ObjCInterface.second);
-
- // Serialize Objective-C protocol records in the API set.
- for (const auto &ObjCProtocol : API.getObjCProtocols())
- serializeObjCContainerRecord(*ObjCProtocol.second);
-
- for (const auto &Macro : API.getMacros())
- serializeMacroDefinitionRecord(*Macro.second);
-
- for (const auto &Typedef : API.getTypedefs())
- serializeTypedefRecord(*Typedef.second);
-
+ traverseAPISet();
return serializeCurrentGraph();
}
@@ -865,6 +835,9 @@ SymbolGraphSerializer::serializeSingleSymbolSGF(StringRef USR,
if (!Record)
return {};
+ if (isa<ObjCCategoryRecord>(Record))
+ return {};
+
Object Root;
APIIgnoresList EmptyIgnores;
SymbolGraphSerializer Serializer(API, EmptyIgnores,
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
index 3da2424ea726..3a5f62c9b2e6 100644
--- a/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
@@ -11,7 +11,7 @@
///
//===----------------------------------------------------------------------===//
-#include "TypedefUnderlyingTypeResolver.h"
+#include "clang/ExtractAPI/TypedefUnderlyingTypeResolver.h"
#include "clang/Index/USRGeneration.h"
using namespace clang;
diff --git a/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp b/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp
index e65457437146..bf124d73e89e 100644
--- a/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp
+++ b/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp
@@ -135,7 +135,7 @@ bool AffectedRangeManager::nonPPLineAffected(
Line->First->NewlinesBefore == 0;
bool IsContinuedComment =
- Line->First->is(tok::comment) && Line->First->Next == nullptr &&
+ Line->First->is(tok::comment) && !Line->First->Next &&
Line->First->NewlinesBefore < 2 && PreviousLine &&
PreviousLine->Affected && PreviousLine->Last->is(tok::comment);
diff --git a/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp b/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
index b3ef2a895d7f..af1e0748fafa 100644
--- a/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
+++ b/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
@@ -82,9 +82,9 @@ getCommentSplit(StringRef Text, unsigned ContentStartColumn,
NumChars < MaxSplit && MaxSplitBytes < Text.size();) {
unsigned BytesInChar =
encoding::getCodePointNumBytes(Text[MaxSplitBytes], Encoding);
- NumChars +=
- encoding::columnWidthWithTabs(Text.substr(MaxSplitBytes, BytesInChar),
- ContentStartColumn, TabWidth, Encoding);
+ NumChars += encoding::columnWidthWithTabs(
+ Text.substr(MaxSplitBytes, BytesInChar), ContentStartColumn + NumChars,
+ TabWidth, Encoding);
MaxSplitBytes += BytesInChar;
}
@@ -590,10 +590,8 @@ unsigned BreakableBlockComment::getContentIndent(unsigned LineIndex) const {
ContentWithNoDecoration = ContentWithNoDecoration.substr(1).ltrim(Blanks);
StringRef FirstWord = ContentWithNoDecoration.substr(
0, ContentWithNoDecoration.find_first_of(Blanks));
- if (ContentIndentingJavadocAnnotations.find(FirstWord) !=
- ContentIndentingJavadocAnnotations.end()) {
+ if (ContentIndentingJavadocAnnotations.contains(FirstWord))
return Style.ContinuationIndentWidth;
- }
return 0;
}
diff --git a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
index 412c57b850b5..0ca297a5f957 100644
--- a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
@@ -18,6 +18,7 @@
#include "WhitespaceManager.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Format/Format.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Debug.h"
@@ -356,11 +357,14 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
if (Current.MustBreakBefore ||
(Current.is(TT_InlineASMColon) &&
(Style.BreakBeforeInlineASMColon == FormatStyle::BBIAS_Always ||
- Style.BreakBeforeInlineASMColon == FormatStyle::BBIAS_OnlyMultiline))) {
+ (Style.BreakBeforeInlineASMColon == FormatStyle::BBIAS_OnlyMultiline &&
+ Style.ColumnLimit > 0)))) {
return true;
}
if (CurrentState.BreakBeforeClosingBrace &&
- Current.closesBlockOrBlockTypeList(Style)) {
+ (Current.closesBlockOrBlockTypeList(Style) ||
+ (Current.is(tok::r_brace) &&
+ Current.isBlockIndentedInitRBrace(Style)))) {
return true;
}
if (CurrentState.BreakBeforeClosingParen && Current.is(tok::r_paren))
@@ -613,10 +617,10 @@ unsigned ContinuationIndenter::addTokenToState(LineState &State, bool Newline,
assert(!State.Stack.empty());
State.NoContinuation = false;
- if ((Current.is(TT_ImplicitStringLiteral) &&
- (Previous.Tok.getIdentifierInfo() == nullptr ||
- Previous.Tok.getIdentifierInfo()->getPPKeywordID() ==
- tok::pp_not_keyword))) {
+ if (Current.is(TT_ImplicitStringLiteral) &&
+ (!Previous.Tok.getIdentifierInfo() ||
+ Previous.Tok.getIdentifierInfo()->getPPKeywordID() ==
+ tok::pp_not_keyword)) {
unsigned EndColumn =
SourceMgr.getSpellingColumnNumber(Current.WhitespaceRange.getEnd());
if (Current.LastNewlineOffset != 0) {
@@ -739,10 +743,16 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
if (Previous.is(TT_TemplateString) && Previous.opensScope())
CurrentState.NoLineBreak = true;
+ // Align following lines within parentheses / brackets if configured.
+ // Note: This doesn't apply to macro expansion lines, which are MACRO( , , )
+ // with args as children of the '(' and ',' tokens. It does not make sense to
+ // align the commas with the opening paren.
if (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign &&
!CurrentState.IsCSharpGenericTypeConstraint && Previous.opensScope() &&
Previous.isNot(TT_ObjCMethodExpr) && Previous.isNot(TT_RequiresClause) &&
- (Current.isNot(TT_LineComment) || Previous.is(BK_BracedInit))) {
+ !(Current.MacroParent && Previous.MacroParent) &&
+ (Current.isNot(TT_LineComment) ||
+ Previous.isOneOf(BK_BracedInit, TT_VerilogMultiLineListLParen))) {
CurrentState.Indent = State.Column + Spaces;
CurrentState.IsAligned = true;
}
@@ -1053,13 +1063,16 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
bool PreviousIsBreakingCtorInitializerColon =
PreviousNonComment && PreviousNonComment->is(TT_CtorInitializerColon) &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon;
+ bool AllowAllConstructorInitializersOnNextLine =
+ Style.PackConstructorInitializers == FormatStyle::PCIS_NextLine ||
+ Style.PackConstructorInitializers == FormatStyle::PCIS_NextLineOnly;
if (!(Previous.isOneOf(tok::l_paren, tok::l_brace, TT_BinaryOperator) ||
PreviousIsBreakingCtorInitializerColon) ||
(!Style.AllowAllParametersOfDeclarationOnNextLine &&
State.Line->MustBeDeclaration) ||
(!Style.AllowAllArgumentsOnNextLine &&
!State.Line->MustBeDeclaration) ||
- (Style.PackConstructorInitializers != FormatStyle::PCIS_NextLine &&
+ (!AllowAllConstructorInitializersOnNextLine &&
PreviousIsBreakingCtorInitializerColon) ||
Previous.is(TT_DictLiteral)) {
CurrentState.BreakBeforeParameter = true;
@@ -1069,7 +1082,7 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// and we allow all arguments on the next line, we should not break
// before the next parameter.
if (PreviousIsBreakingCtorInitializerColon &&
- Style.PackConstructorInitializers == FormatStyle::PCIS_NextLine) {
+ AllowAllConstructorInitializersOnNextLine) {
CurrentState.BreakBeforeParameter = false;
}
}
@@ -1116,8 +1129,15 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
Style.IndentWidth;
}
- if (NextNonComment->is(tok::l_brace) && NextNonComment->is(BK_Block))
- return Current.NestingLevel == 0 ? State.FirstIndent : CurrentState.Indent;
+ if ((NextNonComment->is(tok::l_brace) && NextNonComment->is(BK_Block)) ||
+ (Style.isVerilog() && Keywords.isVerilogBegin(*NextNonComment))) {
+ if (Current.NestingLevel == 0 ||
+ (Style.LambdaBodyIndentation == FormatStyle::LBI_OuterScope &&
+ State.NextToken->is(TT_LambdaLBrace))) {
+ return State.FirstIndent;
+ }
+ return CurrentState.Indent;
+ }
if ((Current.isOneOf(tok::r_brace, tok::r_square) ||
(Current.is(tok::greater) &&
(Style.Language == FormatStyle::LK_Proto ||
@@ -1150,12 +1170,23 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
return State.Stack[State.Stack.size() - 2].LastSpace;
}
if (Style.AlignAfterOpenBracket == FormatStyle::BAS_BlockIndent &&
- Current.is(tok::r_paren) && State.Stack.size() > 1) {
+ (Current.is(tok::r_paren) ||
+ (Current.is(tok::r_brace) &&
+ Current.MatchingParen->is(BK_BracedInit))) &&
+ State.Stack.size() > 1) {
return State.Stack[State.Stack.size() - 2].LastSpace;
}
if (NextNonComment->is(TT_TemplateString) && NextNonComment->closesScope())
return State.Stack[State.Stack.size() - 2].LastSpace;
+ // Field labels in a nested type should be aligned to the brace. For example
+ // in ProtoBuf:
+ // optional int32 b = 2 [(foo_options) = {aaaaaaaaaaaaaaaaaaa: 123,
+ // bbbbbbbbbbbbbbbbbbbbbbbb:"baz"}];
+ // For Verilog, a quote following a brace is treated as an identifier. And
+ // Both braces and colons get annotated as TT_DictLiteral. So we have to
+ // check.
if (Current.is(tok::identifier) && Current.Next &&
+ (!Style.isVerilog() || Current.Next->is(tok::colon)) &&
(Current.Next->is(TT_DictLiteral) ||
((Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) &&
@@ -1264,8 +1295,13 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
return ContinuationIndent;
}
- if (State.Line->InPragmaDirective)
- return CurrentState.Indent + Style.ContinuationIndentWidth;
+ // OpenMP clauses want to get additional indentation when they are pushed onto
+ // the next line.
+ if (State.Line->InPragmaDirective) {
+ FormatToken *PragmaType = State.Line->First->Next->Next;
+ if (PragmaType && PragmaType->TokenText.equals("omp"))
+ return CurrentState.Indent + Style.ContinuationIndentWidth;
+ }
// This ensure that we correctly format ObjC methods calls without inputs,
// i.e. where the last element isn't selector like: [callee method];
@@ -1401,7 +1437,8 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
if (Style.PackConstructorInitializers > FormatStyle::PCIS_BinPack) {
CurrentState.AvoidBinPacking = true;
CurrentState.BreakBeforeParameter =
- Style.PackConstructorInitializers != FormatStyle::PCIS_NextLine;
+ Style.PackConstructorInitializers != FormatStyle::PCIS_NextLine &&
+ Style.PackConstructorInitializers != FormatStyle::PCIS_NextLineOnly;
} else {
CurrentState.BreakBeforeParameter = false;
}
@@ -1644,10 +1681,14 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
if (Current.opensBlockOrBlockTypeList(Style)) {
NewIndent = Style.IndentWidth +
std::min(State.Column, CurrentState.NestedBlockIndent);
+ } else if (Current.is(tok::l_brace)) {
+ NewIndent =
+ CurrentState.LastSpace + Style.BracedInitializerIndentWidth.value_or(
+ Style.ContinuationIndentWidth);
} else {
NewIndent = CurrentState.LastSpace + Style.ContinuationIndentWidth;
}
- const FormatToken *NextNoComment = Current.getNextNonComment();
+ const FormatToken *NextNonComment = Current.getNextNonComment();
bool EndsInComma = Current.MatchingParen &&
Current.MatchingParen->Previous &&
Current.MatchingParen->Previous->is(tok::comma);
@@ -1655,9 +1696,9 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto ||
!Style.BinPackArguments ||
- (NextNoComment &&
- NextNoComment->isOneOf(TT_DesignatedInitializerPeriod,
- TT_DesignatedInitializerLSquare));
+ (NextNonComment && NextNonComment->isOneOf(
+ TT_DesignatedInitializerPeriod,
+ TT_DesignatedInitializerLSquare));
BreakBeforeParameter = EndsInComma;
if (Current.ParameterCount > 1)
NestedBlockIndent = std::max(NestedBlockIndent, State.Column + 1);
@@ -1746,11 +1787,11 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
NewState.BreakBeforeParameter = BreakBeforeParameter;
NewState.HasMultipleNestedBlocks = (Current.BlockParameterCount > 1);
- if (Style.BraceWrapping.BeforeLambdaBody && Current.Next != nullptr &&
+ if (Style.BraceWrapping.BeforeLambdaBody && Current.Next &&
Current.is(tok::l_paren)) {
// Search for any parameter that is a lambda.
FormatToken const *next = Current.Next;
- while (next != nullptr) {
+ while (next) {
if (next->is(TT_LambdaLSquare)) {
NewState.HasMultipleNestedBlocks = true;
break;
@@ -1815,6 +1856,10 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
}
void ContinuationIndenter::moveStateToNewBlock(LineState &State) {
+ if (Style.LambdaBodyIndentation == FormatStyle::LBI_OuterScope &&
+ State.NextToken->is(TT_LambdaLBrace)) {
+ State.Stack.back().NestedBlockIndent = State.FirstIndent;
+ }
unsigned NestedBlockIndent = State.Stack.back().NestedBlockIndent;
// ObjC block sometimes follow special indentation rules.
unsigned NewIndent =
@@ -2165,7 +2210,7 @@ ContinuationIndenter::createBreakableToken(const FormatToken &Current,
Current, StartColumn, Current.OriginalColumn, !Current.Previous,
State.Line->InPPDirective, Encoding, Style, Whitespaces.useCRLF());
} else if (Current.is(TT_LineComment) &&
- (Current.Previous == nullptr ||
+ (!Current.Previous ||
Current.Previous->isNot(TT_ImplicitStringLiteral))) {
bool RegularComments = [&]() {
for (const FormatToken *T = &Current; T && T->is(TT_LineComment);
diff --git a/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp b/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp
index 5c006e2d037b..576c6597b27a 100644
--- a/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp
+++ b/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp
@@ -52,10 +52,10 @@ void DefinitionBlockSeparator::separateBlocks(
for (const FormatToken *CurrentToken = Line->First; CurrentToken;
CurrentToken = CurrentToken->Next) {
if (BracketLevel == 0) {
- if ((CurrentToken->isOneOf(tok::kw_class, tok::kw_struct,
- tok::kw_union) ||
- (Style.isJavaScript() &&
- CurrentToken->is(ExtraKeywords.kw_function)))) {
+ if (CurrentToken->isOneOf(tok::kw_class, tok::kw_struct,
+ tok::kw_union) ||
+ (Style.isJavaScript() &&
+ CurrentToken->is(ExtraKeywords.kw_function))) {
return true;
}
if (!ExcludeEnum && CurrentToken->is(tok::kw_enum))
@@ -164,7 +164,7 @@ void DefinitionBlockSeparator::separateBlocks(
}
}
- if ((Style.isCSharp() && OperateLine->First->is(TT_AttributeSquare)))
+ if (Style.isCSharp() && OperateLine->First->is(TT_AttributeSquare))
return true;
return false;
};
diff --git a/contrib/llvm-project/clang/lib/Format/Format.cpp b/contrib/llvm-project/clang/lib/Format/Format.cpp
index a59d53009eaa..1075f8e1a42f 100644
--- a/contrib/llvm-project/clang/lib/Format/Format.cpp
+++ b/contrib/llvm-project/clang/lib/Format/Format.cpp
@@ -112,6 +112,17 @@ template <> struct MappingTraits<FormatStyle::AlignConsecutiveStyle> {
};
template <>
+struct MappingTraits<FormatStyle::ShortCaseStatementsAlignmentStyle> {
+ static void mapping(IO &IO,
+ FormatStyle::ShortCaseStatementsAlignmentStyle &Value) {
+ IO.mapOptional("Enabled", Value.Enabled);
+ IO.mapOptional("AcrossEmptyLines", Value.AcrossEmptyLines);
+ IO.mapOptional("AcrossComments", Value.AcrossComments);
+ IO.mapOptional("AlignCaseColons", Value.AlignCaseColons);
+ }
+};
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::AttributeBreakingStyle> {
static void enumeration(IO &IO, FormatStyle::AttributeBreakingStyle &Value) {
IO.enumCase(Value, "Always", FormatStyle::ABS_Always);
@@ -375,6 +386,7 @@ template <> struct ScalarEnumerationTraits<FormatStyle::LanguageKind> {
IO.enumCase(Value, "TextProto", FormatStyle::LK_TextProto);
IO.enumCase(Value, "CSharp", FormatStyle::LK_CSharp);
IO.enumCase(Value, "Json", FormatStyle::LK_Json);
+ IO.enumCase(Value, "Verilog", FormatStyle::LK_Verilog);
}
};
@@ -446,6 +458,7 @@ struct ScalarEnumerationTraits<FormatStyle::PackConstructorInitializersStyle> {
IO.enumCase(Value, "BinPack", FormatStyle::PCIS_BinPack);
IO.enumCase(Value, "CurrentLine", FormatStyle::PCIS_CurrentLine);
IO.enumCase(Value, "NextLine", FormatStyle::PCIS_NextLine);
+ IO.enumCase(Value, "NextLineOnly", FormatStyle::PCIS_NextLineOnly);
}
};
@@ -501,6 +514,16 @@ struct ScalarEnumerationTraits<FormatStyle::ReferenceAlignmentStyle> {
};
template <>
+struct ScalarEnumerationTraits<FormatStyle::RemoveParenthesesStyle> {
+ static void enumeration(IO &IO, FormatStyle::RemoveParenthesesStyle &Value) {
+ IO.enumCase(Value, "Leave", FormatStyle::RPS_Leave);
+ IO.enumCase(Value, "MultipleParentheses",
+ FormatStyle::RPS_MultipleParentheses);
+ IO.enumCase(Value, "ReturnStatement", FormatStyle::RPS_ReturnStatement);
+ }
+};
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::RequiresClausePositionStyle> {
static void enumeration(IO &IO,
FormatStyle::RequiresClausePositionStyle &Value) {
@@ -699,6 +722,22 @@ template <> struct MappingTraits<FormatStyle::SpacesInLineComment> {
}
};
+template <> struct MappingTraits<FormatStyle::SpacesInParensCustom> {
+ static void mapping(IO &IO, FormatStyle::SpacesInParensCustom &Spaces) {
+ IO.mapOptional("InCStyleCasts", Spaces.InCStyleCasts);
+ IO.mapOptional("InConditionalStatements", Spaces.InConditionalStatements);
+ IO.mapOptional("InEmptyParentheses", Spaces.InEmptyParentheses);
+ IO.mapOptional("Other", Spaces.Other);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::SpacesInParensStyle> {
+ static void enumeration(IO &IO, FormatStyle::SpacesInParensStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::SIPO_Never);
+ IO.enumCase(Value, "Custom", FormatStyle::SIPO_Custom);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::TrailingCommaStyle> {
static void enumeration(IO &IO, FormatStyle::TrailingCommaStyle &Value) {
IO.enumCase(Value, "None", FormatStyle::TCS_None);
@@ -814,6 +853,11 @@ template <> struct MappingTraits<FormatStyle> {
bool DeriveLineEnding = true;
bool UseCRLF = false;
+ bool SpaceInEmptyParentheses = false;
+ bool SpacesInConditionalStatement = false;
+ bool SpacesInCStyleCastParentheses = false;
+ bool SpacesInParentheses = false;
+
// For backward compatibility.
if (!IO.outputting()) {
IO.mapOptional("AlignEscapedNewlinesLeft", Style.AlignEscapedNewlines);
@@ -832,6 +876,12 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("PointerBindsToType", Style.PointerAlignment);
IO.mapOptional("SpaceAfterControlStatementKeyword",
Style.SpaceBeforeParens);
+ IO.mapOptional("SpaceInEmptyParentheses", SpaceInEmptyParentheses);
+ IO.mapOptional("SpacesInConditionalStatement",
+ SpacesInConditionalStatement);
+ IO.mapOptional("SpacesInCStyleCastParentheses",
+ SpacesInCStyleCastParentheses);
+ IO.mapOptional("SpacesInParentheses", SpacesInParentheses);
IO.mapOptional("UseCRLF", UseCRLF);
}
@@ -845,6 +895,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("AlignConsecutiveDeclarations",
Style.AlignConsecutiveDeclarations);
IO.mapOptional("AlignConsecutiveMacros", Style.AlignConsecutiveMacros);
+ IO.mapOptional("AlignConsecutiveShortCaseStatements",
+ Style.AlignConsecutiveShortCaseStatements);
IO.mapOptional("AlignEscapedNewlines", Style.AlignEscapedNewlines);
IO.mapOptional("AlignOperands", Style.AlignOperands);
IO.mapOptional("AlignTrailingComments", Style.AlignTrailingComments);
@@ -878,6 +930,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("BinPackArguments", Style.BinPackArguments);
IO.mapOptional("BinPackParameters", Style.BinPackParameters);
IO.mapOptional("BitFieldColonSpacing", Style.BitFieldColonSpacing);
+ IO.mapOptional("BracedInitializerIndentWidth",
+ Style.BracedInitializerIndentWidth);
IO.mapOptional("BraceWrapping", Style.BraceWrapping);
IO.mapOptional("BreakAfterAttributes", Style.BreakAfterAttributes);
IO.mapOptional("BreakAfterJavaFieldAnnotations",
@@ -938,6 +992,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("JavaScriptWrapImports", Style.JavaScriptWrapImports);
IO.mapOptional("KeepEmptyLinesAtTheStartOfBlocks",
Style.KeepEmptyLinesAtTheStartOfBlocks);
+ IO.mapOptional("KeepEmptyLinesAtEOF", Style.KeepEmptyLinesAtEOF);
IO.mapOptional("LambdaBodyIndentation", Style.LambdaBodyIndentation);
IO.mapOptional("LineEnding", Style.LineEnding);
IO.mapOptional("MacroBlockBegin", Style.MacroBlockBegin);
@@ -984,6 +1039,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("ReferenceAlignment", Style.ReferenceAlignment);
IO.mapOptional("ReflowComments", Style.ReflowComments);
IO.mapOptional("RemoveBracesLLVM", Style.RemoveBracesLLVM);
+ IO.mapOptional("RemoveParentheses", Style.RemoveParentheses);
IO.mapOptional("RemoveSemicolon", Style.RemoveSemicolon);
IO.mapOptional("RequiresClausePosition", Style.RequiresClausePosition);
IO.mapOptional("RequiresExpressionIndentation",
@@ -1008,6 +1064,7 @@ template <> struct MappingTraits<FormatStyle> {
Style.SpaceBeforeCtorInitializerColon);
IO.mapOptional("SpaceBeforeInheritanceColon",
Style.SpaceBeforeInheritanceColon);
+ IO.mapOptional("SpaceBeforeJsonColon", Style.SpaceBeforeJsonColon);
IO.mapOptional("SpaceBeforeParens", Style.SpaceBeforeParens);
IO.mapOptional("SpaceBeforeParensOptions", Style.SpaceBeforeParensOptions);
IO.mapOptional("SpaceBeforeRangeBasedForLoopColon",
@@ -1015,29 +1072,29 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("SpaceBeforeSquareBrackets",
Style.SpaceBeforeSquareBrackets);
IO.mapOptional("SpaceInEmptyBlock", Style.SpaceInEmptyBlock);
- IO.mapOptional("SpaceInEmptyParentheses", Style.SpaceInEmptyParentheses);
IO.mapOptional("SpacesBeforeTrailingComments",
Style.SpacesBeforeTrailingComments);
IO.mapOptional("SpacesInAngles", Style.SpacesInAngles);
- IO.mapOptional("SpacesInConditionalStatement",
- Style.SpacesInConditionalStatement);
IO.mapOptional("SpacesInContainerLiterals",
Style.SpacesInContainerLiterals);
- IO.mapOptional("SpacesInCStyleCastParentheses",
- Style.SpacesInCStyleCastParentheses);
IO.mapOptional("SpacesInLineCommentPrefix",
Style.SpacesInLineCommentPrefix);
- IO.mapOptional("SpacesInParentheses", Style.SpacesInParentheses);
+ IO.mapOptional("SpacesInParens", Style.SpacesInParens);
+ IO.mapOptional("SpacesInParensOptions", Style.SpacesInParensOptions);
IO.mapOptional("SpacesInSquareBrackets", Style.SpacesInSquareBrackets);
IO.mapOptional("Standard", Style.Standard);
IO.mapOptional("StatementAttributeLikeMacros",
Style.StatementAttributeLikeMacros);
IO.mapOptional("StatementMacros", Style.StatementMacros);
IO.mapOptional("TabWidth", Style.TabWidth);
+ IO.mapOptional("TypeNames", Style.TypeNames);
IO.mapOptional("TypenameMacros", Style.TypenameMacros);
IO.mapOptional("UseTab", Style.UseTab);
+ IO.mapOptional("VerilogBreakBetweenInstancePorts",
+ Style.VerilogBreakBetweenInstancePorts);
IO.mapOptional("WhitespaceSensitiveMacros",
Style.WhitespaceSensitiveMacros);
+ IO.mapOptional("Macros", Style.Macros);
// If AlwaysBreakAfterDefinitionReturnType was specified but
// AlwaysBreakAfterReturnType was not, initialize the latter from the
@@ -1090,6 +1147,30 @@ template <> struct MappingTraits<FormatStyle> {
else if (UseCRLF)
Style.LineEnding = FormatStyle::LE_DeriveCRLF;
}
+
+ if (Style.SpacesInParens != FormatStyle::SIPO_Custom &&
+ (SpacesInParentheses || SpaceInEmptyParentheses ||
+ SpacesInConditionalStatement || SpacesInCStyleCastParentheses)) {
+ if (SpacesInParentheses) {
+ // set all options except InCStyleCasts and InEmptyParentheses
+ // to true for backward compatibility.
+ Style.SpacesInParensOptions.InConditionalStatements = true;
+ Style.SpacesInParensOptions.InCStyleCasts =
+ SpacesInCStyleCastParentheses;
+ Style.SpacesInParensOptions.InEmptyParentheses =
+ SpaceInEmptyParentheses;
+ Style.SpacesInParensOptions.Other = true;
+ } else {
+ Style.SpacesInParensOptions = {};
+ Style.SpacesInParensOptions.InConditionalStatements =
+ SpacesInConditionalStatement;
+ Style.SpacesInParensOptions.InCStyleCasts =
+ SpacesInCStyleCastParentheses;
+ Style.SpacesInParensOptions.InEmptyParentheses =
+ SpaceInEmptyParentheses;
+ }
+ Style.SpacesInParens = FormatStyle::SIPO_Custom;
+ }
}
};
@@ -1294,6 +1375,14 @@ static void expandPresetsSpaceBeforeParens(FormatStyle &Expanded) {
}
}
+static void expandPresetsSpacesInParens(FormatStyle &Expanded) {
+ if (Expanded.SpacesInParens == FormatStyle::SIPO_Custom)
+ return;
+ assert(Expanded.SpacesInParens == FormatStyle::SIPO_Never);
+ // Reset all flags
+ Expanded.SpacesInParensOptions = {};
+}
+
FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
FormatStyle LLVMStyle;
LLVMStyle.InheritsParentConfig = false;
@@ -1312,6 +1401,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.AlignConsecutiveBitFields = {};
LLVMStyle.AlignConsecutiveDeclarations = {};
LLVMStyle.AlignConsecutiveMacros = {};
+ LLVMStyle.AlignConsecutiveShortCaseStatements = {};
LLVMStyle.AlignTrailingComments = {};
LLVMStyle.AlignTrailingComments.Kind = FormatStyle::TCAS_Always;
LLVMStyle.AlignTrailingComments.OverEmptyLines = 0;
@@ -1332,6 +1422,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.BitFieldColonSpacing = FormatStyle::BFCS_Both;
LLVMStyle.BinPackArguments = true;
LLVMStyle.BinPackParameters = true;
+ LLVMStyle.BracedInitializerIndentWidth = std::nullopt;
LLVMStyle.BraceWrapping = {/*AfterCaseLabel=*/false,
/*AfterClass=*/false,
/*AfterControlStatement=*/FormatStyle::BWACS_Never,
@@ -1401,6 +1492,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
/*Hex=*/0, /*HexMinDigits=*/0};
LLVMStyle.JavaScriptQuotes = FormatStyle::JSQS_Leave;
LLVMStyle.JavaScriptWrapImports = true;
+ LLVMStyle.KeepEmptyLinesAtEOF = false;
LLVMStyle.KeepEmptyLinesAtTheStartOfBlocks = true;
LLVMStyle.LambdaBodyIndentation = FormatStyle::LBI_Signature;
LLVMStyle.LineEnding = FormatStyle::LE_DeriveLF;
@@ -1418,6 +1510,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.ReferenceAlignment = FormatStyle::RAS_Pointer;
LLVMStyle.ReflowComments = true;
LLVMStyle.RemoveBracesLLVM = false;
+ LLVMStyle.RemoveParentheses = FormatStyle::RPS_Leave;
LLVMStyle.RemoveSemicolon = false;
LLVMStyle.RequiresClausePosition = FormatStyle::RCPS_OwnLine;
LLVMStyle.RequiresExpressionIndentation = FormatStyle::REI_OuterScope;
@@ -1433,6 +1526,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SpaceBeforeCaseColon = false;
LLVMStyle.SpaceBeforeCtorInitializerColon = true;
LLVMStyle.SpaceBeforeInheritanceColon = true;
+ LLVMStyle.SpaceBeforeJsonColon = false;
LLVMStyle.SpaceBeforeParens = FormatStyle::SBPO_ControlStatements;
LLVMStyle.SpaceBeforeParensOptions = {};
LLVMStyle.SpaceBeforeParensOptions.AfterControlStatements = true;
@@ -1443,21 +1537,19 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SpaceBeforeCpp11BracedList = false;
LLVMStyle.SpaceBeforeSquareBrackets = false;
LLVMStyle.SpaceInEmptyBlock = false;
- LLVMStyle.SpaceInEmptyParentheses = false;
LLVMStyle.SpacesBeforeTrailingComments = 1;
LLVMStyle.SpacesInAngles = FormatStyle::SIAS_Never;
LLVMStyle.SpacesInContainerLiterals = true;
- LLVMStyle.SpacesInCStyleCastParentheses = false;
LLVMStyle.SpacesInLineCommentPrefix = {/*Minimum=*/1, /*Maximum=*/-1u};
- LLVMStyle.SpacesInParentheses = false;
+ LLVMStyle.SpacesInParens = FormatStyle::SIPO_Never;
LLVMStyle.SpacesInSquareBrackets = false;
- LLVMStyle.SpacesInConditionalStatement = false;
LLVMStyle.Standard = FormatStyle::LS_Latest;
LLVMStyle.StatementAttributeLikeMacros.push_back("Q_EMIT");
LLVMStyle.StatementMacros.push_back("Q_UNUSED");
LLVMStyle.StatementMacros.push_back("QT_REQUIRE_VERSION");
LLVMStyle.TabWidth = 8;
LLVMStyle.UseTab = FormatStyle::UT_Never;
+ LLVMStyle.VerilogBreakBetweenInstancePorts = true;
LLVMStyle.WhitespaceSensitiveMacros.push_back("BOOST_PP_STRINGIZE");
LLVMStyle.WhitespaceSensitiveMacros.push_back("CF_SWIFT_NAME");
LLVMStyle.WhitespaceSensitiveMacros.push_back("NS_SWIFT_NAME");
@@ -1485,6 +1577,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
break;
case FormatStyle::LK_Verilog:
LLVMStyle.IndentCaseLabels = true;
+ LLVMStyle.SpacesInContainerLiterals = false;
break;
default:
break;
@@ -1931,6 +2024,7 @@ std::string configurationAsText(const FormatStyle &Style) {
FormatStyle NonConstStyle = Style;
expandPresetsBraceWrapping(NonConstStyle);
expandPresetsSpaceBeforeParens(NonConstStyle);
+ expandPresetsSpacesInParens(NonConstStyle);
Output << NonConstStyle;
return Stream.str();
@@ -1968,6 +2062,50 @@ FormatStyle::GetLanguageStyle(FormatStyle::LanguageKind Language) const {
namespace {
+class ParensRemover : public TokenAnalyzer {
+public:
+ ParensRemover(const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style) {}
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override {
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
+ tooling::Replacements Result;
+ removeParens(AnnotatedLines, Result);
+ return {Result, 0};
+ }
+
+private:
+ void removeParens(SmallVectorImpl<AnnotatedLine *> &Lines,
+ tooling::Replacements &Result) {
+ const auto &SourceMgr = Env.getSourceManager();
+ for (auto *Line : Lines) {
+ removeParens(Line->Children, Result);
+ if (!Line->Affected)
+ continue;
+ for (const auto *Token = Line->First; Token && !Token->Finalized;
+ Token = Token->Next) {
+ if (!Token->Optional || !Token->isOneOf(tok::l_paren, tok::r_paren))
+ continue;
+ auto *Next = Token->Next;
+ assert(Next && Next->isNot(tok::eof));
+ SourceLocation Start;
+ if (Next->NewlinesBefore == 0) {
+ Start = Token->Tok.getLocation();
+ Next->WhitespaceRange = Token->WhitespaceRange;
+ } else {
+ Start = Token->WhitespaceRange.getBegin();
+ }
+ const auto &Range =
+ CharSourceRange::getCharRange(Start, Token->Tok.getEndLoc());
+ cantFail(Result.add(tooling::Replacement(SourceMgr, Range, " ")));
+ }
+ }
+ }
+};
+
class BracesInserter : public TokenAnalyzer {
public:
BracesInserter(const Environment &Env, const FormatStyle &Style)
@@ -2459,7 +2597,7 @@ private:
}
bool containsOnlyComments(const AnnotatedLine &Line) {
- for (FormatToken *Tok = Line.First; Tok != nullptr; Tok = Tok->Next)
+ for (FormatToken *Tok = Line.First; Tok; Tok = Tok->Next)
if (Tok->isNot(tok::comment))
return false;
return true;
@@ -2676,6 +2814,8 @@ private:
"CGSizeMake",
"CGVector",
"CGVectorMake",
+ "FOUNDATION_EXPORT", // This is an alias for FOUNDATION_EXTERN.
+ "FOUNDATION_EXTERN",
"NSAffineTransform",
"NSArray",
"NSAttributedString",
@@ -2691,6 +2831,8 @@ private:
"NSDecimalNumber",
"NSDictionary",
"NSEdgeInsets",
+ "NSError",
+ "NSErrorDomain",
"NSHashTable",
"NSIndexPath",
"NSIndexSet",
@@ -2730,6 +2872,7 @@ private:
"NSURLQueryItem",
"NSUUID",
"NSValue",
+ "NS_ASSUME_NONNULL_BEGIN",
"UIImage",
"UIView",
};
@@ -2752,6 +2895,7 @@ private:
FormatTok->TokenText)) ||
FormatTok->is(TT_ObjCStringLiteral) ||
FormatTok->isOneOf(Keywords.kw_NS_CLOSED_ENUM, Keywords.kw_NS_ENUM,
+ Keywords.kw_NS_ERROR_ENUM,
Keywords.kw_NS_OPTIONS, TT_ObjCBlockLBrace,
TT_ObjCBlockLParen, TT_ObjCDecl, TT_ObjCForIn,
TT_ObjCMethodExpr, TT_ObjCMethodSpecifier,
@@ -2795,7 +2939,7 @@ struct JavaImportDirective {
// Determines whether 'Ranges' intersects with ('Start', 'End').
static bool affectsRange(ArrayRef<tooling::Range> Ranges, unsigned Start,
unsigned End) {
- for (auto Range : Ranges) {
+ for (const auto &Range : Ranges) {
if (Range.getOffset() < End &&
Range.getOffset() + Range.getLength() > Start) {
return true;
@@ -3008,13 +3152,10 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
if (Trimmed.contains(RawStringTermination))
FormattingOff = false;
- if (Trimmed == "// clang-format off" ||
- Trimmed == "/* clang-format off */") {
+ if (isClangFormatOff(Trimmed))
FormattingOff = true;
- } else if (Trimmed == "// clang-format on" ||
- Trimmed == "/* clang-format on */") {
+ else if (isClangFormatOn(Trimmed))
FormattingOff = false;
- }
const bool EmptyLineSkipped =
Trimmed.empty() &&
@@ -3191,9 +3332,9 @@ tooling::Replacements sortJavaImports(const FormatStyle &Style, StringRef Code,
Code.substr(Prev, (Pos != StringRef::npos ? Pos : Code.size()) - Prev);
StringRef Trimmed = Line.trim();
- if (Trimmed == "// clang-format off")
+ if (isClangFormatOff(Trimmed))
FormattingOff = true;
- else if (Trimmed == "// clang-format on")
+ else if (isClangFormatOn(Trimmed))
FormattingOff = false;
if (ImportRegex.match(Line, &Matches)) {
@@ -3409,8 +3550,10 @@ reformat(const FormatStyle &Style, StringRef Code,
FormatStyle Expanded = Style;
expandPresetsBraceWrapping(Expanded);
expandPresetsSpaceBeforeParens(Expanded);
+ expandPresetsSpacesInParens(Expanded);
Expanded.InsertBraces = false;
Expanded.RemoveBracesLLVM = false;
+ Expanded.RemoveParentheses = FormatStyle::RPS_Leave;
Expanded.RemoveSemicolon = false;
switch (Expanded.RequiresClausePosition) {
case FormatStyle::RCPS_SingleLine:
@@ -3439,11 +3582,11 @@ reformat(const FormatStyle &Style, StringRef Code,
tooling::Replacements Replaces =
Formatter(*Env, Style, Status).process().first;
// add a replacement to remove the "x = " from the result.
- if (!Replaces.add(tooling::Replacement(FileName, 0, 4, ""))) {
- // apply the reformatting changes and the removal of "x = ".
- if (applyAllReplacements(Code, Replaces))
- return {Replaces, 0};
- }
+ Replaces = Replaces.merge(
+ tooling::Replacements(tooling::Replacement(FileName, 0, 4, "")));
+ // apply the reformatting changes and the removal of "x = ".
+ if (applyAllReplacements(Code, Replaces))
+ return {Replaces, 0};
return {tooling::Replacements(), 0};
}
@@ -3455,26 +3598,29 @@ reformat(const FormatStyle &Style, StringRef Code,
typedef std::function<std::pair<tooling::Replacements, unsigned>(
const Environment &)>
AnalyzerPass;
- SmallVector<AnalyzerPass, 8> Passes;
+
+ SmallVector<AnalyzerPass, 16> Passes;
Passes.emplace_back([&](const Environment &Env) {
return IntegerLiteralSeparatorFixer().process(Env, Expanded);
});
if (Style.isCpp()) {
- if (Style.QualifierAlignment != FormatStyle::QAS_Leave) {
- Passes.emplace_back([&](const Environment &Env) {
- return QualifierAlignmentFixer(Env, Expanded, Code, Ranges,
- FirstStartColumn, NextStartColumn,
- LastStartColumn, FileName)
- .process();
+ if (Style.QualifierAlignment != FormatStyle::QAS_Leave)
+ addQualifierAlignmentFixerPasses(Expanded, Passes);
+
+ if (Style.RemoveParentheses != FormatStyle::RPS_Leave) {
+ FormatStyle S = Expanded;
+ S.RemoveParentheses = Style.RemoveParentheses;
+ Passes.emplace_back([&, S = std::move(S)](const Environment &Env) {
+ return ParensRemover(Env, S).process(/*SkipAnnotation=*/true);
});
}
if (Style.InsertBraces) {
FormatStyle S = Expanded;
S.InsertBraces = true;
- Passes.emplace_back([&, S](const Environment &Env) {
+ Passes.emplace_back([&, S = std::move(S)](const Environment &Env) {
return BracesInserter(Env, S).process(/*SkipAnnotation=*/true);
});
}
@@ -3482,7 +3628,7 @@ reformat(const FormatStyle &Style, StringRef Code,
if (Style.RemoveBracesLLVM) {
FormatStyle S = Expanded;
S.RemoveBracesLLVM = true;
- Passes.emplace_back([&, S](const Environment &Env) {
+ Passes.emplace_back([&, S = std::move(S)](const Environment &Env) {
return BracesRemover(Env, S).process(/*SkipAnnotation=*/true);
});
}
@@ -3490,7 +3636,7 @@ reformat(const FormatStyle &Style, StringRef Code,
if (Style.RemoveSemicolon) {
FormatStyle S = Expanded;
S.RemoveSemicolon = true;
- Passes.emplace_back([&, S](const Environment &Env) {
+ Passes.emplace_back([&, S = std::move(S)](const Environment &Env) {
return SemiRemover(Env, S).process(/*SkipAnnotation=*/true);
});
}
@@ -3554,6 +3700,24 @@ reformat(const FormatStyle &Style, StringRef Code,
}
}
+ if (Style.QualifierAlignment != FormatStyle::QAS_Leave) {
+ // Don't make replacements that replace nothing. QualifierAlignment can
+ // produce them if one of its early passes changes e.g. `const volatile` to
+ // `volatile const` and then a later pass changes it back again.
+ tooling::Replacements NonNoOpFixes;
+ for (const tooling::Replacement &Fix : Fixes) {
+ StringRef OriginalCode = Code.substr(Fix.getOffset(), Fix.getLength());
+ if (!OriginalCode.equals(Fix.getReplacementText())) {
+ auto Err = NonNoOpFixes.add(Fix);
+ if (Err) {
+ llvm::errs() << "Error adding replacements : "
+ << llvm::toString(std::move(Err)) << "\n";
+ }
+ }
+ }
+ Fixes = std::move(NonNoOpFixes);
+ }
+
return {Fixes, Penalty};
}
} // namespace internal
@@ -3669,33 +3833,33 @@ const char *StyleOptionHelpDescription =
static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
if (FileName.endswith(".java"))
return FormatStyle::LK_Java;
- if (FileName.endswith_insensitive(".js") ||
- FileName.endswith_insensitive(".mjs") ||
- FileName.endswith_insensitive(".ts")) {
+ if (FileName.ends_with_insensitive(".js") ||
+ FileName.ends_with_insensitive(".mjs") ||
+ FileName.ends_with_insensitive(".ts")) {
return FormatStyle::LK_JavaScript; // (module) JavaScript or TypeScript.
}
if (FileName.endswith(".m") || FileName.endswith(".mm"))
return FormatStyle::LK_ObjC;
- if (FileName.endswith_insensitive(".proto") ||
- FileName.endswith_insensitive(".protodevel")) {
+ if (FileName.ends_with_insensitive(".proto") ||
+ FileName.ends_with_insensitive(".protodevel")) {
return FormatStyle::LK_Proto;
}
- if (FileName.endswith_insensitive(".textpb") ||
- FileName.endswith_insensitive(".pb.txt") ||
- FileName.endswith_insensitive(".textproto") ||
- FileName.endswith_insensitive(".asciipb")) {
+ if (FileName.ends_with_insensitive(".textpb") ||
+ FileName.ends_with_insensitive(".pb.txt") ||
+ FileName.ends_with_insensitive(".textproto") ||
+ FileName.ends_with_insensitive(".asciipb")) {
return FormatStyle::LK_TextProto;
}
- if (FileName.endswith_insensitive(".td"))
+ if (FileName.ends_with_insensitive(".td"))
return FormatStyle::LK_TableGen;
- if (FileName.endswith_insensitive(".cs"))
+ if (FileName.ends_with_insensitive(".cs"))
return FormatStyle::LK_CSharp;
- if (FileName.endswith_insensitive(".json"))
+ if (FileName.ends_with_insensitive(".json"))
return FormatStyle::LK_Json;
- if (FileName.endswith_insensitive(".sv") ||
- FileName.endswith_insensitive(".svh") ||
- FileName.endswith_insensitive(".v") ||
- FileName.endswith_insensitive(".vh")) {
+ if (FileName.ends_with_insensitive(".sv") ||
+ FileName.ends_with_insensitive(".svh") ||
+ FileName.ends_with_insensitive(".v") ||
+ FileName.ends_with_insensitive(".vh")) {
return FormatStyle::LK_Verilog;
}
return FormatStyle::LK_Cpp;
@@ -3769,7 +3933,7 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
// User provided clang-format file using -style=file:path/to/format/file.
if (!Style.InheritsParentConfig &&
- StyleName.startswith_insensitive("file:")) {
+ StyleName.starts_with_insensitive("file:")) {
auto ConfigFile = StyleName.substr(5);
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
loadAndParseConfigFile(ConfigFile, FS, &Style, AllowUnknownOptions);
@@ -3899,5 +4063,25 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
return FallbackStyle;
}
+static bool isClangFormatOnOff(StringRef Comment, bool On) {
+ if (Comment == (On ? "/* clang-format on */" : "/* clang-format off */"))
+ return true;
+
+ static const char ClangFormatOn[] = "// clang-format on";
+ static const char ClangFormatOff[] = "// clang-format off";
+ const unsigned Size = (On ? sizeof ClangFormatOn : sizeof ClangFormatOff) - 1;
+
+ return Comment.startswith(On ? ClangFormatOn : ClangFormatOff) &&
+ (Comment.size() == Size || Comment[Size] == ':');
+}
+
+bool isClangFormatOn(StringRef Comment) {
+ return isClangFormatOnOff(Comment, /*On=*/true);
+}
+
+bool isClangFormatOff(StringRef Comment) {
+ return isClangFormatOnOff(Comment, /*On=*/false);
+}
+
} // namespace format
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
index f9f0d712bc16..d994ed048899 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
@@ -75,6 +75,21 @@ bool FormatToken::isTypeOrIdentifier() const {
return isSimpleTypeSpecifier() || Tok.isOneOf(tok::kw_auto, tok::identifier);
}
+bool FormatToken::isBlockIndentedInitRBrace(const FormatStyle &Style) const {
+ assert(is(tok::r_brace));
+ if (!Style.Cpp11BracedListStyle ||
+ Style.AlignAfterOpenBracket != FormatStyle::BAS_BlockIndent) {
+ return false;
+ }
+ const auto *LBrace = MatchingParen;
+ assert(LBrace && LBrace->is(tok::l_brace));
+ if (LBrace->is(BK_BracedInit))
+ return true;
+ if (LBrace->Previous && LBrace->Previous->is(tok::equal))
+ return true;
+ return false;
+}
+
bool FormatToken::opensBlockOrBlockTypeList(const FormatStyle &Style) const {
// C# Does not indent object initialisers as continuations.
if (is(tok::l_brace) && getBlockKind() == BK_BracedInit && Style.isCSharp())
@@ -96,7 +111,7 @@ void TokenRole::precomputeFormattingInfos(const FormatToken *Token) {}
unsigned CommaSeparatedList::formatAfterToken(LineState &State,
ContinuationIndenter *Indenter,
bool DryRun) {
- if (State.NextToken == nullptr || !State.NextToken->Previous)
+ if (!State.NextToken || !State.NextToken->Previous)
return 0;
if (Formats.size() == 1)
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.h b/contrib/llvm-project/clang/lib/Format/FormatToken.h
index 9d055efd8007..4e45478d7424 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.h
@@ -37,6 +37,8 @@ namespace format {
TYPE(BitFieldColon) \
TYPE(BlockComment) \
TYPE(BracedListLBrace) \
+ /* The colon at the end of a case label. */ \
+ TYPE(CaseLabelColon) \
TYPE(CastRParen) \
TYPE(ClassLBrace) \
TYPE(CompoundRequirementLBrace) \
@@ -73,8 +75,7 @@ namespace format {
TYPE(FunctionTypeLParen) \
/* The colons as part of a C11 _Generic selection */ \
TYPE(GenericSelectionColon) \
- /* The colon at the end of a goto label or a case label. Currently only used \
- * for Verilog. */ \
+ /* The colon at the end of a goto label. */ \
TYPE(GotoLabelColon) \
TYPE(IfMacro) \
TYPE(ImplicitStringLiteral) \
@@ -140,20 +141,33 @@ namespace format {
TYPE(TrailingReturnArrow) \
TYPE(TrailingUnaryOperator) \
TYPE(TypeDeclarationParen) \
+ TYPE(TypeName) \
TYPE(TypenameMacro) \
TYPE(UnaryOperator) \
TYPE(UnionLBrace) \
TYPE(UntouchableMacroFunc) \
+ /* Like in 'assign x = 0, y = 1;' . */ \
+ TYPE(VerilogAssignComma) \
/* like in begin : block */ \
TYPE(VerilogBlockLabelColon) \
/* The square bracket for the dimension part of the type name. \
* In 'logic [1:0] x[1:0]', only the first '['. This way we can have space \
* before the first bracket but not the second. */ \
TYPE(VerilogDimensionedTypeName) \
+ /* list of port connections or parameters in a module instantiation */ \
+ TYPE(VerilogInstancePortComma) \
+ TYPE(VerilogInstancePortLParen) \
+ /* A parenthesized list within which line breaks are inserted by the \
+ * formatter, for example the list of ports in a module header. */ \
+ TYPE(VerilogMultiLineListLParen) \
/* for the base in a number literal, not including the quote */ \
TYPE(VerilogNumberBase) \
+ /* like `(strong1, pull0)` */ \
+ TYPE(VerilogStrength) \
/* Things inside the table in user-defined primitives. */ \
TYPE(VerilogTableItem) \
+ /* those that separate ports of different types */ \
+ TYPE(VerilogTypeComma) \
TYPE(Unknown)
/// Determines the semantic type of a syntactic token, e.g. whether "<" is a
@@ -373,6 +387,11 @@ public:
/// binary operator.
TokenType getType() const { return Type; }
void setType(TokenType T) {
+ // If this token is a macro argument while formatting an unexpanded macro
+ // call, we do not change its type any more - the type was deduced from
+ // formatting the expanded macro stream already.
+ if (MacroCtx && MacroCtx->Role == MR_UnexpandedArg)
+ return;
assert((!TypeIsFinalized || T == Type) &&
"Please use overwriteFixedType to change a fixed type.");
Type = T;
@@ -400,6 +419,12 @@ public:
/// and thereby e.g. leave an empty line between two function definitions.
unsigned NewlinesBefore = 0;
+ /// The number of newlines immediately before the \c Token after formatting.
+ ///
+ /// This is used to avoid overlapping whitespace replacements when \c Newlines
+ /// is recomputed for a finalized preprocessor branching directive.
+ int Newlines = -1;
+
/// The offset just past the last '\n' in this token's leading
/// whitespace (relative to \c WhiteSpaceStart). 0 if there is no '\n'.
unsigned LastNewlineOffset = 0;
@@ -603,7 +628,7 @@ public:
return isOneOf(tok::kw_const, tok::kw_restrict, tok::kw_volatile,
tok::kw___attribute, tok::kw__Nonnull, tok::kw__Nullable,
tok::kw__Null_unspecified, tok::kw___ptr32, tok::kw___ptr64,
- TT_AttributeMacro);
+ tok::kw___funcref, TT_AttributeMacro);
}
/// Determine whether the token is a simple-type-specifier.
@@ -743,13 +768,16 @@ public:
}
/// Returns the next token ignoring comments.
- [[nodiscard]] const FormatToken *getNextNonComment() const {
- const FormatToken *Tok = Next;
+ [[nodiscard]] FormatToken *getNextNonComment() const {
+ FormatToken *Tok = Next;
while (Tok && Tok->is(tok::comment))
Tok = Tok->Next;
return Tok;
}
+ /// Returns \c true if this token ends a block indented initializer list.
+ [[nodiscard]] bool isBlockIndentedInitRBrace(const FormatStyle &Style) const;
+
/// Returns \c true if this tokens starts a block-type list, i.e. a
/// list that should be indented with a block indent.
[[nodiscard]] bool opensBlockOrBlockTypeList(const FormatStyle &Style) const;
@@ -935,6 +963,7 @@ struct AdditionalKeywords {
kw_CF_OPTIONS = &IdentTable.get("CF_OPTIONS");
kw_NS_CLOSED_ENUM = &IdentTable.get("NS_CLOSED_ENUM");
kw_NS_ENUM = &IdentTable.get("NS_ENUM");
+ kw_NS_ERROR_ENUM = &IdentTable.get("NS_ERROR_ENUM");
kw_NS_OPTIONS = &IdentTable.get("NS_OPTIONS");
kw_as = &IdentTable.get("as");
@@ -1052,6 +1081,7 @@ struct AdditionalKeywords {
kw_delay_mode_zero = &IdentTable.get("delay_mode_zero");
kw_disable = &IdentTable.get("disable");
kw_dist = &IdentTable.get("dist");
+ kw_edge = &IdentTable.get("edge");
kw_elsif = &IdentTable.get("elsif");
kw_end = &IdentTable.get("end");
kw_end_keywords = &IdentTable.get("end_keywords");
@@ -1097,10 +1127,12 @@ struct AdditionalKeywords {
kw_macromodule = &IdentTable.get("macromodule");
kw_matches = &IdentTable.get("matches");
kw_medium = &IdentTable.get("medium");
+ kw_negedge = &IdentTable.get("negedge");
kw_nounconnected_drive = &IdentTable.get("nounconnected_drive");
kw_output = &IdentTable.get("output");
kw_packed = &IdentTable.get("packed");
kw_parameter = &IdentTable.get("parameter");
+ kw_posedge = &IdentTable.get("posedge");
kw_primitive = &IdentTable.get("primitive");
kw_priority = &IdentTable.get("priority");
kw_program = &IdentTable.get("program");
@@ -1182,132 +1214,71 @@ struct AdditionalKeywords {
// Some keywords are not included here because they don't need special
// treatment like `showcancelled` or they should be treated as identifiers
// like `int` and `logic`.
- VerilogExtraKeywords =
- std::unordered_set<IdentifierInfo *>({kw_always,
- kw_always_comb,
- kw_always_ff,
- kw_always_latch,
- kw_assert,
- kw_assign,
- kw_assume,
- kw_automatic,
- kw_before,
- kw_begin,
- kw_bins,
- kw_binsof,
- kw_casex,
- kw_casez,
- kw_celldefine,
- kw_checker,
- kw_clocking,
- kw_constraint,
- kw_cover,
- kw_covergroup,
- kw_coverpoint,
- kw_disable,
- kw_dist,
- kw_end,
- kw_endcase,
- kw_endchecker,
- kw_endclass,
- kw_endclocking,
- kw_endfunction,
- kw_endgenerate,
- kw_endgroup,
- kw_endinterface,
- kw_endmodule,
- kw_endpackage,
- kw_endprimitive,
- kw_endprogram,
- kw_endproperty,
- kw_endsequence,
- kw_endspecify,
- kw_endtable,
- kw_endtask,
- kw_extends,
- kw_final,
- kw_foreach,
- kw_forever,
- kw_fork,
- kw_function,
- kw_generate,
- kw_highz0,
- kw_highz1,
- kw_iff,
- kw_ifnone,
- kw_ignore_bins,
- kw_illegal_bins,
- kw_implements,
- kw_import,
- kw_initial,
- kw_inout,
- kw_input,
- kw_inside,
- kw_interconnect,
- kw_interface,
- kw_intersect,
- kw_join,
- kw_join_any,
- kw_join_none,
- kw_large,
- kw_let,
- kw_local,
- kw_localparam,
- kw_macromodule,
- kw_matches,
- kw_medium,
- kw_output,
- kw_package,
- kw_packed,
- kw_parameter,
- kw_primitive,
- kw_priority,
- kw_program,
- kw_property,
- kw_pull0,
- kw_pull1,
- kw_pure,
- kw_rand,
- kw_randc,
- kw_randcase,
- kw_randsequence,
- kw_ref,
- kw_repeat,
- kw_sample,
- kw_scalared,
- kw_sequence,
- kw_small,
- kw_soft,
- kw_solve,
- kw_specify,
- kw_specparam,
- kw_strong0,
- kw_strong1,
- kw_supply0,
- kw_supply1,
- kw_table,
- kw_tagged,
- kw_task,
- kw_tri,
- kw_tri0,
- kw_tri1,
- kw_triand,
- kw_trior,
- kw_trireg,
- kw_unique,
- kw_unique0,
- kw_uwire,
- kw_var,
- kw_vectored,
- kw_wand,
- kw_weak0,
- kw_weak1,
- kw_wildcard,
- kw_wire,
- kw_with,
- kw_wor,
- kw_verilogHash,
- kw_verilogHashHash});
+ VerilogExtraKeywords = std::unordered_set<IdentifierInfo *>(
+ {kw_always, kw_always_comb,
+ kw_always_ff, kw_always_latch,
+ kw_assert, kw_assign,
+ kw_assume, kw_automatic,
+ kw_before, kw_begin,
+ kw_bins, kw_binsof,
+ kw_casex, kw_casez,
+ kw_celldefine, kw_checker,
+ kw_clocking, kw_constraint,
+ kw_cover, kw_covergroup,
+ kw_coverpoint, kw_disable,
+ kw_dist, kw_edge,
+ kw_end, kw_endcase,
+ kw_endchecker, kw_endclass,
+ kw_endclocking, kw_endfunction,
+ kw_endgenerate, kw_endgroup,
+ kw_endinterface, kw_endmodule,
+ kw_endpackage, kw_endprimitive,
+ kw_endprogram, kw_endproperty,
+ kw_endsequence, kw_endspecify,
+ kw_endtable, kw_endtask,
+ kw_extends, kw_final,
+ kw_foreach, kw_forever,
+ kw_fork, kw_function,
+ kw_generate, kw_highz0,
+ kw_highz1, kw_iff,
+ kw_ifnone, kw_ignore_bins,
+ kw_illegal_bins, kw_implements,
+ kw_import, kw_initial,
+ kw_inout, kw_input,
+ kw_inside, kw_interconnect,
+ kw_interface, kw_intersect,
+ kw_join, kw_join_any,
+ kw_join_none, kw_large,
+ kw_let, kw_local,
+ kw_localparam, kw_macromodule,
+ kw_matches, kw_medium,
+ kw_negedge, kw_output,
+ kw_package, kw_packed,
+ kw_parameter, kw_posedge,
+ kw_primitive, kw_priority,
+ kw_program, kw_property,
+ kw_pull0, kw_pull1,
+ kw_pure, kw_rand,
+ kw_randc, kw_randcase,
+ kw_randsequence, kw_ref,
+ kw_repeat, kw_sample,
+ kw_scalared, kw_sequence,
+ kw_small, kw_soft,
+ kw_solve, kw_specify,
+ kw_specparam, kw_strong0,
+ kw_strong1, kw_supply0,
+ kw_supply1, kw_table,
+ kw_tagged, kw_task,
+ kw_tri, kw_tri0,
+ kw_tri1, kw_triand,
+ kw_trior, kw_trireg,
+ kw_unique, kw_unique0,
+ kw_uwire, kw_var,
+ kw_vectored, kw_wand,
+ kw_weak0, kw_weak1,
+ kw_wildcard, kw_wire,
+ kw_with, kw_wor,
+ kw_verilogHash, kw_verilogHashHash});
}
// Context sensitive keywords.
@@ -1320,6 +1291,7 @@ struct AdditionalKeywords {
IdentifierInfo *kw_CF_OPTIONS;
IdentifierInfo *kw_NS_CLOSED_ENUM;
IdentifierInfo *kw_NS_ENUM;
+ IdentifierInfo *kw_NS_ERROR_ENUM;
IdentifierInfo *kw_NS_OPTIONS;
IdentifierInfo *kw___except;
IdentifierInfo *kw___has_include;
@@ -1445,6 +1417,7 @@ struct AdditionalKeywords {
IdentifierInfo *kw_disable;
IdentifierInfo *kw_dist;
IdentifierInfo *kw_elsif;
+ IdentifierInfo *kw_edge;
IdentifierInfo *kw_end;
IdentifierInfo *kw_end_keywords;
IdentifierInfo *kw_endcase;
@@ -1489,10 +1462,12 @@ struct AdditionalKeywords {
IdentifierInfo *kw_macromodule;
IdentifierInfo *kw_matches;
IdentifierInfo *kw_medium;
+ IdentifierInfo *kw_negedge;
IdentifierInfo *kw_nounconnected_drive;
IdentifierInfo *kw_output;
IdentifierInfo *kw_packed;
IdentifierInfo *kw_parameter;
+ IdentifierInfo *kw_posedge;
IdentifierInfo *kw_primitive;
IdentifierInfo *kw_priority;
IdentifierInfo *kw_program;
@@ -1552,7 +1527,7 @@ struct AdditionalKeywords {
/// Returns \c true if \p Tok is a keyword or an identifier.
bool isWordLike(const FormatToken &Tok) const {
// getIdentifierinfo returns non-null for keywords as well as identifiers.
- return Tok.Tok.getIdentifierInfo() != nullptr &&
+ return Tok.Tok.getIdentifierInfo() &&
!Tok.isOneOf(kw_verilogHash, kw_verilogHashHash, kw_apostrophe);
}
@@ -1711,11 +1686,12 @@ struct AdditionalKeywords {
case tok::kw_while:
return false;
case tok::identifier:
- return VerilogExtraKeywords.find(Tok.Tok.getIdentifierInfo()) ==
- VerilogExtraKeywords.end();
+ return isWordLike(Tok) &&
+ VerilogExtraKeywords.find(Tok.Tok.getIdentifierInfo()) ==
+ VerilogExtraKeywords.end();
default:
// getIdentifierInfo returns non-null for both identifiers and keywords.
- return Tok.Tok.getIdentifierInfo() != nullptr;
+ return Tok.Tok.getIdentifierInfo();
}
}
@@ -1786,12 +1762,40 @@ struct AdditionalKeywords {
bool isVerilogEndOfLabel(const FormatToken &Tok) const {
const FormatToken *Next = Tok.getNextNonComment();
// In Verilog the colon in a default label is optional.
- return Tok.is(TT_GotoLabelColon) ||
+ return Tok.is(TT_CaseLabelColon) ||
(Tok.is(tok::kw_default) &&
!(Next && Next->isOneOf(tok::colon, tok::semi, kw_clocking, kw_iff,
kw_input, kw_output, kw_sequence)));
}
+ /// Returns whether \p Tok is a Verilog keyword that starts a
+ /// structured procedure like 'always'.
+ bool isVerilogStructuredProcedure(const FormatToken &Tok) const {
+ return Tok.isOneOf(kw_always, kw_always_comb, kw_always_ff, kw_always_latch,
+ kw_final, kw_forever, kw_initial);
+ }
+
+ bool isVerilogQualifier(const FormatToken &Tok) const {
+ switch (Tok.Tok.getKind()) {
+ case tok::kw_extern:
+ case tok::kw_signed:
+ case tok::kw_static:
+ case tok::kw_unsigned:
+ case tok::kw_virtual:
+ return true;
+ case tok::identifier:
+ return Tok.isOneOf(
+ kw_let, kw_var, kw_ref, kw_automatic, kw_bins, kw_coverpoint,
+ kw_ignore_bins, kw_illegal_bins, kw_inout, kw_input, kw_interconnect,
+ kw_local, kw_localparam, kw_output, kw_parameter, kw_pure, kw_rand,
+ kw_randc, kw_scalared, kw_specparam, kw_tri, kw_tri0, kw_tri1,
+ kw_triand, kw_trior, kw_trireg, kw_uwire, kw_vectored, kw_wand,
+ kw_wildcard, kw_wire, kw_wor);
+ default:
+ return false;
+ }
+ }
+
private:
/// The JavaScript keywords beyond the C++ keyword set.
std::unordered_set<IdentifierInfo *> JsExtraKeywords;
@@ -1803,6 +1807,25 @@ private:
std::unordered_set<IdentifierInfo *> VerilogExtraKeywords;
};
+inline bool isLineComment(const FormatToken &FormatTok) {
+ return FormatTok.is(tok::comment) && !FormatTok.TokenText.startswith("/*");
+}
+
+// Checks if \p FormatTok is a line comment that continues the line comment
+// \p Previous. The original column of \p MinColumnToken is used to determine
+// whether \p FormatTok is indented enough to the right to continue \p Previous.
+inline bool continuesLineComment(const FormatToken &FormatTok,
+ const FormatToken *Previous,
+ const FormatToken *MinColumnToken) {
+ if (!Previous || !MinColumnToken)
+ return false;
+ unsigned MinContinueColumn =
+ MinColumnToken->OriginalColumn + (isLineComment(*MinColumnToken) ? 0 : 1);
+ return isLineComment(FormatTok) && FormatTok.NewlinesBefore == 1 &&
+ isLineComment(*Previous) &&
+ FormatTok.OriginalColumn >= MinContinueColumn;
+}
+
} // namespace format
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
index f8f5f7112188..4d43796dd70e 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
@@ -71,6 +71,9 @@ FormatTokenLexer::FormatTokenLexer(
auto Identifier = &IdentTable.get(StatementAttributeLikeMacro);
Macros.insert({Identifier, TT_StatementAttributeLikeMacro});
}
+
+ for (const auto &TypeName : Style.TypeNames)
+ TypeNames.insert(&IdentTable.get(TypeName));
}
ArrayRef<FormatToken *> FormatTokenLexer::lex() {
@@ -103,6 +106,8 @@ void FormatTokenLexer::tryMergePreviousTokens() {
return;
if (tryMergeLessLess())
return;
+ if (tryMergeGreaterGreater())
+ return;
if (tryMergeForEach())
return;
if (Style.isCpp() && tryTransformTryUsageForC())
@@ -460,12 +465,11 @@ bool FormatTokenLexer::tryMergeLessLess() {
return false;
auto X = Tokens.size() > 3 ? First[-1] : nullptr;
- auto Y = First[2];
- if ((X && X->is(tok::less)) || Y->is(tok::less))
+ if (X && X->is(tok::less))
return false;
- // Do not remove a whitespace between the two "<" e.g. "operator< <>".
- if (X && X->is(tok::kw_operator) && Y->is(tok::greater))
+ auto Y = First[2];
+ if ((!X || X->isNot(tok::kw_operator)) && Y->is(tok::less))
return false;
First[0]->Tok.setKind(tok::lessless);
@@ -475,6 +479,30 @@ bool FormatTokenLexer::tryMergeLessLess() {
return true;
}
+bool FormatTokenLexer::tryMergeGreaterGreater() {
+ // Merge kw_operator,greater,greater into kw_operator,greatergreater.
+ if (Tokens.size() < 2)
+ return false;
+
+ auto First = Tokens.end() - 2;
+ if (First[0]->isNot(tok::greater) || First[1]->isNot(tok::greater))
+ return false;
+
+ // Only merge if there currently is no whitespace between the first two ">".
+ if (First[1]->hasWhitespaceBefore())
+ return false;
+
+ auto Tok = Tokens.size() > 2 ? First[-1] : nullptr;
+ if (Tok && Tok->isNot(tok::kw_operator))
+ return false;
+
+ First[0]->Tok.setKind(tok::greatergreater);
+ First[0]->TokenText = ">>";
+ First[0]->ColumnWidth += 1;
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+}
+
bool FormatTokenLexer::tryMergeTokens(ArrayRef<tok::TokenKind> Kinds,
TokenType NewType) {
if (Tokens.size() < Kinds.size())
@@ -1197,7 +1225,8 @@ FormatToken *FormatTokenLexer::getNextToken() {
}
if (Style.isCpp()) {
- auto it = Macros.find(FormatTok->Tok.getIdentifierInfo());
+ auto *Identifier = FormatTok->Tok.getIdentifierInfo();
+ auto it = Macros.find(Identifier);
if (!(Tokens.size() > 0 && Tokens.back()->Tok.getIdentifierInfo() &&
Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() ==
tok::pp_define) &&
@@ -1215,6 +1244,8 @@ FormatToken *FormatTokenLexer::getNextToken() {
FormatTok->setType(TT_MacroBlockBegin);
else if (MacroBlockEndRegex.match(Text))
FormatTok->setType(TT_MacroBlockEnd);
+ else if (TypeNames.contains(Identifier))
+ FormatTok->setFinalizedType(TT_TypeName);
}
}
@@ -1286,17 +1317,13 @@ void FormatTokenLexer::readRawToken(FormatToken &Tok) {
Tok.Tok.setKind(tok::string_literal);
}
- if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format on" ||
- Tok.TokenText == "/* clang-format on */")) {
+ if (Tok.is(tok::comment) && isClangFormatOn(Tok.TokenText))
FormattingDisabled = false;
- }
Tok.Finalized = FormattingDisabled;
- if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format off" ||
- Tok.TokenText == "/* clang-format off */")) {
+ if (Tok.is(tok::comment) && isClangFormatOff(Tok.TokenText))
FormattingDisabled = true;
- }
}
void FormatTokenLexer::resetLexer(unsigned Offset) {
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
index 950305a37d68..bb6a8ab69c1b 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
@@ -22,6 +22,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Regex.h"
@@ -51,6 +52,7 @@ private:
void tryMergePreviousTokens();
bool tryMergeLessLess();
+ bool tryMergeGreaterGreater();
bool tryMergeNSStringLiteral();
bool tryMergeJSPrivateIdentifier();
bool tryMergeCSharpStringLiteral();
@@ -125,6 +127,8 @@ private:
llvm::SmallMapVector<IdentifierInfo *, TokenType, 8> Macros;
+ llvm::SmallPtrSet<IdentifierInfo *, 8> TypeNames;
+
bool FormattingDisabled;
llvm::Regex MacroBlockBeginRegex;
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenSource.h b/contrib/llvm-project/clang/lib/Format/FormatTokenSource.h
new file mode 100644
index 000000000000..ffd2fbbad02f
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenSource.h
@@ -0,0 +1,267 @@
+//===--- FormatTokenSource.h - Format C++ code ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the \c FormatTokenSource interface, which provides a token
+/// stream as well as the ability to manipulate the token stream.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_FORMATTOKENSOURCE_H
+#define LLVM_CLANG_LIB_FORMAT_FORMATTOKENSOURCE_H
+
+#include "FormatToken.h"
+#include "UnwrappedLineParser.h"
+#include "llvm/ADT/DenseMap.h"
+#include <cstddef>
+
+#define DEBUG_TYPE "format-token-source"
+
+namespace clang {
+namespace format {
+
+// Navigate a token stream.
+//
+// Enables traversal of a token stream, resetting the position in a token
+// stream, as well as inserting new tokens.
+class FormatTokenSource {
+public:
+ virtual ~FormatTokenSource() {}
+
+ // Returns the next token in the token stream.
+ virtual FormatToken *getNextToken() = 0;
+
+ // Returns the token preceding the token returned by the last call to
+ // getNextToken() in the token stream, or nullptr if no such token exists.
+ //
+ // Must not be called directly at the position directly after insertTokens()
+ // is called.
+ virtual FormatToken *getPreviousToken() = 0;
+
+ // Returns the token that would be returned by the next call to
+ // getNextToken().
+ virtual FormatToken *peekNextToken(bool SkipComment = false) = 0;
+
+ // Returns whether we are at the end of the file.
+ // This can be different from whether getNextToken() returned an eof token
+ // when the FormatTokenSource is a view on a part of the token stream.
+ virtual bool isEOF() = 0;
+
+ // Gets the current position in the token stream, to be used by setPosition().
+ //
+ // Note that the value of the position is not meaningful, and specifically
+ // should not be used to get relative token positions.
+ virtual unsigned getPosition() = 0;
+
+ // Resets the token stream to the state it was in when getPosition() returned
+ // Position, and return the token at that position in the stream.
+ virtual FormatToken *setPosition(unsigned Position) = 0;
+
+ // Insert the given tokens before the current position.
+ // Returns the first token in \c Tokens.
+ // The next returned token will be the second token in \c Tokens.
+ // Requires the last token in Tokens to be EOF; once the EOF token is reached,
+ // the next token will be the last token returned by getNextToken();
+ //
+ // For example, given the token sequence 'a1 a2':
+ // getNextToken() -> a1
+ // insertTokens('b1 b2') -> b1
+ // getNextToken() -> b2
+ // getNextToken() -> a1
+ // getNextToken() -> a2
+ virtual FormatToken *insertTokens(ArrayRef<FormatToken *> Tokens) = 0;
+};
+
+class IndexedTokenSource : public FormatTokenSource {
+public:
+ IndexedTokenSource(ArrayRef<FormatToken *> Tokens)
+ : Tokens(Tokens), Position(-1) {}
+
+ FormatToken *getNextToken() override {
+ if (Position >= 0 && isEOF()) {
+ LLVM_DEBUG({
+ llvm::dbgs() << "Next ";
+ dbgToken(Position);
+ });
+ return Tokens[Position];
+ }
+ Position = successor(Position);
+ LLVM_DEBUG({
+ llvm::dbgs() << "Next ";
+ dbgToken(Position);
+ });
+ return Tokens[Position];
+ }
+
+ FormatToken *getPreviousToken() override {
+ assert(Position <= 0 || !Tokens[Position - 1]->is(tok::eof));
+ return Position > 0 ? Tokens[Position - 1] : nullptr;
+ }
+
+ FormatToken *peekNextToken(bool SkipComment = false) override {
+ if (isEOF())
+ return Tokens[Position];
+ int Next = successor(Position);
+ if (SkipComment)
+ while (Tokens[Next]->is(tok::comment))
+ Next = successor(Next);
+ LLVM_DEBUG({
+ llvm::dbgs() << "Peeking ";
+ dbgToken(Next);
+ });
+ return Tokens[Next];
+ }
+
+ bool isEOF() override {
+ return Position == -1 ? false : Tokens[Position]->is(tok::eof);
+ }
+
+ unsigned getPosition() override {
+ LLVM_DEBUG(llvm::dbgs() << "Getting Position: " << Position << "\n");
+ assert(Position >= 0);
+ return Position;
+ }
+
+ FormatToken *setPosition(unsigned P) override {
+ LLVM_DEBUG(llvm::dbgs() << "Setting Position: " << P << "\n");
+ Position = P;
+ return Tokens[Position];
+ }
+
+ FormatToken *insertTokens(ArrayRef<FormatToken *> New) override {
+ assert(Position != -1);
+ assert((*New.rbegin())->Tok.is(tok::eof));
+ int Next = Tokens.size();
+ Tokens.append(New.begin(), New.end());
+ LLVM_DEBUG({
+ llvm::dbgs() << "Inserting:\n";
+ for (int I = Next, E = Tokens.size(); I != E; ++I)
+ dbgToken(I, " ");
+ llvm::dbgs() << " Jump from: " << (Tokens.size() - 1) << " -> "
+ << Position << "\n";
+ });
+ Jumps[Tokens.size() - 1] = Position;
+ Position = Next;
+ LLVM_DEBUG({
+ llvm::dbgs() << "At inserted token ";
+ dbgToken(Position);
+ });
+ return Tokens[Position];
+ }
+
+ void reset() { Position = -1; }
+
+private:
+ int successor(int Current) const {
+ int Next = Current + 1;
+ auto it = Jumps.find(Next);
+ if (it != Jumps.end()) {
+ Next = it->second;
+ assert(!Jumps.contains(Next));
+ }
+ return Next;
+ }
+
+ void dbgToken(int Position, llvm::StringRef Indent = "") {
+ FormatToken *Tok = Tokens[Position];
+ llvm::dbgs() << Indent << "[" << Position
+ << "] Token: " << Tok->Tok.getName() << " / " << Tok->TokenText
+ << ", Macro: " << !!Tok->MacroCtx << "\n";
+ }
+
+ SmallVector<FormatToken *> Tokens;
+ int Position;
+
+ // Maps from position a to position b, so that when we reach a, the token
+ // stream continues at position b instead.
+ llvm::DenseMap<int, int> Jumps;
+};
+
+class ScopedMacroState : public FormatTokenSource {
+public:
+ ScopedMacroState(UnwrappedLine &Line, FormatTokenSource *&TokenSource,
+ FormatToken *&ResetToken)
+ : Line(Line), TokenSource(TokenSource), ResetToken(ResetToken),
+ PreviousLineLevel(Line.Level), PreviousTokenSource(TokenSource),
+ Token(nullptr), PreviousToken(nullptr) {
+ FakeEOF.Tok.startToken();
+ FakeEOF.Tok.setKind(tok::eof);
+ TokenSource = this;
+ Line.Level = 0;
+ Line.InPPDirective = true;
+ // InMacroBody gets set after the `#define x` part.
+ }
+
+ ~ScopedMacroState() override {
+ TokenSource = PreviousTokenSource;
+ ResetToken = Token;
+ Line.InPPDirective = false;
+ Line.InMacroBody = false;
+ Line.Level = PreviousLineLevel;
+ }
+
+ FormatToken *getNextToken() override {
+ // The \c UnwrappedLineParser guards against this by never calling
+ // \c getNextToken() after it has encountered the first eof token.
+ assert(!eof());
+ PreviousToken = Token;
+ Token = PreviousTokenSource->getNextToken();
+ if (eof())
+ return &FakeEOF;
+ return Token;
+ }
+
+ FormatToken *getPreviousToken() override {
+ return PreviousTokenSource->getPreviousToken();
+ }
+
+ FormatToken *peekNextToken(bool SkipComment) override {
+ if (eof())
+ return &FakeEOF;
+ return PreviousTokenSource->peekNextToken(SkipComment);
+ }
+
+ bool isEOF() override { return PreviousTokenSource->isEOF(); }
+
+ unsigned getPosition() override { return PreviousTokenSource->getPosition(); }
+
+ FormatToken *setPosition(unsigned Position) override {
+ PreviousToken = nullptr;
+ Token = PreviousTokenSource->setPosition(Position);
+ return Token;
+ }
+
+ FormatToken *insertTokens(ArrayRef<FormatToken *> Tokens) override {
+ llvm_unreachable("Cannot insert tokens while parsing a macro.");
+ return nullptr;
+ }
+
+private:
+ bool eof() {
+ return Token && Token->HasUnescapedNewline &&
+ !continuesLineComment(*Token, PreviousToken,
+ /*MinColumnToken=*/PreviousToken);
+ }
+
+ FormatToken FakeEOF;
+ UnwrappedLine &Line;
+ FormatTokenSource *&TokenSource;
+ FormatToken *&ResetToken;
+ unsigned PreviousLineLevel;
+ FormatTokenSource *PreviousTokenSource;
+
+ FormatToken *Token;
+ FormatToken *PreviousToken;
+};
+
+} // namespace format
+} // namespace clang
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp b/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
index 3cc68673cd13..87823ae32b11 100644
--- a/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
@@ -93,9 +93,9 @@ IntegerLiteralSeparatorFixer::process(const Environment &Env,
auto Location = Tok.getLocation();
auto Text = StringRef(SourceMgr.getCharacterData(Location), Length);
if (Tok.is(tok::comment)) {
- if (Text == "// clang-format off" || Text == "/* clang-format off */")
+ if (isClangFormatOff(Text))
Skip = true;
- else if (Text == "// clang-format on" || Text == "/* clang-format on */")
+ else if (isClangFormatOn(Text))
Skip = false;
continue;
}
diff --git a/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp b/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp
index 9c6bcb8764f4..bc98ec4c361f 100644
--- a/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp
+++ b/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp
@@ -141,24 +141,42 @@ void MacroExpander::parseDefinition(const std::string &Macro) {
if (!Tokens.empty()) {
DefinitionParser Parser(Tokens);
auto Definition = Parser.parse();
- Definitions[Definition.Name] = std::move(Definition);
+ if (Definition.ObjectLike) {
+ ObjectLike[Definition.Name] = std::move(Definition);
+ } else {
+ FunctionLike[Definition.Name][Definition.Params.size()] =
+ std::move(Definition);
+ }
}
}
bool MacroExpander::defined(llvm::StringRef Name) const {
- return Definitions.find(Name) != Definitions.end();
+ return FunctionLike.contains(Name) || ObjectLike.contains(Name);
}
bool MacroExpander::objectLike(llvm::StringRef Name) const {
- return Definitions.find(Name)->second.ObjectLike;
+ return ObjectLike.contains(Name);
}
-llvm::SmallVector<FormatToken *, 8> MacroExpander::expand(FormatToken *ID,
- ArgsList Args) const {
- assert(defined(ID->TokenText));
- SmallVector<FormatToken *, 8> Result;
- const Definition &Def = Definitions.find(ID->TokenText)->second;
+bool MacroExpander::hasArity(llvm::StringRef Name, unsigned Arity) const {
+ auto it = FunctionLike.find(Name);
+ return it != FunctionLike.end() && it->second.contains(Arity);
+}
+llvm::SmallVector<FormatToken *, 8>
+MacroExpander::expand(FormatToken *ID,
+ std::optional<ArgsList> OptionalArgs) const {
+ if (OptionalArgs)
+ assert(hasArity(ID->TokenText, OptionalArgs->size()));
+ else
+ assert(objectLike(ID->TokenText));
+ const Definition &Def = OptionalArgs
+ ? FunctionLike.find(ID->TokenText)
+ ->second.find(OptionalArgs.value().size())
+ ->second
+ : ObjectLike.find(ID->TokenText)->second;
+ ArgsList Args = OptionalArgs ? OptionalArgs.value() : ArgsList();
+ SmallVector<FormatToken *, 8> Result;
// Expand each argument at most once.
llvm::StringSet<> ExpandedArgs;
diff --git a/contrib/llvm-project/clang/lib/Format/Macros.h b/contrib/llvm-project/clang/lib/Format/Macros.h
index b26799c20f8c..1964624e828c 100644
--- a/contrib/llvm-project/clang/lib/Format/Macros.h
+++ b/contrib/llvm-project/clang/lib/Format/Macros.h
@@ -106,17 +106,23 @@ public:
IdentifierTable &IdentTable);
~MacroExpander();
- /// Returns whether a macro \p Name is defined.
+ /// Returns whether any macro \p Name is defined, regardless of overloads.
bool defined(llvm::StringRef Name) const;
- /// Returns whether the macro has no arguments and should not consume
- /// subsequent parentheses.
+ /// Returns whether there is an object-like overload, i.e. where the macro
+ /// has no arguments and should not consume subsequent parentheses.
bool objectLike(llvm::StringRef Name) const;
+ /// Returns whether macro \p Name provides an overload with the given arity.
+ bool hasArity(llvm::StringRef Name, unsigned Arity) const;
+
/// Returns the expanded stream of format tokens for \p ID, where
/// each element in \p Args is a positional argument to the macro call.
- llvm::SmallVector<FormatToken *, 8> expand(FormatToken *ID,
- ArgsList Args) const;
+ /// If \p Args is not set, the object-like overload is used.
+ /// If \p Args is set, the overload with the arity equal to \c Args.size() is
+ /// used.
+ llvm::SmallVector<FormatToken *, 8>
+ expand(FormatToken *ID, std::optional<ArgsList> OptionalArgs) const;
private:
struct Definition;
@@ -129,7 +135,8 @@ private:
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator;
IdentifierTable &IdentTable;
SmallVector<std::unique_ptr<llvm::MemoryBuffer>> Buffers;
- llvm::StringMap<Definition> Definitions;
+ llvm::StringMap<llvm::DenseMap<int, Definition>> FunctionLike;
+ llvm::StringMap<Definition> ObjectLike;
};
/// Converts a sequence of UnwrappedLines containing expanded macros into a
@@ -149,7 +156,7 @@ private:
///
/// After this point, the state of the spelled/expanded stream is "in sync"
/// (both at the start of an UnwrappedLine, with no macros open), so the
-/// Unexpander can be thrown away and parsing can continue.
+/// Reconstructor can be thrown away and parsing can continue.
///
/// Given a mapping from the macro name identifier token in the macro call
/// to the tokens of the macro call, for example:
diff --git a/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp b/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
index 2615a499f7ab..95eb058d09e1 100644
--- a/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
@@ -360,8 +360,12 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
Style.SpacesInLineCommentPrefix.Minimum);
if (!hasEndComment(EndCommentPrevTok)) {
bool isShort = I - StartLineIndex <= Style.ShortNamespaceLines + 1;
- if (!isShort)
- addEndComment(EndCommentPrevTok, EndCommentText, SourceMgr, &Fixes);
+ if (!isShort) {
+ addEndComment(EndCommentPrevTok,
+ std::string(Style.SpacesBeforeTrailingComments, ' ') +
+ EndCommentText,
+ SourceMgr, &Fixes);
+ }
} else if (!validEndComment(EndCommentPrevTok, NamespaceName,
NamespaceTok)) {
updateEndComment(EndCommentPrevTok, EndCommentText, SourceMgr, &Fixes);
diff --git a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
index 609b412380f8..86f62dc2eec9 100644
--- a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
@@ -25,18 +25,13 @@
namespace clang {
namespace format {
-QualifierAlignmentFixer::QualifierAlignmentFixer(
- const Environment &Env, const FormatStyle &Style, StringRef &Code,
- ArrayRef<tooling::Range> Ranges, unsigned FirstStartColumn,
- unsigned NextStartColumn, unsigned LastStartColumn, StringRef FileName)
- : TokenAnalyzer(Env, Style), Code(Code), Ranges(Ranges),
- FirstStartColumn(FirstStartColumn), NextStartColumn(NextStartColumn),
- LastStartColumn(LastStartColumn), FileName(FileName) {
+void addQualifierAlignmentFixerPasses(const FormatStyle &Style,
+ SmallVectorImpl<AnalyzerPass> &Passes) {
std::vector<std::string> LeftOrder;
std::vector<std::string> RightOrder;
std::vector<tok::TokenKind> ConfiguredQualifierTokens;
- PrepareLeftRightOrdering(Style.QualifierOrder, LeftOrder, RightOrder,
- ConfiguredQualifierTokens);
+ prepareLeftRightOrderingForQualifierAlignmentFixer(
+ Style.QualifierOrder, LeftOrder, RightOrder, ConfiguredQualifierTokens);
// Handle the left and right alignment separately.
for (const auto &Qualifier : LeftOrder) {
@@ -59,51 +54,6 @@ QualifierAlignmentFixer::QualifierAlignmentFixer(
}
}
-std::pair<tooling::Replacements, unsigned> QualifierAlignmentFixer::analyze(
- TokenAnnotator & /*Annotator*/,
- SmallVectorImpl<AnnotatedLine *> & /*AnnotatedLines*/,
- FormatTokenLexer & /*Tokens*/) {
- auto Env = Environment::make(Code, FileName, Ranges, FirstStartColumn,
- NextStartColumn, LastStartColumn);
- if (!Env)
- return {};
- std::optional<std::string> CurrentCode;
- tooling::Replacements Fixes;
- for (size_t I = 0, E = Passes.size(); I < E; ++I) {
- std::pair<tooling::Replacements, unsigned> PassFixes = Passes[I](*Env);
- auto NewCode = applyAllReplacements(
- CurrentCode ? StringRef(*CurrentCode) : Code, PassFixes.first);
- if (NewCode) {
- Fixes = Fixes.merge(PassFixes.first);
- if (I + 1 < E) {
- CurrentCode = std::move(*NewCode);
- Env = Environment::make(
- *CurrentCode, FileName,
- tooling::calculateRangesAfterReplacements(Fixes, Ranges),
- FirstStartColumn, NextStartColumn, LastStartColumn);
- if (!Env)
- return {};
- }
- }
- }
-
- // Don't make replacements that replace nothing.
- tooling::Replacements NonNoOpFixes;
-
- for (const tooling::Replacement &Fix : Fixes) {
- StringRef OriginalCode = Code.substr(Fix.getOffset(), Fix.getLength());
-
- if (!OriginalCode.equals(Fix.getReplacementText())) {
- auto Err = NonNoOpFixes.add(Fix);
- if (Err) {
- llvm::errs() << "Error adding replacements : "
- << llvm::toString(std::move(Err)) << "\n";
- }
- }
- }
- return {NonNoOpFixes, 0};
-}
-
static void replaceToken(const SourceManager &SourceMgr,
tooling::Replacements &Fixes,
const CharSourceRange &Range, std::string NewText) {
@@ -128,14 +78,12 @@ static void insertQualifierAfter(const SourceManager &SourceMgr,
tooling::Replacements &Fixes,
const FormatToken *First,
const std::string &Qualifier) {
- FormatToken *Next = First->Next;
- if (!Next)
- return;
- auto Range = CharSourceRange::getCharRange(Next->getStartOfNonWhitespace(),
- Next->Tok.getEndLoc());
+ auto Range = CharSourceRange::getCharRange(First->Tok.getLocation(),
+ First->Tok.getEndLoc());
- std::string NewText = " " + Qualifier + " ";
- NewText += Next->TokenText;
+ std::string NewText{};
+ NewText += First->TokenText;
+ NewText += " " + Qualifier;
replaceToken(SourceMgr, Fixes, Range, NewText);
}
@@ -204,9 +152,33 @@ static void rotateTokens(const SourceManager &SourceMgr,
replaceToken(SourceMgr, Fixes, Range, NewText);
}
+static bool
+isConfiguredQualifier(const FormatToken *const Tok,
+ const std::vector<tok::TokenKind> &Qualifiers) {
+ return Tok && llvm::is_contained(Qualifiers, Tok->Tok.getKind());
+}
+
+static bool isQualifier(const FormatToken *const Tok) {
+ if (!Tok)
+ return false;
+
+ switch (Tok->Tok.getKind()) {
+ case tok::kw_const:
+ case tok::kw_volatile:
+ case tok::kw_static:
+ case tok::kw_inline:
+ case tok::kw_constexpr:
+ case tok::kw_restrict:
+ case tok::kw_friend:
+ return true;
+ default:
+ return false;
+ }
+}
+
const FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
const SourceManager &SourceMgr, const AdditionalKeywords &Keywords,
- tooling::Replacements &Fixes, const FormatToken *Tok,
+ tooling::Replacements &Fixes, const FormatToken *const Tok,
const std::string &Qualifier, tok::TokenKind QualifierType) {
// We only need to think about streams that begin with a qualifier.
if (!Tok->is(QualifierType))
@@ -214,65 +186,141 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
// Don't concern yourself if nothing follows the qualifier.
if (!Tok->Next)
return Tok;
- if (LeftRightQualifierAlignmentFixer::isPossibleMacro(Tok->Next))
- return Tok;
- auto AnalyzeTemplate =
- [&](const FormatToken *Tok,
- const FormatToken *StartTemplate) -> const FormatToken * {
- // Read from the TemplateOpener to TemplateCloser.
- FormatToken *EndTemplate = StartTemplate->MatchingParen;
- if (EndTemplate) {
- // Move to the end of any template class members e.g.
- // `Foo<int>::iterator`.
- if (EndTemplate->startsSequence(TT_TemplateCloser, tok::coloncolon,
- tok::identifier)) {
- EndTemplate = EndTemplate->Next->Next;
- }
+  // Skip qualifiers to the left to find what precedes the qualifiers.
+ // Use isQualifier rather than isConfiguredQualifier to cover all qualifiers.
+ const FormatToken *PreviousCheck = Tok->getPreviousNonComment();
+ while (isQualifier(PreviousCheck))
+ PreviousCheck = PreviousCheck->getPreviousNonComment();
+
+ // Examples given in order of ['type', 'const', 'volatile']
+ const bool IsRightQualifier = PreviousCheck && [PreviousCheck]() {
+ // The cases:
+ // `Foo() const` -> `Foo() const`
+ // `Foo() const final` -> `Foo() const final`
+    // `Foo() const override` -> `Foo() const override`
+ // `Foo() const volatile override` -> `Foo() const volatile override`
+ // `Foo() volatile const final` -> `Foo() const volatile final`
+ if (PreviousCheck->is(tok::r_paren))
+ return true;
+
+ // The cases:
+ // `struct {} volatile const a;` -> `struct {} const volatile a;`
+ // `class {} volatile const a;` -> `class {} const volatile a;`
+ if (PreviousCheck->is(tok::r_brace))
+ return true;
+
+ // The case:
+ // `template <class T> const Bar Foo()` ->
+ // `template <class T> Bar const Foo()`
+ // The cases:
+ // `Foo<int> const foo` -> `Foo<int> const foo`
+ // `Foo<int> volatile const` -> `Foo<int> const volatile`
+ // The case:
+ // ```
+ // template <class T>
+ // requires Concept1<T> && requires Concept2<T>
+ // const Foo f();
+ // ```
+ // ->
+ // ```
+ // template <class T>
+ // requires Concept1<T> && requires Concept2<T>
+ // Foo const f();
+ // ```
+ if (PreviousCheck->is(TT_TemplateCloser)) {
+ // If the token closes a template<> or requires clause, then it is a left
+ // qualifier and should be moved to the right.
+ return !(PreviousCheck->ClosesTemplateDeclaration ||
+ PreviousCheck->ClosesRequiresClause);
}
- if (EndTemplate && EndTemplate->Next &&
- !EndTemplate->Next->isOneOf(tok::equal, tok::l_paren)) {
- insertQualifierAfter(SourceMgr, Fixes, EndTemplate, Qualifier);
- // Remove the qualifier.
- removeToken(SourceMgr, Fixes, Tok);
- return Tok;
+
+ // The case `Foo* const` -> `Foo* const`
+ // The case `Foo* volatile const` -> `Foo* const volatile`
+ // The case `int32_t const` -> `int32_t const`
+ // The case `auto volatile const` -> `auto const volatile`
+ if (PreviousCheck->isOneOf(TT_PointerOrReference, tok::identifier,
+ tok::kw_auto)) {
+ return true;
}
- return nullptr;
- };
-
- FormatToken *Qual = Tok->Next;
- FormatToken *LastQual = Qual;
- while (Qual && isQualifierOrType(Qual, ConfiguredQualifierTokens)) {
- LastQual = Qual;
- Qual = Qual->Next;
+
+ return false;
+ }();
+
+ // Find the last qualifier to the right.
+ const FormatToken *LastQual = Tok;
+ while (isQualifier(LastQual->getNextNonComment()))
+ LastQual = LastQual->getNextNonComment();
+
+ // If this qualifier is to the right of a type or pointer do a partial sort
+ // and return.
+ if (IsRightQualifier) {
+ if (LastQual != Tok)
+ rotateTokens(SourceMgr, Fixes, Tok, LastQual, /*Left=*/false);
+ return Tok;
+ }
+
+ const FormatToken *TypeToken = LastQual->getNextNonComment();
+ if (!TypeToken)
+ return Tok;
+
+ // Stay safe and don't move past macros, also don't bother with sorting.
+ if (isPossibleMacro(TypeToken))
+ return Tok;
+
+ // The case `const long long int volatile` -> `long long int const volatile`
+ // The case `long const long int volatile` -> `long long int const volatile`
+ // The case `long long volatile int const` -> `long long int const volatile`
+ // The case `const long long volatile int` -> `long long int const volatile`
+ if (TypeToken->isSimpleTypeSpecifier()) {
+ // The case `const decltype(foo)` -> `const decltype(foo)`
+ // The case `const typeof(foo)` -> `const typeof(foo)`
+ // The case `const _Atomic(foo)` -> `const _Atomic(foo)`
+ if (TypeToken->isOneOf(tok::kw_decltype, tok::kw_typeof, tok::kw__Atomic))
+ return Tok;
+
+ const FormatToken *LastSimpleTypeSpecifier = TypeToken;
+ while (isQualifierOrType(LastSimpleTypeSpecifier->getNextNonComment()))
+ LastSimpleTypeSpecifier = LastSimpleTypeSpecifier->getNextNonComment();
+
+ rotateTokens(SourceMgr, Fixes, Tok, LastSimpleTypeSpecifier,
+ /*Left=*/false);
+ return LastSimpleTypeSpecifier;
}
- if (LastQual && Qual != LastQual) {
- rotateTokens(SourceMgr, Fixes, Tok, LastQual, /*Left=*/false);
- Tok = LastQual;
- } else if (Tok->startsSequence(QualifierType, tok::identifier,
- TT_TemplateCloser)) {
- FormatToken *Closer = Tok->Next->Next;
- rotateTokens(SourceMgr, Fixes, Tok, Tok->Next, /*Left=*/false);
- Tok = Closer;
+
+ // The case `unsigned short const` -> `unsigned short const`
+ // The case:
+ // `unsigned short volatile const` -> `unsigned short const volatile`
+ if (PreviousCheck && PreviousCheck->isSimpleTypeSpecifier()) {
+ if (LastQual != Tok)
+ rotateTokens(SourceMgr, Fixes, Tok, LastQual, /*Left=*/false);
return Tok;
- } else if (Tok->startsSequence(QualifierType, tok::identifier,
- TT_TemplateOpener)) {
- // `const ArrayRef<int> a;`
- // `const ArrayRef<int> &a;`
- const FormatToken *NewTok = AnalyzeTemplate(Tok, Tok->Next->Next);
- if (NewTok)
- return NewTok;
- } else if (Tok->startsSequence(QualifierType, tok::coloncolon,
- tok::identifier, TT_TemplateOpener)) {
- // `const ::ArrayRef<int> a;`
- // `const ::ArrayRef<int> &a;`
- const FormatToken *NewTok = AnalyzeTemplate(Tok, Tok->Next->Next->Next);
- if (NewTok)
- return NewTok;
- } else if (Tok->startsSequence(QualifierType, tok::identifier) ||
- Tok->startsSequence(QualifierType, tok::coloncolon,
- tok::identifier)) {
- FormatToken *Next = Tok->Next;
+ }
+
+ // Skip the typename keyword.
+ // The case `const typename C::type` -> `typename C::type const`
+ if (TypeToken->is(tok::kw_typename))
+ TypeToken = TypeToken->getNextNonComment();
+
+ // Skip the initial :: of a global-namespace type.
+ // The case `const ::...` -> `::... const`
+ if (TypeToken->is(tok::coloncolon)) {
+ // The case `const ::template Foo...` -> `::template Foo... const`
+ TypeToken = TypeToken->getNextNonComment();
+ if (TypeToken && TypeToken->is(tok::kw_template))
+ TypeToken = TypeToken->getNextNonComment();
+ }
+
+ // Don't change declarations such as
+ // `foo(const struct Foo a);` -> `foo(const struct Foo a);`
+ // as they would currently change code such as
+ // `const struct my_struct_t {} my_struct;` -> `struct my_struct_t const {}
+ // my_struct;`
+ if (TypeToken->isOneOf(tok::kw_struct, tok::kw_class))
+ return Tok;
+
+ if (TypeToken->isOneOf(tok::kw_auto, tok::identifier)) {
+ // The case `const auto` -> `auto const`
// The case `const Foo` -> `Foo const`
// The case `const ::Foo` -> `::Foo const`
// The case `const Foo *` -> `Foo const *`
@@ -280,30 +328,35 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
// The case `const Foo &&` -> `Foo const &&`
// The case `const std::Foo &&` -> `std::Foo const &&`
// The case `const std::Foo<T> &&` -> `std::Foo<T> const &&`
- // However, `const Bar::*` remains the same.
- while (Next && Next->isOneOf(tok::identifier, tok::coloncolon) &&
- !Next->startsSequence(tok::coloncolon, tok::star)) {
- Next = Next->Next;
- }
- if (Next && Next->is(TT_TemplateOpener)) {
- Next = Next->MatchingParen;
- // Move to the end of any template class members e.g.
- // `Foo<int>::iterator`.
- if (Next && Next->startsSequence(TT_TemplateCloser, tok::coloncolon,
- tok::identifier)) {
- return Tok;
+ // The case `const ::template Foo` -> `::template Foo const`
+ // The case `const T::template Foo` -> `T::template Foo const`
+ const FormatToken *Next = nullptr;
+ while ((Next = TypeToken->getNextNonComment()) &&
+ (Next->is(TT_TemplateOpener) ||
+ Next->startsSequence(tok::coloncolon, tok::identifier) ||
+ Next->startsSequence(tok::coloncolon, tok::kw_template,
+ tok::identifier))) {
+ if (Next->is(TT_TemplateOpener)) {
+ assert(Next->MatchingParen && "Missing template closer");
+ TypeToken = Next->MatchingParen;
+ } else if (Next->startsSequence(tok::coloncolon, tok::identifier)) {
+ TypeToken = Next->getNextNonComment();
+ } else {
+ TypeToken = Next->getNextNonComment()->getNextNonComment();
}
- assert(Next && "Missing template opener");
- Next = Next->Next;
}
- if (Next && Next->isOneOf(tok::star, tok::amp, tok::ampamp) &&
- !Tok->Next->isOneOf(Keywords.kw_override, Keywords.kw_final)) {
- if (Next->Previous && !Next->Previous->is(QualifierType)) {
- insertQualifierAfter(SourceMgr, Fixes, Next->Previous, Qualifier);
- removeToken(SourceMgr, Fixes, Tok);
- }
- return Next;
+
+ // Place the Qualifier at the end of the list of qualifiers.
+ while (isQualifier(TypeToken->getNextNonComment())) {
+ // The case `volatile Foo::iter const` -> `Foo::iter const volatile`
+ TypeToken = TypeToken->getNextNonComment();
}
+
+ insertQualifierAfter(SourceMgr, Fixes, TypeToken, Qualifier);
+ // Remove token and following whitespace.
+ auto Range = CharSourceRange::getCharRange(
+ Tok->getStartOfNonWhitespace(), Tok->Next->getStartOfNonWhitespace());
+ replaceToken(SourceMgr, Fixes, Range, "");
}
return Tok;
@@ -311,98 +364,140 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
const FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft(
const SourceManager &SourceMgr, const AdditionalKeywords &Keywords,
- tooling::Replacements &Fixes, const FormatToken *Tok,
+ tooling::Replacements &Fixes, const FormatToken *const Tok,
const std::string &Qualifier, tok::TokenKind QualifierType) {
- // if Tok is an identifier and possibly a macro then don't convert.
- if (LeftRightQualifierAlignmentFixer::isPossibleMacro(Tok))
+ // We only need to think about streams that begin with a qualifier.
+ if (!Tok->is(QualifierType))
+ return Tok;
+  // Don't concern yourself if nothing precedes the qualifier.
+ if (!Tok->getPreviousNonComment())
return Tok;
- const FormatToken *Qual = Tok;
- const FormatToken *LastQual = Qual;
- while (Qual && isQualifierOrType(Qual, ConfiguredQualifierTokens)) {
- LastQual = Qual;
- Qual = Qual->Next;
- if (Qual && Qual->is(QualifierType))
- break;
+  // Skip qualifiers to the left to find what precedes the qualifiers.
+ const FormatToken *TypeToken = Tok->getPreviousNonComment();
+ while (isQualifier(TypeToken))
+ TypeToken = TypeToken->getPreviousNonComment();
+
+  // For left qualifiers preceded by nothing, a template declaration, or *,&,&&
+ // we only perform sorting.
+ if (!TypeToken || TypeToken->isOneOf(tok::star, tok::amp, tok::ampamp) ||
+ TypeToken->ClosesRequiresClause || TypeToken->ClosesTemplateDeclaration) {
+
+ // Don't sort past a non-configured qualifier token.
+ const FormatToken *FirstQual = Tok;
+ while (isConfiguredQualifier(FirstQual->getPreviousNonComment(),
+ ConfiguredQualifierTokens)) {
+ FirstQual = FirstQual->getPreviousNonComment();
+ }
+
+ if (FirstQual != Tok)
+ rotateTokens(SourceMgr, Fixes, FirstQual, Tok, /*Left=*/true);
+ return Tok;
}
- if (!Qual)
+ // Stay safe and don't move past macros, also don't bother with sorting.
+ if (isPossibleMacro(TypeToken))
return Tok;
- if (LastQual && Qual != LastQual && Qual->is(QualifierType)) {
- rotateTokens(SourceMgr, Fixes, Tok, Qual, /*Left=*/true);
- if (!Qual->Next)
- return Tok;
- Tok = Qual->Next;
- } else if (Tok->startsSequence(tok::identifier, QualifierType)) {
- if (Tok->Next->Next && Tok->Next->Next->isOneOf(tok::identifier, tok::star,
- tok::amp, tok::ampamp)) {
- // Don't swap `::iterator const` to `::const iterator`.
- if (!Tok->Previous ||
- (Tok->Previous && !Tok->Previous->is(tok::coloncolon))) {
- rotateTokens(SourceMgr, Fixes, Tok, Tok->Next, /*Left=*/true);
- Tok = Tok->Next;
- }
- } else if (Tok->startsSequence(tok::identifier, QualifierType,
- TT_TemplateCloser)) {
- FormatToken *Closer = Tok->Next->Next;
- rotateTokens(SourceMgr, Fixes, Tok, Tok->Next, /*Left=*/true);
- Tok = Closer;
+ // Examples given in order of ['const', 'volatile', 'type']
+
+ // The case `volatile long long int const` -> `const volatile long long int`
+ // The case `volatile long long const int` -> `const volatile long long int`
+ // The case `const long long volatile int` -> `const volatile long long int`
+ // The case `long volatile long int const` -> `const volatile long long int`
+ if (TypeToken->isSimpleTypeSpecifier()) {
+ const FormatToken *LastSimpleTypeSpecifier = TypeToken;
+ while (isConfiguredQualifierOrType(
+ LastSimpleTypeSpecifier->getPreviousNonComment(),
+ ConfiguredQualifierTokens)) {
+ LastSimpleTypeSpecifier =
+ LastSimpleTypeSpecifier->getPreviousNonComment();
}
+
+ rotateTokens(SourceMgr, Fixes, LastSimpleTypeSpecifier, Tok,
+ /*Left=*/true);
+ return Tok;
}
- if (Tok->is(TT_TemplateOpener) && Tok->Next &&
- (Tok->Next->is(tok::identifier) || Tok->Next->isSimpleTypeSpecifier()) &&
- Tok->Next->Next && Tok->Next->Next->is(QualifierType)) {
- rotateTokens(SourceMgr, Fixes, Tok->Next, Tok->Next->Next, /*Left=*/true);
- }
- if ((Tok->startsSequence(tok::coloncolon, tok::identifier) ||
- Tok->is(tok::identifier)) &&
- Tok->Next) {
- if (Tok->Previous &&
- Tok->Previous->isOneOf(tok::star, tok::ampamp, tok::amp)) {
- return Tok;
- }
- const FormatToken *Next = Tok->Next;
- // The case `std::Foo<T> const` -> `const std::Foo<T> &&`
- while (Next && Next->isOneOf(tok::identifier, tok::coloncolon))
- Next = Next->Next;
- if (Next && Next->Previous &&
- Next->Previous->startsSequence(tok::identifier, TT_TemplateOpener)) {
- // Read from to the end of the TemplateOpener to
- // TemplateCloser const ArrayRef<int> a; const ArrayRef<int> &a;
- if (Next->is(tok::comment) && Next->getNextNonComment())
- Next = Next->getNextNonComment();
- assert(Next->MatchingParen && "Missing template closer");
- Next = Next->MatchingParen;
-
- // If the template closer is closing the requires clause,
- // then stop and go back to the TemplateOpener and do whatever is
- // inside the <>.
- if (Next->ClosesRequiresClause)
- return Next->MatchingParen;
- Next = Next->Next;
-
- // Move to the end of any template class members e.g.
- // `Foo<int>::iterator`.
- if (Next && Next->startsSequence(tok::coloncolon, tok::identifier))
- Next = Next->Next->Next;
- if (Next && Next->is(QualifierType)) {
- // Move the qualifier.
- insertQualifierBefore(SourceMgr, Fixes, Tok, Qualifier);
- removeToken(SourceMgr, Fixes, Next);
- return Next;
+
+ if (TypeToken->isOneOf(tok::kw_auto, tok::identifier, TT_TemplateCloser)) {
+ const auto IsStartOfType = [](const FormatToken *const Tok) -> bool {
+ if (!Tok)
+ return true;
+
+ // A template closer is not the start of a type.
+ // The case `?<> const` -> `const ?<>`
+ if (Tok->is(TT_TemplateCloser))
+ return false;
+
+ const FormatToken *const Previous = Tok->getPreviousNonComment();
+ if (!Previous)
+ return true;
+
+      // An identifier preceded by :: is not the start of a type.
+ // The case `?::Foo const` -> `const ?::Foo`
+ if (Tok->is(tok::identifier) && Previous->is(tok::coloncolon))
+ return false;
+
+ const FormatToken *const PrePrevious = Previous->getPreviousNonComment();
+      // An identifier preceded by ::template is not the start of a type.
+ // The case `?::template Foo const` -> `const ?::template Foo`
+ if (Tok->is(tok::identifier) && Previous->is(tok::kw_template) &&
+ PrePrevious && PrePrevious->is(tok::coloncolon)) {
+ return false;
}
- }
- if (Next && Next->Next &&
- Next->Next->isOneOf(tok::amp, tok::ampamp, tok::star)) {
- if (Next->is(QualifierType)) {
- // Move the qualifier.
- insertQualifierBefore(SourceMgr, Fixes, Tok, Qualifier);
- removeToken(SourceMgr, Fixes, Next);
- return Next;
+
+ return true;
+ };
+
+ while (!IsStartOfType(TypeToken)) {
+ // The case `?<>`
+ if (TypeToken->is(TT_TemplateCloser)) {
+ assert(TypeToken->MatchingParen && "Missing template opener");
+ TypeToken = TypeToken->MatchingParen->getPreviousNonComment();
+ } else {
+ // The cases
+ // `::Foo`
+ // `?>::Foo`
+ // `?Bar::Foo`
+ // `::template Foo`
+ // `?>::template Foo`
+ // `?Bar::template Foo`
+ if (TypeToken->getPreviousNonComment()->is(tok::kw_template))
+ TypeToken = TypeToken->getPreviousNonComment();
+
+ const FormatToken *const ColonColon =
+ TypeToken->getPreviousNonComment();
+ const FormatToken *const PreColonColon =
+ ColonColon->getPreviousNonComment();
+ if (PreColonColon &&
+ PreColonColon->isOneOf(TT_TemplateCloser, tok::identifier)) {
+ TypeToken = PreColonColon;
+ } else {
+ TypeToken = ColonColon;
+ }
}
}
+
+ assert(TypeToken && "Should be auto or identifier");
+
+ // Place the Qualifier at the start of the list of qualifiers.
+ const FormatToken *Previous = nullptr;
+ while ((Previous = TypeToken->getPreviousNonComment()) &&
+ (isConfiguredQualifier(Previous, ConfiguredQualifierTokens) ||
+ Previous->is(tok::kw_typename))) {
+ // The case `volatile Foo::iter const` -> `const volatile Foo::iter`
+ // The case `typename C::type const` -> `const typename C::type`
+ TypeToken = Previous;
+ }
+
+ // Don't change declarations such as
+ // `foo(struct Foo const a);` -> `foo(struct Foo const a);`
+ if (!Previous || !Previous->isOneOf(tok::kw_struct, tok::kw_class)) {
+ insertQualifierBefore(SourceMgr, Fixes, TypeToken, Qualifier);
+ removeToken(SourceMgr, Fixes, Tok);
+ }
}
+
return Tok;
}
@@ -442,7 +537,7 @@ LeftRightQualifierAlignmentFixer::analyze(
assert(QualifierToken != tok::identifier && "Unrecognised Qualifier");
for (AnnotatedLine *Line : AnnotatedLines) {
- if (Line->InPPDirective)
+ if (!Line->Affected || Line->InPPDirective)
continue;
FormatToken *First = Line->First;
assert(First);
@@ -467,7 +562,7 @@ LeftRightQualifierAlignmentFixer::analyze(
return {Fixes, 0};
}
-void QualifierAlignmentFixer::PrepareLeftRightOrdering(
+void prepareLeftRightOrderingForQualifierAlignmentFixer(
const std::vector<std::string> &Order, std::vector<std::string> &LeftOrder,
std::vector<std::string> &RightOrder,
std::vector<tok::TokenKind> &Qualifiers) {
@@ -502,9 +597,16 @@ void QualifierAlignmentFixer::PrepareLeftRightOrdering(
}
bool LeftRightQualifierAlignmentFixer::isQualifierOrType(
- const FormatToken *Tok, const std::vector<tok::TokenKind> &specifiedTypes) {
+ const FormatToken *const Tok) {
+ return Tok && (Tok->isSimpleTypeSpecifier() || Tok->is(tok::kw_auto) ||
+ isQualifier(Tok));
+}
+
+bool LeftRightQualifierAlignmentFixer::isConfiguredQualifierOrType(
+ const FormatToken *const Tok,
+ const std::vector<tok::TokenKind> &Qualifiers) {
return Tok && (Tok->isSimpleTypeSpecifier() || Tok->is(tok::kw_auto) ||
- llvm::is_contained(specifiedTypes, Tok->Tok.getKind()));
+ isConfiguredQualifier(Tok, Qualifiers));
}
// If a token is an identifier and it's upper case, it could
@@ -516,7 +618,7 @@ bool LeftRightQualifierAlignmentFixer::isPossibleMacro(const FormatToken *Tok) {
return false;
if (Tok->TokenText.upper() == Tok->TokenText.str()) {
// T,K,U,V likely could be template arguments
- return (Tok->TokenText.size() != 1);
+ return Tok->TokenText.size() != 1;
}
return false;
}
diff --git a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h
index 30ef96b8b0a7..dc6f92e86ae7 100644
--- a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h
+++ b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h
@@ -25,32 +25,13 @@ typedef std::function<std::pair<tooling::Replacements, unsigned>(
const Environment &)>
AnalyzerPass;
-class QualifierAlignmentFixer : public TokenAnalyzer {
- // Left to Right ordering requires multiple passes
- SmallVector<AnalyzerPass, 8> Passes;
- StringRef &Code;
- ArrayRef<tooling::Range> Ranges;
- unsigned FirstStartColumn;
- unsigned NextStartColumn;
- unsigned LastStartColumn;
- StringRef FileName;
+void addQualifierAlignmentFixerPasses(const FormatStyle &Style,
+ SmallVectorImpl<AnalyzerPass> &Passes);
-public:
- QualifierAlignmentFixer(const Environment &Env, const FormatStyle &Style,
- StringRef &Code, ArrayRef<tooling::Range> Ranges,
- unsigned FirstStartColumn, unsigned NextStartColumn,
- unsigned LastStartColumn, StringRef FileName);
-
- std::pair<tooling::Replacements, unsigned>
- analyze(TokenAnnotator &Annotator,
- SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
- FormatTokenLexer &Tokens) override;
-
- static void PrepareLeftRightOrdering(const std::vector<std::string> &Order,
- std::vector<std::string> &LeftOrder,
- std::vector<std::string> &RightOrder,
- std::vector<tok::TokenKind> &Qualifiers);
-};
+void prepareLeftRightOrderingForQualifierAlignmentFixer(
+ const std::vector<std::string> &Order, std::vector<std::string> &LeftOrder,
+ std::vector<std::string> &RightOrder,
+ std::vector<tok::TokenKind> &Qualifiers);
class LeftRightQualifierAlignmentFixer : public TokenAnalyzer {
std::string Qualifier;
@@ -86,11 +67,13 @@ public:
const std::string &Qualifier,
tok::TokenKind QualifierType);
- // is the Token a simple or qualifier type
- static bool isQualifierOrType(const FormatToken *Tok,
- const std::vector<tok::TokenKind> &Qualifiers);
+ // Is the Token a simple or qualifier type
+ static bool isQualifierOrType(const FormatToken *Tok);
+ static bool
+ isConfiguredQualifierOrType(const FormatToken *Tok,
+ const std::vector<tok::TokenKind> &Qualifiers);
- // is the Token likely a Macro
+ // Is the Token likely a Macro
static bool isPossibleMacro(const FormatToken *Tok);
};
diff --git a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
index c9de4868bf84..3ba649d07964 100644
--- a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
+++ b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
@@ -72,6 +72,7 @@ struct JsImportedSymbol {
struct JsModuleReference {
bool FormattingOff = false;
bool IsExport = false;
+ bool IsTypeOnly = false;
// Module references are sorted into these categories, in order.
enum ReferenceCategory {
SIDE_EFFECT, // "import 'something';"
@@ -195,8 +196,7 @@ public:
// Separate references from the main code body of the file.
if (FirstNonImportLine && FirstNonImportLine->First->NewlinesBefore < 2 &&
!(FirstNonImportLine->First->is(tok::comment) &&
- FirstNonImportLine->First->TokenText.trim() ==
- "// clang-format on")) {
+ isClangFormatOn(FirstNonImportLine->First->TokenText.trim()))) {
ReferencesText += "\n";
}
@@ -217,8 +217,8 @@ public:
}
private:
- FormatToken *Current;
- FormatToken *LineEnd;
+ FormatToken *Current = nullptr;
+ FormatToken *LineEnd = nullptr;
FormatToken invalidToken;
@@ -307,6 +307,7 @@ private:
if (Reference->Category == JsModuleReference::SIDE_EFFECT ||
PreviousReference->Category == JsModuleReference::SIDE_EFFECT ||
Reference->IsExport != PreviousReference->IsExport ||
+ Reference->IsTypeOnly != PreviousReference->IsTypeOnly ||
!PreviousReference->Prefix.empty() || !Reference->Prefix.empty() ||
!PreviousReference->DefaultImport.empty() ||
!Reference->DefaultImport.empty() || Reference->Symbols.empty() ||
@@ -376,9 +377,9 @@ private:
// This is tracked in FormattingOff here and on JsModuleReference.
while (Current && Current->is(tok::comment)) {
StringRef CommentText = Current->TokenText.trim();
- if (CommentText == "// clang-format off") {
+ if (isClangFormatOff(CommentText)) {
FormattingOff = true;
- } else if (CommentText == "// clang-format on") {
+ } else if (isClangFormatOn(CommentText)) {
FormattingOff = false;
// Special case: consider a trailing "clang-format on" line to be part
// of the module reference, so that it gets moved around together with
@@ -489,6 +490,11 @@ private:
bool parseStarBinding(const AdditionalKeywords &Keywords,
JsModuleReference &Reference) {
// * as prefix from '...';
+ if (Current->is(Keywords.kw_type) && Current->Next &&
+ Current->Next->is(tok::star)) {
+ Reference.IsTypeOnly = true;
+ nextToken();
+ }
if (Current->isNot(tok::star))
return false;
nextToken();
@@ -504,8 +510,14 @@ private:
bool parseNamedBindings(const AdditionalKeywords &Keywords,
JsModuleReference &Reference) {
+ if (Current->is(Keywords.kw_type) && Current->Next &&
+ Current->Next->isOneOf(tok::identifier, tok::l_brace)) {
+ Reference.IsTypeOnly = true;
+ nextToken();
+ }
+
// eat a potential "import X, " prefix.
- if (Current->is(tok::identifier)) {
+ if (!Reference.IsExport && Current->is(tok::identifier)) {
Reference.DefaultImport = Current->TokenText;
nextToken();
if (Current->is(Keywords.kw_from))
@@ -536,14 +548,19 @@ private:
nextToken();
if (Current->is(tok::r_brace))
break;
- if (!Current->isOneOf(tok::identifier, tok::kw_default))
+ bool isTypeOnly =
+ Current->is(Keywords.kw_type) && Current->Next &&
+ Current->Next->isOneOf(tok::identifier, tok::kw_default);
+ if (!isTypeOnly && !Current->isOneOf(tok::identifier, tok::kw_default))
return false;
JsImportedSymbol Symbol;
- Symbol.Symbol = Current->TokenText;
// Make sure to include any preceding comments.
Symbol.Range.setBegin(
Current->getPreviousNonComment()->Next->WhitespaceRange.getBegin());
+ if (isTypeOnly)
+ nextToken();
+ Symbol.Symbol = Current->TokenText;
nextToken();
if (Current->is(Keywords.kw_as)) {
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
index 77e403581a0d..bd648c430f9b 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
@@ -104,12 +104,12 @@ TokenAnalyzer::process(bool SkipAnnotation) {
IdentifierTable IdentTable(getFormattingLangOpts(Style));
FormatTokenLexer Lex(Env.getSourceManager(), Env.getFileID(),
Env.getFirstStartColumn(), Style, Encoding, Allocator,
-
IdentTable);
ArrayRef<FormatToken *> Toks(Lex.lex());
SmallVector<FormatToken *, 10> Tokens(Toks.begin(), Toks.end());
- UnwrappedLineParser Parser(Style, Lex.getKeywords(),
- Env.getFirstStartColumn(), Tokens, *this);
+ UnwrappedLineParser Parser(Env.getSourceManager(), Style, Lex.getKeywords(),
+ Env.getFirstStartColumn(), Tokens, *this,
+ Allocator, IdentTable);
Parser.parse();
assert(UnwrappedLines.back().empty());
unsigned Penalty = 0;
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h
index e5cc1287c616..4086dab1c94c 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h
@@ -46,7 +46,7 @@ public:
FileID getFileID() const { return ID; }
- const SourceManager &getSourceManager() const { return SM; }
+ SourceManager &getSourceManager() const { return SM; }
ArrayRef<CharSourceRange> getCharRanges() const { return CharRanges; }
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
index 464594bf7488..73840332e22c 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
@@ -45,7 +45,7 @@ static bool startsWithInitStatement(const AnnotatedLine &Line) {
/// invokes @selector(...)). So, we allow treat any identifier or
/// keyword as a potential Objective-C selector component.
static bool canBeObjCSelectorComponent(const FormatToken &Tok) {
- return Tok.Tok.getIdentifierInfo() != nullptr;
+ return Tok.Tok.getIdentifierInfo();
}
/// With `Left` being '(', check if we're at either `[...](` or
@@ -111,14 +111,29 @@ static bool isCppAttribute(bool IsCpp, const FormatToken &Tok) {
class AnnotatingParser {
public:
AnnotatingParser(const FormatStyle &Style, AnnotatedLine &Line,
- const AdditionalKeywords &Keywords)
+ const AdditionalKeywords &Keywords,
+ SmallVector<ScopeType> &Scopes)
: Style(Style), Line(Line), CurrentToken(Line.First), AutoFound(false),
- Keywords(Keywords) {
+ Keywords(Keywords), Scopes(Scopes) {
Contexts.push_back(Context(tok::unknown, 1, /*IsExpression=*/false));
resetTokenMetadata();
}
private:
+ ScopeType getScopeType(const FormatToken &Token) const {
+ switch (Token.getType()) {
+ case TT_FunctionLBrace:
+ case TT_LambdaLBrace:
+ return ST_Function;
+ case TT_ClassLBrace:
+ case TT_StructLBrace:
+ case TT_UnionLBrace:
+ return ST_Class;
+ default:
+ return ST_Other;
+ }
+ }
+
bool parseAngle() {
if (!CurrentToken || !CurrentToken->Previous)
return false;
@@ -129,6 +144,8 @@ private:
if (Previous.Previous) {
if (Previous.Previous->Tok.isLiteral())
return false;
+ if (Previous.Previous->is(tok::r_brace))
+ return false;
if (Previous.Previous->is(tok::r_paren) && Contexts.size() > 1 &&
(!Previous.Previous->MatchingParen ||
!Previous.Previous->MatchingParen->is(
@@ -184,7 +201,10 @@ private:
CurrentToken->setType(TT_DictLiteral);
} else {
CurrentToken->setType(TT_TemplateCloser);
+ CurrentToken->Tok.setLength(1);
}
+ if (CurrentToken->Next && CurrentToken->Next->Tok.isLiteral())
+ return false;
next();
return true;
}
@@ -293,6 +313,9 @@ private:
bool OperatorCalledAsMemberFunction =
Prev->Previous && Prev->Previous->isOneOf(tok::period, tok::arrow);
Contexts.back().IsExpression = OperatorCalledAsMemberFunction;
+ } else if (OpeningParen.is(TT_VerilogInstancePortLParen)) {
+ Contexts.back().IsExpression = true;
+ Contexts.back().ContextType = Context::VerilogInstancePortList;
} else if (Style.isJavaScript() &&
(Line.startsWith(Keywords.kw_type, tok::identifier) ||
Line.startsWith(tok::kw_export, Keywords.kw_type,
@@ -301,9 +324,10 @@ private:
// export type X = (...);
Contexts.back().IsExpression = false;
} else if (OpeningParen.Previous &&
- (OpeningParen.Previous->isOneOf(tok::kw_static_assert,
- tok::kw_while, tok::l_paren,
- tok::comma, TT_BinaryOperator) ||
+ (OpeningParen.Previous->isOneOf(
+ tok::kw_static_assert, tok::kw_noexcept, tok::kw_explicit,
+ tok::kw_while, tok::l_paren, tok::comma,
+ TT_BinaryOperator) ||
OpeningParen.Previous->isIf())) {
// static_assert, if and while usually contain expressions.
Contexts.back().IsExpression = true;
@@ -398,6 +422,7 @@ private:
FormatToken *PrevPrev = Prev->getPreviousNonComment();
FormatToken *Next = CurrentToken->Next;
if (PrevPrev && PrevPrev->is(tok::identifier) &&
+ PrevPrev->isNot(TT_TypeName) &&
Prev->isOneOf(tok::star, tok::amp, tok::ampamp) &&
CurrentToken->is(tok::identifier) && Next->isNot(tok::equal)) {
Prev->setType(TT_BinaryOperator);
@@ -573,10 +598,6 @@ private:
return false;
}
- bool isCpp11AttributeSpecifier(const FormatToken &Tok) {
- return isCppAttribute(Style.isCpp(), Tok);
- }
-
bool parseSquare() {
if (!CurrentToken)
return false;
@@ -599,7 +620,7 @@ private:
const bool IsInnerSquare = Contexts.back().InCpp11AttributeSpecifier;
const bool IsCpp11AttributeSpecifier =
- isCpp11AttributeSpecifier(*Left) || IsInnerSquare;
+ isCppAttribute(Style.isCpp(), *Left) || IsInnerSquare;
// Treat C# Attributes [STAThread] much like C++ attributes [[...]].
bool IsCSharpAttributeSpecifier =
@@ -846,6 +867,9 @@ private:
unsigned CommaCount = 0;
while (CurrentToken) {
if (CurrentToken->is(tok::r_brace)) {
+ assert(!Scopes.empty());
+ assert(Scopes.back() == getScopeType(OpeningBrace));
+ Scopes.pop_back();
assert(OpeningBrace.Optional == CurrentToken->Optional);
OpeningBrace.MatchingParen = CurrentToken;
CurrentToken->MatchingParen = &OpeningBrace;
@@ -949,6 +973,10 @@ private:
case tok::colon:
if (!Tok->Previous)
return false;
+ // Goto labels and case labels are already identified in
+ // UnwrappedLineParser.
+ if (Tok->isTypeFinalized())
+ break;
// Colons from ?: are handled in parseConditional().
if (Style.isJavaScript()) {
if (Contexts.back().ColonIsForRangeExpr || // colon in for loop
@@ -987,7 +1015,7 @@ private:
// In Verilog a case label doesn't have the case keyword. We
// assume a colon following an expression is a case label.
// Colons from ?: are annotated in parseConditional().
- Tok->setType(TT_GotoLabelColon);
+ Tok->setType(TT_CaseLabelColon);
if (Line.Level > 1 || (!Line.InPPDirective && Line.Level > 0))
--Line.Level;
}
@@ -1124,6 +1152,52 @@ private:
Tok->setType(TT_OverloadedOperatorLParen);
}
+ if (Style.isVerilog()) {
+ // Identify the parameter list and port list in a module instantiation.
+ // This is still needed when we already have
+ // UnwrappedLineParser::parseVerilogHierarchyHeader because that
+ // function is only responsible for the definition, not the
+ // instantiation.
+ auto IsInstancePort = [&]() {
+ const FormatToken *Prev = Tok->getPreviousNonComment();
+ const FormatToken *PrevPrev;
+ // In the following example all 4 left parentheses will be treated as
+ // 'TT_VerilogInstancePortLParen'.
+ //
+ // module_x instance_1(port_1); // Case A.
+ // module_x #(parameter_1) // Case B.
+ // instance_2(port_1), // Case C.
+ // instance_3(port_1); // Case D.
+ if (!Prev || !(PrevPrev = Prev->getPreviousNonComment()))
+ return false;
+ // Case A.
+ if (Keywords.isVerilogIdentifier(*Prev) &&
+ Keywords.isVerilogIdentifier(*PrevPrev)) {
+ return true;
+ }
+ // Case B.
+ if (Prev->is(Keywords.kw_verilogHash) &&
+ Keywords.isVerilogIdentifier(*PrevPrev)) {
+ return true;
+ }
+ // Case C.
+ if (Keywords.isVerilogIdentifier(*Prev) && PrevPrev->is(tok::r_paren))
+ return true;
+ // Case D.
+ if (Keywords.isVerilogIdentifier(*Prev) && PrevPrev->is(tok::comma)) {
+ const FormatToken *PrevParen = PrevPrev->getPreviousNonComment();
+ if (PrevParen->is(tok::r_paren) && PrevParen->MatchingParen &&
+ PrevParen->MatchingParen->is(TT_VerilogInstancePortLParen)) {
+ return true;
+ }
+ }
+ return false;
+ };
+
+ if (IsInstancePort())
+ Tok->setFinalizedType(TT_VerilogInstancePortLParen);
+ }
+
if (!parseParens())
return false;
if (Line.MustBeDeclaration && Contexts.size() == 1 &&
@@ -1145,6 +1219,7 @@ private:
if (Previous && Previous->getType() != TT_DictLiteral)
Previous->setType(TT_SelectorName);
}
+ Scopes.push_back(getScopeType(*Tok));
if (!parseBrace())
return false;
break;
@@ -1175,6 +1250,9 @@ private:
case tok::r_square:
return false;
case tok::r_brace:
+ // Don't pop scope when encountering unbalanced r_brace.
+ if (!Scopes.empty())
+ Scopes.pop_back();
// Lines can start with '}'.
if (Tok->Previous)
return false;
@@ -1194,19 +1272,26 @@ private:
!CurrentToken->isOneOf(tok::l_paren, tok::semi, tok::r_paren)) {
if (CurrentToken->isOneOf(tok::star, tok::amp))
CurrentToken->setType(TT_PointerOrReference);
- consumeToken();
+ auto Next = CurrentToken->getNextNonComment();
+ if (!Next)
+ break;
+ if (Next->is(tok::less))
+ next();
+ else
+ consumeToken();
if (!CurrentToken)
- continue;
- if (CurrentToken->is(tok::comma) &&
- CurrentToken->Previous->isNot(tok::kw_operator)) {
break;
- }
- if (CurrentToken->Previous->isOneOf(TT_BinaryOperator, TT_UnaryOperator,
- tok::comma, tok::star, tok::arrow,
- tok::amp, tok::ampamp) ||
+ auto Previous = CurrentToken->getPreviousNonComment();
+ assert(Previous);
+ if (CurrentToken->is(tok::comma) && Previous->isNot(tok::kw_operator))
+ break;
+ if (Previous->isOneOf(TT_BinaryOperator, TT_UnaryOperator, tok::comma,
+ tok::star, tok::arrow, tok::amp, tok::ampamp) ||
// User defined literal.
- CurrentToken->Previous->TokenText.startswith("\"\"")) {
- CurrentToken->Previous->setType(TT_OverloadedOperator);
+ Previous->TokenText.startswith("\"\"")) {
+ Previous->setType(TT_OverloadedOperator);
+ if (CurrentToken->isOneOf(tok::less, tok::greater))
+ break;
}
}
if (CurrentToken && CurrentToken->is(tok::l_paren))
@@ -1217,7 +1302,7 @@ private:
case tok::question:
if (Style.isJavaScript() && Tok->Next &&
Tok->Next->isOneOf(tok::semi, tok::comma, tok::colon, tok::r_paren,
- tok::r_brace)) {
+ tok::r_brace, tok::r_square)) {
// Question marks before semicolons, colons, etc. indicate optional
// types (fields, parameters), e.g.
// function(x?: string, y?) {...}
@@ -1234,11 +1319,34 @@ private:
if (Style.isCSharp()) {
// `Type?)`, `Type?>`, `Type? name;` and `Type? name =` can only be
// nullable types.
+
+ // `Type?)`, `Type?>`, `Type? name;`
+ if (Tok->Next &&
+ (Tok->Next->startsSequence(tok::question, tok::r_paren) ||
+ Tok->Next->startsSequence(tok::question, tok::greater) ||
+ Tok->Next->startsSequence(tok::question, tok::identifier,
+ tok::semi))) {
+ Tok->setType(TT_CSharpNullable);
+ break;
+ }
+
+ // `Type? name =`
+ if (Tok->Next && Tok->Next->is(tok::identifier) && Tok->Next->Next &&
+ Tok->Next->Next->is(tok::equal)) {
+ Tok->setType(TT_CSharpNullable);
+ break;
+ }
+
// Line.MustBeDeclaration will be true for `Type? name;`.
- if ((!Contexts.back().IsExpression && Line.MustBeDeclaration) ||
- (Tok->Next && Tok->Next->isOneOf(tok::r_paren, tok::greater)) ||
- (Tok->Next && Tok->Next->is(tok::identifier) && Tok->Next->Next &&
- Tok->Next->Next->is(tok::equal))) {
+ // But not
+ // cond ? "A" : "B";
+ // cond ? id : "B";
+ // cond ? cond2 ? "A" : "B" : "C";
+ if (!Contexts.back().IsExpression && Line.MustBeDeclaration &&
+ (!Tok->Next ||
+ !Tok->Next->isOneOf(tok::identifier, tok::string_literal) ||
+ !Tok->Next->Next ||
+ !Tok->Next->Next->isOneOf(tok::colon, tok::question))) {
Tok->setType(TT_CSharpNullable);
break;
}
@@ -1256,9 +1364,15 @@ private:
case Context::InheritanceList:
Tok->setType(TT_InheritanceComma);
break;
+ case Context::VerilogInstancePortList:
+ Tok->setFinalizedType(TT_VerilogInstancePortComma);
+ break;
default:
- if (Contexts.back().FirstStartOfName &&
- (Contexts.size() == 1 || startsWithInitStatement(Line))) {
+ if (Style.isVerilog() && Contexts.size() == 1 &&
+ Line.startsWith(Keywords.kw_assign)) {
+ Tok->setFinalizedType(TT_VerilogAssignComma);
+ } else if (Contexts.back().FirstStartOfName &&
+ (Contexts.size() == 1 || startsWithInitStatement(Line))) {
Contexts.back().FirstStartOfName->PartOfMultiVariableDeclStmt = true;
Line.IsMultiVariableDeclStmt = true;
}
@@ -1283,7 +1397,7 @@ private:
Tok->Next->isNot(tok::l_paren)) {
Tok->setType(TT_CSharpGenericTypeConstraint);
parseCSharpGenericTypeConstraint();
- if (Tok->getPreviousNonComment() == nullptr)
+ if (!Tok->getPreviousNonComment())
Line.IsContinuation = true;
}
break;
@@ -1622,6 +1736,7 @@ private:
bool CaretFound = false;
bool InCpp11AttributeSpecifier = false;
bool InCSharpAttributeSpecifier = false;
+ bool VerilogAssignmentFound = false;
enum {
Unknown,
// Like the part after `:` in a constructor.
@@ -1639,6 +1754,8 @@ private:
TemplateArgument,
// C11 _Generic selection.
C11GenericSelection,
+ // Like in the outer parentheses in `ffnand ff1(.q());`.
+ VerilogInstancePortList,
} ContextType = Unknown;
};
@@ -1689,6 +1806,14 @@ private:
return false;
}
+ // This is the default value of a template parameter, determine if it's
+ // type or non-type.
+ if (Contexts.back().ContextKind == tok::less) {
+ assert(Current.Previous->Previous);
+ return !Current.Previous->Previous->isOneOf(tok::kw_typename,
+ tok::kw_class);
+ }
+
Tok = Tok->MatchingParen;
if (!Tok)
return false;
@@ -1723,7 +1848,7 @@ private:
Previous && Previous->Previous &&
!Previous->Previous->isOneOf(tok::comma, tok::semi);
Previous = Previous->Previous) {
- if (Previous->isOneOf(tok::r_square, tok::r_paren)) {
+ if (Previous->isOneOf(tok::r_square, tok::r_paren, tok::greater)) {
Previous = Previous->MatchingParen;
if (!Previous)
break;
@@ -1879,7 +2004,8 @@ private:
} else if (Current.is(tok::arrow) &&
Style.Language == FormatStyle::LK_Java) {
Current.setType(TT_LambdaArrow);
- } else if (Current.is(tok::arrow) && AutoFound && Line.MustBeDeclaration &&
+ } else if (Current.is(tok::arrow) && AutoFound &&
+ (Line.MightBeFunctionDecl || Line.InPPDirective) &&
Current.NestingLevel == 0 &&
!Current.Previous->isOneOf(tok::kw_operator, tok::identifier)) {
// not auto operator->() -> xxx;
@@ -1919,6 +2045,17 @@ private:
(!Current.Previous || Current.Previous->isNot(tok::l_square)) &&
(!Current.is(tok::greater) &&
Style.Language != FormatStyle::LK_TextProto)) {
+ if (Style.isVerilog()) {
+ if (Current.is(tok::lessequal) && Contexts.size() == 1 &&
+ !Contexts.back().VerilogAssignmentFound) {
+ // In Verilog `<=` is assignment if in its own statement. It is a
+ // statement instead of an expression, that is it can not be chained.
+ Current.ForcedPrecedence = prec::Assignment;
+ Current.setFinalizedType(TT_BinaryOperator);
+ }
+ if (Current.getPrecedence() == prec::Assignment)
+ Contexts.back().VerilogAssignmentFound = true;
+ }
Current.setType(TT_BinaryOperator);
} else if (Current.is(tok::comment)) {
if (Current.TokenText.startswith("/*")) {
@@ -2031,6 +2168,10 @@ private:
/// This is a heuristic based on whether \p Tok is an identifier following
/// something that is likely a type.
bool isStartOfName(const FormatToken &Tok) {
+ // Handled in ExpressionParser for Verilog.
+ if (Style.isVerilog())
+ return false;
+
if (Tok.isNot(tok::identifier) || !Tok.Previous)
return false;
@@ -2201,7 +2342,7 @@ private:
if (Tok.Next->isOneOf(tok::kw_noexcept, tok::kw_volatile, tok::kw_const,
tok::kw_requires, tok::kw_throw, tok::arrow,
Keywords.kw_override, Keywords.kw_final) ||
- isCpp11AttributeSpecifier(*Tok.Next)) {
+ isCppAttribute(Style.isCpp(), *Tok.Next)) {
return false;
}
@@ -2288,12 +2429,16 @@ private:
// If the next token after the parenthesis is a unary operator, assume
// that this is cast, unless there are unexpected tokens inside the
// parenthesis.
- bool NextIsUnary =
- Tok.Next->isUnaryOperator() || Tok.Next->isOneOf(tok::amp, tok::star);
- if (!NextIsUnary || Tok.Next->is(tok::plus) ||
+ const bool NextIsAmpOrStar = Tok.Next->isOneOf(tok::amp, tok::star);
+ if (!(Tok.Next->isUnaryOperator() || NextIsAmpOrStar) ||
+ Tok.Next->is(tok::plus) ||
!Tok.Next->Next->isOneOf(tok::identifier, tok::numeric_constant)) {
return false;
}
+ if (NextIsAmpOrStar &&
+ (Tok.Next->Next->is(tok::numeric_constant) || Line.InPPDirective)) {
+ return false;
+ }
// Search for unexpected tokens.
for (FormatToken *Prev = Tok.Previous; Prev != Tok.MatchingParen;
Prev = Prev->Previous) {
@@ -2350,9 +2495,22 @@ private:
if (Style.isCSharp() && Tok.is(tok::ampamp))
return TT_BinaryOperator;
+ if (Style.isVerilog()) {
+ // In Verilog, `*` can only be a binary operator. `&` can be either unary
+ // or binary. `*` also includes `*>` in module path declarations in
+ // specify blocks because merged tokens take the type of the first one by
+ // default.
+ if (Tok.is(tok::star))
+ return TT_BinaryOperator;
+ return determineUnaryOperatorByUsage(Tok) ? TT_UnaryOperator
+ : TT_BinaryOperator;
+ }
+
const FormatToken *PrevToken = Tok.getPreviousNonComment();
if (!PrevToken)
return TT_UnaryOperator;
+ if (PrevToken->is(TT_TypeName))
+ return TT_PointerOrReference;
const FormatToken *NextToken = Tok.getNextNonComment();
@@ -2361,7 +2519,7 @@ private:
if (!NextToken ||
NextToken->isOneOf(tok::arrow, tok::equal, tok::kw_noexcept, tok::comma,
- tok::r_paren) ||
+ tok::r_paren, TT_RequiresClause) ||
NextToken->canBePointerOrReferenceQualifier() ||
(NextToken->is(tok::l_brace) && !NextToken->getNextNonComment())) {
return TT_PointerOrReference;
@@ -2439,6 +2597,28 @@ private:
if (IsExpression && !Contexts.back().CaretFound)
return TT_BinaryOperator;
+ // Opeartors at class scope are likely pointer or reference members.
+ if (!Scopes.empty() && Scopes.back() == ST_Class)
+ return TT_PointerOrReference;
+
+ // Tokens that indicate member access or chained operator& use.
+ auto IsChainedOperatorAmpOrMember = [](const FormatToken *token) {
+ return !token || token->isOneOf(tok::amp, tok::period, tok::arrow,
+ tok::arrowstar, tok::periodstar);
+ };
+
+ // It's more likely that & represents operator& than an uninitialized
+ // reference.
+ if (Tok.is(tok::amp) && PrevToken && PrevToken->Tok.isAnyIdentifier() &&
+ IsChainedOperatorAmpOrMember(PrevToken->getPreviousNonComment()) &&
+ NextToken && NextToken->Tok.isAnyIdentifier()) {
+ if (auto NextNext = NextToken->getNextNonComment();
+ NextNext &&
+ (IsChainedOperatorAmpOrMember(NextNext) || NextNext->is(tok::semi))) {
+ return TT_BinaryOperator;
+ }
+ }
+
return TT_PointerOrReference;
}
@@ -2476,6 +2656,8 @@ private:
bool AutoFound;
const AdditionalKeywords &Keywords;
+ SmallVector<ScopeType> &Scopes;
+
// Set of "<" tokens that do not open a template parameter list. If parseAngle
// determines that a specific token can't be a template opener, it will make
// same decision irrespective of the decisions for tokens leading up to it.
@@ -2523,11 +2705,31 @@ public:
FormatToken *Start = Current;
FormatToken *LatestOperator = nullptr;
unsigned OperatorIndex = 0;
+ // The first name of the current type in a port list.
+ FormatToken *VerilogFirstOfType = nullptr;
while (Current) {
+ // In Verilog ports in a module header that don't have a type take the
+ // type of the previous one. For example,
+ // module a(output b,
+ // c,
+ // output d);
+ // In this case there need to be fake parentheses around b and c.
+ if (Style.isVerilog() && Precedence == prec::Comma) {
+ VerilogFirstOfType =
+ verilogGroupDecl(VerilogFirstOfType, LatestOperator);
+ }
+
// Consume operators with higher precedence.
parse(Precedence + 1);
+ // Do not assign fake parenthesis to tokens that are part of an
+ // unexpanded macro call. The line within the macro call contains
+ // the parenthesis and commas, and we will not find operators within
+ // that structure.
+ if (Current && Current->MacroParent)
+ break;
+
int CurrentPrecedence = getCurrentPrecedence();
if (Precedence == CurrentPrecedence && Current &&
@@ -2537,7 +2739,7 @@ public:
Start = Current;
}
- // At the end of the line or when an operator with higher precedence is
+ // At the end of the line or when an operator with lower precedence is
// found, insert fake parenthesis and return.
if (!Current ||
(Current->closesScope() &&
@@ -2574,6 +2776,10 @@ public:
}
}
+ // Group variables of the same type.
+ if (Style.isVerilog() && Precedence == prec::Comma && VerilogFirstOfType)
+ addFakeParenthesis(VerilogFirstOfType, prec::Comma);
+
if (LatestOperator && (Current || Precedence > 0)) {
// The requires clauses do not neccessarily end in a semicolon or a brace,
// but just go over to struct/class or a function declaration, we need to
@@ -2709,6 +2915,140 @@ private:
}
}
+ // Add fake parenthesis around declarations of the same type for example in a
+ // module prototype. Return the first port / variable of the current type.
+ FormatToken *verilogGroupDecl(FormatToken *FirstOfType,
+ FormatToken *PreviousComma) {
+ if (!Current)
+ return nullptr;
+
+ FormatToken *Start = Current;
+
+ // Skip attributes.
+ while (Start->startsSequence(tok::l_paren, tok::star)) {
+ if (!(Start = Start->MatchingParen) ||
+ !(Start = Start->getNextNonComment())) {
+ return nullptr;
+ }
+ }
+
+ FormatToken *Tok = Start;
+
+ if (Tok->is(Keywords.kw_assign))
+ Tok = Tok->getNextNonComment();
+
+ // Skip any type qualifiers to find the first identifier. It may be either a
+ // new type name or a variable name. There can be several type qualifiers
+ // preceding a variable name, and we can not tell them apart by looking at
+ // the word alone since a macro can be defined as either a type qualifier or
+ // a variable name. Thus we use the last word before the dimensions instead
+ // of the first word as the candidate for the variable or type name.
+ FormatToken *First = nullptr;
+ while (Tok) {
+ FormatToken *Next = Tok->getNextNonComment();
+
+ if (Tok->is(tok::hash)) {
+ // Start of a macro expansion.
+ First = Tok;
+ Tok = Next;
+ if (Tok)
+ Tok = Tok->getNextNonComment();
+ } else if (Tok->is(tok::hashhash)) {
+ // Concatenation. Skip.
+ Tok = Next;
+ if (Tok)
+ Tok = Tok->getNextNonComment();
+ } else if (Keywords.isVerilogQualifier(*Tok) ||
+ Keywords.isVerilogIdentifier(*Tok)) {
+ First = Tok;
+ Tok = Next;
+ // The name may have dots like `interface_foo.modport_foo`.
+ while (Tok && Tok->isOneOf(tok::period, tok::coloncolon) &&
+ (Tok = Tok->getNextNonComment())) {
+ if (Keywords.isVerilogIdentifier(*Tok))
+ Tok = Tok->getNextNonComment();
+ }
+ } else if (!Next) {
+ Tok = nullptr;
+ } else if (Tok->is(tok::l_paren)) {
+ // Make sure the parenthesized list is a drive strength. Otherwise the
+ // statement may be a module instantiation in which case we have already
+ // found the instance name.
+ if (Next->isOneOf(
+ Keywords.kw_highz0, Keywords.kw_highz1, Keywords.kw_large,
+ Keywords.kw_medium, Keywords.kw_pull0, Keywords.kw_pull1,
+ Keywords.kw_small, Keywords.kw_strong0, Keywords.kw_strong1,
+ Keywords.kw_supply0, Keywords.kw_supply1, Keywords.kw_weak0,
+ Keywords.kw_weak1)) {
+ Tok->setType(TT_VerilogStrength);
+ Tok = Tok->MatchingParen;
+ if (Tok) {
+ Tok->setType(TT_VerilogStrength);
+ Tok = Tok->getNextNonComment();
+ }
+ } else {
+ break;
+ }
+ } else if (Tok->is(tok::hash)) {
+ if (Next->is(tok::l_paren))
+ Next = Next->MatchingParen;
+ if (Next)
+ Tok = Next->getNextNonComment();
+ } else {
+ break;
+ }
+ }
+
+ // Find the second identifier. If it exists it will be the name.
+ FormatToken *Second = nullptr;
+ // Dimensions.
+ while (Tok && Tok->is(tok::l_square) && (Tok = Tok->MatchingParen))
+ Tok = Tok->getNextNonComment();
+ if (Tok && (Tok->is(tok::hash) || Keywords.isVerilogIdentifier(*Tok)))
+ Second = Tok;
+
+ // If the second identifier doesn't exist and there are qualifiers, the type
+ // is implied.
+ FormatToken *TypedName = nullptr;
+ if (Second) {
+ TypedName = Second;
+ if (First && First->is(TT_Unknown))
+ First->setType(TT_VerilogDimensionedTypeName);
+ } else if (First != Start) {
+ // If 'First' is null, then this isn't a declaration, 'TypedName' gets set
+ // to null as intended.
+ TypedName = First;
+ }
+
+ if (TypedName) {
+ // This is a declaration with a new type.
+ if (TypedName->is(TT_Unknown))
+ TypedName->setType(TT_StartOfName);
+ // Group variables of the previous type.
+ if (FirstOfType && PreviousComma) {
+ PreviousComma->setType(TT_VerilogTypeComma);
+ addFakeParenthesis(FirstOfType, prec::Comma, PreviousComma->Previous);
+ }
+
+ FirstOfType = TypedName;
+
+ // Don't let higher precedence handle the qualifiers. For example if we
+ // have:
+ // parameter x = 0
+ // We skip `parameter` here. This way the fake parentheses for the
+ // assignment will be around `x = 0`.
+ while (Current && Current != FirstOfType) {
+ if (Current->opensScope()) {
+ next();
+ parse();
+ }
+ next();
+ }
+ }
+
+ return FirstOfType;
+ }
+
const FormatStyle &Style;
const AdditionalKeywords &Keywords;
const AnnotatedLine &Line;
@@ -2725,8 +3065,8 @@ void TokenAnnotator::setCommentLineLevels(
// If the comment is currently aligned with the line immediately following
// it, that's probably intentional and we should keep it.
- if (NextNonCommentLine && Line->isComment() &&
- NextNonCommentLine->First->NewlinesBefore <= 1 &&
+ if (NextNonCommentLine && !NextNonCommentLine->First->Finalized &&
+ Line->isComment() && NextNonCommentLine->First->NewlinesBefore <= 1 &&
NextNonCommentLine->First->OriginalColumn ==
Line->First->OriginalColumn) {
const bool PPDirectiveOrImportStmt =
@@ -2751,16 +3091,16 @@ void TokenAnnotator::setCommentLineLevels(
static unsigned maxNestingDepth(const AnnotatedLine &Line) {
unsigned Result = 0;
- for (const auto *Tok = Line.First; Tok != nullptr; Tok = Tok->Next)
+ for (const auto *Tok = Line.First; Tok; Tok = Tok->Next)
Result = std::max(Result, Tok->NestingLevel);
return Result;
}
-void TokenAnnotator::annotate(AnnotatedLine &Line) const {
+void TokenAnnotator::annotate(AnnotatedLine &Line) {
for (auto &Child : Line.Children)
annotate(*Child);
- AnnotatingParser Parser(Style, Line, Keywords);
+ AnnotatingParser Parser(Style, Line, Keywords, Scopes);
Line.Type = Parser.parseLine();
// With very deep nesting, ExpressionParser uses lots of stack and the
@@ -2791,6 +3131,10 @@ void TokenAnnotator::annotate(AnnotatedLine &Line) const {
// function declaration.
static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
const AnnotatedLine &Line) {
+ assert(Current.Previous);
+ if (!Current.Tok.getIdentifierInfo())
+ return false;
+
auto skipOperatorName = [](const FormatToken *Next) -> const FormatToken * {
for (; Next; Next = Next->Next) {
if (Next->is(TT_OverloadedOperatorLParen))
@@ -2829,7 +3173,12 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
// Find parentheses of parameter list.
const FormatToken *Next = Current.Next;
if (Current.is(tok::kw_operator)) {
- if (Current.Previous && Current.Previous->is(tok::coloncolon))
+ const auto *Previous = Current.Previous;
+ if (Previous->Tok.getIdentifierInfo() &&
+ !Previous->isOneOf(tok::kw_return, tok::kw_co_return)) {
+ return true;
+ }
+ if (!Previous->isOneOf(tok::star, tok::amp, tok::ampamp, TT_TemplateCloser))
return false;
Next = skipOperatorName(Next);
} else {
@@ -2960,9 +3309,11 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
if (AlignArrayOfStructures)
calculateArrayInitializerColumnList(Line);
+ bool LineIsFunctionDeclaration = false;
for (FormatToken *Tok = Current, *AfterLastAttribute = nullptr; Tok;
Tok = Tok->Next) {
if (isFunctionDeclarationName(Style.isCpp(), *Tok, Line)) {
+ LineIsFunctionDeclaration = true;
Tok->setType(TT_FunctionDeclarationName);
if (AfterLastAttribute &&
mustBreakAfterAttributes(*AfterLastAttribute, Style)) {
@@ -2975,12 +3326,43 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
AfterLastAttribute = Tok;
}
+ if (Style.isCpp() && !LineIsFunctionDeclaration) {
+ // Annotate */&/&& in `operator` function calls as binary operators.
+ for (const auto *Tok = Line.First; Tok; Tok = Tok->Next) {
+ if (Tok->isNot(tok::kw_operator))
+ continue;
+ do {
+ Tok = Tok->Next;
+ } while (Tok && Tok->isNot(TT_OverloadedOperatorLParen));
+ if (!Tok)
+ break;
+ const auto *LeftParen = Tok;
+ for (Tok = Tok->Next; Tok && Tok != LeftParen->MatchingParen;
+ Tok = Tok->Next) {
+ if (Tok->isNot(tok::identifier))
+ continue;
+ auto *Next = Tok->Next;
+ const bool NextIsBinaryOperator =
+ Next && Next->isOneOf(tok::star, tok::amp, tok::ampamp) &&
+ Next->Next && Next->Next->is(tok::identifier);
+ if (!NextIsBinaryOperator)
+ continue;
+ Next->setType(TT_BinaryOperator);
+ Tok = Next;
+ }
+ }
+ }
+
while (Current) {
const FormatToken *Prev = Current->Previous;
if (Current->is(TT_LineComment)) {
if (Prev->is(BK_BracedInit) && Prev->opensScope()) {
Current->SpacesRequiredBefore =
- (Style.Cpp11BracedListStyle && !Style.SpacesInParentheses) ? 0 : 1;
+ (Style.Cpp11BracedListStyle && !Style.SpacesInParensOptions.Other)
+ ? 0
+ : 1;
+ } else if (Prev->is(TT_VerilogMultiLineListLParen)) {
+ Current->SpacesRequiredBefore = 0;
} else {
Current->SpacesRequiredBefore = Style.SpacesBeforeTrailingComments;
}
@@ -3067,7 +3449,7 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
calculateUnbreakableTailLengths(Line);
unsigned IndentLevel = Line.Level;
- for (Current = Line.First; Current != nullptr; Current = Current->Next) {
+ for (Current = Line.First; Current; Current = Current->Next) {
if (Current->Role)
Current->Role->precomputeFormattingInfos(Current);
if (Current->MatchingParen &&
@@ -3107,10 +3489,10 @@ void TokenAnnotator::calculateArrayInitializerColumnList(
auto *CurrentToken = Line.First;
CurrentToken->ArrayInitializerLineStart = true;
unsigned Depth = 0;
- while (CurrentToken != nullptr && CurrentToken != Line.Last) {
+ while (CurrentToken && CurrentToken != Line.Last) {
if (CurrentToken->is(tok::l_brace)) {
CurrentToken->IsArrayInitializer = true;
- if (CurrentToken->Next != nullptr)
+ if (CurrentToken->Next)
CurrentToken->Next->MustBreakBefore = true;
CurrentToken =
calculateInitializerColumnList(Line, CurrentToken->Next, Depth + 1);
@@ -3122,14 +3504,14 @@ void TokenAnnotator::calculateArrayInitializerColumnList(
FormatToken *TokenAnnotator::calculateInitializerColumnList(
AnnotatedLine &Line, FormatToken *CurrentToken, unsigned Depth) const {
- while (CurrentToken != nullptr && CurrentToken != Line.Last) {
+ while (CurrentToken && CurrentToken != Line.Last) {
if (CurrentToken->is(tok::l_brace))
++Depth;
else if (CurrentToken->is(tok::r_brace))
--Depth;
if (Depth == 2 && CurrentToken->isOneOf(tok::l_brace, tok::comma)) {
CurrentToken = CurrentToken->Next;
- if (CurrentToken == nullptr)
+ if (!CurrentToken)
break;
CurrentToken->StartsColumn = true;
CurrentToken = CurrentToken->Previous;
@@ -3379,8 +3761,6 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
Right.MatchingParen->is(TT_CastRParen)) {
return true;
}
- if (Style.isJson() && Left.is(tok::string_literal) && Right.is(tok::colon))
- return false;
if (Left.is(Keywords.kw_assert) && Style.Language == FormatStyle::LK_Java)
return true;
if (Style.ObjCSpaceAfterProperty && Line.Type == LT_ObjCProperty &&
@@ -3394,17 +3774,19 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if ((Left.is(tok::l_paren) && Right.is(tok::r_paren)) ||
(Left.is(tok::l_brace) && Left.isNot(BK_Block) &&
Right.is(tok::r_brace) && Right.isNot(BK_Block))) {
- return Style.SpaceInEmptyParentheses;
+ return Style.SpacesInParensOptions.InEmptyParentheses;
}
- if (Style.SpacesInConditionalStatement) {
+ if (Style.SpacesInParensOptions.InConditionalStatements) {
const FormatToken *LeftParen = nullptr;
if (Left.is(tok::l_paren))
LeftParen = &Left;
else if (Right.is(tok::r_paren) && Right.MatchingParen)
LeftParen = Right.MatchingParen;
- if (LeftParen && LeftParen->Previous &&
- isKeywordWithCondition(*LeftParen->Previous)) {
- return true;
+ if (LeftParen) {
+ if (LeftParen->is(TT_ConditionLParen))
+ return true;
+ if (LeftParen->Previous && isKeywordWithCondition(*LeftParen->Previous))
+ return true;
}
}
@@ -3433,8 +3815,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.is(tok::l_paren) || Right.is(tok::r_paren)) {
return (Right.is(TT_CastRParen) ||
(Left.MatchingParen && Left.MatchingParen->is(TT_CastRParen)))
- ? Style.SpacesInCStyleCastParentheses
- : Style.SpacesInParentheses;
+ ? Style.SpacesInParensOptions.InCStyleCasts
+ : Style.SpacesInParensOptions.Other;
}
if (Right.isOneOf(tok::semi, tok::comma))
return false;
@@ -3531,8 +3913,9 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Right.is(TT_BlockComment))
return true;
// foo() -> const Bar * override/final
- if (Right.isOneOf(Keywords.kw_override, Keywords.kw_final,
- tok::kw_noexcept) &&
+ // S::foo() & noexcept/requires
+ if (Right.isOneOf(Keywords.kw_override, Keywords.kw_final, tok::kw_noexcept,
+ TT_RequiresClause) &&
!Right.is(TT_StartOfName)) {
return true;
}
@@ -3616,6 +3999,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
}
}
}
+ if (Style.isCSharp() && Left.is(Keywords.kw_is) && Right.is(tok::l_square))
+ return true;
const auto SpaceRequiredForArrayInitializerLSquare =
[](const FormatToken &LSquareTok, const FormatStyle &Style) {
return Style.SpacesInContainerLiterals ||
@@ -3657,7 +4042,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if ((Left.is(tok::l_brace) && Left.isNot(BK_Block)) ||
(Right.is(tok::r_brace) && Right.MatchingParen &&
Right.MatchingParen->isNot(BK_Block))) {
- return Style.Cpp11BracedListStyle ? Style.SpacesInParentheses : true;
+ return Style.Cpp11BracedListStyle ? Style.SpacesInParensOptions.Other
+ : true;
}
if (Left.is(TT_BlockComment)) {
// No whitespace in x(/*foo=*/1), except for JavaScript.
@@ -3756,7 +4142,12 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return !Left.isOneOf(tok::l_paren, tok::l_square, tok::at) &&
(Left.isNot(tok::colon) || Left.isNot(TT_ObjCMethodExpr));
}
- if ((Left.isOneOf(tok::identifier, tok::greater, tok::r_square,
+ // No space between the variable name and the initializer list.
+ // A a1{1};
+ // Verilog doesn't have such syntax, but it has word operators that are C++
+ // identifiers like `a inside {b, c}`. So the rule is not applicable.
+ if (!Style.isVerilog() &&
+ (Left.isOneOf(tok::identifier, tok::greater, tok::r_square,
tok::r_paren) ||
Left.isSimpleTypeSpecifier()) &&
Right.is(tok::l_brace) && Right.getNextNonComment() &&
@@ -3826,6 +4217,10 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
if (Style.isCpp()) {
+ if (Left.is(TT_OverloadedOperator) &&
+ Right.isOneOf(TT_TemplateOpener, TT_TemplateCloser)) {
+ return true;
+ }
// Space between UDL and dot: auto b = 4s .count();
if (Right.is(tok::period) && Left.is(tok::numeric_constant))
return true;
@@ -3893,8 +4288,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(tok::numeric_constant) && Right.is(tok::percent))
return Right.hasWhitespaceBefore();
} else if (Style.isJson()) {
- if (Right.is(tok::colon))
- return false;
+ if (Right.is(tok::colon) && Left.is(tok::string_literal))
+ return Style.SpaceBeforeJsonColon;
} else if (Style.isCSharp()) {
// Require spaces around '{' and before '}' unless they appear in
// interpolated strings. Interpolated strings are merged into a single token
@@ -4089,6 +4484,11 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
}
} else if (Style.isVerilog()) {
+ // An escaped identifier ends with whitespace.
+ if (Style.isVerilog() && Left.is(tok::identifier) &&
+ Left.TokenText[0] == '\\') {
+ return true;
+ }
// Add space between things in a primitive's state table unless in a
// transition like `(0?)`.
if ((Left.is(TT_VerilogTableItem) &&
@@ -4117,6 +4517,11 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
(Left.is(TT_VerilogNumberBase) && Right.is(tok::numeric_constant))) {
return false;
}
+ // Don't add spaces between two at signs. Like in a coverage event.
+ // Don't add spaces between at and a sensitivity list like
+ // `@(posedge clk)`.
+ if (Left.is(tok::at) && Right.isOneOf(tok::l_paren, tok::star, tok::at))
+ return false;
// Add space between the type name and dimension like `logic [1:0]`.
if (Right.is(tok::l_square) &&
Left.isOneOf(TT_VerilogDimensionedTypeName, Keywords.kw_function)) {
@@ -4133,9 +4538,24 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Keywords.isWordLike(Left))) {
return false;
}
+ // Don't add spaces in imports like `import foo::*;`.
+ if ((Right.is(tok::star) && Left.is(tok::coloncolon)) ||
+ (Left.is(tok::star) && Right.is(tok::semi))) {
+ return false;
+ }
// Add space in attribute like `(* ASYNC_REG = "TRUE" *)`.
if (Left.endsSequence(tok::star, tok::l_paren) && Right.is(tok::identifier))
return true;
+ // Add space before drive strength like in `wire (strong1, pull0)`.
+ if (Right.is(tok::l_paren) && Right.is(TT_VerilogStrength))
+ return true;
+ // Don't add space in a streaming concatenation like `{>>{j}}`.
+ if ((Left.is(tok::l_brace) &&
+ Right.isOneOf(tok::lessless, tok::greatergreater)) ||
+ (Left.endsSequence(tok::lessless, tok::l_brace) ||
+ Left.endsSequence(tok::greatergreater, tok::l_brace))) {
+ return false;
+ }
}
if (Left.is(TT_ImplicitStringLiteral))
return Right.hasWhitespaceBefore();
@@ -4158,8 +4578,12 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Left.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow)) {
return true;
}
- if (Left.is(tok::comma) && !Right.is(TT_OverloadedOperatorLParen))
+ if (Left.is(tok::comma) && !Right.is(TT_OverloadedOperatorLParen) &&
+ // In an unexpanded macro call we only find the parentheses and commas
+ // in a line; the commas and closing parenthesis do not require a space.
+ (Left.Children.empty() || !Left.MacroParent)) {
return true;
+ }
if (Right.is(tok::comma))
return false;
if (Right.is(TT_ObjCBlockLParen))
@@ -4177,15 +4601,12 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Style.BitFieldColonSpacing == FormatStyle::BFCS_After;
}
if (Right.is(tok::colon)) {
- if (Right.is(TT_GotoLabelColon) ||
- (!Style.isVerilog() &&
- Line.First->isOneOf(tok::kw_default, tok::kw_case))) {
- return Style.SpaceBeforeCaseColon;
- }
- if (Line.First->isOneOf(tok::kw_default, tok::kw_case))
+ if (Right.is(TT_CaseLabelColon))
return Style.SpaceBeforeCaseColon;
- const FormatToken *Next = Right.getNextNonComment();
- if (!Next || Next->is(tok::semi))
+ if (Right.is(TT_GotoLabelColon))
+ return false;
+ // `private:` and `public:`.
+ if (!Right.getNextNonComment())
return false;
if (Right.is(TT_ObjCMethodExpr))
return false;
@@ -4284,7 +4705,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
!(Left.isOneOf(tok::l_paren, tok::r_paren, tok::l_square,
tok::kw___super, TT_TemplateOpener,
TT_TemplateCloser)) ||
- (Left.is(tok::l_paren) && Style.SpacesInParentheses);
+ (Left.is(tok::l_paren) && Style.SpacesInParensOptions.Other);
}
if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser)))
return ShouldAddSpacesInAngles();
@@ -4447,6 +4868,21 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
}
} else if (Style.isVerilog()) {
+ // Break between assignments.
+ if (Left.is(TT_VerilogAssignComma))
+ return true;
+ // Break between ports of different types.
+ if (Left.is(TT_VerilogTypeComma))
+ return true;
+ // Break between ports in a module instantiation and after the parameter
+ // list.
+ if (Style.VerilogBreakBetweenInstancePorts &&
+ (Left.is(TT_VerilogInstancePortComma) ||
+ (Left.is(tok::r_paren) && Keywords.isVerilogIdentifier(Right) &&
+ Left.MatchingParen &&
+ Left.MatchingParen->is(TT_VerilogInstancePortLParen)))) {
+ return true;
+ }
// Break after labels. In Verilog labels don't have the 'case' keyword, so
// it is hard to identify them in UnwrappedLineParser.
if (!Keywords.isVerilogBegin(Right) && Keywords.isVerilogEndOfLabel(Left))
@@ -4570,6 +5006,18 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
Right.isOneOf(TT_CtorInitializerComma, TT_CtorInitializerColon)) {
return true;
}
+ if (Style.PackConstructorInitializers == FormatStyle::PCIS_NextLineOnly) {
+ if ((Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeColon ||
+ Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma) &&
+ Right.is(TT_CtorInitializerColon)) {
+ return true;
+ }
+
+ if (Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon &&
+ Left.is(TT_CtorInitializerColon)) {
+ return true;
+ }
+ }
// Break only if we have multiple inheritance.
if (Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma &&
Right.is(TT_InheritanceComma)) {
@@ -4622,8 +5070,13 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
}
- return (Line.startsWith(tok::kw_class) && Style.BraceWrapping.AfterClass) ||
- (Line.startsWith(tok::kw_struct) && Style.BraceWrapping.AfterStruct);
+ // Don't attempt to interpret struct return types as structs.
+ if (Right.isNot(TT_FunctionLBrace)) {
+ return (Line.startsWith(tok::kw_class) &&
+ Style.BraceWrapping.AfterClass) ||
+ (Line.startsWith(tok::kw_struct) &&
+ Style.BraceWrapping.AfterStruct);
+ }
}
if (Left.is(TT_ObjCBlockLBrace) &&
@@ -5038,8 +5491,10 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
// We only break before r_brace if there was a corresponding break before
// the l_brace, which is tracked by BreakBeforeClosingBrace.
- if (Right.is(tok::r_brace))
- return Right.MatchingParen && Right.MatchingParen->is(BK_Block);
+ if (Right.is(tok::r_brace)) {
+ return Right.MatchingParen && (Right.MatchingParen->is(BK_Block) ||
+ (Right.isBlockIndentedInitRBrace(Style)));
+ }
// We only break before r_paren if we're in a block indented context.
if (Right.is(tok::r_paren)) {
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
index 354511b6323a..611e95ba11b0 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
@@ -34,6 +34,15 @@ enum LineType {
LT_CommentAbovePPDirective,
};
+enum ScopeType {
+ // Contained in class declaration/definition.
+ ST_Class,
+ // Contained within function definition.
+ ST_Function,
+ // Contained within other scope block (loop, if/else, etc).
+ ST_Other,
+};
+
class AnnotatedLine {
public:
AnnotatedLine(const UnwrappedLine &Line)
@@ -56,20 +65,32 @@ public:
// left them in a different state.
First->Previous = nullptr;
FormatToken *Current = First;
+ addChildren(Line.Tokens.front(), Current);
for (const UnwrappedLineNode &Node : llvm::drop_begin(Line.Tokens)) {
+ if (Node.Tok->MacroParent)
+ ContainsMacroCall = true;
Current->Next = Node.Tok;
Node.Tok->Previous = Current;
Current = Current->Next;
- Current->Children.clear();
- for (const auto &Child : Node.Children) {
- Children.push_back(new AnnotatedLine(Child));
- Current->Children.push_back(Children.back());
- }
+ addChildren(Node, Current);
+ // FIXME: if we add children, previous will point to the token before
+ // the children; changing this requires significant changes across
+ // clang-format.
}
Last = Current;
Last->Next = nullptr;
}
+ void addChildren(const UnwrappedLineNode &Node, FormatToken *Current) {
+ Current->Children.clear();
+ for (const auto &Child : Node.Children) {
+ Children.push_back(new AnnotatedLine(Child));
+ if (Children.back()->ContainsMacroCall)
+ ContainsMacroCall = true;
+ Current->Children.push_back(Children.back());
+ }
+ }
+
~AnnotatedLine() {
for (AnnotatedLine *Child : Children)
delete Child;
@@ -140,6 +161,9 @@ public:
bool MightBeFunctionDecl;
bool IsMultiVariableDeclStmt;
+ /// \c True if this line contains a macro call for which an expansion exists.
+ bool ContainsMacroCall = false;
+
/// \c True if this line should be formatted, i.e. intersects directly or
/// indirectly with one of the input ranges.
bool Affected;
@@ -178,7 +202,7 @@ public:
// FIXME: Can/should this be done in the UnwrappedLineParser?
void setCommentLineLevels(SmallVectorImpl<AnnotatedLine *> &Lines) const;
- void annotate(AnnotatedLine &Line) const;
+ void annotate(AnnotatedLine &Line);
void calculateFormattingInformation(AnnotatedLine &Line) const;
private:
@@ -220,6 +244,8 @@ private:
const FormatStyle &Style;
const AdditionalKeywords &Keywords;
+
+ SmallVector<ScopeType> Scopes;
};
} // end namespace format
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
index 8e1d907208c0..52519145279c 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "UnwrappedLineFormatter.h"
+#include "FormatToken.h"
#include "NamespaceEndCommentsFixer.h"
#include "WhitespaceManager.h"
#include "llvm/Support/Debug.h"
@@ -59,7 +60,8 @@ public:
Offset = getIndentOffset(*Line.First);
// Update the indent level cache size so that we can rely on it
// having the right size in adjustToUnmodifiedline.
- skipLine(Line, /*UnknownIndent=*/true);
+ if (Line.Level >= IndentForLevel.size())
+ IndentForLevel.resize(Line.Level + 1, -1);
if (Style.IndentPPDirectives != FormatStyle::PPDIS_None &&
(Line.InPPDirective ||
(Style.IndentPPDirectives == FormatStyle::PPDIS_BeforeHash &&
@@ -72,6 +74,13 @@ public:
: Line.Level * PPIndentWidth;
Indent += AdditionalIndent;
} else {
+ // When going to lower levels, forget previous higher levels so that we
+ // recompute future higher levels. But don't forget them if we enter a PP
+ // directive, since these do not terminate a C++ code block.
+ if (!Line.InPPDirective) {
+ assert(Line.Level <= IndentForLevel.size());
+ IndentForLevel.resize(Line.Level + 1);
+ }
Indent = getIndent(Line.Level);
}
if (static_cast<int>(Indent) + Offset >= 0)
@@ -80,27 +89,21 @@ public:
Indent = Line.Level * Style.IndentWidth + Style.ContinuationIndentWidth;
}
- /// Update the indent state given that \p Line indent should be
- /// skipped.
- void skipLine(const AnnotatedLine &Line, bool UnknownIndent = false) {
- if (Line.Level >= IndentForLevel.size())
- IndentForLevel.resize(Line.Level + 1, UnknownIndent ? -1 : Indent);
- }
-
/// Update the level indent to adapt to the given \p Line.
///
/// When a line is not formatted, we move the subsequent lines on the same
/// level to the same indent.
/// Note that \c nextLine must have been called before this method.
void adjustToUnmodifiedLine(const AnnotatedLine &Line) {
+ if (Line.InPPDirective)
+ return;
+ assert(Line.Level < IndentForLevel.size());
+ if (Line.First->is(tok::comment) && IndentForLevel[Line.Level] != -1)
+ return;
unsigned LevelIndent = Line.First->OriginalColumn;
if (static_cast<int>(LevelIndent) - Offset >= 0)
LevelIndent -= Offset;
- assert(Line.Level < IndentForLevel.size());
- if ((!Line.First->is(tok::comment) || IndentForLevel[Line.Level] == -1) &&
- !Line.InPPDirective) {
- IndentForLevel[Line.Level] = LevelIndent;
- }
+ IndentForLevel[Line.Level] = LevelIndent;
}
private:
@@ -121,8 +124,8 @@ private:
return true;
}
// Handle Qt signals.
- else if ((RootToken.isOneOf(Keywords.kw_signals, Keywords.kw_qsignals) &&
- RootToken.Next && RootToken.Next->is(tok::colon))) {
+ else if (RootToken.isOneOf(Keywords.kw_signals, Keywords.kw_qsignals) &&
+ RootToken.Next && RootToken.Next->is(tok::colon)) {
return true;
} else if (RootToken.Next &&
RootToken.Next->isOneOf(Keywords.kw_slots,
@@ -153,6 +156,7 @@ private:
/// at \p IndentForLevel[l], or a value < 0 if the indent for
/// that level is unknown.
unsigned getIndent(unsigned Level) const {
+ assert(Level < IndentForLevel.size());
if (IndentForLevel[Level] != -1)
return IndentForLevel[Level];
if (Level == 0)
@@ -164,7 +168,10 @@ private:
const AdditionalKeywords &Keywords;
const unsigned AdditionalIndent;
- /// The indent in characters for each level.
+ /// The indent in characters for each level. It remembers the indent of
+ /// previous lines (that are not PP directives) of equal or lower levels. This
+ /// is used to align formatted lines to the indent of previous non-formatted
+ /// lines. Think about the --lines parameter of clang-format.
SmallVector<int> IndentForLevel;
/// Offset of the current line relative to the indent level.
@@ -366,20 +373,27 @@ private:
// instead of TheLine->First.
if (Style.CompactNamespaces) {
- if (auto nsToken = TheLine->First->getNamespaceToken()) {
- int i = 0;
- unsigned closingLine = TheLine->MatchingClosingBlockLineIndex - 1;
- for (; I + 1 + i != E &&
- nsToken->TokenText == getNamespaceTokenText(I[i + 1]) &&
- closingLine == I[i + 1]->MatchingClosingBlockLineIndex &&
- I[i + 1]->Last->TotalLength < Limit;
- i++, --closingLine) {
- // No extra indent for compacted namespaces.
- IndentTracker.skipLine(*I[i + 1]);
-
- Limit -= I[i + 1]->Last->TotalLength;
+ if (const auto *NSToken = TheLine->First->getNamespaceToken()) {
+ int J = 1;
+ assert(TheLine->MatchingClosingBlockLineIndex > 0);
+ for (auto ClosingLineIndex = TheLine->MatchingClosingBlockLineIndex - 1;
+ I + J != E && NSToken->TokenText == getNamespaceTokenText(I[J]) &&
+ ClosingLineIndex == I[J]->MatchingClosingBlockLineIndex &&
+ I[J]->Last->TotalLength < Limit;
+ ++J, --ClosingLineIndex) {
+ Limit -= I[J]->Last->TotalLength;
+
+ // Reduce indent level for bodies of namespaces which were compacted,
+ // but only if their content was indented in the first place.
+ auto *ClosingLine = AnnotatedLines.begin() + ClosingLineIndex + 1;
+ auto OutdentBy = I[J]->Level - TheLine->Level;
+ for (auto *CompactedLine = I + J; CompactedLine <= ClosingLine;
+ ++CompactedLine) {
+ if (!(*CompactedLine)->InPPDirective)
+ (*CompactedLine)->Level -= OutdentBy;
+ }
}
- return i;
+ return J - 1;
}
if (auto nsToken = getMatchingNamespaceToken(TheLine, AnnotatedLines)) {
@@ -509,12 +523,11 @@ private:
ShouldMerge = !Style.BraceWrapping.AfterClass ||
(NextLine.First->is(tok::r_brace) &&
!Style.BraceWrapping.SplitEmptyRecord);
- } else {
+ } else if (TheLine->InPPDirective ||
+ !TheLine->First->isOneOf(tok::kw_class, tok::kw_enum,
+ tok::kw_struct)) {
// Try to merge a block with left brace unwrapped that wasn't yet
// covered.
- assert(TheLine->InPPDirective ||
- !TheLine->First->isOneOf(tok::kw_class, tok::kw_enum,
- tok::kw_struct));
ShouldMerge = !Style.BraceWrapping.AfterFunction ||
(NextLine.First->is(tok::r_brace) &&
!Style.BraceWrapping.SplitEmptyFunction);
@@ -887,7 +900,10 @@ private:
}
bool containsMustBreak(const AnnotatedLine *Line) {
- for (const FormatToken *Tok = Line->First; Tok; Tok = Tok->Next)
+ assert(Line->First);
+ // Ignore the first token, because in this situation, it applies more to the
+ // last token of the previous line.
+ for (const FormatToken *Tok = Line->First->Next; Tok; Tok = Tok->Next)
if (Tok->MustBreakBefore)
return true;
return false;
@@ -918,9 +934,22 @@ private:
static void markFinalized(FormatToken *Tok) {
for (; Tok; Tok = Tok->Next) {
- Tok->Finalized = true;
- for (AnnotatedLine *Child : Tok->Children)
- markFinalized(Child->First);
+ if (Tok->MacroCtx && Tok->MacroCtx->Role == MR_ExpandedArg) {
+ // In the first pass we format all macro arguments in the expanded token
+ // stream. Instead of finalizing the macro arguments, we mark that they
+ // will be modified as unexpanded arguments (as part of the macro call
+ // formatting) in the next pass.
+ Tok->MacroCtx->Role = MR_UnexpandedArg;
+ // Reset whether spaces are required before this token, as that is context
+ // dependent, and that context may change when formatting the macro call.
+ // For example, given M(x) -> 2 * x, and the macro call M(var),
+ // the token 'var' will have SpacesRequiredBefore = 1 after being
+ // formatted as part of the expanded macro, but SpacesRequiredBefore = 0
+ // for its position within the macro call.
+ Tok->SpacesRequiredBefore = 0;
+ } else {
+ Tok->Finalized = true;
+ }
}
}
@@ -975,30 +1004,19 @@ protected:
bool formatChildren(LineState &State, bool NewLine, bool DryRun,
unsigned &Penalty) {
const FormatToken *LBrace = State.NextToken->getPreviousNonComment();
+ bool HasLBrace = LBrace && LBrace->is(tok::l_brace) && LBrace->is(BK_Block);
FormatToken &Previous = *State.NextToken->Previous;
- if (!LBrace || LBrace->isNot(tok::l_brace) || LBrace->isNot(BK_Block) ||
- Previous.Children.size() == 0) {
+ if (Previous.Children.size() == 0 || (!HasLBrace && !LBrace->MacroParent)) {
// The previous token does not open a block. Nothing to do. We don't
// assert so that we can simply call this function for all tokens.
return true;
}
- if (NewLine) {
+ if (NewLine || Previous.MacroParent) {
const ParenState &P = State.Stack.back();
int AdditionalIndent =
P.Indent - Previous.Children[0]->Level * Style.IndentWidth;
-
- if (Style.LambdaBodyIndentation == FormatStyle::LBI_OuterScope &&
- P.NestedBlockIndent == P.LastSpace) {
- if (State.NextToken->MatchingParen &&
- State.NextToken->MatchingParen->is(TT_LambdaLBrace)) {
- State.Stack.pop_back();
- }
- if (LBrace->is(TT_LambdaLBrace))
- AdditionalIndent = 0;
- }
-
Penalty +=
BlockFormatter->format(Previous.Children, DryRun, AdditionalIndent,
/*FixBadIndentation=*/true);
@@ -1349,11 +1367,12 @@ unsigned UnwrappedLineFormatter::format(
NextLine = Joiner.getNextMergedLine(DryRun, IndentTracker);
unsigned ColumnLimit = getColumnLimit(TheLine.InPPDirective, NextLine);
bool FitsIntoOneLine =
- TheLine.Last->TotalLength + Indent <= ColumnLimit ||
- (TheLine.Type == LT_ImportStatement &&
- (!Style.isJavaScript() || !Style.JavaScriptWrapImports)) ||
- (Style.isCSharp() &&
- TheLine.InPPDirective); // don't split #regions in C#
+ !TheLine.ContainsMacroCall &&
+ (TheLine.Last->TotalLength + Indent <= ColumnLimit ||
+ (TheLine.Type == LT_ImportStatement &&
+ (!Style.isJavaScript() || !Style.JavaScriptWrapImports)) ||
+ (Style.isCSharp() &&
+ TheLine.InPPDirective)); // don't split #regions in C#
if (Style.ColumnLimit == 0) {
NoColumnLimitLineFormatter(Indenter, Whitespaces, Style, this)
.formatLine(TheLine, NextStartColumn + Indent,
@@ -1404,27 +1423,28 @@ unsigned UnwrappedLineFormatter::format(
NextLine = Joiner.getNextMergedLine(DryRun, IndentTracker);
RangeMinLevel = UINT_MAX;
}
- if (!DryRun)
- markFinalized(TheLine.First);
+ if (!DryRun) {
+ auto *Tok = TheLine.First;
+ if (Tok->is(tok::hash) && !Tok->Previous && Tok->Next &&
+ Tok->Next->isOneOf(tok::pp_if, tok::pp_ifdef, tok::pp_ifndef,
+ tok::pp_elif, tok::pp_elifdef, tok::pp_elifndef,
+ tok::pp_else, tok::pp_endif)) {
+ Tok = Tok->Next;
+ }
+ markFinalized(Tok);
+ }
}
PenaltyCache[CacheKey] = Penalty;
return Penalty;
}
-void UnwrappedLineFormatter::formatFirstToken(
- const AnnotatedLine &Line, const AnnotatedLine *PreviousLine,
- const AnnotatedLine *PrevPrevLine,
- const SmallVectorImpl<AnnotatedLine *> &Lines, unsigned Indent,
- unsigned NewlineIndent) {
- FormatToken &RootToken = *Line.First;
- if (RootToken.is(tok::eof)) {
- unsigned Newlines = std::min(RootToken.NewlinesBefore, 1u);
- unsigned TokenIndent = Newlines ? NewlineIndent : 0;
- Whitespaces->replaceWhitespace(RootToken, Newlines, TokenIndent,
- TokenIndent);
- return;
- }
- unsigned Newlines =
+static auto computeNewlines(const AnnotatedLine &Line,
+ const AnnotatedLine *PreviousLine,
+ const AnnotatedLine *PrevPrevLine,
+ const SmallVectorImpl<AnnotatedLine *> &Lines,
+ const FormatStyle &Style) {
+ const auto &RootToken = *Line.First;
+ auto Newlines =
std::min(RootToken.NewlinesBefore, Style.MaxEmptyLinesToKeep + 1);
// Remove empty lines before "}" where applicable.
if (RootToken.is(tok::r_brace) &&
@@ -1435,7 +1455,7 @@ void UnwrappedLineFormatter::formatFirstToken(
Newlines = std::min(Newlines, 1u);
}
// Remove empty lines at the start of nested blocks (lambdas/arrow functions)
- if (PreviousLine == nullptr && Line.Level > 0)
+ if (!PreviousLine && Line.Level > 0)
Newlines = std::min(Newlines, 1u);
if (Newlines == 0 && !RootToken.IsFirst)
Newlines = 1;
@@ -1503,7 +1523,32 @@ void UnwrappedLineFormatter::formatFirstToken(
}
}
- if (Newlines)
+ return Newlines;
+}
+
+void UnwrappedLineFormatter::formatFirstToken(
+ const AnnotatedLine &Line, const AnnotatedLine *PreviousLine,
+ const AnnotatedLine *PrevPrevLine,
+ const SmallVectorImpl<AnnotatedLine *> &Lines, unsigned Indent,
+ unsigned NewlineIndent) {
+ FormatToken &RootToken = *Line.First;
+ if (RootToken.is(tok::eof)) {
+ unsigned Newlines =
+ std::min(RootToken.NewlinesBefore,
+ Style.KeepEmptyLinesAtEOF ? Style.MaxEmptyLinesToKeep + 1 : 1);
+ unsigned TokenIndent = Newlines ? NewlineIndent : 0;
+ Whitespaces->replaceWhitespace(RootToken, Newlines, TokenIndent,
+ TokenIndent);
+ return;
+ }
+
+ if (RootToken.Newlines < 0) {
+ RootToken.Newlines =
+ computeNewlines(Line, PreviousLine, PrevPrevLine, Lines, Style);
+ assert(RootToken.Newlines >= 0);
+ }
+
+ if (RootToken.Newlines > 0)
Indent = NewlineIndent;
// Preprocessor directives get indented before the hash only if specified. In
@@ -1515,7 +1560,7 @@ void UnwrappedLineFormatter::formatFirstToken(
Indent = 0;
}
- Whitespaces->replaceWhitespace(RootToken, Newlines, Indent, Indent,
+ Whitespaces->replaceWhitespace(RootToken, RootToken.Newlines, Indent, Indent,
/*IsAligned=*/false,
Line.InPPDirective &&
!RootToken.HasUnescapedNewline);
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
index 7a49b189b481..32619bc56f7a 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
@@ -14,10 +14,15 @@
#include "UnwrappedLineParser.h"
#include "FormatToken.h"
+#include "FormatTokenLexer.h"
+#include "FormatTokenSource.h"
+#include "Macros.h"
#include "TokenAnnotator.h"
#include "clang/Basic/TokenKinds.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -28,34 +33,6 @@
namespace clang {
namespace format {
-class FormatTokenSource {
-public:
- virtual ~FormatTokenSource() {}
-
- // Returns the next token in the token stream.
- virtual FormatToken *getNextToken() = 0;
-
- // Returns the token preceding the token returned by the last call to
- // getNextToken() in the token stream, or nullptr if no such token exists.
- virtual FormatToken *getPreviousToken() = 0;
-
- // Returns the token that would be returned by the next call to
- // getNextToken().
- virtual FormatToken *peekNextToken(bool SkipComment = false) = 0;
-
- // Returns whether we are at the end of the file.
- // This can be different from whether getNextToken() returned an eof token
- // when the FormatTokenSource is a view on a part of the token stream.
- virtual bool isEOF() = 0;
-
- // Gets the current position in the token stream, to be used by setPosition().
- virtual unsigned getPosition() = 0;
-
- // Resets the token stream to the state it was in when getPosition() returned
- // Position, and return the token at that position in the stream.
- virtual FormatToken *setPosition(unsigned Position) = 0;
-};
-
namespace {
void printLine(llvm::raw_ostream &OS, const UnwrappedLine &Line,
@@ -112,97 +89,6 @@ private:
llvm::BitVector &Stack;
};
-static bool isLineComment(const FormatToken &FormatTok) {
- return FormatTok.is(tok::comment) && !FormatTok.TokenText.startswith("/*");
-}
-
-// Checks if \p FormatTok is a line comment that continues the line comment
-// \p Previous. The original column of \p MinColumnToken is used to determine
-// whether \p FormatTok is indented enough to the right to continue \p Previous.
-static bool continuesLineComment(const FormatToken &FormatTok,
- const FormatToken *Previous,
- const FormatToken *MinColumnToken) {
- if (!Previous || !MinColumnToken)
- return false;
- unsigned MinContinueColumn =
- MinColumnToken->OriginalColumn + (isLineComment(*MinColumnToken) ? 0 : 1);
- return isLineComment(FormatTok) && FormatTok.NewlinesBefore == 1 &&
- isLineComment(*Previous) &&
- FormatTok.OriginalColumn >= MinContinueColumn;
-}
-
-class ScopedMacroState : public FormatTokenSource {
-public:
- ScopedMacroState(UnwrappedLine &Line, FormatTokenSource *&TokenSource,
- FormatToken *&ResetToken)
- : Line(Line), TokenSource(TokenSource), ResetToken(ResetToken),
- PreviousLineLevel(Line.Level), PreviousTokenSource(TokenSource),
- Token(nullptr), PreviousToken(nullptr) {
- FakeEOF.Tok.startToken();
- FakeEOF.Tok.setKind(tok::eof);
- TokenSource = this;
- Line.Level = 0;
- Line.InPPDirective = true;
- // InMacroBody gets set after the `#define x` part.
- }
-
- ~ScopedMacroState() override {
- TokenSource = PreviousTokenSource;
- ResetToken = Token;
- Line.InPPDirective = false;
- Line.InMacroBody = false;
- Line.Level = PreviousLineLevel;
- }
-
- FormatToken *getNextToken() override {
- // The \c UnwrappedLineParser guards against this by never calling
- // \c getNextToken() after it has encountered the first eof token.
- assert(!eof());
- PreviousToken = Token;
- Token = PreviousTokenSource->getNextToken();
- if (eof())
- return &FakeEOF;
- return Token;
- }
-
- FormatToken *getPreviousToken() override {
- return PreviousTokenSource->getPreviousToken();
- }
-
- FormatToken *peekNextToken(bool SkipComment) override {
- if (eof())
- return &FakeEOF;
- return PreviousTokenSource->peekNextToken(SkipComment);
- }
-
- bool isEOF() override { return PreviousTokenSource->isEOF(); }
-
- unsigned getPosition() override { return PreviousTokenSource->getPosition(); }
-
- FormatToken *setPosition(unsigned Position) override {
- PreviousToken = nullptr;
- Token = PreviousTokenSource->setPosition(Position);
- return Token;
- }
-
-private:
- bool eof() {
- return Token && Token->HasUnescapedNewline &&
- !continuesLineComment(*Token, PreviousToken,
- /*MinColumnToken=*/PreviousToken);
- }
-
- FormatToken FakeEOF;
- UnwrappedLine &Line;
- FormatTokenSource *&TokenSource;
- FormatToken *&ResetToken;
- unsigned PreviousLineLevel;
- FormatTokenSource *PreviousTokenSource;
-
- FormatToken *Token;
- FormatToken *PreviousToken;
-};
-
} // end anonymous namespace
class ScopedLineState {
@@ -261,80 +147,12 @@ private:
unsigned OldLineLevel;
};
-namespace {
-
-class IndexedTokenSource : public FormatTokenSource {
-public:
- IndexedTokenSource(ArrayRef<FormatToken *> Tokens)
- : Tokens(Tokens), Position(-1) {}
-
- FormatToken *getNextToken() override {
- if (Position >= 0 && isEOF()) {
- LLVM_DEBUG({
- llvm::dbgs() << "Next ";
- dbgToken(Position);
- });
- return Tokens[Position];
- }
- ++Position;
- LLVM_DEBUG({
- llvm::dbgs() << "Next ";
- dbgToken(Position);
- });
- return Tokens[Position];
- }
-
- FormatToken *getPreviousToken() override {
- return Position > 0 ? Tokens[Position - 1] : nullptr;
- }
-
- FormatToken *peekNextToken(bool SkipComment) override {
- int Next = Position + 1;
- if (SkipComment)
- while (Tokens[Next]->is(tok::comment))
- ++Next;
- LLVM_DEBUG({
- llvm::dbgs() << "Peeking ";
- dbgToken(Next);
- });
- return Tokens[Next];
- }
-
- bool isEOF() override { return Tokens[Position]->is(tok::eof); }
-
- unsigned getPosition() override {
- LLVM_DEBUG(llvm::dbgs() << "Getting Position: " << Position << "\n");
- assert(Position >= 0);
- return Position;
- }
-
- FormatToken *setPosition(unsigned P) override {
- LLVM_DEBUG(llvm::dbgs() << "Setting Position: " << P << "\n");
- Position = P;
- return Tokens[Position];
- }
-
- void reset() { Position = -1; }
-
-private:
- void dbgToken(int Position, llvm::StringRef Indent = "") {
- FormatToken *Tok = Tokens[Position];
- llvm::dbgs() << Indent << "[" << Position
- << "] Token: " << Tok->Tok.getName() << " / " << Tok->TokenText
- << ", Macro: " << !!Tok->MacroCtx << "\n";
- }
-
- ArrayRef<FormatToken *> Tokens;
- int Position;
-};
-
-} // end anonymous namespace
-
-UnwrappedLineParser::UnwrappedLineParser(const FormatStyle &Style,
- const AdditionalKeywords &Keywords,
- unsigned FirstStartColumn,
- ArrayRef<FormatToken *> Tokens,
- UnwrappedLineConsumer &Callback)
+UnwrappedLineParser::UnwrappedLineParser(
+ SourceManager &SourceMgr, const FormatStyle &Style,
+ const AdditionalKeywords &Keywords, unsigned FirstStartColumn,
+ ArrayRef<FormatToken *> Tokens, UnwrappedLineConsumer &Callback,
+ llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
+ IdentifierTable &IdentTable)
: Line(new UnwrappedLine), MustBreakBeforeNextToken(false),
CurrentLines(&Lines), Style(Style), Keywords(Keywords),
CommentPragmasRegex(Style.CommentPragmas), Tokens(nullptr),
@@ -342,7 +160,8 @@ UnwrappedLineParser::UnwrappedLineParser(const FormatStyle &Style,
IncludeGuard(Style.IndentPPDirectives == FormatStyle::PPDIS_None
? IG_Rejected
: IG_Inited),
- IncludeGuardToken(nullptr), FirstStartColumn(FirstStartColumn) {}
+ IncludeGuardToken(nullptr), FirstStartColumn(FirstStartColumn),
+ Macros(Style.Macros, SourceMgr, Style, Allocator, IdentTable) {}
void UnwrappedLineParser::reset() {
PPBranchLevel = -1;
@@ -360,6 +179,15 @@ void UnwrappedLineParser::reset() {
NestedTooDeep.clear();
PPStack.clear();
Line->FirstStartColumn = FirstStartColumn;
+
+ if (!Unexpanded.empty())
+ for (FormatToken *Token : AllTokens)
+ Token->MacroCtx.reset();
+ CurrentExpandedLines.clear();
+ ExpandedLines.clear();
+ Unexpanded.clear();
+ InExpansion = false;
+ Reconstruct.reset();
}
void UnwrappedLineParser::parse() {
@@ -383,12 +211,36 @@ void UnwrappedLineParser::parse() {
}
// Create line with eof token.
+ assert(FormatTok->is(tok::eof));
pushToken(FormatTok);
addUnwrappedLine();
- for (const UnwrappedLine &Line : Lines)
- Callback.consumeUnwrappedLine(Line);
+ // In a first run, format everything with the lines containing macro calls
+ // replaced by the expansion.
+ if (!ExpandedLines.empty()) {
+ LLVM_DEBUG(llvm::dbgs() << "Expanded lines:\n");
+ for (const auto &Line : Lines) {
+ if (!Line.Tokens.empty()) {
+ auto it = ExpandedLines.find(Line.Tokens.begin()->Tok);
+ if (it != ExpandedLines.end()) {
+ for (const auto &Expanded : it->second) {
+ LLVM_DEBUG(printDebugInfo(Expanded));
+ Callback.consumeUnwrappedLine(Expanded);
+ }
+ continue;
+ }
+ }
+ LLVM_DEBUG(printDebugInfo(Line));
+ Callback.consumeUnwrappedLine(Line);
+ }
+ Callback.finishRun();
+ }
+ LLVM_DEBUG(llvm::dbgs() << "Unwrapped lines:\n");
+ for (const UnwrappedLine &Line : Lines) {
+ LLVM_DEBUG(printDebugInfo(Line));
+ Callback.consumeUnwrappedLine(Line);
+ }
Callback.finishRun();
Lines.clear();
while (!PPLevelBranchIndex.empty() &&
@@ -639,7 +491,11 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// Keep a stack of positions of lbrace tokens. We will
// update information about whether an lbrace starts a
// braced init list or a different block during the loop.
- SmallVector<FormatToken *, 8> LBraceStack;
+ struct StackEntry {
+ FormatToken *Tok;
+ const FormatToken *PrevTok;
+ };
+ SmallVector<StackEntry, 8> LBraceStack;
assert(Tok->is(tok::l_brace));
do {
// Get next non-comment token.
@@ -669,12 +525,12 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
} else {
Tok->setBlockKind(BK_Unknown);
}
- LBraceStack.push_back(Tok);
+ LBraceStack.push_back({Tok, PrevTok});
break;
case tok::r_brace:
if (LBraceStack.empty())
break;
- if (LBraceStack.back()->is(BK_Unknown)) {
+ if (LBraceStack.back().Tok->is(BK_Unknown)) {
bool ProbablyBracedList = false;
if (Style.Language == FormatStyle::LK_Proto) {
ProbablyBracedList = NextTok->isOneOf(tok::comma, tok::r_square);
@@ -702,7 +558,7 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// If we already marked the opening brace as braced list, the closing
// must also be part of it.
- ProbablyBracedList = LBraceStack.back()->is(TT_BracedListLBrace);
+ ProbablyBracedList = LBraceStack.back().Tok->is(TT_BracedListLBrace);
ProbablyBracedList = ProbablyBracedList ||
(Style.isJavaScript() &&
@@ -718,8 +574,14 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
ProbablyBracedList =
ProbablyBracedList ||
NextTok->isOneOf(tok::comma, tok::period, tok::colon,
- tok::r_paren, tok::r_square, tok::l_brace,
- tok::ellipsis);
+ tok::r_paren, tok::r_square, tok::ellipsis);
+
+ // Distinguish between braced list in a constructor initializer list
+ // followed by constructor body, or just adjacent blocks.
+ ProbablyBracedList =
+ ProbablyBracedList ||
+ (NextTok->is(tok::l_brace) && LBraceStack.back().PrevTok &&
+ LBraceStack.back().PrevTok->is(tok::identifier));
ProbablyBracedList =
ProbablyBracedList ||
@@ -743,10 +605,10 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
}
if (ProbablyBracedList) {
Tok->setBlockKind(BK_BracedInit);
- LBraceStack.back()->setBlockKind(BK_BracedInit);
+ LBraceStack.back().Tok->setBlockKind(BK_BracedInit);
} else {
Tok->setBlockKind(BK_Block);
- LBraceStack.back()->setBlockKind(BK_Block);
+ LBraceStack.back().Tok->setBlockKind(BK_Block);
}
}
LBraceStack.pop_back();
@@ -763,8 +625,8 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
case tok::kw_switch:
case tok::kw_try:
case tok::kw___try:
- if (!LBraceStack.empty() && LBraceStack.back()->is(BK_Unknown))
- LBraceStack.back()->setBlockKind(BK_Block);
+ if (!LBraceStack.empty() && LBraceStack.back().Tok->is(BK_Unknown))
+ LBraceStack.back().Tok->setBlockKind(BK_Block);
break;
default:
break;
@@ -774,9 +636,9 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
} while (Tok->isNot(tok::eof) && !LBraceStack.empty());
// Assume other blocks for all unclosed opening braces.
- for (FormatToken *LBrace : LBraceStack)
- if (LBrace->is(BK_Unknown))
- LBrace->setBlockKind(BK_Block);
+ for (const auto &Entry : LBraceStack)
+ if (Entry.Tok->is(BK_Unknown))
+ Entry.Tok->setBlockKind(BK_Block);
FormatTok = Tokens->setPosition(StoredPosition);
}
@@ -911,7 +773,7 @@ FormatToken *UnwrappedLineParser::parseBlock(
parseParens();
size_t NbPreprocessorDirectives =
- CurrentLines == &Lines ? PreprocessorDirectives.size() : 0;
+ !parsingPPDirective() ? PreprocessorDirectives.size() : 0;
addUnwrappedLine();
size_t OpeningLineIndex =
CurrentLines->empty()
@@ -1353,7 +1215,7 @@ static bool mustBeJSIdent(const AdditionalKeywords &Keywords,
const FormatToken *FormatTok) {
// FIXME: This returns true for C/C++ keywords like 'struct'.
return FormatTok->is(tok::identifier) &&
- (FormatTok->Tok.getIdentifierInfo() == nullptr ||
+ (!FormatTok->Tok.getIdentifierInfo() ||
!FormatTok->isOneOf(
Keywords.kw_in, Keywords.kw_of, Keywords.kw_as, Keywords.kw_async,
Keywords.kw_await, Keywords.kw_yield, Keywords.kw_finally,
@@ -1537,6 +1399,20 @@ void UnwrappedLineParser::parseStructuralElement(
}
if (Style.isVerilog()) {
+ if (Keywords.isVerilogStructuredProcedure(*FormatTok)) {
+ parseForOrWhileLoop(/*HasParens=*/false);
+ return;
+ }
+ if (FormatTok->isOneOf(Keywords.kw_foreach, Keywords.kw_repeat)) {
+ parseForOrWhileLoop();
+ return;
+ }
+ if (FormatTok->isOneOf(tok::kw_restrict, Keywords.kw_assert,
+ Keywords.kw_assume, Keywords.kw_cover)) {
+ parseIfThenElse(IfKind, /*KeepBraces=*/false, /*IsVerilogAssert=*/true);
+ return;
+ }
+
// Skip things that can exist before keywords like 'if' and 'case'.
while (true) {
if (FormatTok->isOneOf(Keywords.kw_priority, Keywords.kw_unique,
@@ -1627,6 +1503,7 @@ void UnwrappedLineParser::parseStructuralElement(
}
nextToken();
if (FormatTok->is(tok::colon)) {
+ FormatTok->setFinalizedType(TT_CaseLabelColon);
parseLabel();
return;
}
@@ -1852,8 +1729,8 @@ void UnwrappedLineParser::parseStructuralElement(
// enum definition can start a structural element.
if (!parseEnum())
break;
- // This only applies for C++.
- if (!Style.isCpp()) {
+ // This only applies to C++ and Verilog.
+ if (!Style.isCpp() && !Style.isVerilog()) {
addUnwrappedLine();
return;
}
@@ -1921,12 +1798,18 @@ void UnwrappedLineParser::parseStructuralElement(
break;
case tok::caret:
nextToken();
+ // Block return type.
if (FormatTok->Tok.isAnyIdentifier() ||
FormatTok->isSimpleTypeSpecifier()) {
nextToken();
+ // Return types: pointers are ok too.
+ while (FormatTok->is(tok::star))
+ nextToken();
}
+ // Block argument list.
if (FormatTok->is(tok::l_paren))
parseParens();
+ // Block body.
if (FormatTok->is(tok::l_brace))
parseChildBlock();
break;
@@ -2024,7 +1907,7 @@ void UnwrappedLineParser::parseStructuralElement(
}
}
- if (FormatTok->is(Keywords.kw_interface)) {
+ if (!Style.isCpp() && FormatTok->is(Keywords.kw_interface)) {
if (parseStructLike())
return;
break;
@@ -2059,6 +1942,7 @@ void UnwrappedLineParser::parseStructuralElement(
if (!Style.isVerilog() && FormatTok->is(tok::colon) &&
!Line->MustBeDeclaration) {
Line->Tokens.begin()->Tok->MustBreakBefore = true;
+ FormatTok->setFinalizedType(TT_GotoLabelColon);
parseLabel(!Style.IndentGotoLabels);
if (HasLabel)
*HasLabel = true;
@@ -2277,7 +2161,7 @@ bool UnwrappedLineParser::tryToParseLambda() {
case tok::l_brace:
break;
case tok::l_paren:
- parseParens();
+ parseParens(/*AmpAmpTokenType=*/TT_PointerOrReference);
break;
case tok::l_square:
parseSquare();
@@ -2354,6 +2238,12 @@ bool UnwrappedLineParser::tryToParseLambda() {
SeenArrow = true;
nextToken();
break;
+ case tok::kw_requires: {
+ auto *RequiresToken = FormatTok;
+ nextToken();
+ parseRequiresClause(RequiresToken);
+ break;
+ }
default:
return true;
}
@@ -2368,11 +2258,11 @@ bool UnwrappedLineParser::tryToParseLambdaIntroducer() {
const FormatToken *Previous = FormatTok->Previous;
const FormatToken *LeftSquare = FormatTok;
nextToken();
- if (Previous &&
- (Previous->isOneOf(tok::identifier, tok::kw_operator, tok::kw_new,
- tok::kw_delete, tok::l_square) ||
- LeftSquare->isCppStructuredBinding(Style) || Previous->closesScope() ||
- Previous->isSimpleTypeSpecifier())) {
+ if ((Previous && ((Previous->Tok.getIdentifierInfo() &&
+ !Previous->isOneOf(tok::kw_return, tok::kw_co_await,
+ tok::kw_co_yield, tok::kw_co_return)) ||
+ Previous->closesScope())) ||
+ LeftSquare->isCppStructuredBinding(Style)) {
return false;
}
if (FormatTok->is(tok::l_square))
@@ -2547,23 +2437,51 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
/// \brief Parses a pair of parentheses (and everything between them).
/// \param AmpAmpTokenType If different than TT_Unknown sets this type for all
-/// double ampersands. This only counts for the current parens scope.
-void UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
+/// double ampersands. This applies for all nested scopes as well.
+///
+/// Returns whether there is a `=` token between the parentheses.
+bool UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
assert(FormatTok->is(tok::l_paren) && "'(' expected.");
+ auto *LeftParen = FormatTok;
+ bool SeenEqual = false;
+ const bool MightBeStmtExpr = Tokens->peekNextToken()->is(tok::l_brace);
nextToken();
do {
switch (FormatTok->Tok.getKind()) {
case tok::l_paren:
- parseParens();
+ if (parseParens(AmpAmpTokenType))
+ SeenEqual = true;
if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_brace))
parseChildBlock();
break;
case tok::r_paren:
+ if (!MightBeStmtExpr &&
+ Style.RemoveParentheses > FormatStyle::RPS_Leave) {
+ const auto *Prev = LeftParen->Previous;
+ const auto *Next = Tokens->peekNextToken();
+ const bool DoubleParens =
+ Prev && Prev->is(tok::l_paren) && Next && Next->is(tok::r_paren);
+ const auto *PrevPrev = Prev ? Prev->getPreviousNonComment() : nullptr;
+ const bool Blacklisted =
+ PrevPrev &&
+ (PrevPrev->is(tok::kw___attribute) ||
+ (SeenEqual &&
+ (PrevPrev->isOneOf(tok::kw_if, tok::kw_while) ||
+ PrevPrev->endsSequence(tok::kw_constexpr, tok::kw_if))));
+ const bool ReturnParens =
+ Style.RemoveParentheses == FormatStyle::RPS_ReturnStatement &&
+ Prev && Prev->isOneOf(tok::kw_return, tok::kw_co_return) && Next &&
+ Next->is(tok::semi);
+ if ((DoubleParens && !Blacklisted) || ReturnParens) {
+ LeftParen->Optional = true;
+ FormatTok->Optional = true;
+ }
+ }
nextToken();
- return;
+ return SeenEqual;
case tok::r_brace:
// A "}" inside parenthesis is an error if there wasn't a matching "{".
- return;
+ return SeenEqual;
case tok::l_square:
tryToParseLambda();
break;
@@ -2579,6 +2497,7 @@ void UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
}
break;
case tok::equal:
+ SeenEqual = true;
if (Style.isCSharp() && FormatTok->is(TT_FatArrow))
tryToParseChildBlock();
else
@@ -2615,6 +2534,7 @@ void UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
break;
}
} while (!eof());
+ return SeenEqual;
}
void UnwrappedLineParser::parseSquare(bool LambdaIntroducer) {
@@ -2761,9 +2681,28 @@ bool UnwrappedLineParser::isBlockBegin(const FormatToken &Tok) const {
}
FormatToken *UnwrappedLineParser::parseIfThenElse(IfStmtKind *IfKind,
- bool KeepBraces) {
- assert(FormatTok->is(tok::kw_if) && "'if' expected");
+ bool KeepBraces,
+ bool IsVerilogAssert) {
+ assert((FormatTok->is(tok::kw_if) ||
+ (Style.isVerilog() &&
+ FormatTok->isOneOf(tok::kw_restrict, Keywords.kw_assert,
+ Keywords.kw_assume, Keywords.kw_cover))) &&
+ "'if' expected");
nextToken();
+
+ if (IsVerilogAssert) {
+ // Handle `assert #0` and `assert final`.
+ if (FormatTok->is(Keywords.kw_verilogHash)) {
+ nextToken();
+ if (FormatTok->is(tok::numeric_constant))
+ nextToken();
+ } else if (FormatTok->isOneOf(Keywords.kw_final, Keywords.kw_property,
+ Keywords.kw_sequence)) {
+ nextToken();
+ }
+ }
+
+ // Handle `if !consteval`.
if (FormatTok->is(tok::exclaim))
nextToken();
@@ -2774,10 +2713,18 @@ FormatToken *UnwrappedLineParser::parseIfThenElse(IfStmtKind *IfKind,
KeepIfBraces = !Style.RemoveBracesLLVM || KeepBraces;
if (FormatTok->isOneOf(tok::kw_constexpr, tok::identifier))
nextToken();
- if (FormatTok->is(tok::l_paren))
+ if (FormatTok->is(tok::l_paren)) {
+ FormatTok->setFinalizedType(TT_ConditionLParen);
parseParens();
+ }
}
handleAttributes();
+ // The then action is optional in Verilog assert statements.
+ if (IsVerilogAssert && FormatTok->is(tok::semi)) {
+ nextToken();
+ addUnwrappedLine();
+ return nullptr;
+ }
bool NeedsUnwrappedLine = false;
keepAncestorBraces();
@@ -2795,6 +2742,8 @@ FormatToken *UnwrappedLineParser::parseIfThenElse(IfStmtKind *IfKind,
addUnwrappedLine();
else
NeedsUnwrappedLine = true;
+ } else if (IsVerilogAssert && FormatTok->is(tok::kw_else)) {
+ addUnwrappedLine();
} else {
parseUnbracedBody();
}
@@ -2837,7 +2786,7 @@ FormatToken *UnwrappedLineParser::parseIfThenElse(IfStmtKind *IfKind,
markOptionalBraces(ElseLeftBrace);
}
addUnwrappedLine();
- } else if (FormatTok->is(tok::kw_if)) {
+ } else if (!IsVerilogAssert && FormatTok->is(tok::kw_if)) {
const FormatToken *Previous = Tokens->getPreviousToken();
assert(Previous);
const bool IsPrecededByComment = Previous->is(tok::comment);
@@ -3027,15 +2976,12 @@ void UnwrappedLineParser::parseNamespace() {
if (ManageWhitesmithsBraces)
++Line->Level;
+ // Munch the semicolon after a namespace. This is more common than one would
+ // think. Putting the semicolon into its own line is very ugly.
parseBlock(/*MustBeDeclaration=*/true, AddLevels, /*MunchSemi=*/true,
/*KeepBraces=*/true, /*IfKind=*/nullptr,
ManageWhitesmithsBraces);
- // Munch the semicolon after a namespace. This is more common than one would
- // think. Putting the semicolon into its own line is very ugly.
- if (FormatTok->is(tok::semi))
- nextToken();
-
addUnwrappedLine(AddLevels > 0 ? LineLevel::Remove : LineLevel::Keep);
if (ManageWhitesmithsBraces)
@@ -3112,8 +3058,14 @@ void UnwrappedLineParser::parseLoopBody(bool KeepBraces, bool WrapRightBrace) {
NestedTooDeep.pop_back();
}
-void UnwrappedLineParser::parseForOrWhileLoop() {
- assert(FormatTok->isOneOf(tok::kw_for, tok::kw_while, TT_ForEachMacro) &&
+void UnwrappedLineParser::parseForOrWhileLoop(bool HasParens) {
+ assert((FormatTok->isOneOf(tok::kw_for, tok::kw_while, TT_ForEachMacro) ||
+ (Style.isVerilog() &&
+ FormatTok->isOneOf(Keywords.kw_always, Keywords.kw_always_comb,
+ Keywords.kw_always_ff, Keywords.kw_always_latch,
+ Keywords.kw_final, Keywords.kw_initial,
+ Keywords.kw_foreach, Keywords.kw_forever,
+ Keywords.kw_repeat))) &&
"'for', 'while' or foreach macro expected");
const bool KeepBraces = !Style.RemoveBracesLLVM ||
!FormatTok->isOneOf(tok::kw_for, tok::kw_while);
@@ -3124,8 +3076,17 @@ void UnwrappedLineParser::parseForOrWhileLoop() {
nextToken();
if (Style.isCpp() && FormatTok->is(tok::kw_co_await))
nextToken();
- if (FormatTok->is(tok::l_paren))
+ if (HasParens && FormatTok->is(tok::l_paren)) {
+ // The type is only set for Verilog basically because we were afraid to
+ // change the existing behavior for loops. See the discussion on D121756 for
+ // details.
+ if (Style.isVerilog())
+ FormatTok->setFinalizedType(TT_ConditionLParen);
parseParens();
+ }
+ // Event control.
+ if (Style.isVerilog())
+ parseVerilogSensitivityList();
handleAttributes();
parseLoopBody(KeepBraces, /*WrapRightBrace=*/true);
@@ -3197,7 +3158,11 @@ void UnwrappedLineParser::parseCaseLabel() {
// FIXME: fix handling of complex expressions here.
do {
nextToken();
- } while (!eof() && !FormatTok->is(tok::colon));
+ if (FormatTok->is(tok::colon)) {
+ FormatTok->setFinalizedType(TT_CaseLabelColon);
+ break;
+ }
+ } while (!eof());
parseLabel();
}
@@ -3506,6 +3471,17 @@ void UnwrappedLineParser::parseConstraintExpression() {
// lambda to be possible.
// template <typename T> requires requires { ... } [[nodiscard]] ...;
bool LambdaNextTimeAllowed = true;
+
+ // Within lambda declarations, it is permitted to put a requires clause after
+ // its template parameter list, which would place the requires clause right
+ // before the parentheses of the parameters of the lambda declaration. Thus,
+ // we track if we expect to see grouping parentheses at all.
+ // Without this check, `requires foo<T> (T t)` in the below example would be
+ // seen as the whole requires clause, accidentally eating the parameters of
+ // the lambda.
+ // [&]<typename T> requires foo<T> (T t) { ... };
+ bool TopLevelParensAllowed = true;
+
do {
bool LambdaThisTimeAllowed = std::exchange(LambdaNextTimeAllowed, false);
@@ -3518,7 +3494,10 @@ void UnwrappedLineParser::parseConstraintExpression() {
}
case tok::l_paren:
+ if (!TopLevelParensAllowed)
+ return;
parseParens(/*AmpAmpTokenType=*/TT_BinaryOperator);
+ TopLevelParensAllowed = false;
break;
case tok::l_square:
@@ -3542,6 +3521,7 @@ void UnwrappedLineParser::parseConstraintExpression() {
FormatTok->setFinalizedType(TT_BinaryOperator);
nextToken();
LambdaNextTimeAllowed = true;
+ TopLevelParensAllowed = true;
break;
case tok::comma:
@@ -3565,6 +3545,7 @@ void UnwrappedLineParser::parseConstraintExpression() {
case tok::star:
case tok::slash:
LambdaNextTimeAllowed = true;
+ TopLevelParensAllowed = true;
// Just eat them.
nextToken();
break;
@@ -3573,6 +3554,7 @@ void UnwrappedLineParser::parseConstraintExpression() {
case tok::coloncolon:
case tok::kw_true:
case tok::kw_false:
+ TopLevelParensAllowed = false;
// Just eat them.
nextToken();
break;
@@ -3590,17 +3572,6 @@ void UnwrappedLineParser::parseConstraintExpression() {
/*ClosingBraceKind=*/tok::greater);
break;
- case tok::kw_bool:
- // bool is only allowed if it is directly followed by a paren for a cast:
- // concept C = bool(...);
- // and bool is the only type, all other types as cast must be inside a
- // cast to bool an thus are handled by the other cases.
- if (Tokens->peekNextToken()->isNot(tok::l_paren))
- return;
- nextToken();
- parseParens();
- break;
-
default:
if (!FormatTok->Tok.getIdentifierInfo()) {
// Identifiers are part of the default case, we check for more then
@@ -3632,6 +3603,7 @@ void UnwrappedLineParser::parseConstraintExpression() {
parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
/*ClosingBraceKind=*/tok::greater);
}
+ TopLevelParensAllowed = false;
break;
}
} while (!eof());
@@ -3662,7 +3634,15 @@ bool UnwrappedLineParser::parseEnum() {
FormatTok->isOneOf(tok::colon, tok::coloncolon, tok::less,
tok::greater, tok::comma, tok::question,
tok::l_square, tok::r_square)) {
- nextToken();
+ if (Style.isVerilog()) {
+ FormatTok->setFinalizedType(TT_VerilogDimensionedTypeName);
+ nextToken();
+ // In Verilog the base type can have dimensions.
+ while (FormatTok->is(tok::l_square))
+ parseSquare();
+ } else {
+ nextToken();
+ }
// We can have macros or attributes in between 'enum' and the enum name.
if (FormatTok->is(tok::l_paren))
parseParens();
@@ -3905,7 +3885,7 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
// Don't try parsing a lambda if we had a closing parenthesis before,
// it was probably a pointer to an array: int (*)[].
if (!tryToParseLambda())
- break;
+ continue;
} else {
parseSquare();
continue;
@@ -4131,7 +4111,9 @@ void UnwrappedLineParser::parseJavaScriptEs6ImportExport() {
// parsing the structural element, i.e. the declaration or expression for
// `export default`.
if (!IsImport && !FormatTok->isOneOf(tok::l_brace, tok::star) &&
- !FormatTok->isStringLiteral()) {
+ !FormatTok->isStringLiteral() &&
+ !(FormatTok->is(Keywords.kw_type) &&
+ Tokens->peekNextToken()->isOneOf(tok::l_brace, tok::star))) {
return;
}
@@ -4257,11 +4239,14 @@ unsigned UnwrappedLineParser::parseVerilogHierarchyHeader() {
if (FormatTok->is(Keywords.kw_verilogHash)) {
NewLine();
nextToken();
- if (FormatTok->is(tok::l_paren))
+ if (FormatTok->is(tok::l_paren)) {
+ FormatTok->setFinalizedType(TT_VerilogMultiLineListLParen);
parseParens();
+ }
}
if (FormatTok->is(tok::l_paren)) {
NewLine();
+ FormatTok->setFinalizedType(TT_VerilogMultiLineListLParen);
parseParens();
}
@@ -4334,12 +4319,25 @@ void UnwrappedLineParser::parseVerilogCaseLabel() {
Line->Level = OrigLevel;
}
+bool UnwrappedLineParser::containsExpansion(const UnwrappedLine &Line) const {
+ for (const auto &N : Line.Tokens) {
+ if (N.Tok->MacroCtx)
+ return true;
+ for (const UnwrappedLine &Child : N.Children)
+ if (containsExpansion(Child))
+ return true;
+ }
+ return false;
+}
+
void UnwrappedLineParser::addUnwrappedLine(LineLevel AdjustLevel) {
if (Line->Tokens.empty())
return;
LLVM_DEBUG({
- if (CurrentLines == &Lines)
+ if (!parsingPPDirective()) {
+ llvm::dbgs() << "Adding unwrapped line:\n";
printDebugInfo(*Line);
+ }
});
// If this line closes a block when in Whitesmiths mode, remember that
@@ -4350,7 +4348,39 @@ void UnwrappedLineParser::addUnwrappedLine(LineLevel AdjustLevel) {
Line->MatchingOpeningBlockLineIndex != UnwrappedLine::kInvalidIndex &&
Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths;
- CurrentLines->push_back(std::move(*Line));
+ // If the current line was expanded from a macro call, we use it to
+ // reconstruct an unwrapped line from the structure of the expanded unwrapped
+ // line and the unexpanded token stream.
+ if (!parsingPPDirective() && !InExpansion && containsExpansion(*Line)) {
+ if (!Reconstruct)
+ Reconstruct.emplace(Line->Level, Unexpanded);
+ Reconstruct->addLine(*Line);
+
+ // While the reconstructed unexpanded lines are stored in the normal
+ // flow of lines, the expanded lines are stored on the side to be analyzed
+ // in an extra step.
+ CurrentExpandedLines.push_back(std::move(*Line));
+
+ if (Reconstruct->finished()) {
+ UnwrappedLine Reconstructed = std::move(*Reconstruct).takeResult();
+ assert(!Reconstructed.Tokens.empty() &&
+ "Reconstructed must at least contain the macro identifier.");
+ assert(!parsingPPDirective());
+ LLVM_DEBUG({
+ llvm::dbgs() << "Adding unexpanded line:\n";
+ printDebugInfo(Reconstructed);
+ });
+ ExpandedLines[Reconstructed.Tokens.begin()->Tok] = CurrentExpandedLines;
+ Lines.push_back(std::move(Reconstructed));
+ CurrentExpandedLines.clear();
+ Reconstruct.reset();
+ }
+ } else {
+ // At the top level we only get here when no unexpansion is going on, or
+ // when conditional formatting led to unfinished macro reconstructions.
+ assert(!Reconstruct || (CurrentLines != &Lines) || PPStack.size() > 0);
+ CurrentLines->push_back(std::move(*Line));
+ }
Line->Tokens.clear();
Line->MatchingOpeningBlockLineIndex = UnwrappedLine::kInvalidIndex;
Line->FirstStartColumn = 0;
@@ -4358,7 +4388,7 @@ void UnwrappedLineParser::addUnwrappedLine(LineLevel AdjustLevel) {
if (ClosesWhitesmithsBlock && AdjustLevel == LineLevel::Remove)
--Line->Level;
- if (CurrentLines == &Lines && !PreprocessorDirectives.empty()) {
+ if (!parsingPPDirective() && !PreprocessorDirectives.empty()) {
CurrentLines->append(
std::make_move_iterator(PreprocessorDirectives.begin()),
std::make_move_iterator(PreprocessorDirectives.end()));
@@ -4652,6 +4682,87 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
continue;
}
+ if (FormatTok->is(tok::identifier) &&
+ Macros.defined(FormatTok->TokenText) &&
+ // FIXME: Allow expanding macros in preprocessor directives.
+ !Line->InPPDirective) {
+ FormatToken *ID = FormatTok;
+ unsigned Position = Tokens->getPosition();
+
+ // To correctly parse the code, we need to replace the tokens of the macro
+ // call with its expansion.
+ auto PreCall = std::move(Line);
+ Line.reset(new UnwrappedLine);
+ bool OldInExpansion = InExpansion;
+ InExpansion = true;
+ // We parse the macro call into a new line.
+ auto Args = parseMacroCall();
+ InExpansion = OldInExpansion;
+ assert(Line->Tokens.front().Tok == ID);
+ // And remember the unexpanded macro call tokens.
+ auto UnexpandedLine = std::move(Line);
+ // Reset to the old line.
+ Line = std::move(PreCall);
+
+ LLVM_DEBUG({
+ llvm::dbgs() << "Macro call: " << ID->TokenText << "(";
+ if (Args) {
+ llvm::dbgs() << "(";
+ for (const auto &Arg : Args.value())
+ for (const auto &T : Arg)
+ llvm::dbgs() << T->TokenText << " ";
+ llvm::dbgs() << ")";
+ }
+ llvm::dbgs() << "\n";
+ });
+ if (Macros.objectLike(ID->TokenText) && Args &&
+ !Macros.hasArity(ID->TokenText, Args->size())) {
+ // The macro is either
+ // - object-like, but we got arguments, or
+ // - overloaded to be both object-like and function-like, but none of
+ // the function-like arities match the number of arguments.
+ // Thus, expand as object-like macro.
+ LLVM_DEBUG(llvm::dbgs()
+ << "Macro \"" << ID->TokenText
+ << "\" not overloaded for arity " << Args->size()
+ << "or not function-like, using object-like overload.");
+ Args.reset();
+ UnexpandedLine->Tokens.resize(1);
+ Tokens->setPosition(Position);
+ nextToken();
+ assert(!Args && Macros.objectLike(ID->TokenText));
+ }
+ if ((!Args && Macros.objectLike(ID->TokenText)) ||
+ (Args && Macros.hasArity(ID->TokenText, Args->size()))) {
+ // Next, we insert the expanded tokens in the token stream at the
+ // current position, and continue parsing.
+ Unexpanded[ID] = std::move(UnexpandedLine);
+ SmallVector<FormatToken *, 8> Expansion =
+ Macros.expand(ID, std::move(Args));
+ if (!Expansion.empty())
+ FormatTok = Tokens->insertTokens(Expansion);
+
+ LLVM_DEBUG({
+ llvm::dbgs() << "Expanded: ";
+ for (const auto &T : Expansion)
+ llvm::dbgs() << T->TokenText << " ";
+ llvm::dbgs() << "\n";
+ });
+ } else {
+ LLVM_DEBUG({
+ llvm::dbgs() << "Did not expand macro \"" << ID->TokenText
+ << "\", because it was used ";
+ if (Args)
+ llvm::dbgs() << "with " << Args->size();
+ else
+ llvm::dbgs() << "without";
+ llvm::dbgs() << " arguments, which doesn't match any definition.\n";
+ });
+ Tokens->setPosition(Position);
+ FormatTok = ID;
+ }
+ }
+
if (!FormatTok->is(tok::comment)) {
distributeComments(Comments, FormatTok);
Comments.clear();
@@ -4665,6 +4776,71 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
Comments.clear();
}
+namespace {
+template <typename Iterator>
+void pushTokens(Iterator Begin, Iterator End,
+ llvm::SmallVectorImpl<FormatToken *> &Into) {
+ for (auto I = Begin; I != End; ++I) {
+ Into.push_back(I->Tok);
+ for (const auto &Child : I->Children)
+ pushTokens(Child.Tokens.begin(), Child.Tokens.end(), Into);
+ }
+}
+} // namespace
+
+std::optional<llvm::SmallVector<llvm::SmallVector<FormatToken *, 8>, 1>>
+UnwrappedLineParser::parseMacroCall() {
+ std::optional<llvm::SmallVector<llvm::SmallVector<FormatToken *, 8>, 1>> Args;
+ assert(Line->Tokens.empty());
+ nextToken();
+ if (!FormatTok->is(tok::l_paren))
+ return Args;
+ unsigned Position = Tokens->getPosition();
+ FormatToken *Tok = FormatTok;
+ nextToken();
+ Args.emplace();
+ auto ArgStart = std::prev(Line->Tokens.end());
+
+ int Parens = 0;
+ do {
+ switch (FormatTok->Tok.getKind()) {
+ case tok::l_paren:
+ ++Parens;
+ nextToken();
+ break;
+ case tok::r_paren: {
+ if (Parens > 0) {
+ --Parens;
+ nextToken();
+ break;
+ }
+ Args->push_back({});
+ pushTokens(std::next(ArgStart), Line->Tokens.end(), Args->back());
+ nextToken();
+ return Args;
+ }
+ case tok::comma: {
+ if (Parens > 0) {
+ nextToken();
+ break;
+ }
+ Args->push_back({});
+ pushTokens(std::next(ArgStart), Line->Tokens.end(), Args->back());
+ nextToken();
+ ArgStart = std::prev(Line->Tokens.end());
+ break;
+ }
+ default:
+ nextToken();
+ break;
+ }
+ } while (!eof());
+ Line->Tokens.resize(1);
+ Tokens->setPosition(Position);
+ FormatTok = Tok;
+ return {};
+}
+
void UnwrappedLineParser::pushToken(FormatToken *Tok) {
Line->Tokens.push_back(UnwrappedLineNode(Tok));
if (MustBreakBeforeNextToken) {
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
index f043e567eb73..57515af64a3e 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
@@ -15,10 +15,14 @@
#ifndef LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEPARSER_H
#define LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEPARSER_H
+#include "Encoding.h"
#include "FormatToken.h"
+#include "Macros.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Format/Format.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Regex.h"
#include <list>
#include <stack>
@@ -76,6 +80,19 @@ struct UnwrappedLine {
unsigned FirstStartColumn = 0;
};
+/// Interface for users of the UnwrappedLineParser to receive the parsed lines.
+/// Parsing a single snippet of code can lead to multiple runs, where each
+/// run is a coherent view of the file.
+///
+/// For example, different runs are generated:
+/// - for different combinations of #if blocks
+/// - when macros are involved, for the expanded code and the as-written code
+///
+/// Some tokens will only be visible in a subset of the runs.
+/// For each run, \c UnwrappedLineParser will call \c consumeUnwrappedLine
+/// for each parsed unwrapped line, and then \c finishRun to indicate
+/// that the set of unwrapped lines before is one coherent view of the
+/// code snippet to be formatted.
class UnwrappedLineConsumer {
public:
virtual ~UnwrappedLineConsumer() {}
@@ -87,10 +104,12 @@ class FormatTokenSource;
class UnwrappedLineParser {
public:
- UnwrappedLineParser(const FormatStyle &Style,
+ UnwrappedLineParser(SourceManager &SourceMgr, const FormatStyle &Style,
const AdditionalKeywords &Keywords,
unsigned FirstStartColumn, ArrayRef<FormatToken *> Tokens,
- UnwrappedLineConsumer &Callback);
+ UnwrappedLineConsumer &Callback,
+ llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
+ IdentifierTable &IdentTable);
void parse();
@@ -137,17 +156,18 @@ private:
bool tryToParseBracedList();
bool parseBracedList(bool ContinueOnSemicolons = false, bool IsEnum = false,
tok::TokenKind ClosingBraceKind = tok::r_brace);
- void parseParens(TokenType AmpAmpTokenType = TT_Unknown);
+ bool parseParens(TokenType AmpAmpTokenType = TT_Unknown);
void parseSquare(bool LambdaIntroducer = false);
void keepAncestorBraces();
void parseUnbracedBody(bool CheckEOF = false);
void handleAttributes();
bool handleCppAttributes();
bool isBlockBegin(const FormatToken &Tok) const;
- FormatToken *parseIfThenElse(IfStmtKind *IfKind, bool KeepBraces = false);
+ FormatToken *parseIfThenElse(IfStmtKind *IfKind, bool KeepBraces = false,
+ bool IsVerilogAssert = false);
void parseTryCatch();
void parseLoopBody(bool KeepBraces, bool WrapRightBrace);
- void parseForOrWhileLoop();
+ void parseForOrWhileLoop(bool HasParens = true);
void parseDoWhile();
void parseLabel(bool LeftAlignLabel = false);
void parseCaseLabel();
@@ -193,6 +213,8 @@ private:
unsigned parseVerilogHierarchyHeader();
void parseVerilogTable();
void parseVerilogCaseLabel();
+ std::optional<llvm::SmallVector<llvm::SmallVector<FormatToken *, 8>, 1>>
+ parseMacroCall();
// Used by addUnwrappedLine to denote whether to keep or remove a level
// when resetting the line state.
@@ -236,22 +258,55 @@ private:
bool isOnNewLine(const FormatToken &FormatTok);
+ // Returns whether there is a macro expansion in the line, i.e. a token that
+ // was expanded from a macro call.
+ bool containsExpansion(const UnwrappedLine &Line) const;
+
// Compute hash of the current preprocessor branch.
// This is used to identify the different branches, and thus track if block
// open and close in the same branch.
size_t computePPHash() const;
+ bool parsingPPDirective() const { return CurrentLines != &Lines; }
+
// FIXME: We are constantly running into bugs where Line.Level is incorrectly
// subtracted from beyond 0. Introduce a method to subtract from Line.Level
// and use that everywhere in the Parser.
std::unique_ptr<UnwrappedLine> Line;
+ // Lines that are created by macro expansion.
+ // When formatting code containing macro calls, we first format the expanded
+ // lines to set the token types correctly. Afterwards, we format the
+ // reconstructed macro calls, re-using the token types determined in the first
+ // step.
+ // ExpandedLines will be reset every time we create a new LineAndExpansion
+ // instance once a line containing macro calls has been parsed.
+ SmallVector<UnwrappedLine, 8> CurrentExpandedLines;
+
+ // Maps from the first token of a top-level UnwrappedLine that contains
+ // a macro call to the replacement UnwrappedLines expanded from the macro
+ // call.
+ llvm::DenseMap<FormatToken *, SmallVector<UnwrappedLine, 8>> ExpandedLines;
+
+ // Map from the macro identifier to a line containing the full unexpanded
+ // macro call.
+ llvm::DenseMap<FormatToken *, std::unique_ptr<UnwrappedLine>> Unexpanded;
+
+ // For recursive macro expansions, trigger reconstruction only on the
+ // outermost expansion.
+ bool InExpansion = false;
+
+ // Set while we reconstruct a macro call.
+ // For reconstruction, we feed the expanded lines into the reconstructor
+ // until it is finished.
+ std::optional<MacroCallReconstructor> Reconstruct;
+
// Comments are sorted into unwrapped lines by whether they are in the same
// line as the previous token, or not. If not, they belong to the next token.
// Since the next token might already be in a new unwrapped line, we need to
// store the comments belonging to that token.
SmallVector<FormatToken *, 1> CommentsBeforeNextToken;
- FormatToken *FormatTok;
+ FormatToken *FormatTok = nullptr;
bool MustBreakBeforeNextToken;
// The parsed lines. Only added to through \c CurrentLines.
@@ -280,9 +335,6 @@ private:
FormatTokenSource *Tokens;
UnwrappedLineConsumer &Callback;
- // FIXME: This is a temporary measure until we have reworked the ownership
- // of the format tokens. The goal is to have the actual tokens created and
- // owned outside of and handed into the UnwrappedLineParser.
ArrayRef<FormatToken *> AllTokens;
// Keeps a stack of the states of nested control statements (true if the
@@ -348,13 +400,17 @@ private:
// does not start at the beginning of the file.
unsigned FirstStartColumn;
+ MacroExpander Macros;
+
friend class ScopedLineState;
friend class CompoundStatementIndenter;
};
struct UnwrappedLineNode {
UnwrappedLineNode() : Tok(nullptr) {}
- UnwrappedLineNode(FormatToken *Tok) : Tok(Tok) {}
+ UnwrappedLineNode(FormatToken *Tok,
+ llvm::ArrayRef<UnwrappedLine> Children = {})
+ : Tok(Tok), Children(Children.begin(), Children.end()) {}
FormatToken *Tok;
SmallVector<UnwrappedLine, 0> Children;
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
index 9951906b6af0..668ca38ad683 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
@@ -49,7 +49,7 @@ void WhitespaceManager::replaceWhitespace(FormatToken &Tok, unsigned Newlines,
unsigned Spaces,
unsigned StartOfTokenColumn,
bool IsAligned, bool InPPDirective) {
- if (Tok.Finalized)
+ if (Tok.Finalized || (Tok.MacroCtx && Tok.MacroCtx->Role == MR_ExpandedArg))
return;
Tok.setDecision((Newlines > 0) ? FD_Break : FD_Continue);
Changes.push_back(Change(Tok, /*CreateReplacement=*/true, Tok.WhitespaceRange,
@@ -60,7 +60,7 @@ void WhitespaceManager::replaceWhitespace(FormatToken &Tok, unsigned Newlines,
void WhitespaceManager::addUntouchableToken(const FormatToken &Tok,
bool InPPDirective) {
- if (Tok.Finalized)
+ if (Tok.Finalized || (Tok.MacroCtx && Tok.MacroCtx->Role == MR_ExpandedArg))
return;
Changes.push_back(Change(Tok, /*CreateReplacement=*/false,
Tok.WhitespaceRange, /*Spaces=*/0,
@@ -84,7 +84,7 @@ void WhitespaceManager::replaceWhitespaceInToken(
const FormatToken &Tok, unsigned Offset, unsigned ReplaceChars,
StringRef PreviousPostfix, StringRef CurrentPrefix, bool InPPDirective,
unsigned Newlines, int Spaces) {
- if (Tok.Finalized)
+ if (Tok.Finalized || (Tok.MacroCtx && Tok.MacroCtx->Role == MR_ExpandedArg))
return;
SourceLocation Start = Tok.getStartOfNonWhitespace().getLocWithOffset(Offset);
Changes.push_back(
@@ -102,6 +102,7 @@ const tooling::Replacements &WhitespaceManager::generateReplacements() {
llvm::sort(Changes, Change::IsBeforeInFile(SourceMgr));
calculateLineBreakInformation();
alignConsecutiveMacros();
+ alignConsecutiveShortCaseStatements();
alignConsecutiveDeclarations();
alignConsecutiveBitFields();
alignConsecutiveAssignments();
@@ -445,12 +446,24 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
Changes[i + 1].PreviousEndOfTokenColumn += Shift;
// If PointerAlignment is PAS_Right, keep *s or &s next to the token
- if (Style.PointerAlignment == FormatStyle::PAS_Right &&
+ if ((Style.PointerAlignment == FormatStyle::PAS_Right ||
+ Style.ReferenceAlignment == FormatStyle::RAS_Right) &&
Changes[i].Spaces != 0) {
+ const bool ReferenceNotRightAligned =
+ Style.ReferenceAlignment != FormatStyle::RAS_Right &&
+ Style.ReferenceAlignment != FormatStyle::RAS_Pointer;
for (int Previous = i - 1;
Previous >= 0 &&
Changes[Previous].Tok->getType() == TT_PointerOrReference;
--Previous) {
+ assert(
+ Changes[Previous].Tok->isOneOf(tok::star, tok::amp, tok::ampamp));
+ if (Changes[Previous].Tok->isNot(tok::star)) {
+ if (ReferenceNotRightAligned)
+ continue;
+ } else if (Style.PointerAlignment != FormatStyle::PAS_Right) {
+ continue;
+ }
Changes[Previous + 1].Spaces -= Shift;
Changes[Previous].Spaces += Shift;
Changes[Previous].StartOfTokenColumn += Shift;
@@ -522,13 +535,6 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
? Changes[StartAt].indentAndNestingLevel()
: std::tuple<unsigned, unsigned, unsigned>();
- // Keep track if the first token has a non-zero indent and nesting level.
- // This can happen when aligning the contents of "#else" preprocessor blocks,
- // which is done separately.
- bool HasInitialIndentAndNesting =
- StartAt == 0 &&
- IndentAndNestingLevel > std::tuple<unsigned, unsigned, unsigned>();
-
// Keep track of the number of commas before the matching tokens, we will only
// align a sequence of matching tokens if they are preceded by the same number
// of commas.
@@ -563,19 +569,8 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
unsigned i = StartAt;
for (unsigned e = Changes.size(); i != e; ++i) {
- if (Changes[i].indentAndNestingLevel() < IndentAndNestingLevel) {
- if (!HasInitialIndentAndNesting)
- break;
- // The contents of preprocessor blocks are aligned separately.
- // If the initial preprocessor block is indented or nested (e.g. it's in
- // a function), do not align and exit after finishing this scope block.
- // Instead, align, and then lower the baseline indent and nesting level
- // in order to continue aligning subsequent blocks.
- EndOfSequence = i;
- AlignCurrentSequence();
- IndentAndNestingLevel =
- Changes[i].indentAndNestingLevel(); // new baseline
- }
+ if (Changes[i].indentAndNestingLevel() < IndentAndNestingLevel)
+ break;
if (Changes[i].NewlinesBefore != 0) {
CommasBeforeMatch = 0;
@@ -681,14 +676,12 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
//
// We need to adjust the StartOfTokenColumn of each Change that is on a line
// containing any matching token to be aligned and located after such token.
-static void AlignMacroSequence(
+static void AlignMatchingTokenSequence(
unsigned &StartOfSequence, unsigned &EndOfSequence, unsigned &MinColumn,
- unsigned &MaxColumn, bool &FoundMatchOnLine,
- std::function<bool(const WhitespaceManager::Change &C)> AlignMacrosMatches,
+ std::function<bool(const WhitespaceManager::Change &C)> Matches,
SmallVector<WhitespaceManager::Change, 16> &Changes) {
if (StartOfSequence > 0 && StartOfSequence < EndOfSequence) {
-
- FoundMatchOnLine = false;
+ bool FoundMatchOnLine = false;
int Shift = 0;
for (unsigned I = StartOfSequence; I != EndOfSequence; ++I) {
@@ -699,8 +692,8 @@ static void AlignMacroSequence(
// If this is the first matching token to be aligned, remember by how many
// spaces it has to be shifted, so the rest of the changes on the line are
- // shifted by the same amount
- if (!FoundMatchOnLine && AlignMacrosMatches(Changes[I])) {
+ // shifted by the same amount.
+ if (!FoundMatchOnLine && Matches(Changes[I])) {
FoundMatchOnLine = true;
Shift = MinColumn - Changes[I].StartOfTokenColumn;
Changes[I].Spaces += Shift;
@@ -714,7 +707,6 @@ static void AlignMacroSequence(
}
MinColumn = 0;
- MaxColumn = UINT_MAX;
StartOfSequence = 0;
EndOfSequence = 0;
}
@@ -753,7 +745,6 @@ void WhitespaceManager::alignConsecutiveMacros() {
};
unsigned MinColumn = 0;
- unsigned MaxColumn = UINT_MAX;
// Start and end of the token sequence we're processing.
unsigned StartOfSequence = 0;
@@ -781,8 +772,8 @@ void WhitespaceManager::alignConsecutiveMacros() {
!(LineIsComment && Style.AlignConsecutiveMacros.AcrossComments);
if (EmptyLineBreak || NoMatchBreak) {
- AlignMacroSequence(StartOfSequence, EndOfSequence, MinColumn, MaxColumn,
- FoundMatchOnLine, AlignMacrosMatches, Changes);
+ AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn,
+ AlignMacrosMatches, Changes);
}
// A new line starts, re-initialize line status tracking bools.
@@ -802,18 +793,12 @@ void WhitespaceManager::alignConsecutiveMacros() {
StartOfSequence = I;
unsigned ChangeMinColumn = Changes[I].StartOfTokenColumn;
- int LineLengthAfter = -Changes[I].Spaces;
- for (unsigned j = I; j != E && Changes[j].NewlinesBefore == 0; ++j)
- LineLengthAfter += Changes[j].Spaces + Changes[j].TokenLength;
- unsigned ChangeMaxColumn = Style.ColumnLimit - LineLengthAfter;
-
MinColumn = std::max(MinColumn, ChangeMinColumn);
- MaxColumn = std::min(MaxColumn, ChangeMaxColumn);
}
EndOfSequence = I;
- AlignMacroSequence(StartOfSequence, EndOfSequence, MinColumn, MaxColumn,
- FoundMatchOnLine, AlignMacrosMatches, Changes);
+ AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn,
+ AlignMacrosMatches, Changes);
}
void WhitespaceManager::alignConsecutiveAssignments() {
@@ -838,7 +823,12 @@ void WhitespaceManager::alignConsecutiveAssignments() {
return Style.AlignConsecutiveAssignments.AlignCompound
? C.Tok->getPrecedence() == prec::Assignment
- : C.Tok->is(tok::equal);
+ : (C.Tok->is(tok::equal) ||
+ // In Verilog the '<=' is not a compound assignment, thus
+ // it is aligned even when the AlignCompound option is not
+ // set.
+ (Style.isVerilog() && C.Tok->is(tok::lessequal) &&
+ C.Tok->getPrecedence() == prec::Assignment));
},
Changes, /*StartAt=*/0, Style.AlignConsecutiveAssignments,
/*RightJustify=*/true);
@@ -864,6 +854,110 @@ void WhitespaceManager::alignConsecutiveBitFields() {
Changes, /*StartAt=*/0, Style.AlignConsecutiveBitFields);
}
+void WhitespaceManager::alignConsecutiveShortCaseStatements() {
+ if (!Style.AlignConsecutiveShortCaseStatements.Enabled ||
+ !Style.AllowShortCaseLabelsOnASingleLine) {
+ return;
+ }
+
+ auto Matches = [&](const Change &C) {
+ if (Style.AlignConsecutiveShortCaseStatements.AlignCaseColons)
+ return C.Tok->is(TT_CaseLabelColon);
+
+ // Ignore 'IsInsideToken' to allow matching trailing comments which
+ // need to be reflowed as that causes the token to appear in two
+ // different changes, which will cause incorrect alignment as we'll
+ // reflow early due to detecting multiple aligning tokens per line.
+ return !C.IsInsideToken && C.Tok->Previous &&
+ C.Tok->Previous->is(TT_CaseLabelColon);
+ };
+
+ unsigned MinColumn = 0;
+
+ // Empty case statements don't break the alignment, but don't necessarily
+ // match our predicate, so we need to track their column so they can push out
+ // our alignment.
+ unsigned MinEmptyCaseColumn = 0;
+
+ // Start and end of the token sequence we're processing.
+ unsigned StartOfSequence = 0;
+ unsigned EndOfSequence = 0;
+
+ // Whether a matching token has been found on the current line.
+ bool FoundMatchOnLine = false;
+
+ bool LineIsComment = true;
+ bool LineIsEmptyCase = false;
+
+ unsigned I = 0;
+ for (unsigned E = Changes.size(); I != E; ++I) {
+ if (Changes[I].NewlinesBefore != 0) {
+ // Whether to break the alignment sequence because of an empty line.
+ bool EmptyLineBreak =
+ (Changes[I].NewlinesBefore > 1) &&
+ !Style.AlignConsecutiveShortCaseStatements.AcrossEmptyLines;
+
+ // Whether to break the alignment sequence because of a line without a
+ // match.
+ bool NoMatchBreak =
+ !FoundMatchOnLine &&
+ !(LineIsComment &&
+ Style.AlignConsecutiveShortCaseStatements.AcrossComments) &&
+ !LineIsEmptyCase;
+
+ if (EmptyLineBreak || NoMatchBreak) {
+ AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn,
+ Matches, Changes);
+ MinEmptyCaseColumn = 0;
+ }
+
+ // A new line starts, re-initialize line status tracking bools.
+ FoundMatchOnLine = false;
+ LineIsComment = true;
+ LineIsEmptyCase = false;
+ }
+
+ if (Changes[I].Tok->isNot(tok::comment))
+ LineIsComment = false;
+
+ if (Changes[I].Tok->is(TT_CaseLabelColon)) {
+ LineIsEmptyCase =
+ !Changes[I].Tok->Next || Changes[I].Tok->Next->isTrailingComment();
+
+ if (LineIsEmptyCase) {
+ if (Style.AlignConsecutiveShortCaseStatements.AlignCaseColons) {
+ MinEmptyCaseColumn =
+ std::max(MinEmptyCaseColumn, Changes[I].StartOfTokenColumn);
+ } else {
+ MinEmptyCaseColumn =
+ std::max(MinEmptyCaseColumn, Changes[I].StartOfTokenColumn + 2);
+ }
+ }
+ }
+
+ if (!Matches(Changes[I]))
+ continue;
+
+ if (LineIsEmptyCase)
+ continue;
+
+ FoundMatchOnLine = true;
+
+ if (StartOfSequence == 0)
+ StartOfSequence = I;
+
+ EndOfSequence = I + 1;
+
+ MinColumn = std::max(MinColumn, Changes[I].StartOfTokenColumn);
+
+ // Allow empty case statements to push out our alignment.
+ MinColumn = std::max(MinColumn, MinEmptyCaseColumn);
+ }
+
+ AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn, Matches,
+ Changes);
+}
+
void WhitespaceManager::alignConsecutiveDeclarations() {
if (!Style.AlignConsecutiveDeclarations.Enabled)
return;
@@ -871,7 +965,7 @@ void WhitespaceManager::alignConsecutiveDeclarations() {
AlignTokens(
Style,
[](Change const &C) {
- if (C.Tok->is(TT_FunctionDeclarationName))
+ if (C.Tok->isOneOf(TT_FunctionDeclarationName, TT_FunctionTypeLParen))
return true;
if (C.Tok->isNot(TT_StartOfName))
return false;
@@ -1167,7 +1261,7 @@ void WhitespaceManager::alignArrayInitializersRightJustified(
Changes[CellIter->Index].Spaces = (MaxNetWidth - ThisNetWidth);
auto RowCount = 1U;
auto Offset = std::distance(Cells.begin(), CellIter);
- for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
+ for (const auto *Next = CellIter->NextColumnElement; Next;
Next = Next->NextColumnElement) {
auto *Start = (Cells.begin() + RowCount * CellDescs.CellCounts[0]);
auto *End = Start + Offset;
@@ -1186,7 +1280,7 @@ void WhitespaceManager::alignArrayInitializersRightJustified(
Changes[CellIter->Index].Spaces += (i > 0) ? 1 : 0;
}
alignToStartOfCell(CellIter->Index, CellIter->EndIndex);
- for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
+ for (const auto *Next = CellIter->NextColumnElement; Next;
Next = Next->NextColumnElement) {
ThisWidth =
calculateCellWidth(Next->Index, Next->EndIndex, true) + NetWidth;
@@ -1228,7 +1322,7 @@ void WhitespaceManager::alignArrayInitializersLeftJustified(
}
auto RowCount = 1U;
auto Offset = std::distance(Cells.begin(), CellIter);
- for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
+ for (const auto *Next = CellIter->NextColumnElement; Next;
Next = Next->NextColumnElement) {
if (RowCount > CellDescs.CellCounts.size())
break;
@@ -1248,7 +1342,7 @@ void WhitespaceManager::alignArrayInitializersLeftJustified(
bool WhitespaceManager::isSplitCell(const CellDescription &Cell) {
if (Cell.HasSplit)
return true;
- for (const auto *Next = Cell.NextColumnElement; Next != nullptr;
+ for (const auto *Next = Cell.NextColumnElement; Next;
Next = Next->NextColumnElement) {
if (Next->HasSplit)
return true;
@@ -1401,8 +1495,7 @@ WhitespaceManager::CellDescriptions
WhitespaceManager::linkCells(CellDescriptions &&CellDesc) {
auto &Cells = CellDesc.Cells;
for (auto *CellIter = Cells.begin(); CellIter != Cells.end(); ++CellIter) {
- if (CellIter->NextColumnElement == nullptr &&
- ((CellIter + 1) != Cells.end())) {
+ if (!CellIter->NextColumnElement && (CellIter + 1) != Cells.end()) {
for (auto *NextIter = CellIter + 1; NextIter != Cells.end(); ++NextIter) {
if (NextIter->Cell == CellIter->Cell) {
CellIter->NextColumnElement = &(*NextIter);
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
index 2ccf8c08302a..df7e9add1cd4 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
@@ -232,6 +232,9 @@ private:
/// Align consecutive declarations over all \c Changes.
void alignChainedConditionals();
+ /// Align consecutive short case statements over all \c Changes.
+ void alignConsecutiveShortCaseStatements();
+
/// Align trailing comments over all \c Changes.
void alignTrailingComments();
@@ -294,7 +297,7 @@ private:
calculateCellWidth(CellIter->Index, CellIter->EndIndex, true);
if (Changes[CellIter->Index].NewlinesBefore == 0)
CellWidth += NetWidth;
- for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
+ for (const auto *Next = CellIter->NextColumnElement; Next;
Next = Next->NextColumnElement) {
auto ThisWidth = calculateCellWidth(Next->Index, Next->EndIndex, true);
if (Changes[Next->Index].NewlinesBefore == 0)
@@ -312,7 +315,7 @@ private:
auto MaxNetWidth = getNetWidth(CellStart, CellStop, InitialSpaces);
auto RowCount = 1U;
auto Offset = std::distance(CellStart, CellStop);
- for (const auto *Next = CellStop->NextColumnElement; Next != nullptr;
+ for (const auto *Next = CellStop->NextColumnElement; Next;
Next = Next->NextColumnElement) {
if (RowCount > MaxRowCount)
break;
diff --git a/contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp b/contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp
index 96f5926c0d7e..7b58eaa04df9 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp
@@ -183,21 +183,20 @@ std::unique_ptr<ASTConsumer> clang::CreateASTDeclNodeLister() {
/// ASTViewer - AST Visualization
namespace {
- class ASTViewer : public ASTConsumer {
- ASTContext *Context;
- public:
- void Initialize(ASTContext &Context) override {
- this->Context = &Context;
- }
+class ASTViewer : public ASTConsumer {
+ ASTContext *Context = nullptr;
- bool HandleTopLevelDecl(DeclGroupRef D) override {
- for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
- HandleTopLevelSingleDecl(*I);
- return true;
- }
+public:
+ void Initialize(ASTContext &Context) override { this->Context = &Context; }
- void HandleTopLevelSingleDecl(Decl *D);
- };
+ bool HandleTopLevelDecl(DeclGroupRef D) override {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
+ HandleTopLevelSingleDecl(*I);
+ return true;
+ }
+
+ void HandleTopLevelSingleDecl(Decl *D);
+};
}
void ASTViewer::HandleTopLevelSingleDecl(Decl *D) {
diff --git a/contrib/llvm-project/clang/lib/Frontend/ASTMerge.cpp b/contrib/llvm-project/clang/lib/Frontend/ASTMerge.cpp
index 14d781ccdf93..057ea4fd5bb3 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ASTMerge.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ASTMerge.cpp
@@ -48,7 +48,7 @@ void ASTMergeAction::ExecuteAction() {
/*ShouldOwnClient=*/true));
std::unique_ptr<ASTUnit> Unit = ASTUnit::LoadFromASTFile(
ASTFiles[I], CI.getPCHContainerReader(), ASTUnit::LoadEverything, Diags,
- CI.getFileSystemOpts(), false);
+ CI.getFileSystemOpts(), CI.getHeaderSearchOptsPtr(), false);
if (!Unit)
continue;
diff --git a/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
index 3b4f25182ac9..c13cec2dfa58 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
@@ -68,7 +68,6 @@
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
@@ -83,7 +82,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/Timer.h"
@@ -322,6 +320,7 @@ static uint64_t getDeclShowContexts(const NamedDecl *ND,
if (ID->getDefinition())
Contexts |= (1LL << CodeCompletionContext::CCC_Expression);
Contexts |= (1LL << CodeCompletionContext::CCC_ObjCInterfaceName);
+ Contexts |= (1LL << CodeCompletionContext::CCC_ObjCClassForwardDecl);
}
// Deal with tag names.
@@ -785,7 +784,8 @@ void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
const std::string &Filename, const PCHContainerReader &PCHContainerRdr,
WhatToLoad ToLoad, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- const FileSystemOptions &FileSystemOpts, bool UseDebugInfo,
+ const FileSystemOptions &FileSystemOpts,
+ std::shared_ptr<HeaderSearchOptions> HSOpts, bool UseDebugInfo,
bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics,
bool AllowASTWithCompilerErrors, bool UserFilesAreVolatile,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
@@ -810,8 +810,8 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
AST->getFileManager(),
UserFilesAreVolatile);
AST->ModuleCache = new InMemoryModuleCache;
- AST->HSOpts = std::make_shared<HeaderSearchOptions>();
- AST->HSOpts->ModuleFormat = std::string(PCHContainerRdr.getFormat());
+ AST->HSOpts = HSOpts ? HSOpts : std::make_shared<HeaderSearchOptions>();
+ AST->HSOpts->ModuleFormat = std::string(PCHContainerRdr.getFormats().front());
AST->HeaderInfo.reset(new HeaderSearch(AST->HSOpts,
AST->getSourceManager(),
AST->getDiagnostics(),
@@ -822,7 +822,6 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
// Gather Info for preprocessor construction later on.
HeaderSearch &HeaderInfo = *AST->HeaderInfo;
- unsigned Counter;
AST->PP = std::make_shared<Preprocessor>(
AST->PPOpts, AST->getDiagnostics(), *AST->LangOpts,
@@ -846,6 +845,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
/*isysroot=*/"",
/*DisableValidationKind=*/disableValid, AllowASTWithCompilerErrors);
+ unsigned Counter = 0;
AST->Reader->setListener(std::make_unique<ASTInfoCollector>(
*AST->PP, AST->Ctx.get(), *AST->HSOpts, *AST->PPOpts, *AST->LangOpts,
AST->TargetOpts, AST->Target, Counter));
@@ -859,7 +859,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
AST->Ctx->setExternalSource(AST->Reader);
switch (AST->Reader->ReadAST(Filename, serialization::MK_MainFile,
- SourceLocation(), ASTReader::ARR_None)) {
+ SourceLocation(), ASTReader::ARR_None)) {
case ASTReader::Success:
break;
@@ -877,6 +877,10 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
PP.setCounterValue(Counter);
+ Module *M = HeaderInfo.lookupModule(AST->getLangOpts().CurrentModule);
+ if (M && AST->getLangOpts().isCompilingModule() && M->isModulePurview())
+ AST->Ctx->setCurrentNamedModule(M);
+
// Create an AST consumer, even though it isn't used.
if (ToLoad >= LoadASTOnly)
AST->Consumer.reset(new ASTConsumer);
@@ -1141,6 +1145,7 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
// Create the compiler instance to use for building the AST.
std::unique_ptr<CompilerInstance> Clang(
new CompilerInstance(std::move(PCHContainerOps)));
+ Clang->setInvocation(CCInvocation);
// Clean up on error, disengage it if the function returns successfully.
auto CleanOnError = llvm::make_scope_exit([&]() {
@@ -1167,7 +1172,6 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
CICleanup(Clang.get());
- Clang->setInvocation(CCInvocation);
OriginalSourceFile =
std::string(Clang->getFrontendOpts().Inputs[0].getFile());
@@ -1393,7 +1397,8 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
llvm::ErrorOr<PrecompiledPreamble> NewPreamble = PrecompiledPreamble::Build(
PreambleInvocationIn, MainFileBuffer.get(), Bounds, *Diagnostics, VFS,
- PCHContainerOps, /*StoreInMemory=*/false, Callbacks);
+ PCHContainerOps, StorePreamblesInMemory, PreambleStoragePath,
+ Callbacks);
PreambleInvocationIn.getFrontendOpts().SkipFunctionBodies =
PreviousSkipFunctionBodies;
@@ -1733,10 +1738,11 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
return AST;
}
-ASTUnit *ASTUnit::LoadFromCommandLine(
+std::unique_ptr<ASTUnit> ASTUnit::LoadFromCommandLine(
const char **ArgBegin, const char **ArgEnd,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags, StringRef ResourceFilesPath,
+ bool StorePreamblesInMemory, StringRef PreambleStoragePath,
bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics,
ArrayRef<RemappedFile> RemappedFiles, bool RemappedFilesKeepOriginalName,
unsigned PrecompilePreambleAfterNParses, TranslationUnitKind TUKind,
@@ -1748,6 +1754,12 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
assert(Diags.get() && "no DiagnosticsEngine was provided");
+ // If no VFS was provided, create one that tracks the physical file system.
+ // If '-working-directory' was passed as an argument, 'createInvocation' will
+ // set this as the current working directory of the VFS.
+ if (!VFS)
+ VFS = llvm::vfs::createPhysicalFileSystem();
+
SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
std::shared_ptr<CompilerInvocation> CI;
@@ -1793,10 +1805,10 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
AST->Diagnostics = Diags;
AST->FileSystemOpts = CI->getFileSystemOpts();
- if (!VFS)
- VFS = llvm::vfs::getRealFileSystem();
VFS = createVFSFromCompilerInvocation(*CI, *Diags, VFS);
AST->FileMgr = new FileManager(AST->FileSystemOpts, VFS);
+ AST->StorePreamblesInMemory = StorePreamblesInMemory;
+ AST->PreambleStoragePath = PreambleStoragePath;
AST->ModuleCache = new InMemoryModuleCache;
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
@@ -1829,7 +1841,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
return nullptr;
}
- return AST.release();
+ return AST;
}
bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
@@ -2019,6 +2031,7 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
case CodeCompletionContext::CCC_IncludedFile:
case CodeCompletionContext::CCC_Attribute:
case CodeCompletionContext::CCC_NewName:
+ case CodeCompletionContext::CCC_ObjCClassForwardDecl:
// We're looking for nothing, or we're looking for names that cannot
// be hidden.
return;
@@ -2304,16 +2317,11 @@ bool ASTUnit::Save(StringRef File) {
if (HadModuleLoaderFatalFailure)
return true;
- // Write to a temporary file and later rename it to the actual file, to avoid
- // possible race conditions.
- SmallString<128> TempPath;
- TempPath = File;
- TempPath += "-%%%%%%%%";
// FIXME: Can we somehow regenerate the stat cache here, or do we need to
// unconditionally create a stat cache when we parse the file?
- if (llvm::Error Err = llvm::writeFileAtomically(
- TempPath, File, [this](llvm::raw_ostream &Out) {
+ if (llvm::Error Err = llvm::writeToOutput(
+ File, [this](llvm::raw_ostream &Out) {
return serialize(Out) ? llvm::make_error<llvm::StringError>(
"ASTUnit serialization failed",
llvm::inconvertibleErrorCode())
@@ -2636,9 +2644,9 @@ bool ASTUnit::visitLocalTopLevelDecls(void *context, DeclVisitorFn Fn) {
return true;
}
-const FileEntry *ASTUnit::getPCHFile() {
+OptionalFileEntryRef ASTUnit::getPCHFile() {
if (!Reader)
- return nullptr;
+ return std::nullopt;
serialization::ModuleFile *Mod = nullptr;
Reader->getModuleManager().visit([&Mod](serialization::ModuleFile &M) {
@@ -2661,7 +2669,7 @@ const FileEntry *ASTUnit::getPCHFile() {
if (Mod)
return Mod->File;
- return nullptr;
+ return std::nullopt;
}
bool ASTUnit::isModuleFile() const {
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
index ac9f8f8ed51c..92e0b74e38f0 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
@@ -46,7 +46,6 @@
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/LockFileManager.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
@@ -55,6 +54,7 @@
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
#include <optional>
#include <time.h>
#include <utility>
@@ -113,7 +113,7 @@ bool CompilerInstance::createTarget() {
// Check whether AuxTarget exists, if not, then create TargetInfo for the
// other side of CUDA/OpenMP/SYCL compilation.
if (!getAuxTarget() &&
- (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
+ (getLangOpts().CUDA || getLangOpts().OpenMPIsTargetDevice ||
getLangOpts().SYCLIsDevice) &&
!getFrontendOpts().AuxTriple.empty()) {
auto TO = std::make_shared<TargetOptions>();
@@ -605,8 +605,9 @@ struct ReadModuleNames : ASTReaderListener {
Module *Current = Stack.pop_back_val();
if (Current->IsUnimportable) continue;
Current->IsAvailable = true;
- Stack.insert(Stack.end(),
- Current->submodule_begin(), Current->submodule_end());
+ auto SubmodulesRange = Current->submodules();
+ Stack.insert(Stack.end(), SubmodulesRange.begin(),
+ SubmodulesRange.end());
}
}
}
@@ -851,6 +852,9 @@ CompilerInstance::createOutputFileImpl(StringRef OutputPath, bool Binary,
// relative to that.
std::optional<SmallString<128>> AbsPath;
if (OutputPath != "-" && !llvm::sys::path::is_absolute(OutputPath)) {
+ assert(hasFileManager() &&
+ "File Manager is required to fix up relative path.\n");
+
AbsPath.emplace(OutputPath);
FileMgr->FixupRelativePath(*AbsPath);
OutputPath = *AbsPath;
@@ -891,10 +895,12 @@ CompilerInstance::createOutputFileImpl(StringRef OutputPath, bool Binary,
TempPath += "-%%%%%%%%";
TempPath += OutputExtension;
TempPath += ".tmp";
+ llvm::sys::fs::OpenFlags BinaryFlags =
+ Binary ? llvm::sys::fs::OF_None : llvm::sys::fs::OF_Text;
Expected<llvm::sys::fs::TempFile> ExpectedFile =
llvm::sys::fs::TempFile::create(
TempPath, llvm::sys::fs::all_read | llvm::sys::fs::all_write,
- Binary ? llvm::sys::fs::OF_None : llvm::sys::fs::OF_Text);
+ BinaryFlags);
llvm::Error E = handleErrors(
ExpectedFile.takeError(), [&](const llvm::ECError &E) -> llvm::Error {
@@ -904,7 +910,9 @@ CompilerInstance::createOutputFileImpl(StringRef OutputPath, bool Binary,
StringRef Parent = llvm::sys::path::parent_path(OutputPath);
EC = llvm::sys::fs::create_directories(Parent);
if (!EC) {
- ExpectedFile = llvm::sys::fs::TempFile::create(TempPath);
+ ExpectedFile = llvm::sys::fs::TempFile::create(
+ TempPath, llvm::sys::fs::all_read | llvm::sys::fs::all_write,
+ BinaryFlags);
if (!ExpectedFile)
return llvm::errorCodeToError(
llvm::errc::no_such_file_or_directory);
@@ -978,10 +986,9 @@ bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input,
? FileMgr.getSTDIN()
: FileMgr.getFileRef(InputFile, /*OpenFile=*/true);
if (!FileOrErr) {
- // FIXME: include the error in the diagnostic even when it's not stdin.
auto EC = llvm::errorToErrorCode(FileOrErr.takeError());
if (InputFile != "-")
- Diags.Report(diag::err_fe_error_reading) << InputFile;
+ Diags.Report(diag::err_fe_error_reading) << InputFile << EC.message();
else
Diags.Report(diag::err_fe_error_reading_stdin) << EC.message();
return false;
@@ -1084,9 +1091,12 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
}
StringRef StatsFile = getFrontendOpts().StatsFile;
if (!StatsFile.empty()) {
+ llvm::sys::fs::OpenFlags FileFlags = llvm::sys::fs::OF_TextWithCRLF;
+ if (getFrontendOpts().AppendStats)
+ FileFlags |= llvm::sys::fs::OF_Append;
std::error_code EC;
- auto StatS = std::make_unique<llvm::raw_fd_ostream>(
- StatsFile, EC, llvm::sys::fs::OF_TextWithCRLF);
+ auto StatS =
+ std::make_unique<llvm::raw_fd_ostream>(StatsFile, EC, FileFlags);
if (EC) {
getDiagnostics().Report(diag::warn_fe_unable_to_open_stats_file)
<< StatsFile << EC.message();
@@ -2021,8 +2031,12 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
PrivateModule, PP->getIdentifierInfo(Module->Name)->getTokenID());
PrivPath.push_back(std::make_pair(&II, Path[0].second));
+ std::string FileName;
+ // If there is a modulemap module or prebuilt module, load it.
if (PP->getHeaderSearchInfo().lookupModule(PrivateModule, ImportLoc, true,
- !IsInclusionDirective))
+ !IsInclusionDirective) ||
+ selectModuleSource(nullptr, PrivateModule, FileName, BuiltModules,
+ PP->getHeaderSearchInfo()) != MS_ModuleNotFound)
Sub = loadModule(ImportLoc, PrivPath, Visibility, IsInclusionDirective);
if (Sub) {
MapPrivateSubModToTopLevel = true;
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
index 0bb9c8c83c63..1fba91bed041 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
@@ -12,7 +12,6 @@
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/CommentOptions.h"
-#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticDriver.h"
#include "clang/Basic/DiagnosticOptions.h"
@@ -57,9 +56,9 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/Frontend/Debug/Options.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Linker/Linker.h"
#include "llvm/MC/MCTargetOptions.h"
@@ -77,7 +76,6 @@
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/HashBuilder.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
@@ -87,6 +85,8 @@
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <atomic>
#include <cassert>
@@ -427,8 +427,10 @@ static T extractMaskValue(T KeyPath) {
}
#define PARSE_OPTION_WITH_MARSHALLING( \
- ARGS, DIAGS, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX) \
+ ARGS, DIAGS, PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, \
+ PARAM, HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, \
+ KEYPATH, DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, \
+ DENORMALIZER, MERGER, EXTRACTOR, TABLE_INDEX) \
if ((FLAGS)&options::CC1Option) { \
KEYPATH = MERGER(KEYPATH, DEFAULT_VALUE); \
if (IMPLIED_CHECK) \
@@ -442,9 +444,10 @@ static T extractMaskValue(T KeyPath) {
// Capture the extracted value as a lambda argument to avoid potential issues
// with lifetime extension of the reference.
#define GENERATE_OPTION_WITH_MARSHALLING( \
- ARGS, STRING_ALLOCATOR, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, \
- TABLE_INDEX) \
+ ARGS, STRING_ALLOCATOR, PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, \
+ ALIASARGS, FLAGS, PARAM, HELPTEXT, METAVAR, VALUES, SPELLING, \
+ SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, IMPLIED_CHECK, \
+ IMPLIED_VALUE, NORMALIZER, DENORMALIZER, MERGER, EXTRACTOR, TABLE_INDEX) \
if ((FLAGS)&options::CC1Option) { \
[&](const auto &Extracted) { \
if (ALWAYS_EMIT || \
@@ -641,18 +644,31 @@ using GenerateFn = llvm::function_ref<void(
CompilerInvocation &, SmallVectorImpl<const char *> &,
CompilerInvocation::StringAllocator)>;
-// May perform round-trip of command line arguments. By default, the round-trip
-// is enabled in assert builds. This can be overwritten at run-time via the
-// "-round-trip-args" and "-no-round-trip-args" command line flags.
-// During round-trip, the command line arguments are parsed into a dummy
-// instance of CompilerInvocation which is used to generate the command line
-// arguments again. The real CompilerInvocation instance is then created by
-// parsing the generated arguments, not the original ones.
+/// May perform round-trip of command line arguments. By default, the round-trip
+/// is enabled in assert builds. This can be overwritten at run-time via the
+/// "-round-trip-args" and "-no-round-trip-args" command line flags, or via the
+/// ForceRoundTrip parameter.
+///
+/// During round-trip, the command line arguments are parsed into a dummy
+/// CompilerInvocation, which is used to generate the command line arguments
+/// again. The real CompilerInvocation is then created by parsing the generated
+/// arguments, not the original ones. This (in combination with tests covering
+/// argument behavior) ensures the generated command line is complete (doesn't
+/// drop/mangle any arguments).
+///
+/// Finally, we check the command line that was used to create the real
+/// CompilerInvocation instance. By default, we compare it to the command line
+/// the real CompilerInvocation generates. This checks whether the generator is
+/// deterministic. If \p CheckAgainstOriginalInvocation is enabled, we instead
+/// compare it to the original command line to verify the original command-line
+/// was canonical and can round-trip exactly.
static bool RoundTrip(ParseFn Parse, GenerateFn Generate,
CompilerInvocation &RealInvocation,
CompilerInvocation &DummyInvocation,
ArrayRef<const char *> CommandLineArgs,
- DiagnosticsEngine &Diags, const char *Argv0) {
+ DiagnosticsEngine &Diags, const char *Argv0,
+ bool CheckAgainstOriginalInvocation = false,
+ bool ForceRoundTrip = false) {
#ifndef NDEBUG
bool DoRoundTripDefault = true;
#else
@@ -660,11 +676,15 @@ static bool RoundTrip(ParseFn Parse, GenerateFn Generate,
#endif
bool DoRoundTrip = DoRoundTripDefault;
- for (const auto *Arg : CommandLineArgs) {
- if (Arg == StringRef("-round-trip-args"))
- DoRoundTrip = true;
- if (Arg == StringRef("-no-round-trip-args"))
- DoRoundTrip = false;
+ if (ForceRoundTrip) {
+ DoRoundTrip = true;
+ } else {
+ for (const auto *Arg : CommandLineArgs) {
+ if (Arg == StringRef("-round-trip-args"))
+ DoRoundTrip = true;
+ if (Arg == StringRef("-no-round-trip-args"))
+ DoRoundTrip = false;
+ }
}
// If round-trip was not requested, simply run the parser with the real
@@ -719,30 +739,34 @@ static bool RoundTrip(ParseFn Parse, GenerateFn Generate,
// Generate arguments from the dummy invocation. If Generate is the
// inverse of Parse, the newly generated arguments must have the same
// semantics as the original.
- SmallVector<const char *> GeneratedArgs1;
- Generate(DummyInvocation, GeneratedArgs1, SA);
+ SmallVector<const char *> GeneratedArgs;
+ Generate(DummyInvocation, GeneratedArgs, SA);
// Run the second parse, now on the generated arguments, and with the real
// invocation and diagnostics. The result is what we will end up using for the
// rest of compilation, so if Generate is not inverse of Parse, something down
// the line will break.
- bool Success2 = Parse(RealInvocation, GeneratedArgs1, Diags, Argv0);
+ bool Success2 = Parse(RealInvocation, GeneratedArgs, Diags, Argv0);
// The first parse on original arguments succeeded, but second parse of
// generated arguments failed. Something must be wrong with the generator.
if (!Success2) {
Diags.Report(diag::err_cc1_round_trip_ok_then_fail);
Diags.Report(diag::note_cc1_round_trip_generated)
- << 1 << SerializeArgs(GeneratedArgs1);
+ << 1 << SerializeArgs(GeneratedArgs);
return false;
}
- // Generate arguments again, this time from the options we will end up using
- // for the rest of the compilation.
- SmallVector<const char *> GeneratedArgs2;
- Generate(RealInvocation, GeneratedArgs2, SA);
+ SmallVector<const char *> ComparisonArgs;
+ if (CheckAgainstOriginalInvocation)
+ // Compare against original arguments.
+ ComparisonArgs.assign(CommandLineArgs.begin(), CommandLineArgs.end());
+ else
+ // Generate arguments again, this time from the options we will end up using
+ // for the rest of the compilation.
+ Generate(RealInvocation, ComparisonArgs, SA);
- // Compares two lists of generated arguments.
+ // Compares two lists of arguments.
auto Equal = [](const ArrayRef<const char *> A,
const ArrayRef<const char *> B) {
return std::equal(A.begin(), A.end(), B.begin(), B.end(),
@@ -754,23 +778,41 @@ static bool RoundTrip(ParseFn Parse, GenerateFn Generate,
// If we generated different arguments from what we assume are two
// semantically equivalent CompilerInvocations, the Generate function may
// be non-deterministic.
- if (!Equal(GeneratedArgs1, GeneratedArgs2)) {
+ if (!Equal(GeneratedArgs, ComparisonArgs)) {
Diags.Report(diag::err_cc1_round_trip_mismatch);
Diags.Report(diag::note_cc1_round_trip_generated)
- << 1 << SerializeArgs(GeneratedArgs1);
+ << 1 << SerializeArgs(GeneratedArgs);
Diags.Report(diag::note_cc1_round_trip_generated)
- << 2 << SerializeArgs(GeneratedArgs2);
+ << 2 << SerializeArgs(ComparisonArgs);
return false;
}
Diags.Report(diag::remark_cc1_round_trip_generated)
- << 1 << SerializeArgs(GeneratedArgs1);
+ << 1 << SerializeArgs(GeneratedArgs);
Diags.Report(diag::remark_cc1_round_trip_generated)
- << 2 << SerializeArgs(GeneratedArgs2);
+ << 2 << SerializeArgs(ComparisonArgs);
return Success2;
}
+bool CompilerInvocation::checkCC1RoundTrip(ArrayRef<const char *> Args,
+ DiagnosticsEngine &Diags,
+ const char *Argv0) {
+ CompilerInvocation DummyInvocation1, DummyInvocation2;
+ return RoundTrip(
+ [](CompilerInvocation &Invocation, ArrayRef<const char *> CommandLineArgs,
+ DiagnosticsEngine &Diags, const char *Argv0) {
+ return CreateFromArgsImpl(Invocation, CommandLineArgs, Diags, Argv0);
+ },
+ [](CompilerInvocation &Invocation, SmallVectorImpl<const char *> &Args,
+ StringAllocator SA) {
+ Args.push_back("-cc1");
+ Invocation.generateCC1CommandLine(Args, SA);
+ },
+ DummyInvocation1, DummyInvocation2, Args, Diags, Argv0,
+ /*CheckAgainstOriginalInvocation=*/true, /*ForceRoundTrip=*/true);
+}
+
static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
OptSpecifier GroupWithValue,
std::vector<std::string> &Diagnostics) {
@@ -809,14 +851,8 @@ static void GenerateAnalyzerArgs(AnalyzerOptions &Opts,
CompilerInvocation::StringAllocator SA) {
const AnalyzerOptions *AnalyzerOpts = &Opts;
-#define ANALYZER_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define ANALYZER_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Args, SA, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef ANALYZER_OPTION_WITH_MARSHALLING
@@ -877,14 +913,20 @@ static void GenerateAnalyzerArgs(AnalyzerOptions &Opts,
AnalyzerOptions ConfigOpts;
parseAnalyzerConfigs(ConfigOpts, nullptr);
- for (const auto &C : Opts.Config) {
+ // Sort options by key to avoid relying on StringMap iteration order.
+ SmallVector<std::pair<StringRef, StringRef>, 4> SortedConfigOpts;
+ for (const auto &C : Opts.Config)
+ SortedConfigOpts.emplace_back(C.getKey(), C.getValue());
+ llvm::sort(SortedConfigOpts, llvm::less_first());
+
+ for (const auto &[Key, Value] : SortedConfigOpts) {
// Don't generate anything that came from parseAnalyzerConfigs. It would be
// redundant and may not be valid on the command line.
- auto Entry = ConfigOpts.Config.find(C.getKey());
- if (Entry != ConfigOpts.Config.end() && Entry->getValue() == C.getValue())
+ auto Entry = ConfigOpts.Config.find(Key);
+ if (Entry != ConfigOpts.Config.end() && Entry->getValue() == Value)
continue;
- GenerateArg(Args, OPT_analyzer_config, C.getKey() + "=" + C.getValue(), SA);
+ GenerateArg(Args, OPT_analyzer_config, Key + "=" + Value, SA);
}
// Nothing to generate for FullCompilerInvocation.
@@ -896,14 +938,8 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
AnalyzerOptions *AnalyzerOpts = &Opts;
-#define ANALYZER_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define ANALYZER_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef ANALYZER_OPTION_WITH_MARSHALLING
@@ -1020,15 +1056,6 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
A->claim();
Opts.Config[key] = std::string(val);
-
- // FIXME: Remove this hunk after clang-17 released.
- constexpr auto SingleFAM =
- "consider-single-element-arrays-as-flexible-array-members";
- if (key == SingleFAM) {
- Diags.Report(diag::warn_analyzer_deprecated_option_with_alternative)
- << SingleFAM << "clang-17"
- << "-fstrict-flex-arrays=<N>";
- }
}
}
@@ -1304,8 +1331,9 @@ static std::string serializeXRayInstrumentationBundle(const XRayInstrSet &S) {
// Set the profile kind using fprofile-instrument-use-path.
static void setPGOUseInstrumentor(CodeGenOptions &Opts,
const Twine &ProfileName,
+ llvm::vfs::FileSystem &FS,
DiagnosticsEngine &Diags) {
- auto ReaderOrErr = llvm::IndexedInstrProfReader::create(ProfileName);
+ auto ReaderOrErr = llvm::IndexedInstrProfReader::create(ProfileName, FS);
if (auto E = ReaderOrErr.takeError()) {
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"Error in reading profile %0: %1");
@@ -1339,14 +1367,8 @@ void CompilerInvocation::GenerateCodeGenArgs(
else
GenerateArg(Args, OPT_O, Twine(Opts.OptimizationLevel), SA);
-#define CODEGEN_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define CODEGEN_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Args, SA, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef CODEGEN_OPTION_WITH_MARSHALLING
@@ -1366,28 +1388,28 @@ void CompilerInvocation::GenerateCodeGenArgs(
std::optional<StringRef> DebugInfoVal;
switch (Opts.DebugInfo) {
- case codegenoptions::DebugLineTablesOnly:
+ case llvm::codegenoptions::DebugLineTablesOnly:
DebugInfoVal = "line-tables-only";
break;
- case codegenoptions::DebugDirectivesOnly:
+ case llvm::codegenoptions::DebugDirectivesOnly:
DebugInfoVal = "line-directives-only";
break;
- case codegenoptions::DebugInfoConstructor:
+ case llvm::codegenoptions::DebugInfoConstructor:
DebugInfoVal = "constructor";
break;
- case codegenoptions::LimitedDebugInfo:
+ case llvm::codegenoptions::LimitedDebugInfo:
DebugInfoVal = "limited";
break;
- case codegenoptions::FullDebugInfo:
+ case llvm::codegenoptions::FullDebugInfo:
DebugInfoVal = "standalone";
break;
- case codegenoptions::UnusedTypeInfo:
+ case llvm::codegenoptions::UnusedTypeInfo:
DebugInfoVal = "unused-types";
break;
- case codegenoptions::NoDebugInfo: // default value
+ case llvm::codegenoptions::NoDebugInfo: // default value
DebugInfoVal = std::nullopt;
break;
- case codegenoptions::LocTrackingOnly: // implied value
+ case llvm::codegenoptions::LocTrackingOnly: // implied value
DebugInfoVal = std::nullopt;
break;
}
@@ -1432,10 +1454,10 @@ void CompilerInvocation::GenerateCodeGenArgs(
GenerateArg(Args, OPT_gpubnames, SA);
auto TNK = Opts.getDebugSimpleTemplateNames();
- if (TNK != codegenoptions::DebugTemplateNamesKind::Full) {
- if (TNK == codegenoptions::DebugTemplateNamesKind::Simple)
+ if (TNK != llvm::codegenoptions::DebugTemplateNamesKind::Full) {
+ if (TNK == llvm::codegenoptions::DebugTemplateNamesKind::Simple)
GenerateArg(Args, OPT_gsimple_template_names_EQ, "simple", SA);
- else if (TNK == codegenoptions::DebugTemplateNamesKind::Mangled)
+ else if (TNK == llvm::codegenoptions::DebugTemplateNamesKind::Mangled)
GenerateArg(Args, OPT_gsimple_template_names_EQ, "mangled", SA);
}
// ProfileInstrumentUsePath is marshalled automatically, no need to generate
@@ -1505,8 +1527,8 @@ void CompilerInvocation::GenerateCodeGenArgs(
F.Filename, SA);
}
- GenerateArg(
- Args, Opts.EmulatedTLS ? OPT_femulated_tls : OPT_fno_emulated_tls, SA);
+ if (Opts.EmulatedTLS)
+ GenerateArg(Args, OPT_femulated_tls, SA);
if (Opts.FPDenormalMode != llvm::DenormalMode::getIEEE())
GenerateArg(Args, OPT_fdenormal_fp_math_EQ, Opts.FPDenormalMode.str(), SA);
@@ -1529,6 +1551,9 @@ void CompilerInvocation::GenerateCodeGenArgs(
if (Opts.EnableAIXExtendedAltivecABI)
GenerateArg(Args, OPT_mabi_EQ_vec_extabi, SA);
+ if (Opts.XCOFFReadOnlyPointers)
+ GenerateArg(Args, OPT_mxcoff_roptr, SA);
+
if (!Opts.OptRecordPasses.empty())
GenerateArg(Args, OPT_opt_record_passes, Opts.OptRecordPasses, SA);
@@ -1601,14 +1626,8 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
// variable name and type.
const LangOptions *LangOpts = &LangOptsRef;
-#define CODEGEN_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define CODEGEN_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef CODEGEN_OPTION_WITH_MARSHALLING
@@ -1642,18 +1661,19 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
if (Arg *A = Args.getLastArg(OPT_debug_info_kind_EQ)) {
unsigned Val =
llvm::StringSwitch<unsigned>(A->getValue())
- .Case("line-tables-only", codegenoptions::DebugLineTablesOnly)
- .Case("line-directives-only", codegenoptions::DebugDirectivesOnly)
- .Case("constructor", codegenoptions::DebugInfoConstructor)
- .Case("limited", codegenoptions::LimitedDebugInfo)
- .Case("standalone", codegenoptions::FullDebugInfo)
- .Case("unused-types", codegenoptions::UnusedTypeInfo)
+ .Case("line-tables-only", llvm::codegenoptions::DebugLineTablesOnly)
+ .Case("line-directives-only",
+ llvm::codegenoptions::DebugDirectivesOnly)
+ .Case("constructor", llvm::codegenoptions::DebugInfoConstructor)
+ .Case("limited", llvm::codegenoptions::LimitedDebugInfo)
+ .Case("standalone", llvm::codegenoptions::FullDebugInfo)
+ .Case("unused-types", llvm::codegenoptions::UnusedTypeInfo)
.Default(~0U);
if (Val == ~0U)
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
<< A->getValue();
else
- Opts.setDebugInfo(static_cast<codegenoptions::DebugInfoKind>(Val));
+ Opts.setDebugInfo(static_cast<llvm::codegenoptions::DebugInfoKind>(Val));
}
// If -fuse-ctor-homing is set and limited debug info is already on, then use
@@ -1661,23 +1681,21 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
if (const Arg *A =
Args.getLastArg(OPT_fuse_ctor_homing, OPT_fno_use_ctor_homing)) {
if (A->getOption().matches(OPT_fuse_ctor_homing) &&
- Opts.getDebugInfo() == codegenoptions::LimitedDebugInfo)
- Opts.setDebugInfo(codegenoptions::DebugInfoConstructor);
+ Opts.getDebugInfo() == llvm::codegenoptions::LimitedDebugInfo)
+ Opts.setDebugInfo(llvm::codegenoptions::DebugInfoConstructor);
if (A->getOption().matches(OPT_fno_use_ctor_homing) &&
- Opts.getDebugInfo() == codegenoptions::DebugInfoConstructor)
- Opts.setDebugInfo(codegenoptions::LimitedDebugInfo);
+ Opts.getDebugInfo() == llvm::codegenoptions::DebugInfoConstructor)
+ Opts.setDebugInfo(llvm::codegenoptions::LimitedDebugInfo);
}
for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ)) {
auto Split = StringRef(Arg).split('=');
- Opts.DebugPrefixMap.insert(
- {std::string(Split.first), std::string(Split.second)});
+ Opts.DebugPrefixMap.emplace_back(Split.first, Split.second);
}
for (const auto &Arg : Args.getAllArgValues(OPT_fcoverage_prefix_map_EQ)) {
auto Split = StringRef(Arg).split('=');
- Opts.CoveragePrefixMap.insert(
- {std::string(Split.first), std::string(Split.second)});
+ Opts.CoveragePrefixMap.emplace_back(Split.first, Split.second);
}
const llvm::Triple::ArchType DebugEntryValueArchs[] = {
@@ -1720,13 +1738,10 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
<< A->getSpelling() << A->getValue();
Opts.setDebugSimpleTemplateNames(
StringRef(A->getValue()) == "simple"
- ? codegenoptions::DebugTemplateNamesKind::Simple
- : codegenoptions::DebugTemplateNamesKind::Mangled);
+ ? llvm::codegenoptions::DebugTemplateNamesKind::Simple
+ : llvm::codegenoptions::DebugTemplateNamesKind::Mangled);
}
- if (!Opts.ProfileInstrumentUsePath.empty())
- setPGOUseInstrumentor(Opts, Opts.ProfileInstrumentUsePath, Diags);
-
if (const Arg *A = Args.getLastArg(OPT_ftime_report, OPT_ftime_report_EQ)) {
Opts.TimePasses = true;
@@ -1752,6 +1767,8 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.PrepareForThinLTO = true;
else if (S != "full")
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ if (Args.hasArg(OPT_funified_lto))
+ Opts.PrepareForThinLTO = true;
}
if (Arg *A = Args.getLastArg(OPT_fthinlto_index_EQ)) {
if (IK.getLanguage() != Language::LLVM_IR)
@@ -1777,7 +1794,7 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.MemoryProfileOutput = MemProfileBasename;
memcpy(Opts.CoverageVersion, "408*", 4);
- if (Opts.EmitGcovArcs || Opts.EmitGcovNotes) {
+ if (Opts.CoverageNotesFile.size() || Opts.CoverageDataFile.size()) {
if (Args.hasArg(OPT_coverage_version_EQ)) {
StringRef CoverageVersion = Args.getLastArgValue(OPT_coverage_version_EQ);
if (CoverageVersion.size() != 4) {
@@ -1867,15 +1884,10 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.LinkBitcodeFiles.push_back(F);
}
- if (!Args.getLastArg(OPT_femulated_tls) &&
- !Args.getLastArg(OPT_fno_emulated_tls)) {
- Opts.EmulatedTLS = T.hasDefaultEmulatedTLS();
- }
-
if (Arg *A = Args.getLastArg(OPT_ftlsmodel_EQ)) {
if (T.isOSAIX()) {
StringRef Name = A->getValue();
- if (Name != "global-dynamic")
+ if (Name != "global-dynamic" && Name != "local-exec")
Diags.Report(diag::err_aix_unsupported_tls_model) << Name;
}
}
@@ -1917,14 +1929,23 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
}
}
- if (Arg *A =
- Args.getLastArg(OPT_mabi_EQ_vec_default, OPT_mabi_EQ_vec_extabi)) {
+ if (Arg *A = Args.getLastArg(OPT_mxcoff_roptr)) {
if (!T.isOSAIX())
Diags.Report(diag::err_drv_unsupported_opt_for_target)
<< A->getSpelling() << T.str();
- const Option &O = A->getOption();
- Opts.EnableAIXExtendedAltivecABI = O.matches(OPT_mabi_EQ_vec_extabi);
+ // Since the storage mapping class is specified per csect,
+ // without using data sections, it is less effective to use read-only
+ // pointers. Using read-only pointers may cause other RO variables in the
+ // same csect to become RW when the linker acts upon `-bforceimprw`;
+ // therefore, we require that separate data sections
+ // are used when `-mxcoff-roptr` is in effect. We respect the setting of
+ // data-sections since we have not found reasons to do otherwise that
+ // overcome the user surprise of not respecting the setting.
+ if (!Args.hasFlag(OPT_fdata_sections, OPT_fno_data_sections, false))
+ Diags.Report(diag::err_roptr_requires_data_sections);
+
+ Opts.XCOFFReadOnlyPointers = true;
}
if (Arg *A = Args.getLastArg(OPT_mabi_EQ_quadword_atomics)) {
@@ -1962,8 +1983,8 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.OptimizationRemarkAnalysis.hasValidPattern();
bool UsingSampleProfile = !Opts.SampleProfileFile.empty();
- bool UsingProfile = UsingSampleProfile ||
- (Opts.getProfileUse() != CodeGenOptions::ProfileNone);
+ bool UsingProfile =
+ UsingSampleProfile || !Opts.ProfileInstrumentUsePath.empty();
if (Opts.DiagnosticsWithHotness && !UsingProfile &&
// An IR file will contain PGO as metadata
@@ -2018,8 +2039,9 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
// If the user requested a flag that requires source locations available in
// the backend, make sure that the backend tracks source location information.
- if (NeedLocTracking && Opts.getDebugInfo() == codegenoptions::NoDebugInfo)
- Opts.setDebugInfo(codegenoptions::LocTrackingOnly);
+ if (NeedLocTracking &&
+ Opts.getDebugInfo() == llvm::codegenoptions::NoDebugInfo)
+ Opts.setDebugInfo(llvm::codegenoptions::LocTrackingOnly);
// Parse -fsanitize-recover= arguments.
// FIXME: Report unrecoverable sanitizers incorrectly specified here.
@@ -2050,14 +2072,8 @@ GenerateDependencyOutputArgs(const DependencyOutputOptions &Opts,
SmallVectorImpl<const char *> &Args,
CompilerInvocation::StringAllocator SA) {
const DependencyOutputOptions &DependencyOutputOpts = Opts;
-#define DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Args, SA, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING
@@ -2091,14 +2107,8 @@ static bool ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
unsigned NumErrorsBefore = Diags.getNumErrors();
DependencyOutputOptions &DependencyOutputOpts = Opts;
-#define DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING
@@ -2214,14 +2224,8 @@ static void GenerateFileSystemArgs(const FileSystemOptions &Opts,
CompilerInvocation::StringAllocator SA) {
const FileSystemOptions &FileSystemOpts = Opts;
-#define FILE_SYSTEM_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define FILE_SYSTEM_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Args, SA, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef FILE_SYSTEM_OPTION_WITH_MARSHALLING
}
@@ -2232,14 +2236,8 @@ static bool ParseFileSystemArgs(FileSystemOptions &Opts, const ArgList &Args,
FileSystemOptions &FileSystemOpts = Opts;
-#define FILE_SYSTEM_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define FILE_SYSTEM_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef FILE_SYSTEM_OPTION_WITH_MARSHALLING
@@ -2250,14 +2248,8 @@ static void GenerateMigratorArgs(const MigratorOptions &Opts,
SmallVectorImpl<const char *> &Args,
CompilerInvocation::StringAllocator SA) {
const MigratorOptions &MigratorOpts = Opts;
-#define MIGRATOR_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define MIGRATOR_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Args, SA, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef MIGRATOR_OPTION_WITH_MARSHALLING
}
@@ -2268,14 +2260,8 @@ static bool ParseMigratorArgs(MigratorOptions &Opts, const ArgList &Args,
MigratorOptions &MigratorOpts = Opts;
-#define MIGRATOR_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define MIGRATOR_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef MIGRATOR_OPTION_WITH_MARSHALLING
@@ -2286,14 +2272,8 @@ void CompilerInvocation::GenerateDiagnosticArgs(
const DiagnosticOptions &Opts, SmallVectorImpl<const char *> &Args,
StringAllocator SA, bool DefaultDiagColor) {
const DiagnosticOptions *DiagnosticOpts = &Opts;
-#define DIAG_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define DIAG_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Args, SA, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef DIAG_OPTION_WITH_MARSHALLING
@@ -2354,10 +2334,20 @@ clang::CreateAndPopulateDiagOpts(ArrayRef<const char *> Argv) {
unsigned MissingArgIndex, MissingArgCount;
InputArgList Args = getDriverOptTable().ParseArgs(
Argv.slice(1), MissingArgIndex, MissingArgCount);
+
+ bool ShowColors = true;
+ if (std::optional<std::string> NoColor =
+ llvm::sys::Process::GetEnv("NO_COLOR");
+ NoColor && !NoColor->empty()) {
+ // If the user set the NO_COLOR environment variable, we'll honor that
+ // unless the command line overrides it.
+ ShowColors = false;
+ }
+
// We ignore MissingArgCount and the return value of ParseDiagnosticArgs.
// Any errors that would be diagnosed here will also be diagnosed later,
// when the DiagnosticsEngine actually exists.
- (void)ParseDiagnosticArgs(*DiagOpts, Args);
+ (void)ParseDiagnosticArgs(*DiagOpts, Args, /*Diags=*/nullptr, ShowColors);
return DiagOpts;
}
@@ -2377,14 +2367,8 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
// "DiagnosticOpts->". Let's provide the expected variable name and type.
DiagnosticOptions *DiagnosticOpts = &Opts;
-#define DIAG_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, *Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define DIAG_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, *Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef DIAG_OPTION_WITH_MARSHALLING
@@ -2413,9 +2397,9 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
DiagMask = DiagnosticLevelMask::All;
Opts.setVerifyIgnoreUnexpected(DiagMask);
if (Opts.TabStop == 0 || Opts.TabStop > DiagnosticOptions::MaxTabStop) {
- Opts.TabStop = DiagnosticOptions::DefaultTabStop;
Diags->Report(diag::warn_ignoring_ftabstop_value)
<< Opts.TabStop << DiagnosticOptions::DefaultTabStop;
+ Opts.TabStop = DiagnosticOptions::DefaultTabStop;
}
addDiagnosticArgs(Args, OPT_W_Group, OPT_W_value_Group, Opts.Warnings);
@@ -2530,14 +2514,8 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts,
CompilerInvocation::StringAllocator SA,
bool IsHeader) {
const FrontendOptions &FrontendOpts = Opts;
-#define FRONTEND_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define FRONTEND_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Args, SA, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef FRONTEND_OPTION_WITH_MARSHALLING
@@ -2706,14 +2684,8 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
FrontendOptions &FrontendOpts = Opts;
-#define FRONTEND_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define FRONTEND_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef FRONTEND_OPTION_WITH_MARSHALLING
@@ -2965,14 +2937,8 @@ static void GenerateHeaderSearchArgs(HeaderSearchOptions &Opts,
SmallVectorImpl<const char *> &Args,
CompilerInvocation::StringAllocator SA) {
const HeaderSearchOptions *HeaderSearchOpts = &Opts;
-#define HEADER_SEARCH_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define HEADER_SEARCH_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Args, SA, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef HEADER_SEARCH_OPTION_WITH_MARSHALLING
@@ -3095,14 +3061,8 @@ static bool ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
HeaderSearchOptions *HeaderSearchOpts = &Opts;
-#define HEADER_SEARCH_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define HEADER_SEARCH_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef HEADER_SEARCH_OPTION_WITH_MARSHALLING
@@ -3217,7 +3177,7 @@ static bool ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
Opts.AddSystemHeaderPrefix(
A->getValue(), A->getOption().matches(OPT_system_header_prefix));
- for (const auto *A : Args.filtered(OPT_ivfsoverlay))
+ for (const auto *A : Args.filtered(OPT_ivfsoverlay, OPT_vfsoverlay))
Opts.AddVFSOverlayFile(A->getValue());
return Diags.getNumErrors() == NumErrorsBefore;
@@ -3348,14 +3308,8 @@ void CompilerInvocation::GenerateLangArgs(const LangOptions &Opts,
const LangOptions *LangOpts = &Opts;
-#define LANG_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define LANG_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Args, SA, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef LANG_OPTION_WITH_MARSHALLING
@@ -3443,14 +3397,14 @@ void CompilerInvocation::GenerateLangArgs(const LangOptions &Opts,
if (Opts.OpenMP && !Opts.OpenMPSimd) {
GenerateArg(Args, OPT_fopenmp, SA);
- if (Opts.OpenMP != 50)
+ if (Opts.OpenMP != 51)
GenerateArg(Args, OPT_fopenmp_version_EQ, Twine(Opts.OpenMP), SA);
if (!Opts.OpenMPUseTLS)
GenerateArg(Args, OPT_fnoopenmp_use_tls, SA);
- if (Opts.OpenMPIsDevice)
- GenerateArg(Args, OPT_fopenmp_is_device, SA);
+ if (Opts.OpenMPIsTargetDevice)
+ GenerateArg(Args, OPT_fopenmp_is_target_device, SA);
if (Opts.OpenMPIRBuilder)
GenerateArg(Args, OPT_fopenmp_enable_irbuilder, SA);
@@ -3459,7 +3413,7 @@ void CompilerInvocation::GenerateLangArgs(const LangOptions &Opts,
if (Opts.OpenMPSimd) {
GenerateArg(Args, OPT_fopenmp_simd, SA);
- if (Opts.OpenMP != 50)
+ if (Opts.OpenMP != 51)
GenerateArg(Args, OPT_fopenmp_version_EQ, Twine(Opts.OpenMP), SA);
}
@@ -3671,14 +3625,8 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
// "LangOpts->". Let's provide the expected variable name and type.
LangOptions *LangOpts = &Opts;
-#define LANG_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define LANG_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef LANG_OPTION_WITH_MARSHALLING
@@ -3796,9 +3744,9 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.Blocks = Args.hasArg(OPT_fblocks) || (Opts.OpenCL
&& Opts.OpenCLVersion == 200);
- Opts.ConvergentFunctions = Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) ||
- Opts.SYCLIsDevice ||
- Args.hasArg(OPT_fconvergent_functions);
+ Opts.ConvergentFunctions = Args.hasArg(OPT_fconvergent_functions) ||
+ Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) ||
+ Opts.SYCLIsDevice;
Opts.NoBuiltin = Args.hasArg(OPT_fno_builtin) || Opts.Freestanding;
if (!Opts.NoBuiltin)
@@ -3833,7 +3781,7 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
}
// Check if -fopenmp is specified and set default version to 5.0.
- Opts.OpenMP = Args.hasArg(OPT_fopenmp) ? 50 : 0;
+ Opts.OpenMP = Args.hasArg(OPT_fopenmp) ? 51 : 0;
// Check if -fopenmp-simd is specified.
bool IsSimdSpecified =
Args.hasFlag(options::OPT_fopenmp_simd, options::OPT_fno_openmp_simd,
@@ -3841,23 +3789,24 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.OpenMPSimd = !Opts.OpenMP && IsSimdSpecified;
Opts.OpenMPUseTLS =
Opts.OpenMP && !Args.hasArg(options::OPT_fnoopenmp_use_tls);
- Opts.OpenMPIsDevice =
- Opts.OpenMP && Args.hasArg(options::OPT_fopenmp_is_device);
+ Opts.OpenMPIsTargetDevice =
+ Opts.OpenMP && Args.hasArg(options::OPT_fopenmp_is_target_device);
Opts.OpenMPIRBuilder =
Opts.OpenMP && Args.hasArg(options::OPT_fopenmp_enable_irbuilder);
bool IsTargetSpecified =
- Opts.OpenMPIsDevice || Args.hasArg(options::OPT_fopenmp_targets_EQ);
+ Opts.OpenMPIsTargetDevice || Args.hasArg(options::OPT_fopenmp_targets_EQ);
- Opts.ConvergentFunctions = Opts.ConvergentFunctions || Opts.OpenMPIsDevice;
+ Opts.ConvergentFunctions =
+ Opts.ConvergentFunctions || Opts.OpenMPIsTargetDevice;
if (Opts.OpenMP || Opts.OpenMPSimd) {
if (int Version = getLastArgIntValue(
Args, OPT_fopenmp_version_EQ,
- (IsSimdSpecified || IsTargetSpecified) ? 50 : Opts.OpenMP, Diags))
+ (IsSimdSpecified || IsTargetSpecified) ? 51 : Opts.OpenMP, Diags))
Opts.OpenMP = Version;
// Provide diagnostic when a given target is not expected to be an OpenMP
// device or host.
- if (!Opts.OpenMPIsDevice) {
+ if (!Opts.OpenMPIsTargetDevice) {
switch (T.getArch()) {
default:
break;
@@ -3872,13 +3821,13 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
// Set the flag to prevent the implementation from emitting device exception
// handling code for those requiring so.
- if ((Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN())) ||
+ if ((Opts.OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN())) ||
Opts.OpenCLCPlusPlus) {
Opts.Exceptions = 0;
Opts.CXXExceptions = 0;
}
- if (Opts.OpenMPIsDevice && T.isNVPTX()) {
+ if (Opts.OpenMPIsTargetDevice && T.isNVPTX()) {
Opts.OpenMPCUDANumSMs =
getLastArgIntValue(Args, options::OPT_fopenmp_cuda_number_of_sm_EQ,
Opts.OpenMPCUDANumSMs, Diags);
@@ -3892,15 +3841,15 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
// Set the value of the debugging flag used in the new offloading device RTL.
// Set either by a specific value or to a default if not specified.
- if (Opts.OpenMPIsDevice && (Args.hasArg(OPT_fopenmp_target_debug) ||
- Args.hasArg(OPT_fopenmp_target_debug_EQ))) {
+ if (Opts.OpenMPIsTargetDevice && (Args.hasArg(OPT_fopenmp_target_debug) ||
+ Args.hasArg(OPT_fopenmp_target_debug_EQ))) {
Opts.OpenMPTargetDebug = getLastArgIntValue(
Args, OPT_fopenmp_target_debug_EQ, Opts.OpenMPTargetDebug, Diags);
if (!Opts.OpenMPTargetDebug && Args.hasArg(OPT_fopenmp_target_debug))
Opts.OpenMPTargetDebug = 1;
}
- if (Opts.OpenMPIsDevice) {
+ if (Opts.OpenMPIsTargetDevice) {
if (Args.hasArg(OPT_fopenmp_assume_teams_oversubscription))
Opts.OpenMPTeamSubscription = true;
if (Args.hasArg(OPT_fopenmp_assume_threads_oversubscription))
@@ -3948,7 +3897,8 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
}
// Set CUDA mode for OpenMP target NVPTX/AMDGCN if specified in options
- Opts.OpenMPCUDAMode = Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN()) &&
+ Opts.OpenMPCUDAMode = Opts.OpenMPIsTargetDevice &&
+ (T.isNVPTX() || T.isAMDGCN()) &&
Args.hasArg(options::OPT_fopenmp_cuda_mode);
// FIXME: Eliminate this dependency.
@@ -4179,14 +4129,8 @@ static void GeneratePreprocessorArgs(PreprocessorOptions &Opts,
const CodeGenOptions &CodeGenOpts) {
PreprocessorOptions *PreprocessorOpts = &Opts;
-#define PREPROCESSOR_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define PREPROCESSOR_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Args, SA, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef PREPROCESSOR_OPTION_WITH_MARSHALLING
@@ -4254,14 +4198,8 @@ static bool ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
PreprocessorOptions *PreprocessorOpts = &Opts;
-#define PREPROCESSOR_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define PREPROCESSOR_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef PREPROCESSOR_OPTION_WITH_MARSHALLING
@@ -4354,14 +4292,8 @@ static void GeneratePreprocessorOutputArgs(
CompilerInvocation::StringAllocator SA, frontend::ActionKind Action) {
const PreprocessorOutputOptions &PreprocessorOutputOpts = Opts;
-#define PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Args, SA, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING
@@ -4381,14 +4313,8 @@ static bool ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
PreprocessorOutputOptions &PreprocessorOutputOpts = Opts;
-#define PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING
@@ -4403,14 +4329,8 @@ static void GenerateTargetArgs(const TargetOptions &Opts,
SmallVectorImpl<const char *> &Args,
CompilerInvocation::StringAllocator SA) {
const TargetOptions *TargetOpts = &Opts;
-#define TARGET_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define TARGET_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Args, SA, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef TARGET_OPTION_WITH_MARSHALLING
@@ -4428,14 +4348,8 @@ static bool ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
TargetOptions *TargetOpts = &Opts;
-#define TARGET_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define TARGET_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef TARGET_OPTION_WITH_MARSHALLING
@@ -4522,7 +4436,7 @@ bool CompilerInvocation::CreateFromArgsImpl(
}
// Set the triple of the host for OpenMP device compile.
- if (LangOpts.OpenMPIsDevice)
+ if (LangOpts.OpenMPIsTargetDevice)
Res.getTargetOpts().HostTriple = Res.getFrontendOpts().AuxTriple;
ParseCodeGenArgs(Res.getCodeGenOpts(), Args, DashX, Diags, T,
@@ -4563,6 +4477,17 @@ bool CompilerInvocation::CreateFromArgsImpl(
append_range(Res.getCodeGenOpts().CommandLineArgs, CommandLineArgs);
}
+ // Set PGOOptions. Need to create a temporary VFS to read the profile
+ // to determine the PGO type.
+ if (!Res.getCodeGenOpts().ProfileInstrumentUsePath.empty()) {
+ auto FS =
+ createVFSFromOverlayFiles(Res.getHeaderSearchOpts().VFSOverlayFiles,
+ Diags, llvm::vfs::getRealFileSystem());
+ setPGOUseInstrumentor(Res.getCodeGenOpts(),
+ Res.getCodeGenOpts().ProfileInstrumentUsePath, *FS,
+ Diags);
+ }
+
FixupInvocation(Res, Diags, Args, DashX);
return Diags.getNumErrors() == NumErrorsBefore;
diff --git a/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
index 35b5e2144e6d..1df3a12fce14 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -22,7 +22,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/Host.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang;
using namespace llvm::opt;
diff --git a/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp b/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
index fe4218b6e672..44268e71dc24 100644
--- a/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
@@ -49,6 +49,7 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
DepCollector.maybeAddDependency(
llvm::sys::path::remove_leading_dotslash(*Filename),
/*FromModule*/ false, isSystem(FileType), /*IsModuleFile*/ false,
+ &PP.getFileManager(),
/*IsMissing*/ false);
}
@@ -56,9 +57,11 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
SrcMgr::CharacteristicKind FileType) override {
StringRef Filename =
llvm::sys::path::remove_leading_dotslash(SkippedFile.getName());
- DepCollector.maybeAddDependency(Filename, /*FromModule=*/false,
+ DepCollector.maybeAddDependency(Filename,
+ /*FromModule=*/false,
/*IsSystem=*/isSystem(FileType),
/*IsModuleFile=*/false,
+ &PP.getFileManager(),
/*IsMissing=*/false);
}
@@ -69,9 +72,12 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override {
if (!File)
- DepCollector.maybeAddDependency(FileName, /*FromModule*/false,
- /*IsSystem*/false, /*IsModuleFile*/false,
- /*IsMissing*/true);
+ DepCollector.maybeAddDependency(FileName,
+ /*FromModule*/ false,
+ /*IsSystem*/ false,
+ /*IsModuleFile*/ false,
+ &PP.getFileManager(),
+ /*IsMissing*/ true);
// Files that actually exist are handled by FileChanged.
}
@@ -82,9 +88,11 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
return;
StringRef Filename =
llvm::sys::path::remove_leading_dotslash(File->getName());
- DepCollector.maybeAddDependency(Filename, /*FromModule=*/false,
+ DepCollector.maybeAddDependency(Filename,
+ /*FromModule=*/false,
/*IsSystem=*/isSystem(FileType),
/*IsModuleFile=*/false,
+ &PP.getFileManager(),
/*IsMissing=*/false);
}
@@ -100,10 +108,12 @@ struct DepCollectorMMCallbacks : public ModuleMapCallbacks {
void moduleMapFileRead(SourceLocation Loc, const FileEntry &Entry,
bool IsSystem) override {
StringRef Filename = Entry.getName();
- DepCollector.maybeAddDependency(Filename, /*FromModule*/false,
- /*IsSystem*/IsSystem,
- /*IsModuleFile*/false,
- /*IsMissing*/false);
+ DepCollector.maybeAddDependency(Filename,
+ /*FromModule*/ false,
+ /*IsSystem*/ IsSystem,
+ /*IsModuleFile*/ false,
+ /*FileMgr*/ nullptr,
+ /*IsMissing*/ false);
}
};
@@ -118,9 +128,11 @@ struct DepCollectorASTListener : public ASTReaderListener {
}
void visitModuleFile(StringRef Filename,
serialization::ModuleKind Kind) override {
- DepCollector.maybeAddDependency(Filename, /*FromModule*/true,
- /*IsSystem*/false, /*IsModuleFile*/true,
- /*IsMissing*/false);
+ DepCollector.maybeAddDependency(Filename,
+ /*FromModule*/ true,
+ /*IsSystem*/ false, /*IsModuleFile*/ true,
+ /*FileMgr*/ nullptr,
+ /*IsMissing*/ false);
}
bool visitInputFile(StringRef Filename, bool IsSystem,
bool IsOverridden, bool IsExplicitModule) override {
@@ -132,8 +144,9 @@ struct DepCollectorASTListener : public ASTReaderListener {
if (auto FE = FileMgr.getOptionalFileRef(Filename))
Filename = FE->getName();
- DepCollector.maybeAddDependency(Filename, /*FromModule*/true, IsSystem,
- /*IsModuleFile*/false, /*IsMissing*/false);
+ DepCollector.maybeAddDependency(Filename, /*FromModule*/ true, IsSystem,
+ /*IsModuleFile*/ false, /*FileMgr*/ nullptr,
+ /*IsMissing*/ false);
return true;
}
};
@@ -142,9 +155,15 @@ struct DepCollectorASTListener : public ASTReaderListener {
void DependencyCollector::maybeAddDependency(StringRef Filename,
bool FromModule, bool IsSystem,
bool IsModuleFile,
+ FileManager *FileMgr,
bool IsMissing) {
- if (sawDependency(Filename, FromModule, IsSystem, IsModuleFile, IsMissing))
+ if (sawDependency(Filename, FromModule, IsSystem, IsModuleFile, IsMissing)) {
+ if (IsSystem && FileMgr && shouldCanonicalizeSystemDependencies()) {
+ if (auto F = FileMgr->getFile(Filename))
+ Filename = FileMgr->getCanonicalName(*F);
+ }
addDependency(Filename);
+ }
}
bool DependencyCollector::addDependency(StringRef Filename) {
@@ -192,6 +211,7 @@ DependencyFileGenerator::DependencyFileGenerator(
const DependencyOutputOptions &Opts)
: OutputFile(Opts.OutputFile), Targets(Opts.Targets),
IncludeSystemHeaders(Opts.IncludeSystemHeaders),
+ CanonicalSystemHeaders(Opts.CanonicalSystemHeaders),
PhonyTarget(Opts.UsePhonyTargets),
AddMissingHeaderDeps(Opts.AddMissingHeaderDeps), SeenMissingHeader(false),
IncludeModuleFiles(Opts.IncludeModuleFiles),
diff --git a/contrib/llvm-project/clang/lib/Frontend/DiagnosticRenderer.cpp b/contrib/llvm-project/clang/lib/Frontend/DiagnosticRenderer.cpp
index 9177ba9f4f06..18c8be7a7293 100644
--- a/contrib/llvm-project/clang/lib/Frontend/DiagnosticRenderer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/DiagnosticRenderer.cpp
@@ -493,20 +493,18 @@ static bool checkRangesForMacroArgExpansion(FullSourceLoc Loc,
SmallVector<CharSourceRange, 4> SpellingRanges;
mapDiagnosticRanges(Loc, Ranges, SpellingRanges);
- /// Count all valid ranges.
- unsigned ValidCount = 0;
- for (const auto &Range : Ranges)
- if (Range.isValid())
- ValidCount++;
+ // Count all valid ranges.
+ unsigned ValidCount =
+ llvm::count_if(Ranges, [](const auto &R) { return R.isValid(); });
if (ValidCount > SpellingRanges.size())
return false;
- /// To store the source location of the argument location.
+ // To store the source location of the argument location.
FullSourceLoc ArgumentLoc;
- /// Set the ArgumentLoc to the beginning location of the expansion of Loc
- /// so to check if the ranges expands to the same beginning location.
+ // Set the ArgumentLoc to the beginning location of the expansion of Loc
+ // so to check if the ranges expands to the same beginning location.
if (!Loc.isMacroArgExpansion(&ArgumentLoc))
return false;
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
index 1e276642016d..c6f958a6077b 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
@@ -364,22 +364,22 @@ static std::error_code collectModuleHeaderIncludes(
}
// Note that Module->PrivateHeaders will not be a TopHeader.
- if (Module::Header UmbrellaHeader = Module->getUmbrellaHeader()) {
- Module->addTopHeader(UmbrellaHeader.Entry);
+ if (std::optional<Module::Header> UmbrellaHeader =
+ Module->getUmbrellaHeaderAsWritten()) {
+ Module->addTopHeader(UmbrellaHeader->Entry);
if (Module->Parent)
// Include the umbrella header for submodules.
- addHeaderInclude(UmbrellaHeader.PathRelativeToRootModuleDirectory,
+ addHeaderInclude(UmbrellaHeader->PathRelativeToRootModuleDirectory,
Includes, LangOpts, Module->IsExternC);
- } else if (Module::DirectoryName UmbrellaDir = Module->getUmbrellaDir()) {
+ } else if (std::optional<Module::DirectoryName> UmbrellaDir =
+ Module->getUmbrellaDirAsWritten()) {
// Add all of the headers we find in this subdirectory.
std::error_code EC;
SmallString<128> DirNative;
- llvm::sys::path::native(UmbrellaDir.Entry->getName(), DirNative);
+ llvm::sys::path::native(UmbrellaDir->Entry.getName(), DirNative);
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
- SmallVector<
- std::pair<std::string, OptionalFileEntryRefDegradesToFileEntryPtr>, 8>
- Headers;
+ SmallVector<std::pair<std::string, FileEntryRef>, 8> Headers;
for (llvm::vfs::recursive_directory_iterator Dir(FS, DirNative, EC), End;
Dir != End && !EC; Dir.increment(EC)) {
// Check whether this entry has an extension typically associated with
@@ -406,7 +406,7 @@ static std::error_code collectModuleHeaderIncludes(
for (int I = 0; I != Dir.level() + 1; ++I, ++PathIt)
Components.push_back(*PathIt);
SmallString<128> RelativeHeader(
- UmbrellaDir.PathRelativeToRootModuleDirectory);
+ UmbrellaDir->PathRelativeToRootModuleDirectory);
for (auto It = Components.rbegin(), End = Components.rend(); It != End;
++It)
llvm::sys::path::append(RelativeHeader, *It);
@@ -429,11 +429,9 @@ static std::error_code collectModuleHeaderIncludes(
}
// Recurse into submodules.
- for (clang::Module::submodule_iterator Sub = Module->submodule_begin(),
- SubEnd = Module->submodule_end();
- Sub != SubEnd; ++Sub)
+ for (auto *Submodule : Module->submodules())
if (std::error_code Err = collectModuleHeaderIncludes(
- LangOpts, FileMgr, Diag, ModMap, *Sub, Includes))
+ LangOpts, FileMgr, Diag, ModMap, Submodule, Includes))
return Err;
return std::error_code();
@@ -448,7 +446,8 @@ static bool loadModuleMapForModuleBuild(CompilerInstance &CI, bool IsSystem,
// Map the current input to a file.
FileID ModuleMapID = SrcMgr.getMainFileID();
- const FileEntry *ModuleMap = SrcMgr.getFileEntryForID(ModuleMapID);
+ OptionalFileEntryRef ModuleMap = SrcMgr.getFileEntryRefForID(ModuleMapID);
+ assert(ModuleMap && "MainFileID without FileEntry");
// If the module map is preprocessed, handle the initial line marker;
// line directives are not part of the module map syntax in general.
@@ -461,7 +460,7 @@ static bool loadModuleMapForModuleBuild(CompilerInstance &CI, bool IsSystem,
}
// Load the module map file.
- if (HS.loadModuleMapFile(ModuleMap, IsSystem, ModuleMapID, &Offset,
+ if (HS.loadModuleMapFile(*ModuleMap, IsSystem, ModuleMapID, &Offset,
PresumedModuleMapFile))
return true;
@@ -470,10 +469,11 @@ static bool loadModuleMapForModuleBuild(CompilerInstance &CI, bool IsSystem,
// Infer framework module if possible.
if (HS.getModuleMap().canInferFrameworkModule(ModuleMap->getDir())) {
- SmallString<128> InferredFrameworkPath = ModuleMap->getDir()->getName();
+ SmallString<128> InferredFrameworkPath = ModuleMap->getDir().getName();
llvm::sys::path::append(InferredFrameworkPath,
CI.getLangOpts().ModuleName + ".framework");
- if (auto Dir = CI.getFileManager().getDirectory(InferredFrameworkPath))
+ if (auto Dir =
+ CI.getFileManager().getOptionalDirectoryRef(InferredFrameworkPath))
(void)HS.getModuleMap().inferFrameworkModule(*Dir, IsSystem, nullptr);
}
@@ -510,7 +510,7 @@ static Module *prepareToBuildModule(CompilerInstance &CI,
// Inform the preprocessor that includes from within the input buffer should
// be resolved relative to the build directory of the module map file.
- CI.getPreprocessor().setMainFileDir(M->Directory);
+ CI.getPreprocessor().setMainFileDir(*M->Directory);
// If the module was inferred from a different module map (via an expanded
// umbrella module definition), track that fact.
@@ -552,8 +552,9 @@ getInputBufferForModule(CompilerInstance &CI, Module *M) {
// Collect the set of #includes we need to build the module.
SmallString<256> HeaderContents;
std::error_code Err = std::error_code();
- if (Module::Header UmbrellaHeader = M->getUmbrellaHeader())
- addHeaderInclude(UmbrellaHeader.PathRelativeToRootModuleDirectory,
+ if (std::optional<Module::Header> UmbrellaHeader =
+ M->getUmbrellaHeaderAsWritten())
+ addHeaderInclude(UmbrellaHeader->PathRelativeToRootModuleDirectory,
HeaderContents, CI.getLangOpts(), M->IsExternC);
Err = collectModuleHeaderIncludes(
CI.getLangOpts(), FileMgr, CI.getDiagnostics(),
@@ -614,7 +615,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
std::string(InputFile), CI.getPCHContainerReader(),
ASTUnit::LoadPreprocessorOnly, ASTDiags, CI.getFileSystemOpts(),
- CI.getCodeGenOpts().DebugTypeExtRefs);
+ /*HeaderSearchOptions=*/nullptr, CI.getCodeGenOpts().DebugTypeExtRefs);
if (!AST)
return false;
@@ -682,6 +683,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
std::string(InputFile), CI.getPCHContainerReader(),
ASTUnit::LoadEverything, Diags, CI.getFileSystemOpts(),
+ CI.getHeaderSearchOptsPtr(),
CI.getCodeGenOpts().DebugTypeExtRefs);
if (!AST)
@@ -821,11 +823,9 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
"trying to build a header unit without a Pre-processor?");
HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
// Relative searches begin from CWD.
- const DirectoryEntry *Dir = nullptr;
- if (auto DirOrErr = CI.getFileManager().getDirectory("."))
- Dir = *DirOrErr;
- SmallVector<std::pair<const FileEntry *, const DirectoryEntry *>, 1> CWD;
- CWD.push_back({nullptr, Dir});
+ auto Dir = CI.getFileManager().getOptionalDirectoryRef(".");
+ SmallVector<std::pair<const FileEntry *, DirectoryEntryRef>, 1> CWD;
+ CWD.push_back({nullptr, *Dir});
OptionalFileEntryRef FE =
HS.LookupFile(FileName, SourceLocation(),
/*Angled*/ Input.getKind().getHeaderUnitKind() ==
@@ -910,7 +910,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// If we were asked to load any module map files, do so now.
for (const auto &Filename : CI.getFrontendOpts().ModuleMapFiles) {
- if (auto File = CI.getFileManager().getFile(Filename))
+ if (auto File = CI.getFileManager().getOptionalFileRef(Filename))
CI.getPreprocessor().getHeaderSearchInfo().loadModuleMapFile(
*File, /*IsSystem*/false);
else
@@ -1117,6 +1117,9 @@ void FrontendAction::EndSourceFile() {
// FrontendAction.
CI.clearOutputFiles(/*EraseFiles=*/shouldEraseOutputFiles());
+ // The resources are owned by AST when the current file is AST.
+ // So we reset the resources here to avoid users accessing it
+ // accidently.
if (isCurrentFileAST()) {
if (DisableFree) {
CI.resetAndLeakPreprocessor();
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
index 2d81178fa60e..8a4a4cf6823d 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
@@ -202,7 +202,8 @@ GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
/*AllowASTWithErrors=*/
+CI.getFrontendOpts().AllowPCMWithCompilerErrors,
/*IncludeTimestamps=*/
- +CI.getFrontendOpts().BuildingImplicitModule,
+ +CI.getFrontendOpts().BuildingImplicitModule &&
+ +CI.getFrontendOpts().IncludeTimestamps,
/*ShouldCacheASTInMemory=*/
+CI.getFrontendOpts().BuildingImplicitModule));
Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
@@ -250,11 +251,6 @@ GenerateModuleFromModuleMapAction::CreateOutputFile(CompilerInstance &CI,
bool GenerateModuleInterfaceAction::BeginSourceFileAction(
CompilerInstance &CI) {
- if (!CI.getLangOpts().ModulesTS && !CI.getLangOpts().CPlusPlusModules) {
- CI.getDiagnostics().Report(diag::err_module_interface_requires_cpp_modules);
- return false;
- }
-
CI.getLangOpts().setCompilingModule(LangOptions::CMK_ModuleInterface);
return GenerateModuleAction::BeginSourceFileAction(CI);
@@ -376,6 +372,8 @@ private:
return "ExplicitTemplateArgumentSubstitution";
case CodeSynthesisContext::DeducedTemplateArgumentSubstitution:
return "DeducedTemplateArgumentSubstitution";
+ case CodeSynthesisContext::LambdaExpressionSubstitution:
+ return "LambdaExpressionSubstitution";
case CodeSynthesisContext::PriorTemplateArgumentSubstitution:
return "PriorTemplateArgumentSubstitution";
case CodeSynthesisContext::DefaultTemplateArgumentChecking:
@@ -414,6 +412,8 @@ private:
return "MarkingClassDllexported";
case CodeSynthesisContext::BuildingBuiltinDumpStructCall:
return "BuildingBuiltinDumpStructCall";
+ case CodeSynthesisContext::BuildingDeductionGuides:
+ return "BuildingDeductionGuides";
}
return "";
}
@@ -460,6 +460,8 @@ private:
return;
}
+ assert(NamedCtx && "NamedCtx cannot be null");
+
if (const auto *Decl = dyn_cast<ParmVarDecl>(NamedTemplate)) {
OS << "unnamed function parameter " << Decl->getFunctionScopeIndex()
<< " ";
@@ -763,14 +765,18 @@ static StringRef ModuleKindName(Module::ModuleKind MK) {
return "Module Map Module";
case Module::ModuleInterfaceUnit:
return "Interface Unit";
+ case Module::ModuleImplementationUnit:
+ return "Implementation Unit";
case Module::ModulePartitionInterface:
return "Partition Interface";
case Module::ModulePartitionImplementation:
return "Partition Implementation";
case Module::ModuleHeaderUnit:
return "Header Unit";
- case Module::GlobalModuleFragment:
+ case Module::ExplicitGlobalModuleFragment:
return "Global Module Fragment";
+ case Module::ImplicitGlobalModuleFragment:
+ return "Implicit Module Fragment";
case Module::PrivateModuleFragment:
return "Private Module Fragment";
}
@@ -780,14 +786,12 @@ static StringRef ModuleKindName(Module::ModuleKind MK) {
void DumpModuleInfoAction::ExecuteAction() {
assert(isCurrentFileAST() && "dumping non-AST?");
// Set up the output file.
- std::unique_ptr<llvm::raw_fd_ostream> OutFile;
CompilerInstance &CI = getCompilerInstance();
StringRef OutputFileName = CI.getFrontendOpts().OutputFile;
if (!OutputFileName.empty() && OutputFileName != "-") {
std::error_code EC;
- OutFile.reset(new llvm::raw_fd_ostream(OutputFileName.str(), EC,
- llvm::sys::fs::OF_TextWithCRLF));
- OutputStream = OutFile.get();
+ OutputStream.reset(new llvm::raw_fd_ostream(
+ OutputFileName.str(), EC, llvm::sys::fs::OF_TextWithCRLF));
}
llvm::raw_ostream &Out = OutputStream ? *OutputStream : llvm::outs();
@@ -884,7 +888,7 @@ void DumpModuleInfoAction::ExecuteAction() {
}
// Now let's print out any modules we did not see as part of the Primary.
- for (auto SM : SubModMap) {
+ for (const auto &SM : SubModMap) {
if (!SM.second.Seen && SM.second.Mod) {
Out << " " << ModuleKindName(SM.second.Kind) << " '" << SM.first
<< "' at index #" << SM.second.Idx
diff --git a/contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp b/contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp
index 2ab480940264..9c1bf490fcd6 100644
--- a/contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp
@@ -43,12 +43,27 @@ public:
delete OutputFile;
}
+ HeaderIncludesCallback(const HeaderIncludesCallback &) = delete;
+ HeaderIncludesCallback &operator=(const HeaderIncludesCallback &) = delete;
+
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID) override;
void FileSkipped(const FileEntryRef &SkippedFile, const Token &FilenameTok,
SrcMgr::CharacteristicKind FileType) override;
+
+private:
+ bool ShouldShowHeader(SrcMgr::CharacteristicKind HeaderType) {
+ if (!DepOpts.IncludeSystemHeaders && isSystem(HeaderType))
+ return false;
+
+ // Show the current header if we are (a) past the predefines, or (b) showing
+ // all headers and in the predefines at a depth past the initial file and
+ // command line buffers.
+ return (HasProcessedPredefines ||
+ (ShowAllHeaders && CurrentIncludeDepth > 2));
+ }
};
/// A callback for emitting header usage information to a file in JSON. Each
@@ -78,6 +93,10 @@ public:
delete OutputFile;
}
+ HeaderIncludesJSONCallback(const HeaderIncludesJSONCallback &) = delete;
+ HeaderIncludesJSONCallback &
+ operator=(const HeaderIncludesJSONCallback &) = delete;
+
void EndOfMainFile() override;
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
@@ -202,38 +221,24 @@ void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
// We track when we are done with the predefines by watching for the first
// place where we drop back to a nesting depth of 1.
- if (CurrentIncludeDepth == 1 && !HasProcessedPredefines) {
- if (!DepOpts.ShowIncludesPretendHeader.empty()) {
- PrintHeaderInfo(OutputFile, DepOpts.ShowIncludesPretendHeader,
- ShowDepth, 2, MSStyle);
- }
+ if (CurrentIncludeDepth == 1 && !HasProcessedPredefines)
HasProcessedPredefines = true;
- }
return;
- } else
+ } else {
+ return;
+ }
+
+ if (!ShouldShowHeader(NewFileType))
return;
- // Show the header if we are (a) past the predefines, or (b) showing all
- // headers and in the predefines at a depth past the initial file and command
- // line buffers.
- bool ShowHeader = (HasProcessedPredefines ||
- (ShowAllHeaders && CurrentIncludeDepth > 2));
unsigned IncludeDepth = CurrentIncludeDepth;
if (!HasProcessedPredefines)
--IncludeDepth; // Ignore indent from <built-in>.
- else if (!DepOpts.ShowIncludesPretendHeader.empty())
- ++IncludeDepth; // Pretend inclusion by ShowIncludesPretendHeader.
-
- if (!DepOpts.IncludeSystemHeaders && isSystem(NewFileType))
- ShowHeader = false;
- // Dump the header include information we are past the predefines buffer or
- // are showing all headers and this isn't the magic implicit <command line>
- // header.
// FIXME: Identify headers in a more robust way than comparing their name to
// "<command line>" and "<built-in>" in a bunch of places.
- if (ShowHeader && Reason == PPCallbacks::EnterFile &&
+ if (Reason == PPCallbacks::EnterFile &&
UserLoc.getFilename() != StringRef("<command line>")) {
PrintHeaderInfo(OutputFile, UserLoc.getFilename(), ShowDepth, IncludeDepth,
MSStyle);
@@ -246,7 +251,7 @@ void HeaderIncludesCallback::FileSkipped(const FileEntryRef &SkippedFile, const
if (!DepOpts.ShowSkippedHeaderIncludes)
return;
- if (!DepOpts.IncludeSystemHeaders && isSystem(FileType))
+ if (!ShouldShowHeader(FileType))
return;
PrintHeaderInfo(OutputFile, SkippedFile.getName(), ShowDepth,
diff --git a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
index 208c6a8db159..f8fae82fba12 100644
--- a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
@@ -451,9 +451,11 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__STDC_VERSION__", "199409L");
} else {
// -- __cplusplus
- // FIXME: Use correct value for C++23.
- if (LangOpts.CPlusPlus2b)
- Builder.defineMacro("__cplusplus", "202101L");
+ if (LangOpts.CPlusPlus26)
+ // FIXME: Use correct value for C++26.
+ Builder.defineMacro("__cplusplus", "202400L");
+ else if (LangOpts.CPlusPlus23)
+ Builder.defineMacro("__cplusplus", "202302L");
// [C++20] The integer literal 202002L.
else if (LangOpts.CPlusPlus20)
Builder.defineMacro("__cplusplus", "202002L");
@@ -572,6 +574,9 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__CLANG_RDC__");
if (!LangOpts.HIP)
Builder.defineMacro("__CUDA__");
+ if (LangOpts.GPUDefaultStream ==
+ LangOptions::GPUDefaultStreamKind::PerThread)
+ Builder.defineMacro("CUDA_API_PER_THREAD_DEFAULT_STREAM");
}
if (LangOpts.HIP) {
Builder.defineMacro("__HIP__");
@@ -581,11 +586,20 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__HIP_MEMORY_SCOPE_WORKGROUP", "3");
Builder.defineMacro("__HIP_MEMORY_SCOPE_AGENT", "4");
Builder.defineMacro("__HIP_MEMORY_SCOPE_SYSTEM", "5");
- if (LangOpts.CUDAIsDevice)
+ if (LangOpts.CUDAIsDevice) {
Builder.defineMacro("__HIP_DEVICE_COMPILE__");
+ if (!TI.hasHIPImageSupport()) {
+ Builder.defineMacro("__HIP_NO_IMAGE_SUPPORT__", "1");
+ // Deprecated.
+ Builder.defineMacro("__HIP_NO_IMAGE_SUPPORT", "1");
+ }
+ }
if (LangOpts.GPUDefaultStream ==
- LangOptions::GPUDefaultStreamKind::PerThread)
+ LangOptions::GPUDefaultStreamKind::PerThread) {
+ Builder.defineMacro("__HIP_API_PER_THREAD_DEFAULT_STREAM__");
+ // Deprecated.
Builder.defineMacro("HIP_API_PER_THREAD_DEFAULT_STREAM");
+ }
}
}
@@ -606,7 +620,8 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_unicode_literals", "200710L");
Builder.defineMacro("__cpp_user_defined_literals", "200809L");
Builder.defineMacro("__cpp_lambdas", "200907L");
- Builder.defineMacro("__cpp_constexpr", LangOpts.CPlusPlus2b ? "202211L"
+ Builder.defineMacro("__cpp_constexpr", LangOpts.CPlusPlus26 ? "202306L"
+ : LangOpts.CPlusPlus23 ? "202211L"
: LangOpts.CPlusPlus20 ? "201907L"
: LangOpts.CPlusPlus17 ? "201603L"
: LangOpts.CPlusPlus14 ? "201304L"
@@ -614,8 +629,10 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_constexpr_in_decltype", "201711L");
Builder.defineMacro("__cpp_range_based_for",
LangOpts.CPlusPlus17 ? "201603L" : "200907");
- Builder.defineMacro("__cpp_static_assert",
- LangOpts.CPlusPlus17 ? "201411L" : "200410");
+ Builder.defineMacro("__cpp_static_assert", LangOpts.CPlusPlus26 ? "202306L"
+ : LangOpts.CPlusPlus17
+ ? "201411L"
+ : "200410");
Builder.defineMacro("__cpp_decltype", "200707L");
Builder.defineMacro("__cpp_attributes", "200809L");
Builder.defineMacro("__cpp_rvalue_references", "200610L");
@@ -681,7 +698,7 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
// Refer to the discussion of this at https://reviews.llvm.org/D128619.
Builder.defineMacro("__cpp_concepts", "201907L");
Builder.defineMacro("__cpp_conditional_explicit", "201806L");
- //Builder.defineMacro("__cpp_consteval", "201811L");
+ Builder.defineMacro("__cpp_consteval", "202211L");
Builder.defineMacro("__cpp_constexpr_dynamic_alloc", "201907L");
Builder.defineMacro("__cpp_constinit", "201907L");
Builder.defineMacro("__cpp_impl_coroutine", "201902L");
@@ -690,15 +707,15 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
//Builder.defineMacro("__cpp_modules", "201907L");
Builder.defineMacro("__cpp_using_enum", "201907L");
}
- // C++2b features.
- if (LangOpts.CPlusPlus2b) {
+ // C++23 features.
+ if (LangOpts.CPlusPlus23) {
Builder.defineMacro("__cpp_implicit_move", "202011L");
Builder.defineMacro("__cpp_size_t_suffix", "202011L");
Builder.defineMacro("__cpp_if_consteval", "202106L");
Builder.defineMacro("__cpp_multidimensional_subscript", "202211L");
}
- // We provide those C++2b features as extensions in earlier language modes, so
+ // We provide those C++23 features as extensions in earlier language modes, so
// we also define their feature test macros.
if (LangOpts.CPlusPlus11)
Builder.defineMacro("__cpp_static_call_operator", "202207L");
@@ -707,10 +724,6 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
if (LangOpts.Char8)
Builder.defineMacro("__cpp_char8_t", "202207L");
Builder.defineMacro("__cpp_impl_destroying_delete", "201806L");
-
- // TS features.
- if (LangOpts.Coroutines)
- Builder.defineMacro("__cpp_coroutines", "201703L");
}
/// InitializeOpenCLFeatureTestMacros - Define OpenCL macros based on target
@@ -794,6 +807,18 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES", "3");
Builder.defineMacro("__OPENCL_MEMORY_SCOPE_SUB_GROUP", "4");
+ // Define macros for floating-point data classes, used in __builtin_isfpclass.
+ Builder.defineMacro("__FPCLASS_SNAN", "0x0001");
+ Builder.defineMacro("__FPCLASS_QNAN", "0x0002");
+ Builder.defineMacro("__FPCLASS_NEGINF", "0x0004");
+ Builder.defineMacro("__FPCLASS_NEGNORMAL", "0x0008");
+ Builder.defineMacro("__FPCLASS_NEGSUBNORMAL", "0x0010");
+ Builder.defineMacro("__FPCLASS_NEGZERO", "0x0020");
+ Builder.defineMacro("__FPCLASS_POSZERO", "0x0040");
+ Builder.defineMacro("__FPCLASS_POSSUBNORMAL", "0x0080");
+ Builder.defineMacro("__FPCLASS_POSNORMAL", "0x0100");
+ Builder.defineMacro("__FPCLASS_POSINF", "0x0200");
+
// Support for #pragma redefine_extname (Sun compatibility)
Builder.defineMacro("__PRAGMA_REDEFINE_EXTNAME", "1");
@@ -1252,16 +1277,15 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
case 45:
Builder.defineMacro("_OPENMP", "201511");
break;
- case 51:
- Builder.defineMacro("_OPENMP", "202011");
+ case 50:
+ Builder.defineMacro("_OPENMP", "201811");
break;
case 52:
Builder.defineMacro("_OPENMP", "202111");
break;
- case 50:
- default:
- // Default version is OpenMP 5.0
- Builder.defineMacro("_OPENMP", "201811");
+ default: // case 51:
+ // Default version is OpenMP 5.1
+ Builder.defineMacro("_OPENMP", "202011");
break;
}
}
@@ -1301,6 +1325,10 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__GLIBCXX_BITSIZE_INT_N_0", "128");
}
+ // ELF targets define __ELF__
+ if (TI.getTriple().isOSBinFormatELF())
+ Builder.defineMacro("__ELF__");
+
// Get other target #defines.
TI.getTargetDefines(LangOpts, Builder);
}
@@ -1317,17 +1345,17 @@ void clang::InitializePreprocessor(
llvm::raw_string_ostream Predefines(PredefineBuffer);
MacroBuilder Builder(Predefines);
- // Emit line markers for various builtin sections of the file. We don't do
- // this in asm preprocessor mode, because "# 4" is not a line marker directive
- // in this mode.
- if (!PP.getLangOpts().AsmPreprocessor)
- Builder.append("# 1 \"<built-in>\" 3");
+ // Emit line markers for various builtin sections of the file. The 3 here
+ // marks <built-in> as being a system header, which suppresses warnings when
+ // the same macro is defined multiple times.
+ Builder.append("# 1 \"<built-in>\" 3");
// Install things like __POWERPC__, __GNUC__, etc into the macro table.
if (InitOpts.UsePredefines) {
// FIXME: This will create multiple definitions for most of the predefined
// macros. This is not the right way to handle this.
- if ((LangOpts.CUDA || LangOpts.OpenMPIsDevice || LangOpts.SYCLIsDevice) &&
+ if ((LangOpts.CUDA || LangOpts.OpenMPIsTargetDevice ||
+ LangOpts.SYCLIsDevice) &&
PP.getAuxTargetInfo())
InitializePredefinedMacros(*PP.getAuxTargetInfo(), LangOpts, FEOpts,
PP.getPreprocessorOpts(), Builder);
@@ -1359,8 +1387,7 @@ void clang::InitializePreprocessor(
// Add on the predefines from the driver. Wrap in a #line directive to report
// that they come from the command line.
- if (!PP.getLangOpts().AsmPreprocessor)
- Builder.append("# 1 \"<command line>\" 1");
+ Builder.append("# 1 \"<command line>\" 1");
// Process #define's and #undef's in the order they are given.
for (unsigned i = 0, e = InitOpts.Macros.size(); i != e; ++i) {
@@ -1372,8 +1399,7 @@ void clang::InitializePreprocessor(
}
// Exit the command line and go back to <built-in> (2 is LC_LEAVE).
- if (!PP.getLangOpts().AsmPreprocessor)
- Builder.append("# 1 \"<built-in>\" 2");
+ Builder.append("# 1 \"<built-in>\" 2");
// If -imacros are specified, include them now. These are processed before
// any -include directives.
diff --git a/contrib/llvm-project/clang/lib/Frontend/LayoutOverrideSource.cpp b/contrib/llvm-project/clang/lib/Frontend/LayoutOverrideSource.cpp
index 0d288db0632f..f474d4fe8fdc 100644
--- a/contrib/llvm-project/clang/lib/Frontend/LayoutOverrideSource.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/LayoutOverrideSource.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Frontend/LayoutOverrideSource.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/Support/raw_ostream.h"
#include <fstream>
@@ -26,6 +27,18 @@ static std::string parseName(StringRef S) {
return S.substr(0, Offset).str();
}
+/// Parse an unsigned integer and move S to the next non-digit character.
+static bool parseUnsigned(StringRef &S, unsigned long long &ULL) {
+ if (S.empty() || !isDigit(S[0]))
+ return false;
+ unsigned Idx = 1;
+ while (Idx < S.size() && isDigit(S[Idx]))
+ ++Idx;
+ (void)S.substr(0, Idx).getAsInteger(10, ULL);
+ S = S.substr(Idx);
+ return true;
+}
+
LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
std::ifstream Input(Filename.str().c_str());
if (!Input.is_open())
@@ -80,8 +93,8 @@ LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
LineStr = LineStr.substr(Pos + strlen(" Size:"));
unsigned long long Size = 0;
- (void)LineStr.getAsInteger(10, Size);
- CurrentLayout.Size = Size;
+ if (parseUnsigned(LineStr, Size))
+ CurrentLayout.Size = Size;
continue;
}
@@ -92,12 +105,13 @@ LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
LineStr = LineStr.substr(Pos + strlen("Alignment:"));
unsigned long long Alignment = 0;
- (void)LineStr.getAsInteger(10, Alignment);
- CurrentLayout.Align = Alignment;
+ if (parseUnsigned(LineStr, Alignment))
+ CurrentLayout.Align = Alignment;
continue;
}
- // Check for the size/alignment of the type.
+ // Check for the size/alignment of the type. The number following "sizeof="
+ // or "align=" indicates the number of bytes.
Pos = LineStr.find("sizeof=");
if (Pos != StringRef::npos) {
/* Skip past the sizeof= prefix. */
@@ -105,8 +119,8 @@ LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
// Parse size.
unsigned long long Size = 0;
- (void)LineStr.getAsInteger(10, Size);
- CurrentLayout.Size = Size;
+ if (parseUnsigned(LineStr, Size))
+ CurrentLayout.Size = Size * 8;
Pos = LineStr.find("align=");
if (Pos != StringRef::npos) {
@@ -115,8 +129,8 @@ LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
// Parse alignment.
unsigned long long Alignment = 0;
- (void)LineStr.getAsInteger(10, Alignment);
- CurrentLayout.Align = Alignment;
+ if (parseUnsigned(LineStr, Alignment))
+ CurrentLayout.Align = Alignment * 8;
}
continue;
@@ -124,25 +138,51 @@ LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
// Check for the field offsets of the type.
Pos = LineStr.find("FieldOffsets: [");
- if (Pos == StringRef::npos)
- continue;
+ if (Pos != StringRef::npos) {
+ LineStr = LineStr.substr(Pos + strlen("FieldOffsets: ["));
+ while (!LineStr.empty() && isDigit(LineStr[0])) {
+ unsigned long long Offset = 0;
+ if (parseUnsigned(LineStr, Offset))
+ CurrentLayout.FieldOffsets.push_back(Offset);
- LineStr = LineStr.substr(Pos + strlen("FieldOffsets: ["));
- while (!LineStr.empty() && isDigit(LineStr[0])) {
- // Parse this offset.
- unsigned Idx = 1;
- while (Idx < LineStr.size() && isDigit(LineStr[Idx]))
- ++Idx;
+ // Skip over this offset, the following comma, and any spaces.
+ LineStr = LineStr.substr(1);
+ while (!LineStr.empty() && isWhitespace(LineStr[0]))
+ LineStr = LineStr.substr(1);
+ }
+ }
- unsigned long long Offset = 0;
- (void)LineStr.substr(0, Idx).getAsInteger(10, Offset);
+ // Check for the virtual base offsets.
+ Pos = LineStr.find("VBaseOffsets: [");
+ if (Pos != StringRef::npos) {
+ LineStr = LineStr.substr(Pos + strlen("VBaseOffsets: ["));
+ while (!LineStr.empty() && isDigit(LineStr[0])) {
+ unsigned long long Offset = 0;
+ if (parseUnsigned(LineStr, Offset))
+ CurrentLayout.VBaseOffsets.push_back(CharUnits::fromQuantity(Offset));
- CurrentLayout.FieldOffsets.push_back(Offset);
+ // Skip over this offset, the following comma, and any spaces.
+ LineStr = LineStr.substr(1);
+ while (!LineStr.empty() && isWhitespace(LineStr[0]))
+ LineStr = LineStr.substr(1);
+ }
+ continue;
+ }
- // Skip over this offset, the following comma, and any spaces.
- LineStr = LineStr.substr(Idx + 1);
- while (!LineStr.empty() && isWhitespace(LineStr[0]))
+ // Check for the base offsets.
+ Pos = LineStr.find("BaseOffsets: [");
+ if (Pos != StringRef::npos) {
+ LineStr = LineStr.substr(Pos + strlen("BaseOffsets: ["));
+ while (!LineStr.empty() && isDigit(LineStr[0])) {
+ unsigned long long Offset = 0;
+ if (parseUnsigned(LineStr, Offset))
+ CurrentLayout.BaseOffsets.push_back(CharUnits::fromQuantity(Offset));
+
+ // Skip over this offset, the following comma, and any spaces.
LineStr = LineStr.substr(1);
+ while (!LineStr.empty() && isWhitespace(LineStr[0]))
+ LineStr = LineStr.substr(1);
+ }
}
}
@@ -182,6 +222,24 @@ LayoutOverrideSource::layoutRecordType(const RecordDecl *Record,
if (NumFields != Known->second.FieldOffsets.size())
return false;
+ // Provide base offsets.
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(Record)) {
+ unsigned NumNB = 0;
+ unsigned NumVB = 0;
+ for (const auto &I : RD->vbases()) {
+ if (NumVB >= Known->second.VBaseOffsets.size())
+ continue;
+ const CXXRecordDecl *VBase = I.getType()->getAsCXXRecordDecl();
+ VirtualBaseOffsets[VBase] = Known->second.VBaseOffsets[NumVB++];
+ }
+ for (const auto &I : RD->bases()) {
+ if (I.isVirtual() || NumNB >= Known->second.BaseOffsets.size())
+ continue;
+ const CXXRecordDecl *Base = I.getType()->getAsCXXRecordDecl();
+ BaseOffsets[Base] = Known->second.BaseOffsets[NumNB++];
+ }
+ }
+
Size = Known->second.Size;
Alignment = Known->second.Align;
return true;
diff --git a/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp b/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
index b4b312bc93b9..939e611e5489 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
@@ -72,37 +72,12 @@ struct ModuleDependencyMMCallbacks : public ModuleMapCallbacks {
if (llvm::sys::path::is_absolute(HeaderPath))
Collector.addFile(HeaderPath);
}
- void moduleMapAddUmbrellaHeader(FileManager *FileMgr,
- const FileEntry *Header) override {
- StringRef HeaderFilename = Header->getName();
- moduleMapAddHeader(HeaderFilename);
- // The FileManager can find and cache the symbolic link for a framework
- // header before its real path, this means a module can have some of its
- // headers to use other paths. Although this is usually not a problem, it's
- // inconsistent, and not collecting the original path header leads to
- // umbrella clashes while rebuilding modules in the crash reproducer. For
- // example:
- // ApplicationServices.framework/Frameworks/ImageIO.framework/ImageIO.h
- // instead of:
- // ImageIO.framework/ImageIO.h
- //
- // FIXME: this shouldn't be necessary once we have FileName instances
- // around instead of FileEntry ones. For now, make sure we collect all
- // that we need for the reproducer to work correctly.
- StringRef UmbreallDirFromHeader =
- llvm::sys::path::parent_path(HeaderFilename);
- StringRef UmbrellaDir = Header->getDir()->getName();
- if (!UmbrellaDir.equals(UmbreallDirFromHeader)) {
- SmallString<128> AltHeaderFilename;
- llvm::sys::path::append(AltHeaderFilename, UmbrellaDir,
- llvm::sys::path::filename(HeaderFilename));
- if (FileMgr->getFile(AltHeaderFilename))
- moduleMapAddHeader(AltHeaderFilename);
- }
+ void moduleMapAddUmbrellaHeader(FileEntryRef Header) override {
+ moduleMapAddHeader(Header.getNameAsRequested());
}
};
-}
+} // namespace
void ModuleDependencyCollector::attachToASTReader(ASTReader &R) {
R.addListener(
diff --git a/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp b/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
index 579a0b8b614d..5ffb54e2fdf6 100644
--- a/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
@@ -113,16 +113,16 @@ public:
// Reconstruct the filenames that would satisfy this directive...
llvm::SmallString<256> Buf;
- auto NotFoundRelativeTo = [&](const DirectoryEntry *DE) {
- Buf = DE->getName();
+ auto NotFoundRelativeTo = [&](DirectoryEntryRef DE) {
+ Buf = DE.getName();
llvm::sys::path::append(Buf, FileName);
llvm::sys::path::remove_dots(Buf, /*remove_dot_dot=*/true);
Out.insert(Buf);
};
// ...relative to the including file.
if (!IsAngled) {
- if (const FileEntry *IncludingFile =
- SM.getFileEntryForID(SM.getFileID(IncludeTok.getLocation())))
+ if (OptionalFileEntryRef IncludingFile =
+ SM.getFileEntryRefForID(SM.getFileID(IncludeTok.getLocation())))
if (IncludingFile->getDir())
NotFoundRelativeTo(IncludingFile->getDir());
}
@@ -132,7 +132,7 @@ public:
Search.search_dir_end())) {
// No support for frameworks or header maps yet.
if (Dir.isNormalDir())
- NotFoundRelativeTo(Dir.getDir());
+ NotFoundRelativeTo(*Dir.getDirRef());
}
}
};
@@ -197,20 +197,32 @@ void TemporaryFiles::removeFile(StringRef File) {
class TempPCHFile {
public:
// A main method used to construct TempPCHFile.
- static std::unique_ptr<TempPCHFile> create() {
+ static std::unique_ptr<TempPCHFile> create(StringRef StoragePath) {
// FIXME: This is a hack so that we can override the preamble file during
// crash-recovery testing, which is the only case where the preamble files
// are not necessarily cleaned up.
if (const char *TmpFile = ::getenv("CINDEXTEST_PREAMBLE_FILE"))
return std::unique_ptr<TempPCHFile>(new TempPCHFile(TmpFile));
- llvm::SmallString<64> File;
- // Using a version of createTemporaryFile with a file descriptor guarantees
+ llvm::SmallString<128> File;
+ // Using the versions of createTemporaryFile() and
+ // createUniqueFile() with a file descriptor guarantees
// that we would never get a race condition in a multi-threaded setting
// (i.e., multiple threads getting the same temporary path).
int FD;
- if (auto EC =
- llvm::sys::fs::createTemporaryFile("preamble", "pch", FD, File))
+ std::error_code EC;
+ if (StoragePath.empty())
+ EC = llvm::sys::fs::createTemporaryFile("preamble", "pch", FD, File);
+ else {
+ llvm::SmallString<128> TempPath = StoragePath;
+ // Use the same filename model as fs::createTemporaryFile().
+ llvm::sys::path::append(TempPath, "preamble-%%%%%%.pch");
+ namespace fs = llvm::sys::fs;
+ // Use the same owner-only file permissions as fs::createTemporaryFile().
+ EC = fs::createUniqueFile(TempPath, FD, File, fs::OF_None,
+ fs::owner_read | fs::owner_write);
+ }
+ if (EC)
return nullptr;
// We only needed to make sure the file exists, close the file right away.
llvm::sys::Process::SafelyCloseFileDescriptor(FD);
@@ -403,7 +415,7 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
DiagnosticsEngine &Diagnostics,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
std::shared_ptr<PCHContainerOperations> PCHContainerOps, bool StoreInMemory,
- PreambleCallbacks &Callbacks) {
+ StringRef StoragePath, PreambleCallbacks &Callbacks) {
assert(VFS && "VFS is null");
auto PreambleInvocation = std::make_shared<CompilerInvocation>(Invocation);
@@ -418,7 +430,8 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
} else {
// Create a temporary file for the precompiled preamble. In rare
// circumstances, this can fail.
- std::unique_ptr<TempPCHFile> PreamblePCHFile = TempPCHFile::create();
+ std::unique_ptr<TempPCHFile> PreamblePCHFile =
+ TempPCHFile::create(StoragePath);
if (!PreamblePCHFile)
return BuildPreambleError::CouldntCreateTempFile;
Storage = PCHStorage::file(std::move(PreamblePCHFile));
diff --git a/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
index ffa85e523c03..1b262d9e6f7c 100644
--- a/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -663,7 +663,8 @@ void PrintPPOutputPPCallbacks::HandleWhitespaceBeforeTok(const Token &Tok,
// them.
if (Tok.is(tok::eof) ||
(Tok.isAnnotation() && !Tok.is(tok::annot_header_unit) &&
- !Tok.is(tok::annot_module_begin) && !Tok.is(tok::annot_module_end)))
+ !Tok.is(tok::annot_module_begin) && !Tok.is(tok::annot_module_end) &&
+ !Tok.is(tok::annot_repl_input_end)))
return;
// EmittedDirectiveOnThisLine takes priority over RequireSameLine.
@@ -819,6 +820,9 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
// -traditional-cpp the lexer keeps /all/ whitespace, including comments.
PP.Lex(Tok);
continue;
+ } else if (Tok.is(tok::annot_repl_input_end)) {
+ PP.Lex(Tok);
+ continue;
} else if (Tok.is(tok::eod)) {
// Don't print end of directive tokens, since they are typically newlines
// that mess up our line tracking. These come from unknown pre-processor
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp
index 6685109f8d33..14569013b92c 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp
@@ -165,10 +165,11 @@ RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
if (std::unique_ptr<raw_ostream> OS =
CI.createDefaultOutputFile(false, InFile, "cpp")) {
if (CI.getLangOpts().ObjCRuntime.isNonFragile())
- return CreateModernObjCRewriter(
- std::string(InFile), std::move(OS), CI.getDiagnostics(),
- CI.getLangOpts(), CI.getDiagnosticOpts().NoRewriteMacros,
- (CI.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo));
+ return CreateModernObjCRewriter(std::string(InFile), std::move(OS),
+ CI.getDiagnostics(), CI.getLangOpts(),
+ CI.getDiagnosticOpts().NoRewriteMacros,
+ (CI.getCodeGenOpts().getDebugInfo() !=
+ llvm::codegenoptions::NoDebugInfo));
return CreateObjCRewriter(std::string(InFile), std::move(OS),
CI.getDiagnostics(), CI.getLangOpts(),
CI.getDiagnosticOpts().NoRewriteMacros);
diff --git a/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
index fc8fce4b42b8..b76728acb907 100644
--- a/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -37,14 +37,12 @@ public:
AbbreviationMap() {}
void set(unsigned recordID, unsigned abbrevID) {
- assert(Abbrevs.find(recordID) == Abbrevs.end()
- && "Abbreviation already set.");
+ assert(!Abbrevs.contains(recordID) && "Abbreviation already set.");
Abbrevs[recordID] = abbrevID;
}
unsigned get(unsigned recordID) {
- assert(Abbrevs.find(recordID) != Abbrevs.end() &&
- "Abbreviation not set.");
+ assert(Abbrevs.contains(recordID) && "Abbreviation not set.");
return Abbrevs[recordID];
}
};
diff --git a/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp b/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
index 809d5309d1af..3a3cc246d3af 100644
--- a/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
@@ -91,89 +91,108 @@ static int bytesSincePreviousTabOrLineBegin(StringRef SourceLine, size_t i) {
/// printableTextForNextCharacter.
///
/// \param SourceLine The line of source
-/// \param i Pointer to byte index,
+/// \param I Pointer to byte index,
/// \param TabStop used to expand tabs
/// \return pair(printable text, 'true' iff original text was printable)
///
static std::pair<SmallString<16>, bool>
-printableTextForNextCharacter(StringRef SourceLine, size_t *i,
+printableTextForNextCharacter(StringRef SourceLine, size_t *I,
unsigned TabStop) {
- assert(i && "i must not be null");
- assert(*i<SourceLine.size() && "must point to a valid index");
+ assert(I && "I must not be null");
+ assert(*I < SourceLine.size() && "must point to a valid index");
- if (SourceLine[*i]=='\t') {
+ if (SourceLine[*I] == '\t') {
assert(0 < TabStop && TabStop <= DiagnosticOptions::MaxTabStop &&
"Invalid -ftabstop value");
- unsigned col = bytesSincePreviousTabOrLineBegin(SourceLine, *i);
- unsigned NumSpaces = TabStop - col%TabStop;
+ unsigned Col = bytesSincePreviousTabOrLineBegin(SourceLine, *I);
+ unsigned NumSpaces = TabStop - (Col % TabStop);
assert(0 < NumSpaces && NumSpaces <= TabStop
&& "Invalid computation of space amt");
- ++(*i);
+ ++(*I);
- SmallString<16> expandedTab;
- expandedTab.assign(NumSpaces, ' ');
- return std::make_pair(expandedTab, true);
+ SmallString<16> ExpandedTab;
+ ExpandedTab.assign(NumSpaces, ' ');
+ return std::make_pair(ExpandedTab, true);
}
- unsigned char const *begin, *end;
- begin = reinterpret_cast<unsigned char const *>(&*(SourceLine.begin() + *i));
- end = begin + (SourceLine.size() - *i);
-
- if (llvm::isLegalUTF8Sequence(begin, end)) {
- llvm::UTF32 c;
- llvm::UTF32 *cptr = &c;
- unsigned char const *original_begin = begin;
- unsigned char const *cp_end =
- begin + llvm::getNumBytesForUTF8(SourceLine[*i]);
-
- llvm::ConversionResult res = llvm::ConvertUTF8toUTF32(
- &begin, cp_end, &cptr, cptr + 1, llvm::strictConversion);
- (void)res;
- assert(llvm::conversionOK == res);
- assert(0 < begin-original_begin
- && "we must be further along in the string now");
- *i += begin-original_begin;
-
- if (!llvm::sys::locale::isPrint(c)) {
- // If next character is valid UTF-8, but not printable
- SmallString<16> expandedCP("<U+>");
- while (c) {
- expandedCP.insert(expandedCP.begin()+3, llvm::hexdigit(c%16));
- c/=16;
- }
- while (expandedCP.size() < 8)
- expandedCP.insert(expandedCP.begin()+3, llvm::hexdigit(0));
- return std::make_pair(expandedCP, false);
- }
-
- // If next character is valid UTF-8, and printable
- return std::make_pair(SmallString<16>(original_begin, cp_end), true);
+ const unsigned char *Begin = SourceLine.bytes_begin() + *I;
+ // Fast path for the common ASCII case.
+ if (*Begin < 0x80 && llvm::sys::locale::isPrint(*Begin)) {
+ ++(*I);
+ return std::make_pair(SmallString<16>(Begin, Begin + 1), true);
+ }
+ unsigned CharSize = llvm::getNumBytesForUTF8(*Begin);
+ const unsigned char *End = Begin + CharSize;
+
+ // Convert it to UTF32 and check if it's printable.
+ if (End <= SourceLine.bytes_end() && llvm::isLegalUTF8Sequence(Begin, End)) {
+ llvm::UTF32 C;
+ llvm::UTF32 *CPtr = &C;
+
+ // Begin and end before conversion.
+ unsigned char const *OriginalBegin = Begin;
+ llvm::ConversionResult Res = llvm::ConvertUTF8toUTF32(
+ &Begin, End, &CPtr, CPtr + 1, llvm::strictConversion);
+ (void)Res;
+ assert(Res == llvm::conversionOK);
+ assert(OriginalBegin < Begin);
+ assert((Begin - OriginalBegin) == CharSize);
+
+ (*I) += (Begin - OriginalBegin);
+
+ // Valid, multi-byte, printable UTF8 character.
+ if (llvm::sys::locale::isPrint(C))
+ return std::make_pair(SmallString<16>(OriginalBegin, End), true);
+
+ // Valid but not printable.
+ SmallString<16> Str("<U+>");
+ while (C) {
+ Str.insert(Str.begin() + 3, llvm::hexdigit(C % 16));
+ C /= 16;
+ }
+ while (Str.size() < 8)
+ Str.insert(Str.begin() + 3, llvm::hexdigit(0));
+ return std::make_pair(Str, false);
}
- // If next byte is not valid UTF-8 (and therefore not printable)
- SmallString<16> expandedByte("<XX>");
- unsigned char byte = SourceLine[*i];
- expandedByte[1] = llvm::hexdigit(byte / 16);
- expandedByte[2] = llvm::hexdigit(byte % 16);
- ++(*i);
- return std::make_pair(expandedByte, false);
+ // Otherwise, not printable since it's not valid UTF8.
+ SmallString<16> ExpandedByte("<XX>");
+ unsigned char Byte = SourceLine[*I];
+ ExpandedByte[1] = llvm::hexdigit(Byte / 16);
+ ExpandedByte[2] = llvm::hexdigit(Byte % 16);
+ ++(*I);
+ return std::make_pair(ExpandedByte, false);
}
static void expandTabs(std::string &SourceLine, unsigned TabStop) {
- size_t i = SourceLine.size();
- while (i>0) {
- i--;
- if (SourceLine[i]!='\t')
+ size_t I = SourceLine.size();
+ while (I > 0) {
+ I--;
+ if (SourceLine[I] != '\t')
continue;
- size_t tmp_i = i;
- std::pair<SmallString<16>,bool> res
- = printableTextForNextCharacter(SourceLine, &tmp_i, TabStop);
- SourceLine.replace(i, 1, res.first.c_str());
+ size_t TmpI = I;
+ auto [Str, Printable] =
+ printableTextForNextCharacter(SourceLine, &TmpI, TabStop);
+ SourceLine.replace(I, 1, Str.c_str());
}
}
-/// This function takes a raw source line and produces a mapping from the bytes
+/// \p BytesOut:
+/// A mapping from columns to the byte of the source line that produced the
+/// character displaying at that column. This is the inverse of \p ColumnsOut.
+///
+/// The last element in the array is the number of bytes in the source string.
+///
+/// example: (given a tabstop of 8)
+///
+/// "a \t \u3042" -> {0,1,2,-1,-1,-1,-1,-1,3,4,-1,7}
+///
+/// (\\u3042 is represented in UTF-8 by three bytes and takes two columns to
+/// display)
+///
+/// \p ColumnsOut:
+/// A mapping from the bytes
/// of the printable representation of the line to the columns those printable
/// characters will appear at (numbering the first column as 0).
///
@@ -195,60 +214,34 @@ static void expandTabs(std::string &SourceLine, unsigned TabStop) {
///
/// (\\u3042 is represented in UTF-8 by three bytes and takes two columns to
/// display)
-static void byteToColumn(StringRef SourceLine, unsigned TabStop,
- SmallVectorImpl<int> &out) {
- out.clear();
+static void genColumnByteMapping(StringRef SourceLine, unsigned TabStop,
+ SmallVectorImpl<int> &BytesOut,
+ SmallVectorImpl<int> &ColumnsOut) {
+ assert(BytesOut.empty());
+ assert(ColumnsOut.empty());
if (SourceLine.empty()) {
- out.resize(1u,0);
+ BytesOut.resize(1u, 0);
+ ColumnsOut.resize(1u, 0);
return;
}
- out.resize(SourceLine.size()+1, -1);
-
- int columns = 0;
- size_t i = 0;
- while (i<SourceLine.size()) {
- out[i] = columns;
- std::pair<SmallString<16>,bool> res
- = printableTextForNextCharacter(SourceLine, &i, TabStop);
- columns += llvm::sys::locale::columnWidth(res.first);
- }
- out.back() = columns;
-}
-
-/// This function takes a raw source line and produces a mapping from columns
-/// to the byte of the source line that produced the character displaying at
-/// that column. This is the inverse of the mapping produced by byteToColumn()
-///
-/// The last element in the array is the number of bytes in the source string
-///
-/// example: (given a tabstop of 8)
-///
-/// "a \t \u3042" -> {0,1,2,-1,-1,-1,-1,-1,3,4,-1,7}
-///
-/// (\\u3042 is represented in UTF-8 by three bytes and takes two columns to
-/// display)
-static void columnToByte(StringRef SourceLine, unsigned TabStop,
- SmallVectorImpl<int> &out) {
- out.clear();
-
- if (SourceLine.empty()) {
- out.resize(1u, 0);
- return;
+ ColumnsOut.resize(SourceLine.size() + 1, -1);
+
+ int Columns = 0;
+ size_t I = 0;
+ while (I < SourceLine.size()) {
+ ColumnsOut[I] = Columns;
+ BytesOut.resize(Columns + 1, -1);
+ BytesOut.back() = I;
+ auto [Str, Printable] =
+ printableTextForNextCharacter(SourceLine, &I, TabStop);
+ Columns += llvm::sys::locale::columnWidth(Str);
}
- int columns = 0;
- size_t i = 0;
- while (i<SourceLine.size()) {
- out.resize(columns+1, -1);
- out.back() = i;
- std::pair<SmallString<16>,bool> res
- = printableTextForNextCharacter(SourceLine, &i, TabStop);
- columns += llvm::sys::locale::columnWidth(res.first);
- }
- out.resize(columns+1, -1);
- out.back() = i;
+ ColumnsOut.back() = Columns;
+ BytesOut.resize(Columns + 1, -1);
+ BytesOut.back() = I;
}
namespace {
@@ -256,8 +249,7 @@ struct SourceColumnMap {
SourceColumnMap(StringRef SourceLine, unsigned TabStop)
: m_SourceLine(SourceLine) {
- ::byteToColumn(SourceLine, TabStop, m_byteToColumn);
- ::columnToByte(SourceLine, TabStop, m_columnToByte);
+ genColumnByteMapping(SourceLine, TabStop, m_columnToByte, m_byteToColumn);
assert(m_byteToColumn.size()==SourceLine.size()+1);
assert(0 < m_byteToColumn.size() && 0 < m_columnToByte.size());
@@ -471,9 +463,7 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
CaretEnd = map.byteToColumn(SourceEnd) + CaretColumnsOutsideSource;
// [CaretStart, CaretEnd) is the slice we want. Update the various
- // output lines to show only this slice, with two-space padding
- // before the lines so that it looks nicer.
-
+ // output lines to show only this slice.
assert(CaretStart!=(unsigned)-1 && CaretEnd!=(unsigned)-1 &&
SourceStart!=(unsigned)-1 && SourceEnd!=(unsigned)-1);
assert(SourceStart <= SourceEnd);
@@ -605,21 +595,13 @@ static unsigned findEndOfWord(unsigned Start, StringRef Str,
/// Str will be printed. This will be non-zero when part of the first
/// line has already been printed.
/// \param Bold if the current text should be bold
-/// \param Indentation the number of spaces to indent any lines beyond
-/// the first line.
/// \returns true if word-wrapping was required, or false if the
/// string fit on the first line.
-static bool printWordWrapped(raw_ostream &OS, StringRef Str,
- unsigned Columns,
- unsigned Column = 0,
- bool Bold = false,
- unsigned Indentation = WordWrapIndentation) {
+static bool printWordWrapped(raw_ostream &OS, StringRef Str, unsigned Columns,
+ unsigned Column, bool Bold) {
const unsigned Length = std::min(Str.find('\n'), Str.size());
bool TextNormal = true;
- // The string used to indent each line.
- SmallString<16> IndentStr;
- IndentStr.assign(Indentation, ' ');
bool Wrapped = false;
for (unsigned WordStart = 0, WordEnd; WordStart < Length;
WordStart = WordEnd) {
@@ -648,10 +630,10 @@ static bool printWordWrapped(raw_ostream &OS, StringRef Str,
// This word does not fit on the current line, so wrap to the next
// line.
OS << '\n';
- OS.write(&IndentStr[0], Indentation);
+ OS.indent(WordWrapIndentation);
applyTemplateHighlighting(OS, Str.substr(WordStart, WordLength),
TextNormal, Bold);
- Column = Indentation + WordLength;
+ Column = WordWrapIndentation + WordLength;
Wrapped = true;
}
@@ -787,7 +769,7 @@ void TextDiagnostic::emitFilename(StringRef Filename, const SourceManager &SM) {
/// Print out the file/line/column information and include trace.
///
-/// This method handlen the emission of the diagnostic location information.
+/// This method handles the emission of the diagnostic location information.
/// This includes extracting as much location information as is present for
/// the diagnostic and printing it, as well as any include stack or source
/// ranges necessary.
@@ -796,8 +778,7 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
ArrayRef<CharSourceRange> Ranges) {
if (PLoc.isInvalid()) {
// At least print the file name if available:
- FileID FID = Loc.getFileID();
- if (FID.isValid()) {
+ if (FileID FID = Loc.getFileID(); FID.isValid()) {
if (const FileEntry *FE = Loc.getFileEntry()) {
emitFilename(FE->getName(), Loc.getManager());
OS << ": ";
@@ -855,31 +836,26 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
if (DiagOpts->ShowSourceRanges && !Ranges.empty()) {
FileID CaretFileID = Loc.getExpansionLoc().getFileID();
bool PrintedRange = false;
+ const SourceManager &SM = Loc.getManager();
- for (ArrayRef<CharSourceRange>::const_iterator RI = Ranges.begin(),
- RE = Ranges.end();
- RI != RE; ++RI) {
+ for (const auto &R : Ranges) {
// Ignore invalid ranges.
- if (!RI->isValid()) continue;
+ if (!R.isValid())
+ continue;
- auto &SM = Loc.getManager();
- SourceLocation B = SM.getExpansionLoc(RI->getBegin());
- CharSourceRange ERange = SM.getExpansionRange(RI->getEnd());
+ SourceLocation B = SM.getExpansionLoc(R.getBegin());
+ CharSourceRange ERange = SM.getExpansionRange(R.getEnd());
SourceLocation E = ERange.getEnd();
- bool IsTokenRange = ERange.isTokenRange();
- std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(B);
- std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(E);
-
- // If the start or end of the range is in another file, just discard
- // it.
- if (BInfo.first != CaretFileID || EInfo.first != CaretFileID)
+ // If the start or end of the range is in another file, just
+ // discard it.
+ if (SM.getFileID(B) != CaretFileID || SM.getFileID(E) != CaretFileID)
continue;
// Add in the length of the token, so that we cover multi-char
// tokens.
unsigned TokSize = 0;
- if (IsTokenRange)
+ if (ERange.isTokenRange())
TokSize = Lexer::MeasureTokenLength(E, SM, LangOpts);
FullSourceLoc BF(B, SM), EF(E, SM);
@@ -897,10 +873,11 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
}
void TextDiagnostic::emitIncludeLocation(FullSourceLoc Loc, PresumedLoc PLoc) {
- if (DiagOpts->ShowLocation && PLoc.isValid())
- OS << "In file included from " << PLoc.getFilename() << ':'
- << PLoc.getLine() << ":\n";
- else
+ if (DiagOpts->ShowLocation && PLoc.isValid()) {
+ OS << "In file included from ";
+ emitFilename(PLoc.getFilename(), Loc.getManager());
+ OS << ':' << PLoc.getLine() << ":\n";
+ } else
OS << "In included file:\n";
}
@@ -974,87 +951,43 @@ maybeAddRange(std::pair<unsigned, unsigned> A, std::pair<unsigned, unsigned> B,
return A;
}
-/// Highlight a SourceRange (with ~'s) for any characters on LineNo.
-static void highlightRange(const CharSourceRange &R,
- unsigned LineNo, FileID FID,
- const SourceColumnMap &map,
- std::string &CaretLine,
- const SourceManager &SM,
- const LangOptions &LangOpts) {
- if (!R.isValid()) return;
-
- SourceLocation Begin = R.getBegin();
- SourceLocation End = R.getEnd();
-
- unsigned StartLineNo = SM.getExpansionLineNumber(Begin);
- if (StartLineNo > LineNo || SM.getFileID(Begin) != FID)
- return; // No intersection.
-
- unsigned EndLineNo = SM.getExpansionLineNumber(End);
- if (EndLineNo < LineNo || SM.getFileID(End) != FID)
- return; // No intersection.
-
- // Compute the column number of the start.
- unsigned StartColNo = 0;
- if (StartLineNo == LineNo) {
- StartColNo = SM.getExpansionColumnNumber(Begin);
- if (StartColNo) --StartColNo; // Zero base the col #.
- }
-
- // Compute the column number of the end.
- unsigned EndColNo = map.getSourceLine().size();
- if (EndLineNo == LineNo) {
- EndColNo = SM.getExpansionColumnNumber(End);
- if (EndColNo) {
- --EndColNo; // Zero base the col #.
-
- // Add in the length of the token, so that we cover multi-char tokens if
- // this is a token range.
- if (R.isTokenRange())
- EndColNo += Lexer::MeasureTokenLength(End, SM, LangOpts);
- } else {
- EndColNo = CaretLine.size();
- }
- }
-
- assert(StartColNo <= EndColNo && "Invalid range!");
-
- // Check that a token range does not highlight only whitespace.
- if (R.isTokenRange()) {
- // Pick the first non-whitespace column.
- while (StartColNo < map.getSourceLine().size() &&
- (map.getSourceLine()[StartColNo] == ' ' ||
- map.getSourceLine()[StartColNo] == '\t'))
- StartColNo = map.startOfNextColumn(StartColNo);
-
- // Pick the last non-whitespace column.
- if (EndColNo > map.getSourceLine().size())
- EndColNo = map.getSourceLine().size();
- while (EndColNo &&
- (map.getSourceLine()[EndColNo-1] == ' ' ||
- map.getSourceLine()[EndColNo-1] == '\t'))
- EndColNo = map.startOfPreviousColumn(EndColNo);
-
- // If the start/end passed each other, then we are trying to highlight a
- // range that just exists in whitespace. That most likely means we have
- // a multi-line highlighting range that covers a blank line.
- if (StartColNo > EndColNo) {
- assert(StartLineNo != EndLineNo && "trying to highlight whitespace");
- StartColNo = EndColNo;
- }
- }
+struct LineRange {
+ unsigned LineNo;
+ unsigned StartCol;
+ unsigned EndCol;
+};
- assert(StartColNo <= map.getSourceLine().size() && "Invalid range!");
- assert(EndColNo <= map.getSourceLine().size() && "Invalid range!");
+/// Highlight \p R (with ~'s) on the current source line.
+static void highlightRange(const LineRange &R, const SourceColumnMap &Map,
+ std::string &CaretLine) {
+ // Pick the first non-whitespace column.
+ unsigned StartColNo = R.StartCol;
+ while (StartColNo < Map.getSourceLine().size() &&
+ (Map.getSourceLine()[StartColNo] == ' ' ||
+ Map.getSourceLine()[StartColNo] == '\t'))
+ StartColNo = Map.startOfNextColumn(StartColNo);
+
+ // Pick the last non-whitespace column.
+ unsigned EndColNo =
+ std::min(static_cast<size_t>(R.EndCol), Map.getSourceLine().size());
+ while (EndColNo && (Map.getSourceLine()[EndColNo - 1] == ' ' ||
+ Map.getSourceLine()[EndColNo - 1] == '\t'))
+ EndColNo = Map.startOfPreviousColumn(EndColNo);
+
+ // If the start/end passed each other, then we are trying to highlight a
+ // range that just exists in whitespace. That most likely means we have
+ // a multi-line highlighting range that covers a blank line.
+ if (StartColNo > EndColNo)
+ return;
// Fill the range with ~'s.
- StartColNo = map.byteToContainingColumn(StartColNo);
- EndColNo = map.byteToContainingColumn(EndColNo);
+ StartColNo = Map.byteToContainingColumn(StartColNo);
+ EndColNo = Map.byteToContainingColumn(EndColNo);
assert(StartColNo <= EndColNo && "Invalid range!");
if (CaretLine.size() < EndColNo)
- CaretLine.resize(EndColNo,' ');
- std::fill(CaretLine.begin()+StartColNo,CaretLine.begin()+EndColNo,'~');
+ CaretLine.resize(EndColNo, ' ');
+ std::fill(CaretLine.begin() + StartColNo, CaretLine.begin() + EndColNo, '~');
}
static std::string buildFixItInsertionLine(FileID FID,
@@ -1068,51 +1001,51 @@ static std::string buildFixItInsertionLine(FileID FID,
return FixItInsertionLine;
unsigned PrevHintEndCol = 0;
- for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
- I != E; ++I) {
- if (!I->CodeToInsert.empty()) {
- // We have an insertion hint. Determine whether the inserted
- // code contains no newlines and is on the same line as the caret.
- std::pair<FileID, unsigned> HintLocInfo
- = SM.getDecomposedExpansionLoc(I->RemoveRange.getBegin());
- if (FID == HintLocInfo.first &&
- LineNo == SM.getLineNumber(HintLocInfo.first, HintLocInfo.second) &&
- StringRef(I->CodeToInsert).find_first_of("\n\r") == StringRef::npos) {
- // Insert the new code into the line just below the code
- // that the user wrote.
- // Note: When modifying this function, be very careful about what is a
- // "column" (printed width, platform-dependent) and what is a
- // "byte offset" (SourceManager "column").
- unsigned HintByteOffset
- = SM.getColumnNumber(HintLocInfo.first, HintLocInfo.second) - 1;
-
- // The hint must start inside the source or right at the end
- assert(HintByteOffset < static_cast<unsigned>(map.bytes())+1);
- unsigned HintCol = map.byteToContainingColumn(HintByteOffset);
-
- // If we inserted a long previous hint, push this one forwards, and add
- // an extra space to show that this is not part of the previous
- // completion. This is sort of the best we can do when two hints appear
- // to overlap.
- //
- // Note that if this hint is located immediately after the previous
- // hint, no space will be added, since the location is more important.
- if (HintCol < PrevHintEndCol)
- HintCol = PrevHintEndCol + 1;
-
- // This should NOT use HintByteOffset, because the source might have
- // Unicode characters in earlier columns.
- unsigned NewFixItLineSize = FixItInsertionLine.size() +
- (HintCol - PrevHintEndCol) + I->CodeToInsert.size();
- if (NewFixItLineSize > FixItInsertionLine.size())
- FixItInsertionLine.resize(NewFixItLineSize, ' ');
-
- std::copy(I->CodeToInsert.begin(), I->CodeToInsert.end(),
- FixItInsertionLine.end() - I->CodeToInsert.size());
-
- PrevHintEndCol =
- HintCol + llvm::sys::locale::columnWidth(I->CodeToInsert);
- }
+ for (const auto &H : Hints) {
+ if (H.CodeToInsert.empty())
+ continue;
+
+ // We have an insertion hint. Determine whether the inserted
+ // code contains no newlines and is on the same line as the caret.
+ std::pair<FileID, unsigned> HintLocInfo =
+ SM.getDecomposedExpansionLoc(H.RemoveRange.getBegin());
+ if (FID == HintLocInfo.first &&
+ LineNo == SM.getLineNumber(HintLocInfo.first, HintLocInfo.second) &&
+ StringRef(H.CodeToInsert).find_first_of("\n\r") == StringRef::npos) {
+ // Insert the new code into the line just below the code
+ // that the user wrote.
+ // Note: When modifying this function, be very careful about what is a
+ // "column" (printed width, platform-dependent) and what is a
+ // "byte offset" (SourceManager "column").
+ unsigned HintByteOffset =
+ SM.getColumnNumber(HintLocInfo.first, HintLocInfo.second) - 1;
+
+ // The hint must start inside the source or right at the end
+ assert(HintByteOffset < static_cast<unsigned>(map.bytes()) + 1);
+ unsigned HintCol = map.byteToContainingColumn(HintByteOffset);
+
+ // If we inserted a long previous hint, push this one forwards, and add
+ // an extra space to show that this is not part of the previous
+ // completion. This is sort of the best we can do when two hints appear
+ // to overlap.
+ //
+ // Note that if this hint is located immediately after the previous
+ // hint, no space will be added, since the location is more important.
+ if (HintCol < PrevHintEndCol)
+ HintCol = PrevHintEndCol + 1;
+
+ // This should NOT use HintByteOffset, because the source might have
+ // Unicode characters in earlier columns.
+ unsigned NewFixItLineSize = FixItInsertionLine.size() +
+ (HintCol - PrevHintEndCol) +
+ H.CodeToInsert.size();
+ if (NewFixItLineSize > FixItInsertionLine.size())
+ FixItInsertionLine.resize(NewFixItLineSize, ' ');
+
+ std::copy(H.CodeToInsert.begin(), H.CodeToInsert.end(),
+ FixItInsertionLine.end() - H.CodeToInsert.size());
+
+ PrevHintEndCol = HintCol + llvm::sys::locale::columnWidth(H.CodeToInsert);
}
}
@@ -1121,6 +1054,65 @@ static std::string buildFixItInsertionLine(FileID FID,
return FixItInsertionLine;
}
+static unsigned getNumDisplayWidth(unsigned N) {
+ unsigned L = 1u, M = 10u;
+ while (M <= N && ++L != std::numeric_limits<unsigned>::digits10 + 1)
+ M *= 10u;
+
+ return L;
+}
+
+/// Filter out invalid ranges, ranges that don't fit into the window of
+/// source lines we will print, and ranges from other files.
+///
+/// For the remaining ranges, convert them to simple LineRange structs,
+/// which only cover one line at a time.
+static SmallVector<LineRange>
+prepareAndFilterRanges(const SmallVectorImpl<CharSourceRange> &Ranges,
+ const SourceManager &SM,
+ const std::pair<unsigned, unsigned> &Lines, FileID FID,
+ const LangOptions &LangOpts) {
+ SmallVector<LineRange> LineRanges;
+
+ for (const CharSourceRange &R : Ranges) {
+ if (R.isInvalid())
+ continue;
+ SourceLocation Begin = R.getBegin();
+ SourceLocation End = R.getEnd();
+
+ unsigned StartLineNo = SM.getExpansionLineNumber(Begin);
+ if (StartLineNo > Lines.second || SM.getFileID(Begin) != FID)
+ continue;
+
+ unsigned EndLineNo = SM.getExpansionLineNumber(End);
+ if (EndLineNo < Lines.first || SM.getFileID(End) != FID)
+ continue;
+
+ unsigned StartColumn = SM.getExpansionColumnNumber(Begin);
+ unsigned EndColumn = SM.getExpansionColumnNumber(End);
+ if (R.isTokenRange())
+ EndColumn += Lexer::MeasureTokenLength(End, SM, LangOpts);
+
+ // Only a single line.
+ if (StartLineNo == EndLineNo) {
+ LineRanges.push_back({StartLineNo, StartColumn - 1, EndColumn - 1});
+ continue;
+ }
+
+ // Start line.
+ LineRanges.push_back({StartLineNo, StartColumn - 1, ~0u});
+
+ // Middle lines.
+ for (unsigned S = StartLineNo + 1; S != EndLineNo; ++S)
+ LineRanges.push_back({S, 0, ~0u});
+
+ // End line.
+ LineRanges.push_back({EndLineNo, 0, EndColumn - 1});
+ }
+
+ return LineRanges;
+}
+
/// Emit a code snippet and caret line.
///
/// This routine emits a single line's code snippet and caret line..
@@ -1146,9 +1138,7 @@ void TextDiagnostic::emitSnippetAndCaret(
(LastLevel != DiagnosticsEngine::Note || Level == LastLevel))
return;
- // Decompose the location into a FID/Offset pair.
- std::pair<FileID, unsigned> LocInfo = Loc.getDecomposedLoc();
- FileID FID = LocInfo.first;
+ FileID FID = Loc.getFileID();
const SourceManager &SM = Loc.getManager();
// Get information about the buffer it points into.
@@ -1156,6 +1146,8 @@ void TextDiagnostic::emitSnippetAndCaret(
StringRef BufData = Loc.getBufferData(&Invalid);
if (Invalid)
return;
+ const char *BufStart = BufData.data();
+ const char *BufEnd = BufStart + BufData.size();
unsigned CaretLineNo = Loc.getLineNumber();
unsigned CaretColNo = Loc.getColumnNumber();
@@ -1168,16 +1160,34 @@ void TextDiagnostic::emitSnippetAndCaret(
// Find the set of lines to include.
const unsigned MaxLines = DiagOpts->SnippetLineLimit;
std::pair<unsigned, unsigned> Lines = {CaretLineNo, CaretLineNo};
- for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
- E = Ranges.end();
- I != E; ++I)
- if (auto OptionalRange = findLinesForRange(*I, FID, SM))
+ unsigned DisplayLineNo =
+ Ranges.empty() ? Loc.getPresumedLoc().getLine() : ~0u;
+ for (const auto &I : Ranges) {
+ if (auto OptionalRange = findLinesForRange(I, FID, SM))
Lines = maybeAddRange(Lines, *OptionalRange, MaxLines);
- for (unsigned LineNo = Lines.first; LineNo != Lines.second + 1; ++LineNo) {
- const char *BufStart = BufData.data();
- const char *BufEnd = BufStart + BufData.size();
+ DisplayLineNo =
+ std::min(DisplayLineNo, SM.getPresumedLineNumber(I.getBegin()));
+ }
+ // Our line numbers look like:
+ // " [number] | "
+ // Where [number] is MaxLineNoDisplayWidth columns
+ // and the full thing is therefore MaxLineNoDisplayWidth + 4 columns.
+ unsigned MaxLineNoDisplayWidth =
+ DiagOpts->ShowLineNumbers
+ ? std::max(4u, getNumDisplayWidth(DisplayLineNo + MaxLines))
+ : 0;
+ auto indentForLineNumbers = [&] {
+ if (MaxLineNoDisplayWidth > 0)
+ OS.indent(MaxLineNoDisplayWidth + 2) << "| ";
+ };
+
+ SmallVector<LineRange> LineRanges =
+ prepareAndFilterRanges(Ranges, SM, Lines, FID, LangOpts);
+
+ for (unsigned LineNo = Lines.first; LineNo != Lines.second + 1;
+ ++LineNo, ++DisplayLineNo) {
// Rewind from the current position to the start of the line.
const char *LineStart =
BufStart +
@@ -1195,34 +1205,28 @@ void TextDiagnostic::emitSnippetAndCaret(
if (size_t(LineEnd - LineStart) > MaxLineLengthToPrint)
return;
- // Trim trailing null-bytes.
- StringRef Line(LineStart, LineEnd - LineStart);
- while (!Line.empty() && Line.back() == '\0' &&
- (LineNo != CaretLineNo || Line.size() > CaretColNo))
- Line = Line.drop_back();
-
// Copy the line of code into an std::string for ease of manipulation.
- std::string SourceLine(Line.begin(), Line.end());
+ std::string SourceLine(LineStart, LineEnd);
+ // Remove trailing null bytes.
+ while (!SourceLine.empty() && SourceLine.back() == '\0' &&
+ (LineNo != CaretLineNo || SourceLine.size() > CaretColNo))
+ SourceLine.pop_back();
// Build the byte to column map.
const SourceColumnMap sourceColMap(SourceLine, DiagOpts->TabStop);
- // Create a line for the caret that is filled with spaces that is the same
- // number of columns as the line of source code.
- std::string CaretLine(sourceColMap.columns(), ' ');
-
+ std::string CaretLine;
// Highlight all of the characters covered by Ranges with ~ characters.
- for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
- E = Ranges.end();
- I != E; ++I)
- highlightRange(*I, LineNo, FID, sourceColMap, CaretLine, SM, LangOpts);
+ for (const auto &LR : LineRanges) {
+ if (LR.LineNo == LineNo)
+ highlightRange(LR, sourceColMap, CaretLine);
+ }
// Next, insert the caret itself.
if (CaretLineNo == LineNo) {
- CaretColNo = sourceColMap.byteToContainingColumn(CaretColNo - 1);
- if (CaretLine.size() < CaretColNo + 1)
- CaretLine.resize(CaretColNo + 1, ' ');
- CaretLine[CaretColNo] = '^';
+ size_t Col = sourceColMap.byteToContainingColumn(CaretColNo - 1);
+ CaretLine.resize(std::max(Col + 1, CaretLine.size()), ' ');
+ CaretLine[Col] = '^';
}
std::string FixItInsertionLine = buildFixItInsertionLine(
@@ -1239,19 +1243,16 @@ void TextDiagnostic::emitSnippetAndCaret(
// to produce easily machine parsable output. Add a space before the
// source line and the caret to make it trivial to tell the main diagnostic
// line from what the user is intended to see.
- if (DiagOpts->ShowSourceRanges) {
+ if (DiagOpts->ShowSourceRanges && !SourceLine.empty()) {
SourceLine = ' ' + SourceLine;
CaretLine = ' ' + CaretLine;
}
- // Finally, remove any blank spaces from the end of CaretLine.
- while (!CaretLine.empty() && CaretLine[CaretLine.size() - 1] == ' ')
- CaretLine.erase(CaretLine.end() - 1);
-
// Emit what we have computed.
- emitSnippet(SourceLine);
+ emitSnippet(SourceLine, MaxLineNoDisplayWidth, DisplayLineNo);
if (!CaretLine.empty()) {
+ indentForLineNumbers();
if (DiagOpts->ShowColors)
OS.changeColor(caretColor, true);
OS << CaretLine << '\n';
@@ -1260,6 +1261,7 @@ void TextDiagnostic::emitSnippetAndCaret(
}
if (!FixItInsertionLine.empty()) {
+ indentForLineNumbers();
if (DiagOpts->ShowColors)
// Print fixit line in color
OS.changeColor(fixitColor, false);
@@ -1275,37 +1277,37 @@ void TextDiagnostic::emitSnippetAndCaret(
emitParseableFixits(Hints, SM);
}
-void TextDiagnostic::emitSnippet(StringRef line) {
- if (line.empty())
- return;
-
- size_t i = 0;
-
- std::string to_print;
- bool print_reversed = false;
-
- while (i<line.size()) {
- std::pair<SmallString<16>,bool> res
- = printableTextForNextCharacter(line, &i, DiagOpts->TabStop);
- bool was_printable = res.second;
+void TextDiagnostic::emitSnippet(StringRef SourceLine,
+ unsigned MaxLineNoDisplayWidth,
+ unsigned LineNo) {
+ // Emit line number.
+ if (MaxLineNoDisplayWidth > 0) {
+ unsigned LineNoDisplayWidth = getNumDisplayWidth(LineNo);
+ OS.indent(MaxLineNoDisplayWidth - LineNoDisplayWidth + 1)
+ << LineNo << " | ";
+ }
- if (DiagOpts->ShowColors && was_printable == print_reversed) {
- if (print_reversed)
- OS.reverseColor();
- OS << to_print;
- to_print.clear();
- if (DiagOpts->ShowColors)
- OS.resetColor();
+ // Print the source line one character at a time.
+ bool PrintReversed = false;
+ size_t I = 0;
+ while (I < SourceLine.size()) {
+ auto [Str, WasPrintable] =
+ printableTextForNextCharacter(SourceLine, &I, DiagOpts->TabStop);
+
+ // Toggle inverted colors on or off for this character.
+ if (DiagOpts->ShowColors) {
+ if (WasPrintable == PrintReversed) {
+ PrintReversed = !PrintReversed;
+ if (PrintReversed)
+ OS.reverseColor();
+ else
+ OS.resetColor();
+ }
}
-
- print_reversed = !was_printable;
- to_print += res.first.str();
+ OS << Str;
}
- if (print_reversed && DiagOpts->ShowColors)
- OS.reverseColor();
- OS << to_print;
- if (print_reversed && DiagOpts->ShowColors)
+ if (DiagOpts->ShowColors)
OS.resetColor();
OS << '\n';
@@ -1318,24 +1320,21 @@ void TextDiagnostic::emitParseableFixits(ArrayRef<FixItHint> Hints,
// We follow FixItRewriter's example in not (yet) handling
// fix-its in macros.
- for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
- I != E; ++I) {
- if (I->RemoveRange.isInvalid() ||
- I->RemoveRange.getBegin().isMacroID() ||
- I->RemoveRange.getEnd().isMacroID())
+ for (const auto &H : Hints) {
+ if (H.RemoveRange.isInvalid() || H.RemoveRange.getBegin().isMacroID() ||
+ H.RemoveRange.getEnd().isMacroID())
return;
}
- for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
- I != E; ++I) {
- SourceLocation BLoc = I->RemoveRange.getBegin();
- SourceLocation ELoc = I->RemoveRange.getEnd();
+ for (const auto &H : Hints) {
+ SourceLocation BLoc = H.RemoveRange.getBegin();
+ SourceLocation ELoc = H.RemoveRange.getEnd();
std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(BLoc);
std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(ELoc);
// Adjust for token ranges.
- if (I->RemoveRange.isTokenRange())
+ if (H.RemoveRange.isTokenRange())
EInfo.second += Lexer::MeasureTokenLength(ELoc, SM, LangOpts);
// We specifically do not do word-wrapping or tab-expansion here,
@@ -1351,7 +1350,7 @@ void TextDiagnostic::emitParseableFixits(ArrayRef<FixItHint> Hints,
<< '-' << SM.getLineNumber(EInfo.first, EInfo.second)
<< ':' << SM.getColumnNumber(EInfo.first, EInfo.second)
<< "}:\"";
- OS.write_escaped(I->CodeToInsert);
+ OS.write_escaped(H.CodeToInsert);
OS << "\"\n";
}
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp b/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
index 378f7ddd0159..d57b27e9e36f 100644
--- a/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -737,12 +737,12 @@ void VerifyDiagnosticConsumer::HandleDiagnostic(
Loc = SrcManager->getExpansionLoc(Loc);
FileID FID = SrcManager->getFileID(Loc);
- const FileEntry *FE = SrcManager->getFileEntryForID(FID);
+ auto FE = SrcManager->getFileEntryRefForID(FID);
if (FE && CurrentPreprocessor && SrcManager->isLoadedFileID(FID)) {
// If the file is a modules header file it shall not be parsed
// for expected-* directives.
HeaderSearch &HS = CurrentPreprocessor->getHeaderSearchInfo();
- if (HS.findModuleForHeader(FE))
+ if (HS.findModuleForHeader(*FE))
PS = IsUnparsedNoDirectives;
}
diff --git a/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 47157ca5092b..310f67774a66 100644
--- a/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -178,6 +178,14 @@ CreateFrontendAction(CompilerInstance &CI) {
}
#endif
+ // Wrap the base FE action in an extract api action to generate
+ // symbol graph as a biproduct of comilation ( enabled with
+ // --emit-symbol-graph option )
+ if (!FEOpts.SymbolGraphOutputDir.empty()) {
+ CI.getCodeGenOpts().ClearASTBeforeBackend = false;
+ Act = std::make_unique<WrappingExtractAPIAction>(std::move(Act));
+ }
+
// If there are any AST files to merge, create a frontend action
// adaptor to perform the merge.
if (!FEOpts.ASTMergeFiles.empty())
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h
index b87413e12a27..3c3948863c1d 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h
@@ -513,6 +513,197 @@ __device__ inline cuuint32_t __nvvm_get_smem_pointer(void *__ptr) {
return __nv_cvta_generic_to_shared_impl(__ptr);
}
} // extern "C"
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
+__device__ inline unsigned __reduce_add_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_add(__mask, __value);
+}
+__device__ inline unsigned __reduce_min_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_umin(__mask, __value);
+}
+__device__ inline unsigned __reduce_max_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_umax(__mask, __value);
+}
+__device__ inline int __reduce_min_sync(unsigned __mask, int __value) {
+ return __nvvm_redux_sync_min(__mask, __value);
+}
+__device__ inline int __reduce_max_sync(unsigned __mask, int __value) {
+ return __nvvm_redux_sync_max(__mask, __value);
+}
+__device__ inline unsigned __reduce_or_sync(unsigned __mask, unsigned __value) {
+ return __nvvm_redux_sync_or(__mask, __value);
+}
+__device__ inline unsigned __reduce_and_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_and(__mask, __value);
+}
+__device__ inline unsigned __reduce_xor_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_xor(__mask, __value);
+}
+
+__device__ inline void __nv_memcpy_async_shared_global_4(void *__dst,
+ const void *__src,
+ unsigned __src_size) {
+ __nvvm_cp_async_ca_shared_global_4(
+ (void __attribute__((address_space(3))) *)__dst,
+ (const void __attribute__((address_space(1))) *)__src, __src_size);
+}
+__device__ inline void __nv_memcpy_async_shared_global_8(void *__dst,
+ const void *__src,
+ unsigned __src_size) {
+ __nvvm_cp_async_ca_shared_global_8(
+ (void __attribute__((address_space(3))) *)__dst,
+ (const void __attribute__((address_space(1))) *)__src, __src_size);
+}
+__device__ inline void __nv_memcpy_async_shared_global_16(void *__dst,
+ const void *__src,
+ unsigned __src_size) {
+ __nvvm_cp_async_ca_shared_global_16(
+ (void __attribute__((address_space(3))) *)__dst,
+ (const void __attribute__((address_space(1))) *)__src, __src_size);
+}
+
+__device__ inline void *
+__nv_associate_access_property(const void *__ptr, unsigned long long __prop) {
+ // TODO: it appears to provide compiler with some sort of a hint. We do not
+ // know what exactly it is supposed to do. However, CUDA headers suggest that
+ // just passing through __ptr should not affect correctness. They do so on
+ // pre-sm80 GPUs where this builtin is not available.
+ return (void*)__ptr;
+}
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
+__device__ inline unsigned __isCtaShared(const void *ptr) {
+ return __isShared(ptr);
+}
+
+__device__ inline unsigned __isClusterShared(const void *__ptr) {
+ return __nvvm_isspacep_shared_cluster(__ptr);
+}
+
+__device__ inline void *__cluster_map_shared_rank(const void *__ptr,
+ unsigned __rank) {
+ return __nvvm_mapa((void *)__ptr, __rank);
+}
+
+__device__ inline unsigned __cluster_query_shared_rank(const void *__ptr) {
+ return __nvvm_getctarank((void *)__ptr);
+}
+
+__device__ inline uint2
+__cluster_map_shared_multicast(const void *__ptr,
+ unsigned int __cluster_cta_mask) {
+ return make_uint2((unsigned)__cvta_generic_to_shared(__ptr),
+ __cluster_cta_mask);
+}
+
+__device__ inline unsigned __clusterDimIsSpecified() {
+ return __nvvm_is_explicit_cluster();
+}
+
+__device__ inline dim3 __clusterDim() {
+ return dim3(__nvvm_read_ptx_sreg_cluster_nctaid_x(),
+ __nvvm_read_ptx_sreg_cluster_nctaid_y(),
+ __nvvm_read_ptx_sreg_cluster_nctaid_z());
+}
+
+__device__ inline dim3 __clusterRelativeBlockIdx() {
+ return dim3(__nvvm_read_ptx_sreg_cluster_ctaid_x(),
+ __nvvm_read_ptx_sreg_cluster_ctaid_y(),
+ __nvvm_read_ptx_sreg_cluster_ctaid_z());
+}
+
+__device__ inline dim3 __clusterGridDimInClusters() {
+ return dim3(__nvvm_read_ptx_sreg_nclusterid_x(),
+ __nvvm_read_ptx_sreg_nclusterid_y(),
+ __nvvm_read_ptx_sreg_nclusterid_z());
+}
+
+__device__ inline dim3 __clusterIdx() {
+ return dim3(__nvvm_read_ptx_sreg_clusterid_x(),
+ __nvvm_read_ptx_sreg_clusterid_y(),
+ __nvvm_read_ptx_sreg_clusterid_z());
+}
+
+__device__ inline unsigned __clusterRelativeBlockRank() {
+ return __nvvm_read_ptx_sreg_cluster_ctarank();
+}
+
+__device__ inline unsigned __clusterSizeInBlocks() {
+ return __nvvm_read_ptx_sreg_cluster_nctarank();
+}
+
+__device__ inline void __cluster_barrier_arrive() {
+ __nvvm_barrier_cluster_arrive();
+}
+
+__device__ inline void __cluster_barrier_arrive_relaxed() {
+ __nvvm_barrier_cluster_arrive_relaxed();
+}
+
+__device__ inline void __cluster_barrier_wait() {
+ __nvvm_barrier_cluster_wait();
+}
+
+__device__ inline void __threadfence_cluster() { __nvvm_fence_sc_cluster(); }
+
+__device__ inline float2 atomicAdd(float2 *__ptr, float2 __val) {
+ float2 __ret;
+ __asm__("atom.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
+ : "=f"(__ret.x), "=f"(__ret.y)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+ return __ret;
+}
+
+__device__ inline float2 atomicAdd_block(float2 *__ptr, float2 __val) {
+ float2 __ret;
+ __asm__("atom.cta.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
+ : "=f"(__ret.x), "=f"(__ret.y)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+ return __ret;
+}
+
+__device__ inline float2 atomicAdd_system(float2 *__ptr, float2 __val) {
+ float2 __ret;
+ __asm__("atom.sys.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
+ : "=f"(__ret.x), "=f"(__ret.y)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+ return __ret;
+}
+
+__device__ inline float4 atomicAdd(float4 *__ptr, float4 __val) {
+ float4 __ret;
+ __asm__("atom.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+ : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
+ return __ret;
+}
+
+__device__ inline float4 atomicAdd_block(float4 *__ptr, float4 __val) {
+ float4 __ret;
+ __asm__(
+ "atom.cta.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+ : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
+ return __ret;
+}
+
+__device__ inline float4 atomicAdd_system(float4 *__ptr, float4 __val) {
+ float4 __ret;
+ __asm__(
+ "atom.sys.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+ : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w)
+ :);
+ return __ret;
+}
+
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
#endif // CUDA_VERSION >= 11000
#endif // defined(__CLANG_CUDA_INTRINSICS_H__)
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_hip_cmath.h b/contrib/llvm-project/clang/lib/Headers/__clang_hip_cmath.h
index d488db0a94d9..b52d6b781661 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_hip_cmath.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_hip_cmath.h
@@ -171,7 +171,7 @@ __DEVICE__ __CONSTEXPR__ bool signbit(double __x) { return ::__signbit(__x); }
// Other functions.
__DEVICE__ __CONSTEXPR__ _Float16 fma(_Float16 __x, _Float16 __y,
_Float16 __z) {
- return __ocml_fma_f16(__x, __y, __z);
+ return __builtin_fmaf16(__x, __y, __z);
}
__DEVICE__ __CONSTEXPR__ _Float16 pow(_Float16 __base, int __iexp) {
return __ocml_pown_f16(__base, __iexp);
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h b/contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h
index be25f4b4a050..ed576027cb5e 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h
@@ -10,6 +10,10 @@
#ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__
#define __CLANG_HIP_LIBDEVICE_DECLARES_H__
+#if !defined(__HIPCC_RTC__) && __has_include("hip/hip_version.h")
+#include "hip/hip_version.h"
+#endif // __has_include("hip/hip_version.h")
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -137,23 +141,6 @@ __device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtz_f32(float, float, float);
-
-__device__ inline __attribute__((const)) float
-__llvm_amdgcn_cos_f32(float __x) {
- return __builtin_amdgcn_cosf(__x);
-}
-__device__ inline __attribute__((const)) float
-__llvm_amdgcn_rcp_f32(float __x) {
- return __builtin_amdgcn_rcpf(__x);
-}
-__device__ inline __attribute__((const)) float
-__llvm_amdgcn_rsq_f32(float __x) {
- return __builtin_amdgcn_rsqf(__x);
-}
-__device__ inline __attribute__((const)) float
-__llvm_amdgcn_sin_f32(float __x) {
- return __builtin_amdgcn_sinf(__x);
-}
// END INTRINSICS
// END FLOAT
@@ -277,15 +264,6 @@ __device__ __attribute__((const)) double __ocml_fma_rtp_f64(double, double,
__device__ __attribute__((const)) double __ocml_fma_rtz_f64(double, double,
double);
-__device__ inline __attribute__((const)) double
-__llvm_amdgcn_rcp_f64(double __x) {
- return __builtin_amdgcn_rcp(__x);
-}
-__device__ inline __attribute__((const)) double
-__llvm_amdgcn_rsq_f64(double __x) {
- return __builtin_amdgcn_rsq(__x);
-}
-
__device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16);
__device__ _Float16 __ocml_cos_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_cvtrtn_f16_f32(float);
@@ -305,7 +283,6 @@ __device__ __attribute__((const)) int __ocml_isnan_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_log_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_log10_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_log2_f16(_Float16);
-__device__ __attribute__((const)) _Float16 __llvm_amdgcn_rcp_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_rint_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_rsqrt_f16(_Float16);
__device__ _Float16 __ocml_sin_f16(_Float16);
@@ -316,8 +293,15 @@ __device__ __attribute__((pure)) _Float16 __ocml_pown_f16(_Float16, int);
typedef _Float16 __2f16 __attribute__((ext_vector_type(2)));
typedef short __2i16 __attribute__((ext_vector_type(2)));
+// We need to match C99's bool and get an i1 in the IR.
+#ifdef __cplusplus
+typedef bool __ockl_bool;
+#else
+typedef _Bool __ockl_bool;
+#endif
+
__device__ __attribute__((const)) float __ockl_fdot2(__2f16 a, __2f16 b,
- float c, bool s);
+ float c, __ockl_bool s);
__device__ __attribute__((const)) __2f16 __ocml_ceil_2f16(__2f16);
__device__ __attribute__((const)) __2f16 __ocml_fabs_2f16(__2f16);
__device__ __2f16 __ocml_cos_2f16(__2f16);
@@ -332,11 +316,29 @@ __device__ __attribute__((const)) __2i16 __ocml_isnan_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log10_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16);
+
+#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 560 || 1
+#define __DEPRECATED_SINCE_HIP_560(X) __attribute__((deprecated(X)))
+#else
+#define __DEPRECATED_SINCE_HIP_560(X)
+#endif
+
+// Deprecated, should be removed when rocm releases using it are no longer
+// relevant.
+__DEPRECATED_SINCE_HIP_560("use ((_Float16)1.0) / ")
+__device__ inline _Float16 __llvm_amdgcn_rcp_f16(_Float16 x) {
+ return ((_Float16)1.0f) / x;
+}
+
+__DEPRECATED_SINCE_HIP_560("use ((__2f16)1.0) / ")
__device__ inline __2f16
-__llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL.
+__llvm_amdgcn_rcp_2f16(__2f16 __x)
{
- return (__2f16)(__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y));
+ return ((__2f16)1.0f) / __x;
}
+
+#undef __DEPRECATED_SINCE_HIP_560
+
__device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16);
__device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16);
__device__ __2f16 __ocml_sin_2f16(__2f16);
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h b/contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h
index 537dd0fca870..a47dda3327f4 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h
@@ -182,10 +182,10 @@ __DEVICE__
float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
__DEVICE__
-float ceilf(float __x) { return __ocml_ceil_f32(__x); }
+float ceilf(float __x) { return __builtin_ceilf(__x); }
__DEVICE__
-float copysignf(float __x, float __y) { return __ocml_copysign_f32(__x, __y); }
+float copysignf(float __x, float __y) { return __builtin_copysignf(__x, __y); }
__DEVICE__
float cosf(float __x) { return __ocml_cos_f32(__x); }
@@ -221,10 +221,10 @@ __DEVICE__
float exp10f(float __x) { return __ocml_exp10_f32(__x); }
__DEVICE__
-float exp2f(float __x) { return __ocml_exp2_f32(__x); }
+float exp2f(float __x) { return __builtin_exp2f(__x); }
__DEVICE__
-float expf(float __x) { return __ocml_exp_f32(__x); }
+float expf(float __x) { return __builtin_expf(__x); }
__DEVICE__
float expm1f(float __x) { return __ocml_expm1_f32(__x); }
@@ -239,33 +239,25 @@ __DEVICE__
float fdividef(float __x, float __y) { return __x / __y; }
__DEVICE__
-float floorf(float __x) { return __ocml_floor_f32(__x); }
+float floorf(float __x) { return __builtin_floorf(__x); }
__DEVICE__
float fmaf(float __x, float __y, float __z) {
- return __ocml_fma_f32(__x, __y, __z);
+ return __builtin_fmaf(__x, __y, __z);
}
__DEVICE__
-float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
+float fmaxf(float __x, float __y) { return __builtin_fmaxf(__x, __y); }
__DEVICE__
-float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
+float fminf(float __x, float __y) { return __builtin_fminf(__x, __y); }
__DEVICE__
float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
__DEVICE__
float frexpf(float __x, int *__nptr) {
- int __tmp;
-#ifdef __OPENMP_AMDGCN__
-#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
-#endif
- float __r =
- __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
- *__nptr = __tmp;
-
- return __r;
+ return __builtin_frexpf(__x, __nptr);
}
__DEVICE__
@@ -275,13 +267,13 @@ __DEVICE__
int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
__DEVICE__
-__RETURN_TYPE __finitef(float __x) { return __ocml_isfinite_f32(__x); }
+__RETURN_TYPE __finitef(float __x) { return __builtin_isfinite(__x); }
__DEVICE__
-__RETURN_TYPE __isinff(float __x) { return __ocml_isinf_f32(__x); }
+__RETURN_TYPE __isinff(float __x) { return __builtin_isinf(__x); }
__DEVICE__
-__RETURN_TYPE __isnanf(float __x) { return __ocml_isnan_f32(__x); }
+__RETURN_TYPE __isnanf(float __x) { return __builtin_isnan(__x); }
__DEVICE__
float j0f(float __x) { return __ocml_j0_f32(__x); }
@@ -311,37 +303,37 @@ float jnf(int __n, float __x) { // TODO: we could use Ahmes multiplication
}
__DEVICE__
-float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
+float ldexpf(float __x, int __e) { return __builtin_amdgcn_ldexpf(__x, __e); }
__DEVICE__
float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
__DEVICE__
-long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
+long long int llrintf(float __x) { return __builtin_rintf(__x); }
__DEVICE__
-long long int llroundf(float __x) { return __ocml_round_f32(__x); }
+long long int llroundf(float __x) { return __builtin_roundf(__x); }
__DEVICE__
-float log10f(float __x) { return __ocml_log10_f32(__x); }
+float log10f(float __x) { return __builtin_log10f(__x); }
__DEVICE__
float log1pf(float __x) { return __ocml_log1p_f32(__x); }
__DEVICE__
-float log2f(float __x) { return __ocml_log2_f32(__x); }
+float log2f(float __x) { return __builtin_log2f(__x); }
__DEVICE__
float logbf(float __x) { return __ocml_logb_f32(__x); }
__DEVICE__
-float logf(float __x) { return __ocml_log_f32(__x); }
+float logf(float __x) { return __builtin_logf(__x); }
__DEVICE__
-long int lrintf(float __x) { return __ocml_rint_f32(__x); }
+long int lrintf(float __x) { return __builtin_rintf(__x); }
__DEVICE__
-long int lroundf(float __x) { return __ocml_round_f32(__x); }
+long int lroundf(float __x) { return __builtin_roundf(__x); }
__DEVICE__
float modff(float __x, float *__iptr) {
@@ -377,7 +369,7 @@ float nanf(const char *__tagp __attribute__((nonnull))) {
}
__DEVICE__
-float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
+float nearbyintf(float __x) { return __builtin_nearbyintf(__x); }
__DEVICE__
float nextafterf(float __x, float __y) {
@@ -443,7 +435,7 @@ __DEVICE__
float rhypotf(float __x, float __y) { return __ocml_rhypot_f32(__x, __y); }
__DEVICE__
-float rintf(float __x) { return __ocml_rint_f32(__x); }
+float rintf(float __x) { return __builtin_rintf(__x); }
__DEVICE__
float rnorm3df(float __x, float __y, float __z) {
@@ -468,22 +460,22 @@ float rnormf(int __dim,
}
__DEVICE__
-float roundf(float __x) { return __ocml_round_f32(__x); }
+float roundf(float __x) { return __builtin_roundf(__x); }
__DEVICE__
float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
__DEVICE__
float scalblnf(float __x, long int __n) {
- return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
+ return (__n < INT_MAX) ? __builtin_amdgcn_ldexpf(__x, __n)
: __ocml_scalb_f32(__x, __n);
}
__DEVICE__
-float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
+float scalbnf(float __x, int __n) { return __builtin_amdgcn_ldexpf(__x, __n); }
__DEVICE__
-__RETURN_TYPE __signbitf(float __x) { return __ocml_signbit_f32(__x); }
+__RETURN_TYPE __signbitf(float __x) { return __builtin_signbitf(__x); }
__DEVICE__
void sincosf(float __x, float *__sinptr, float *__cosptr) {
@@ -529,7 +521,7 @@ __DEVICE__
float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
__DEVICE__
-float truncf(float __x) { return __ocml_trunc_f32(__x); }
+float truncf(float __x) { return __builtin_truncf(__x); }
__DEVICE__
float y0f(float __x) { return __ocml_y0_f32(__x); }
@@ -621,7 +613,7 @@ float __fmaf_rz(float __x, float __y, float __z) {
#else
__DEVICE__
float __fmaf_rn(float __x, float __y, float __z) {
- return __ocml_fma_f32(__x, __y, __z);
+ return __builtin_fmaf(__x, __y, __z);
}
#endif
@@ -654,7 +646,7 @@ float __frcp_rn(float __x) { return 1.0f / __x; }
#endif
__DEVICE__
-float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
+float __frsqrt_rn(float __x) { return __builtin_amdgcn_rsqf(__x); }
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
@@ -739,11 +731,11 @@ __DEVICE__
double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
__DEVICE__
-double ceil(double __x) { return __ocml_ceil_f64(__x); }
+double ceil(double __x) { return __builtin_ceil(__x); }
__DEVICE__
double copysign(double __x, double __y) {
- return __ocml_copysign_f64(__x, __y);
+ return __builtin_copysign(__x, __y);
}
__DEVICE__
@@ -795,32 +787,25 @@ __DEVICE__
double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
__DEVICE__
-double floor(double __x) { return __ocml_floor_f64(__x); }
+double floor(double __x) { return __builtin_floor(__x); }
__DEVICE__
double fma(double __x, double __y, double __z) {
- return __ocml_fma_f64(__x, __y, __z);
+ return __builtin_fma(__x, __y, __z);
}
__DEVICE__
-double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
+double fmax(double __x, double __y) { return __builtin_fmax(__x, __y); }
__DEVICE__
-double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
+double fmin(double __x, double __y) { return __builtin_fmin(__x, __y); }
__DEVICE__
double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
__DEVICE__
double frexp(double __x, int *__nptr) {
- int __tmp;
-#ifdef __OPENMP_AMDGCN__
-#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
-#endif
- double __r =
- __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
- *__nptr = __tmp;
- return __r;
+ return __builtin_frexp(__x, __nptr);
}
__DEVICE__
@@ -830,13 +815,13 @@ __DEVICE__
int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
__DEVICE__
-__RETURN_TYPE __finite(double __x) { return __ocml_isfinite_f64(__x); }
+__RETURN_TYPE __finite(double __x) { return __builtin_isfinite(__x); }
__DEVICE__
-__RETURN_TYPE __isinf(double __x) { return __ocml_isinf_f64(__x); }
+__RETURN_TYPE __isinf(double __x) { return __builtin_isinf(__x); }
__DEVICE__
-__RETURN_TYPE __isnan(double __x) { return __ocml_isnan_f64(__x); }
+__RETURN_TYPE __isnan(double __x) { return __builtin_isnan(__x); }
__DEVICE__
double j0(double __x) { return __ocml_j0_f64(__x); }
@@ -866,16 +851,16 @@ double jn(int __n, double __x) { // TODO: we could use Ahmes multiplication
}
__DEVICE__
-double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
+double ldexp(double __x, int __e) { return __builtin_amdgcn_ldexp(__x, __e); }
__DEVICE__
double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
__DEVICE__
-long long int llrint(double __x) { return __ocml_rint_f64(__x); }
+long long int llrint(double __x) { return __builtin_rint(__x); }
__DEVICE__
-long long int llround(double __x) { return __ocml_round_f64(__x); }
+long long int llround(double __x) { return __builtin_round(__x); }
__DEVICE__
double log(double __x) { return __ocml_log_f64(__x); }
@@ -893,10 +878,10 @@ __DEVICE__
double logb(double __x) { return __ocml_logb_f64(__x); }
__DEVICE__
-long int lrint(double __x) { return __ocml_rint_f64(__x); }
+long int lrint(double __x) { return __builtin_rint(__x); }
__DEVICE__
-long int lround(double __x) { return __ocml_round_f64(__x); }
+long int lround(double __x) { return __builtin_round(__x); }
__DEVICE__
double modf(double __x, double *__iptr) {
@@ -940,7 +925,7 @@ double nan(const char *__tagp) {
}
__DEVICE__
-double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
+double nearbyint(double __x) { return __builtin_nearbyint(__x); }
__DEVICE__
double nextafter(double __x, double __y) {
@@ -1006,7 +991,7 @@ __DEVICE__
double rhypot(double __x, double __y) { return __ocml_rhypot_f64(__x, __y); }
__DEVICE__
-double rint(double __x) { return __ocml_rint_f64(__x); }
+double rint(double __x) { return __builtin_rint(__x); }
__DEVICE__
double rnorm(int __dim,
@@ -1031,21 +1016,21 @@ double rnorm4d(double __x, double __y, double __z, double __w) {
}
__DEVICE__
-double round(double __x) { return __ocml_round_f64(__x); }
+double round(double __x) { return __builtin_round(__x); }
__DEVICE__
double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
__DEVICE__
double scalbln(double __x, long int __n) {
- return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
+ return (__n < INT_MAX) ? __builtin_amdgcn_ldexp(__x, __n)
: __ocml_scalb_f64(__x, __n);
}
__DEVICE__
-double scalbn(double __x, int __n) { return __ocml_scalbn_f64(__x, __n); }
+double scalbn(double __x, int __n) { return __builtin_amdgcn_ldexp(__x, __n); }
__DEVICE__
-__RETURN_TYPE __signbit(double __x) { return __ocml_signbit_f64(__x); }
+__RETURN_TYPE __signbit(double __x) { return __builtin_signbit(__x); }
__DEVICE__
double sin(double __x) { return __ocml_sin_f64(__x); }
@@ -1091,7 +1076,7 @@ __DEVICE__
double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
__DEVICE__
-double trunc(double __x) { return __ocml_trunc_f64(__x); }
+double trunc(double __x) { return __builtin_trunc(__x); }
__DEVICE__
double y0(double __x) { return __ocml_y0_f64(__x); }
@@ -1258,7 +1243,7 @@ double __fma_rz(double __x, double __y, double __z) {
#else
__DEVICE__
double __fma_rn(double __x, double __y, double __z) {
- return __ocml_fma_f64(__x, __y, __z);
+ return __builtin_fma(__x, __y, __z);
}
#endif
// END INTRINSICS
@@ -1290,16 +1275,16 @@ __DEVICE__ int max(int __arg1, int __arg2) {
}
__DEVICE__
-float max(float __x, float __y) { return fmaxf(__x, __y); }
+float max(float __x, float __y) { return __builtin_fmaxf(__x, __y); }
__DEVICE__
-double max(double __x, double __y) { return fmax(__x, __y); }
+double max(double __x, double __y) { return __builtin_fmax(__x, __y); }
__DEVICE__
-float min(float __x, float __y) { return fminf(__x, __y); }
+float min(float __x, float __y) { return __builtin_fminf(__x, __y); }
__DEVICE__
-double min(double __x, double __y) { return fmin(__x, __y); }
+double min(double __x, double __y) { return __builtin_fmin(__x, __y); }
#if !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)
__host__ inline static int min(int __arg1, int __arg2) {
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h b/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h
index 0508731de106..e8817073efdb 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h
@@ -80,12 +80,25 @@ extern "C" {
#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 405
extern "C" __device__ unsigned long long __ockl_dm_alloc(unsigned long long __size);
extern "C" __device__ void __ockl_dm_dealloc(unsigned long long __addr);
+#if __has_feature(address_sanitizer)
+extern "C" __device__ unsigned long long __asan_malloc_impl(unsigned long long __size, unsigned long long __pc);
+extern "C" __device__ void __asan_free_impl(unsigned long long __addr, unsigned long long __pc);
+__attribute__((noinline, weak)) __device__ void *malloc(__hip_size_t __size) {
+ unsigned long long __pc = (unsigned long long)__builtin_return_address(0);
+ return (void *)__asan_malloc_impl(__size, __pc);
+}
+__attribute__((noinline, weak)) __device__ void free(void *__ptr) {
+ unsigned long long __pc = (unsigned long long)__builtin_return_address(0);
+ __asan_free_impl((unsigned long long)__ptr, __pc);
+}
+#else
__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
return (void *) __ockl_dm_alloc(__size);
}
__attribute__((weak)) inline __device__ void free(void *__ptr) {
__ockl_dm_dealloc((unsigned long long)__ptr);
}
+#endif // __has_feature(address_sanitizer)
#else // HIP version check
#if __HIP_ENABLE_DEVICE_MALLOC__
__device__ void *__hip_malloc(__hip_size_t __size);
diff --git a/contrib/llvm-project/clang/lib/Headers/adxintrin.h b/contrib/llvm-project/clang/lib/Headers/adxintrin.h
index 72b9ed08f40c..20f6211e567b 100644
--- a/contrib/llvm-project/clang/lib/Headers/adxintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/adxintrin.h
@@ -17,56 +17,211 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-/* Intrinsics that are available only if __ADX__ defined */
-static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
-_addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
- unsigned int *__p)
-{
+/* Use C++ inline semantics in C++, GNU inline for C mode. */
+#if defined(__cplusplus)
+#define __INLINE __inline
+#else
+#define __INLINE static __inline
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Intrinsics that are available only if __ADX__ is defined. */
+
+/// Adds unsigned 32-bit integers \a __x and \a __y, plus 0 or 1 as indicated
+/// by the carry flag \a __cf. Stores the unsigned 32-bit sum in the memory
+/// at \a __p, and returns the 8-bit carry-out (carry flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store32(__p, __x + __y + temp)
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c ADCX instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// A 32-bit unsigned addend.
+/// \param __y
+/// A 32-bit unsigned addend.
+/// \param __p
+/// Pointer to memory for storing the sum.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char
+ __attribute__((__always_inline__, __nodebug__, __target__("adx")))
+ _addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p) {
return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
}
#ifdef __x86_64__
-static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
-_addcarryx_u64(unsigned char __cf, unsigned long long __x,
- unsigned long long __y, unsigned long long *__p)
-{
+/// Adds unsigned 64-bit integers \a __x and \a __y, plus 0 or 1 as indicated
+/// by the carry flag \a __cf. Stores the unsigned 64-bit sum in the memory
+/// at \a __p, and returns the 8-bit carry-out (carry flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store64(__p, __x + __y + temp)
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c ADCX instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// A 64-bit unsigned addend.
+/// \param __y
+/// A 64-bit unsigned addend.
+/// \param __p
+/// Pointer to memory for storing the sum.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char
+ __attribute__((__always_inline__, __nodebug__, __target__("adx")))
+ _addcarryx_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p) {
return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
}
#endif
-/* Intrinsics that are also available if __ADX__ undefined */
-static __inline unsigned char __DEFAULT_FN_ATTRS
-_addcarry_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
- unsigned int *__p)
-{
+/* Intrinsics that are also available if __ADX__ is undefined. */
+
+/// Adds unsigned 32-bit integers \a __x and \a __y, plus 0 or 1 as indicated
+/// by the carry flag \a __cf. Stores the unsigned 32-bit sum in the memory
+/// at \a __p, and returns the 8-bit carry-out (carry flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store32(__p, __x + __y + temp)
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c ADC instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// A 32-bit unsigned addend.
+/// \param __y
+/// A 32-bit unsigned addend.
+/// \param __p
+/// Pointer to memory for storing the sum.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS _addcarry_u32(unsigned char __cf,
+ unsigned int __x,
+ unsigned int __y,
+ unsigned int *__p) {
return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
}
#ifdef __x86_64__
-static __inline unsigned char __DEFAULT_FN_ATTRS
+/// Adds unsigned 64-bit integers \a __x and \a __y, plus 0 or 1 as indicated
+/// by the carry flag \a __cf. Stores the unsigned 64-bit sum in the memory
+/// at \a __p, and returns the 8-bit carry-out (carry flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store64(__p, __x + __y + temp)
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c ADC instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// A 64-bit unsigned addend.
+/// \param __y
+/// A 64-bit unsigned addend.
+/// \param __p
+/// Pointer to memory for storing the sum.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS
_addcarry_u64(unsigned char __cf, unsigned long long __x,
- unsigned long long __y, unsigned long long *__p)
-{
+ unsigned long long __y, unsigned long long *__p) {
return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
}
#endif
-static __inline unsigned char __DEFAULT_FN_ATTRS
-_subborrow_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
- unsigned int *__p)
-{
+/// Adds unsigned 32-bit integer \a __y to 0 or 1 as indicated by the carry
+/// flag \a __cf, and subtracts the result from unsigned 32-bit integer
+/// \a __x. Stores the unsigned 32-bit difference in the memory at \a __p,
+/// and returns the 8-bit carry-out (carry or overflow flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store32(__p, __x - (__y + temp))
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SBB instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// The 32-bit unsigned minuend.
+/// \param __y
+/// The 32-bit unsigned subtrahend.
+/// \param __p
+/// Pointer to memory for storing the difference.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS _subborrow_u32(unsigned char __cf,
+ unsigned int __x,
+ unsigned int __y,
+ unsigned int *__p) {
return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p);
}
#ifdef __x86_64__
-static __inline unsigned char __DEFAULT_FN_ATTRS
+/// Adds unsigned 64-bit integer \a __y to 0 or 1 as indicated by the carry
+/// flag \a __cf, and subtracts the result from unsigned 64-bit integer
+/// \a __x. Stores the unsigned 64-bit difference in the memory at \a __p,
+/// and returns the 8-bit carry-out (carry or overflow flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store64(__p, __x - (__y + temp))
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SBB instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// The 64-bit unsigned minuend.
+/// \param __y
+/// The 64-bit unsigned subtrahend.
+/// \param __p
+/// Pointer to memory for storing the difference.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS
_subborrow_u64(unsigned char __cf, unsigned long long __x,
- unsigned long long __y, unsigned long long *__p)
-{
+ unsigned long long __y, unsigned long long *__p) {
return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p);
}
#endif
+#if defined(__cplusplus)
+}
+#endif
+
#undef __DEFAULT_FN_ATTRS
#endif /* __ADXINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/altivec.h b/contrib/llvm-project/clang/lib/Headers/altivec.h
index f50466ec9637..c036f5ebba58 100644
--- a/contrib/llvm-project/clang/lib/Headers/altivec.h
+++ b/contrib/llvm-project/clang/lib/Headers/altivec.h
@@ -3202,71 +3202,79 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
// the XL-compatible signatures are used for those functions.
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_ctf(__a, __b) \
- _Generic( \
- (__a), vector int \
- : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
- vector unsigned int \
- : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
- (__b)), \
- vector unsigned long long \
- : (vector float)(__builtin_vsx_xvcvuxdsp( \
- (vector unsigned long long)(__a)) * \
- (vector float)(vector unsigned)((0x7f - (__b)) << 23)), \
- vector signed long long \
- : (vector float)(__builtin_vsx_xvcvsxdsp( \
- (vector signed long long)(__a)) * \
- (vector float)(vector unsigned)((0x7f - (__b)) << 23)))
+ _Generic((__a), \
+ vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \
+ ((__b)&0x1F)), \
+ vector unsigned int: (vector float)__builtin_altivec_vcfux( \
+ (vector unsigned int)(__a), ((__b)&0x1F)), \
+ vector unsigned long long: ( \
+ vector float)(__builtin_vsx_xvcvuxdsp( \
+ (vector unsigned long long)(__a)) * \
+ (vector float)(vector unsigned)((0x7f - \
+ ((__b)&0x1F)) \
+ << 23)), \
+ vector signed long long: ( \
+ vector float)(__builtin_vsx_xvcvsxdsp( \
+ (vector signed long long)(__a)) * \
+ (vector float)(vector unsigned)((0x7f - \
+ ((__b)&0x1F)) \
+ << 23)))
#else // __XL_COMPAT_ALTIVEC__
-#define vec_ctf(__a, __b) \
- _Generic( \
- (__a), vector int \
- : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
- vector unsigned int \
- : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
- (__b)), \
- vector unsigned long long \
- : (vector float)(__builtin_convertvector( \
- (vector unsigned long long)(__a), vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - \
- (__b)) \
- << 52)), \
- vector signed long long \
- : (vector float)(__builtin_convertvector((vector signed long long)(__a), \
- vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - \
- (__b)) \
- << 52)))
+#define vec_ctf(__a, __b) \
+ _Generic( \
+ (__a), \
+ vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \
+ ((__b)&0x1F)), \
+ vector unsigned int: (vector float)__builtin_altivec_vcfux( \
+ (vector unsigned int)(__a), ((__b)&0x1F)), \
+ vector unsigned long long: ( \
+ vector float)(__builtin_convertvector( \
+ (vector unsigned long long)(__a), vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)), \
+ vector signed long long: ( \
+ vector float)(__builtin_convertvector( \
+ (vector signed long long)(__a), vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)))
#endif // __XL_COMPAT_ALTIVEC__
#else
#define vec_ctf(__a, __b) \
- _Generic((__a), vector int \
- : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
- vector unsigned int \
- : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
- (__b)))
+ _Generic((__a), \
+ vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \
+ ((__b)&0x1F)), \
+ vector unsigned int: (vector float)__builtin_altivec_vcfux( \
+ (vector unsigned int)(__a), ((__b)&0x1F)))
#endif
/* vec_ctd */
#ifdef __VSX__
#define vec_ctd(__a, __b) \
- _Generic((__a), vector signed int \
- : (vec_doublee((vector signed int)(__a)) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)), \
- vector unsigned int \
- : (vec_doublee((vector unsigned int)(__a)) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)), \
- vector unsigned long long \
- : (__builtin_convertvector((vector unsigned long long)(__a), \
- vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)), \
- vector signed long long \
- : (__builtin_convertvector((vector signed long long)(__a), \
- vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)))
+ _Generic((__a), \
+ vector signed int: ( \
+ vec_doublee((vector signed int)(__a)) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)), \
+ vector unsigned int: ( \
+ vec_doublee((vector unsigned int)(__a)) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)), \
+ vector unsigned long long: ( \
+ __builtin_convertvector((vector unsigned long long)(__a), \
+ vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)), \
+ vector signed long long: ( \
+ __builtin_convertvector((vector signed long long)(__a), \
+ vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)))
#endif // __VSX__
/* vec_vcfsx */
@@ -3281,27 +3289,27 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __VSX__
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_cts(__a, __b) \
- _Generic((__a), vector float \
- : (vector signed int)__builtin_altivec_vctsxs((vector float)(__a), \
- (__b)), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: (vector signed int)__builtin_altivec_vctsxs( \
+ (vector float)(__a), ((__b)&0x1F)), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
(vector signed long long)__builtin_vsx_xvcvdpsxws(__ret); \
}))
#else // __XL_COMPAT_ALTIVEC__
#define vec_cts(__a, __b) \
- _Generic((__a), vector float \
- : (vector signed int)__builtin_altivec_vctsxs((vector float)(__a), \
- (__b)), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: (vector signed int)__builtin_altivec_vctsxs( \
+ (vector float)(__a), ((__b)&0x1F)), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
(vector signed long long)__builtin_convertvector( \
__ret, vector signed long long); \
@@ -3320,27 +3328,27 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __VSX__
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_ctu(__a, __b) \
- _Generic((__a), vector float \
- : (vector unsigned int)__builtin_altivec_vctuxs( \
- (vector float)(__a), (__b)), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: (vector unsigned int)__builtin_altivec_vctuxs( \
+ (vector float)(__a), ((__b)&0x1F)), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
(vector unsigned long long)__builtin_vsx_xvcvdpuxws(__ret); \
}))
#else // __XL_COMPAT_ALTIVEC__
#define vec_ctu(__a, __b) \
- _Generic((__a), vector float \
- : (vector unsigned int)__builtin_altivec_vctuxs( \
- (vector float)(__a), (__b)), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: (vector unsigned int)__builtin_altivec_vctuxs( \
+ (vector float)(__a), ((__b)&0x1F)), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
(vector unsigned long long)__builtin_convertvector( \
__ret, vector unsigned long long); \
@@ -3355,60 +3363,62 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __VSX__
#define vec_ctsl(__a, __b) \
- _Generic((__a), vector float \
- : __extension__({ \
- vector float __ret = \
- (vector float)(__a) * \
- (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
- __builtin_vsx_xvcvspsxds( \
- __builtin_vsx_xxsldwi(__ret, __ret, 1)); \
- }), \
- vector double \
- : __extension__({ \
- vector double __ret = \
- (vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
- << 52); \
- __builtin_convertvector(__ret, vector signed long long); \
- }))
+ _Generic( \
+ (__a), vector float \
+ : __extension__({ \
+ vector float __ret = \
+ (vector float)(__a) * \
+ (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \
+ __builtin_vsx_xvcvspsxds(__builtin_vsx_xxsldwi(__ret, __ret, 1)); \
+ }), \
+ vector double \
+ : __extension__({ \
+ vector double __ret = \
+ (vector double)(__a) * \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
+ << 52); \
+ __builtin_convertvector(__ret, vector signed long long); \
+ }))
/* vec_ctul */
#define vec_ctul(__a, __b) \
- _Generic((__a), vector float \
- : __extension__({ \
- vector float __ret = \
- (vector float)(__a) * \
- (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
- __builtin_vsx_xvcvspuxds( \
- __builtin_vsx_xxsldwi(__ret, __ret, 1)); \
- }), \
- vector double \
- : __extension__({ \
- vector double __ret = \
- (vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
- << 52); \
- __builtin_convertvector(__ret, vector unsigned long long); \
- }))
+ _Generic( \
+ (__a), vector float \
+ : __extension__({ \
+ vector float __ret = \
+ (vector float)(__a) * \
+ (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \
+ __builtin_vsx_xvcvspuxds(__builtin_vsx_xxsldwi(__ret, __ret, 1)); \
+ }), \
+ vector double \
+ : __extension__({ \
+ vector double __ret = \
+ (vector double)(__a) * \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
+ << 52); \
+ __builtin_convertvector(__ret, vector unsigned long long); \
+ }))
#endif
#else // __LITTLE_ENDIAN__
/* vec_ctsl */
#ifdef __VSX__
#define vec_ctsl(__a, __b) \
- _Generic((__a), vector float \
- : __extension__({ \
- vector float __ret = \
- (vector float)(__a) * \
- (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
- __builtin_vsx_xvcvspsxds(__ret); \
- }), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: __extension__({ \
+ vector float __ret = \
+ (vector float)(__a) * \
+ (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \
+ __builtin_vsx_xvcvspsxds(__ret); \
+ }), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
__builtin_convertvector(__ret, vector signed long long); \
}))
@@ -3420,14 +3430,16 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
: __extension__({ \
vector float __ret = \
(vector float)(__a) * \
- (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
+ (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) \
+ << 23); \
__builtin_vsx_xvcvspuxds(__ret); \
}), \
vector double \
: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
__builtin_convertvector(__ret, vector unsigned long long); \
}))
diff --git a/contrib/llvm-project/clang/lib/Headers/amxcomplexintrin.h b/contrib/llvm-project/clang/lib/Headers/amxcomplexintrin.h
new file mode 100644
index 000000000000..84ef972fcadf
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/amxcomplexintrin.h
@@ -0,0 +1,169 @@
+/*===--------- amxcomplexintrin.h - AMXCOMPLEX intrinsics -*- C++ -*---------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===------------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <amxcomplexintrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __AMX_COMPLEXINTRIN_H
+#define __AMX_COMPLEXINTRIN_H
+#ifdef __x86_64__
+
+#define __DEFAULT_FN_ATTRS_COMPLEX \
+ __attribute__((__always_inline__, __nodebug__, __target__("amx-complex")))
+
+/// Perform matrix multiplication of two tiles containing complex elements and
+/// accumulate the results into a packed single precision tile. Each dword
+/// element in input tiles \a a and \a b is interpreted as a complex number
+/// with FP16 real part and FP16 imaginary part.
+/// Calculates the imaginary part of the result. For each possible combination
+/// of (row of \a a, column of \a b), it performs a set of multiplication
+/// and accumulations on all corresponding complex numbers (one from \a a
+/// and one from \a b). The imaginary part of the \a a element is multiplied
+/// with the real part of the corresponding \a b element, and the real part
+/// of the \a a element is multiplied with the imaginary part of the
+/// corresponding \a b elements. The two accumulated results are added, and
+/// then accumulated into the corresponding row and column of \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// void _tile_cmmimfp16ps(__tile dst, __tile a, __tile b);
+/// \endcode
+///
+/// \code{.operation}
+/// FOR m := 0 TO dst.rows - 1
+/// tmp := dst.row[m]
+/// FOR k := 0 TO (a.colsb / 4) - 1
+/// FOR n := 0 TO (dst.colsb / 4) - 1
+/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+1])
+/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+0])
+/// ENDFOR
+/// ENDFOR
+/// write_row_and_zero(dst, m, tmp, dst.colsb)
+/// ENDFOR
+/// zero_upper_rows(dst, dst.rows)
+/// zero_tileconfig_start()
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TCMMIMFP16PS instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param a
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param b
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_cmmimfp16ps(dst, a, b) __builtin_ia32_tcmmimfp16ps(dst, a, b)
+
+/// Perform matrix multiplication of two tiles containing complex elements and
+/// accumulate the results into a packed single precision tile. Each dword
+/// element in input tiles \a a and \a b is interpreted as a complex number
+/// with FP16 real part and FP16 imaginary part.
+/// Calculates the real part of the result. For each possible combination
+/// of (row of \a a, column of \a b), it performs a set of multiplication
+/// and accumulations on all corresponding complex numbers (one from \a a
+/// and one from \a b). The real part of the \a a element is multiplied
+/// with the real part of the corresponding \a b element, and the negated
+/// imaginary part of the \a a element is multiplied with the imaginary
+/// part of the corresponding \a b elements. The two accumulated results
+/// are added, and then accumulated into the corresponding row and column
+/// of \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// void _tile_cmmrlfp16ps(__tile dst, __tile a, __tile b);
+/// \endcode
+///
+/// \code{.operation}
+/// FOR m := 0 TO dst.rows - 1
+/// tmp := dst.row[m]
+/// FOR k := 0 TO (a.colsb / 4) - 1
+/// FOR n := 0 TO (dst.colsb / 4) - 1
+/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+0])
+/// tmp.fp32[n] += FP32(-a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+1])
+/// ENDFOR
+/// ENDFOR
+/// write_row_and_zero(dst, m, tmp, dst.colsb)
+/// ENDFOR
+/// zero_upper_rows(dst, dst.rows)
+/// zero_tileconfig_start()
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TCMMRLFP16PS instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param a
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param b
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_cmmrlfp16ps(dst, a, b) __builtin_ia32_tcmmrlfp16ps(dst, a, b)
+
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX
+_tile_cmmimfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
+ _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+ return __builtin_ia32_tcmmimfp16ps_internal(m, n, k, dst, src1, src2);
+}
+
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX
+_tile_cmmrlfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
+ _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+ return __builtin_ia32_tcmmrlfp16ps_internal(m, n, k, dst, src1, src2);
+}
+
+/// Perform matrix multiplication of two tiles containing complex elements and
+/// accumulate the results into a packed single precision tile. Each dword
+/// element in input tiles src0 and src1 is interpreted as a complex number with
+/// FP16 real part and FP16 imaginary part.
+/// This function calculates the imaginary part of the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TCMMIMFP16PS </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_COMPLEX
+static void __tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
+ dst->tile = _tile_cmmimfp16ps_internal(src0.row, src1.col, src0.col,
+ dst->tile, src0.tile, src1.tile);
+}
+
+/// Perform matrix multiplication of two tiles containing complex elements and
+/// accumulate the results into a packed single precision tile. Each dword
+/// element in input tiles src0 and src1 is interpreted as a complex number with
+/// FP16 real part and FP16 imaginary part.
+/// This function calculates the real part of the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TCMMRLFP16PS </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_COMPLEX
+static void __tile_cmmrlfp16ps(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
+ dst->tile = _tile_cmmrlfp16ps_internal(src0.row, src1.col, src0.col,
+ dst->tile, src0.tile, src1.tile);
+}
+
+#endif // __x86_64__
+#endif // __AMX_COMPLEXINTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/arm_acle.h b/contrib/llvm-project/clang/lib/Headers/arm_acle.h
index e086f1f02dad..c208512bab59 100644
--- a/contrib/llvm-project/clang/lib/Headers/arm_acle.h
+++ b/contrib/llvm-project/clang/lib/Headers/arm_acle.h
@@ -138,28 +138,32 @@ __rorl(unsigned long __x, uint32_t __y) {
/* CLZ */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clz(uint32_t __t) {
- return (uint32_t)__builtin_clz(__t);
+ return __builtin_arm_clz(__t);
}
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzl(unsigned long __t) {
- return (unsigned long)__builtin_clzl(__t);
+#if __SIZEOF_LONG__ == 4
+ return __builtin_arm_clz(__t);
+#else
+ return __builtin_arm_clz64(__t);
+#endif
}
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzll(uint64_t __t) {
- return (uint64_t)__builtin_clzll(__t);
+ return __builtin_arm_clz64(__t);
}
/* CLS */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__cls(uint32_t __t) {
return __builtin_arm_cls(__t);
}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
return __builtin_arm_cls(__t);
@@ -168,7 +172,7 @@ __clsl(unsigned long __t) {
#endif
}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsll(uint64_t __t) {
return __builtin_arm_cls64(__t);
}
diff --git a/contrib/llvm-project/clang/lib/Headers/avx2intrin.h b/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
index f8521e7d72b5..8f2de05674c8 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
@@ -19,128 +19,539 @@
#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(128)))
/* SSE4 Multiple Packed Sums of Absolute Difference. */
+/// Computes sixteen sum of absolute difference (SAD) operations on sets of
+/// four unsigned 8-bit integers from the 256-bit integer vectors \a X and
+/// \a Y.
+///
+/// Eight SAD results are computed using the lower half of the input
+/// vectors, and another eight using the upper half. These 16-bit values
+/// are returned in the lower and upper halves of the 256-bit result,
+/// respectively.
+///
+/// A single SAD operation selects four bytes from \a X and four bytes from
+/// \a Y as input. It computes the differences between each \a X byte and
+/// the corresponding \a Y byte, takes the absolute value of each
+/// difference, and sums these four values to form one 16-bit result. The
+/// intrinsic computes 16 of these results with different sets of input
+/// bytes.
+///
+/// For each set of eight results, the SAD operations use the same four
+/// bytes from \a Y; the starting bit position for these four bytes is
+/// specified by \a M[1:0] times 32. The eight operations use successive
+/// sets of four bytes from \a X; the starting bit position for the first
+/// set of four bytes is specified by \a M[2] times 32. These bit positions
+/// are all relative to the 128-bit lane for each set of eight operations.
+///
+/// \code{.operation}
+/// r := 0
+/// FOR i := 0 TO 1
+/// j := i*3
+/// Ybase := M[j+1:j]*32 + i*128
+/// Xbase := M[j+2]*32 + i*128
+/// FOR k := 0 TO 3
+/// temp0 := ABS(X[Xbase+7:Xbase] - Y[Ybase+7:Ybase])
+/// temp1 := ABS(X[Xbase+15:Xbase+8] - Y[Ybase+15:Ybase+8])
+/// temp2 := ABS(X[Xbase+23:Xbase+16] - Y[Ybase+23:Ybase+16])
+/// temp3 := ABS(X[Xbase+31:Xbase+24] - Y[Ybase+31:Ybase+24])
+/// result[r+15:r] := temp0 + temp1 + temp2 + temp3
+/// Xbase := Xbase + 8
+/// r := r + 16
+/// ENDFOR
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_mpsadbw_epu8(__m256i X, __m256i Y, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VMPSADBW instruction.
+///
+/// \param X
+/// A 256-bit integer vector containing one of the inputs.
+/// \param Y
+/// A 256-bit integer vector containing one of the inputs.
+/// \param M
+/// An unsigned immediate value specifying the starting positions of the
+/// bytes to operate on.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
#define _mm256_mpsadbw_epu8(X, Y, M) \
((__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \
(__v32qi)(__m256i)(Y), (int)(M)))
+/// Computes the absolute value of each signed byte in the 256-bit integer
+/// vector \a __a and returns each value in the corresponding byte of
+/// the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi8(__m256i __a)
{
return (__m256i)__builtin_elementwise_abs((__v32qs)__a);
}
+/// Computes the absolute value of each signed 16-bit element in the 256-bit
+/// vector of [16 x i16] in \a __a and returns each value in the
+/// corresponding element of the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi16(__m256i __a)
{
return (__m256i)__builtin_elementwise_abs((__v16hi)__a);
}
+/// Computes the absolute value of each signed 32-bit element in the 256-bit
+/// vector of [8 x i32] in \a __a and returns each value in the
+/// corresponding element of the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi32(__m256i __a)
{
return (__m256i)__builtin_elementwise_abs((__v8si)__a);
}
+/// Converts the elements of two 256-bit vectors of [16 x i16] to 8-bit
+/// integers using signed saturation, and returns the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// k := i*8
+/// result[7+k:k] := SATURATE8(__a[15+j:j])
+/// result[71+k:64+k] := SATURATE8(__b[15+j:j])
+/// result[135+k:128+k] := SATURATE8(__a[143+j:128+j])
+/// result[199+k:192+k] := SATURATE8(__b[143+j:128+j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPACKSSWB instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] used to generate result[63:0] and
+/// result[191:128].
+/// \param __b
+/// A 256-bit vector of [16 x i16] used to generate result[127:64] and
+/// result[255:192].
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b);
}
+/// Converts the elements of two 256-bit vectors of [8 x i32] to 16-bit
+/// integers using signed saturation, and returns the resulting 256-bit
+/// vector of [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// k := i*16
+/// result[15+k:k] := SATURATE16(__a[31+j:j])
+/// result[79+k:64+k] := SATURATE16(__b[31+j:j])
+/// result[143+k:128+k] := SATURATE16(__a[159+j:128+j])
+/// result[207+k:192+k] := SATURATE16(__b[159+j:128+j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPACKSSDW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] used to generate result[63:0] and
+/// result[191:128].
+/// \param __b
+/// A 256-bit vector of [8 x i32] used to generate result[127:64] and
+/// result[255:192].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packs_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b);
}
+/// Converts elements from two 256-bit vectors of [16 x i16] to 8-bit integers
+/// using unsigned saturation, and returns the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// k := i*8
+/// result[7+k:k] := SATURATE8U(__a[15+j:j])
+/// result[71+k:64+k] := SATURATE8U(__b[15+j:j])
+/// result[135+k:128+k] := SATURATE8U(__a[143+j:128+j])
+/// result[199+k:192+k] := SATURATE8U(__b[143+j:128+j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPACKUSWB instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] used to generate result[63:0] and
+/// result[191:128].
+/// \param __b
+/// A 256-bit vector of [16 x i16] used to generate result[127:64] and
+/// result[255:192].
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packus_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b);
}
+/// Converts elements from two 256-bit vectors of [8 x i32] to 16-bit integers
+/// using unsigned saturation, and returns the resulting 256-bit vector of
+/// [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// k := i*16
+/// result[15+k:k] := SATURATE16U(__V1[31+j:j])
+/// result[79+k:64+k] := SATURATE16U(__V2[31+j:j])
+/// result[143+k:128+k] := SATURATE16U(__V1[159+j:128+j])
+/// result[207+k:192+k] := SATURATE16U(__V2[159+j:128+j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPACKUSDW instruction.
+///
+/// \param __V1
+/// A 256-bit vector of [8 x i32] used to generate result[63:0] and
+/// result[191:128].
+/// \param __V2
+/// A 256-bit vector of [8 x i32] used to generate result[127:64] and
+/// result[255:192].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packus_epi32(__m256i __V1, __m256i __V2)
{
return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
}
+/// Adds 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors and returns the lower 8 bits of each sum in the corresponding
+/// byte of the 256-bit integer vector result (overflow is ignored).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 256-bit integer vector containing one of the source operands.
+/// \returns A 256-bit integer vector containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi8(__m256i __a, __m256i __b)
{
  // Elementwise add on the unsigned-byte view: wraparound is
  // well-defined for unsigned lanes.
  __v32qu __x = (__v32qu)__a;
  __v32qu __y = (__v32qu)__b;
  return (__m256i)(__x + __y);
}
+/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of
+/// [16 x i16] and returns the lower 16 bits of each sum in the
+/// corresponding element of the [16 x i16] result (overflow is ignored).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi16(__m256i __a, __m256i __b)
{
  // Elementwise add on the unsigned 16-bit view so overflow wraps.
  __v16hu __x = (__v16hu)__a;
  __v16hu __y = (__v16hu)__b;
  return (__m256i)(__x + __y);
}
+/// Adds 32-bit integers from corresponding elements of two 256-bit vectors of
+/// [8 x i32] and returns the lower 32 bits of each sum in the corresponding
+/// element of the [8 x i32] result (overflow is ignored).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi32(__m256i __a, __m256i __b)
{
  // Elementwise add on the unsigned 32-bit view so overflow wraps.
  __v8su __x = (__v8su)__a;
  __v8su __y = (__v8su)__b;
  return (__m256i)(__x + __y);
}
+/// Adds 64-bit integers from corresponding elements of two 256-bit vectors of
+/// [4 x i64] and returns the lower 64 bits of each sum in the corresponding
+/// element of the [4 x i64] result (overflow is ignored).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [4 x i64] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x i64] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi64(__m256i __a, __m256i __b)
{
  // Elementwise add on the unsigned 64-bit view so overflow wraps.
  __v4du __x = (__v4du)__a;
  __v4du __y = (__v4du)__b;
  return (__m256i)(__x + __y);
}
+/// Adds 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors using signed saturation, and returns each sum in the
+/// corresponding byte of the 256-bit integer vector result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 256-bit integer vector containing one of the source operands.
+/// \returns A 256-bit integer vector containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epi8(__m256i __a, __m256i __b)
{
  // Signed-byte view selects the signed-saturating form of the
  // generic elementwise builtin.
  __v32qs __x = (__v32qs)__a;
  __v32qs __y = (__v32qs)__b;
  return (__m256i)__builtin_elementwise_add_sat(__x, __y);
}
+/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of
+/// [16 x i16] using signed saturation, and returns the [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epi16(__m256i __a, __m256i __b)
{
  // Signed 16-bit view selects the signed-saturating form of the
  // generic elementwise builtin.
  __v16hi __x = (__v16hi)__a;
  __v16hi __y = (__v16hi)__b;
  return (__m256i)__builtin_elementwise_add_sat(__x, __y);
}
+/// Adds 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors using unsigned saturation, and returns each sum in the
+/// corresponding byte of the 256-bit integer vector result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDUSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 256-bit integer vector containing one of the source operands.
+/// \returns A 256-bit integer vector containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epu8(__m256i __a, __m256i __b)
{
  // Unsigned-byte view selects the unsigned-saturating form of the
  // generic elementwise builtin.
  __v32qu __x = (__v32qu)__a;
  __v32qu __y = (__v32qu)__b;
  return (__m256i)__builtin_elementwise_add_sat(__x, __y);
}
+/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of
+/// [16 x i16] using unsigned saturation, and returns the [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDUSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epu16(__m256i __a, __m256i __b)
{
  // Unsigned 16-bit view selects the unsigned-saturating form of the
  // generic elementwise builtin.
  __v16hu __x = (__v16hu)__a;
  __v16hu __y = (__v16hu)__b;
  return (__m256i)__builtin_elementwise_add_sat(__x, __y);
}
+/// Uses the lower half of the 256-bit vector \a a as the upper half of a
+/// temporary 256-bit value, and the lower half of the 256-bit vector \a b
+/// as the lower half of the temporary value. Right-shifts the temporary
+/// value by \a n bytes, and uses the lower 16 bytes of the shifted value
+/// as the lower 16 bytes of the result. Uses the upper halves of \a a and
+/// \a b to make another temporary value, right shifts by \a n, and uses
+/// the lower 16 bytes of the shifted value as the upper 16 bytes of the
+/// result.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_alignr_epi8(__m256i a, __m256i b, const int n);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPALIGNR instruction.
+///
+/// \param a
+/// A 256-bit integer vector containing source values.
+/// \param b
+/// A 256-bit integer vector containing source values.
+/// \param n
+/// An immediate value specifying the number of bytes to shift.
+/// \returns A 256-bit integer vector containing the result.
#define _mm256_alignr_epi8(a, b, n) \
((__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
(__v32qi)(__m256i)(b), (n))) /* macro, not a function: the builtin requires the shift count to be a compile-time immediate */
+/// Computes the bitwise AND of the 256-bit integer vectors in \a __a and
+/// \a __b.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPAND instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_and_si256(__m256i __a, __m256i __b)
{
  // Bitwise AND is lane-agnostic; any element view works, the header
  // conventionally uses the 64-bit one.
  __v4du __x = (__v4du)__a;
  __v4du __y = (__v4du)__b;
  return (__m256i)(__x & __y);
}
+/// Computes the bitwise AND of the 256-bit integer vector in \a __b with
+/// the bitwise NOT of the 256-bit integer vector in \a __a.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPANDN instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_andnot_si256(__m256i __a, __m256i __b)
{
  // Note the operand roles: the FIRST argument is complemented,
  // matching the VPANDN instruction.
  __v4du __mask = ~(__v4du)__a;
  __v4du __val = (__v4du)__b;
  return (__m256i)(__mask & __val);
}
+/// Computes the averages of the corresponding unsigned bytes in the two
+/// 256-bit integer vectors in \a __a and \a __b and returns each
+/// average in the corresponding byte of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := (__a[j+7:j] + __b[j+7:j] + 1) >> 1
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPAVGB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_avg_epu8(__m256i __a, __m256i __b)
{
  // Rounding unsigned byte average, lowered directly to VPAVGB.
  __v32qi __x = (__v32qi)__a;
  __v32qi __y = (__v32qi)__b;
  return (__m256i)__builtin_ia32_pavgb256(__x, __y);
}
+/// Computes the averages of the corresponding unsigned 16-bit integers in
+/// the two 256-bit vectors of [16 x i16] in \a __a and \a __b and returns
+/// each average in the corresponding element of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := (__a[j+15:j] + __b[j+15:j] + 1) >> 1
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPAVGW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_avg_epu16(__m256i __a, __m256i __b)
{
  // Rounding unsigned 16-bit average, lowered directly to VPAVGW.
  __v16hi __x = (__v16hi)__a;
  __v16hi __y = (__v16hi)__b;
  return (__m256i)__builtin_ia32_pavgw256(__x, __y);
}
+/// Merges 8-bit integer values from either of the two 256-bit vectors
+/// \a __V1 or \a __V2, as specified by the 256-bit mask \a __M and returns
+/// the resulting 256-bit integer vector.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// IF __M[7+j] == 0
+/// result[7+j:j] := __V1[7+j:j]
+/// ELSE
+/// result[7+j:j] := __V2[7+j:j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBLENDVB instruction.
+///
+/// \param __V1
+/// A 256-bit integer vector containing source values.
+/// \param __V2
+/// A 256-bit integer vector containing source values.
+/// \param __M
+/// A 256-bit integer vector, with bit [7] of each byte specifying the
+/// source for each corresponding byte of the result. When the mask bit
+/// is 0, the byte is copied from \a __V1; otherwise, it is copied from
+/// \a __V2.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
{
@@ -148,34 +559,171 @@ _mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
(__v32qi)__M);
}
+/// Merges 16-bit integer values from either of the two 256-bit vectors
+/// \a V1 or \a V2, as specified by the immediate integer operand \a M,
+/// and returns the resulting 256-bit vector of [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// IF M[i] == 0
+/// result[15+j:j] := V1[15+j:j]
+/// result[143+j:128+j] := V1[143+j:128+j]
+/// ELSE
+/// result[15+j:j] := V2[15+j:j]
+/// result[143+j:128+j] := V2[143+j:128+j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_blend_epi16(__m256i V1, __m256i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPBLENDW instruction.
+///
+/// \param V1
+/// A 256-bit vector of [16 x i16] containing source values.
+/// \param V2
+/// A 256-bit vector of [16 x i16] containing source values.
+/// \param M
+/// An immediate 8-bit integer operand, with bits [7:0] specifying the
+/// source for each element of the result. The position of the mask bit
+/// corresponds to the index of a copied value. When a mask bit is 0, the
+/// element is copied from \a V1; otherwise, it is copied from \a V2.
+/// \a M[0] determines the source for elements 0 and 8, \a M[1] for
+/// elements 1 and 9, and so forth.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
#define _mm256_blend_epi16(V1, V2, M) \
((__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \
(__v16hi)(__m256i)(V2), (int)(M))) /* macro, not a function: VPBLENDW encodes M as an 8-bit immediate */
+/// Compares corresponding bytes in the 256-bit integer vectors in \a __a and
+/// \a __b for equality and returns the outcomes in the corresponding
+/// bytes of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := (__a[j+7:j] == __b[j+7:j]) ? 0xFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the inputs.
+/// \param __b
+/// A 256-bit integer vector containing one of the inputs.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi8(__m256i __a, __m256i __b)
{
  // Vector == yields all-ones (0xFF) per matching byte, 0 otherwise.
  __v32qi __x = (__v32qi)__a;
  __v32qi __y = (__v32qi)__b;
  return (__m256i)(__x == __y);
}
+/// Compares corresponding elements in the 256-bit vectors of [16 x i16] in
+/// \a __a and \a __b for equality and returns the outcomes in the
+/// corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := (__a[j+15:j] == __b[j+15:j]) ? 0xFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the inputs.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi16(__m256i __a, __m256i __b)
{
  // Vector == yields all-ones (0xFFFF) per matching element.
  __v16hi __x = (__v16hi)__a;
  __v16hi __y = (__v16hi)__b;
  return (__m256i)(__x == __y);
}
+/// Compares corresponding elements in the 256-bit vectors of [8 x i32] in
+/// \a __a and \a __b for equality and returns the outcomes in the
+/// corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// result[j+31:j] := (__a[j+31:j] == __b[j+31:j]) ? 0xFFFFFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the inputs.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi32(__m256i __a, __m256i __b)
{
  // Vector == yields all-ones (0xFFFFFFFF) per matching element.
  __v8si __x = (__v8si)__a;
  __v8si __y = (__v8si)__b;
  return (__m256i)(__x == __y);
}
+/// Compares corresponding elements in the 256-bit vectors of [4 x i64] in
+/// \a __a and \a __b for equality and returns the outcomes in the
+/// corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// result[j+63:j] := (__a[j+63:j] == __b[j+63:j]) ? 0xFFFFFFFFFFFFFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [4 x i64] containing one of the inputs.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi64(__m256i __a, __m256i __b)
{
  // Vector == yields an all-ones 64-bit lane per matching element.
  __v4di __x = (__v4di)__a;
  __v4di __y = (__v4di)__b;
  return (__m256i)(__x == __y);
}
+/// Compares corresponding signed bytes in the 256-bit integer vectors in
+/// \a __a and \a __b for greater-than and returns the outcomes in the
+/// corresponding bytes of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := (__a[j+7:j] > __b[j+7:j]) ? 0xFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the inputs.
+/// \param __b
+/// A 256-bit integer vector containing one of the inputs.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
{
@@ -184,138 +732,575 @@ _mm256_cmpgt_epi8(__m256i __a, __m256i __b)
return (__m256i)((__v32qs)__a > (__v32qs)__b);
}
+/// Compares corresponding signed elements in the 256-bit vectors of
+/// [16 x i16] in \a __a and \a __b for greater-than and returns the
+/// outcomes in the corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := (__a[j+15:j] > __b[j+15:j]) ? 0xFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the inputs.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi16(__m256i __a, __m256i __b)
{
  // Signed 16-bit view gives the signed greater-than of VPCMPGTW;
  // each true lane becomes all-ones.
  __v16hi __x = (__v16hi)__a;
  __v16hi __y = (__v16hi)__b;
  return (__m256i)(__x > __y);
}
+/// Compares corresponding signed elements in the 256-bit vectors of
+/// [8 x i32] in \a __a and \a __b for greater-than and returns the
+/// outcomes in the corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// result[j+31:j] := (__a[j+31:j] > __b[j+31:j]) ? 0xFFFFFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the inputs.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi32(__m256i __a, __m256i __b)
{
  // Signed 32-bit view gives the signed greater-than of VPCMPGTD;
  // each true lane becomes all-ones.
  __v8si __x = (__v8si)__a;
  __v8si __y = (__v8si)__b;
  return (__m256i)(__x > __y);
}
+/// Compares corresponding signed elements in the 256-bit vectors of
+/// [4 x i64] in \a __a and \a __b for greater-than and returns the
+/// outcomes in the corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// result[j+63:j] := (__a[j+63:j] > __b[j+63:j]) ? 0xFFFFFFFFFFFFFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [4 x i64] containing one of the inputs.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi64(__m256i __a, __m256i __b)
{
  // Signed 64-bit view gives the signed greater-than of VPCMPGTQ;
  // each true lane becomes all-ones.
  __v4di __x = (__v4di)__a;
  __v4di __y = (__v4di)__b;
  return (__m256i)(__x > __y);
}
+/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit
+/// vectors of [16 x i16] and returns the lower 16 bits of each sum in an
+/// element of the [16 x i16] result (overflow is ignored). Sums from
+/// \a __a are returned in the lower 64 bits of each 128-bit half of the
+/// result; sums from \a __b are returned in the upper 64 bits of each
+/// 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+15:j] := __a[j+15:j] + __a[j+31:j+16]
+/// result[j+31:j+16] := __a[j+47:j+32] + __a[j+63:j+48]
+/// result[j+47:j+32] := __a[j+79:j+64] + __a[j+95:j+80]
+/// result[j+63:j+48] := __a[j+111:j+96] + __a[j+127:j+112]
+/// result[j+79:j+64] := __b[j+15:j] + __b[j+31:j+16]
+/// result[j+95:j+80] := __b[j+47:j+32] + __b[j+63:j+48]
+/// result[j+111:j+96] := __b[j+79:j+64] + __b[j+95:j+80]
+/// result[j+127:j+112] := __b[j+111:j+96] + __b[j+127:j+112]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hadd_epi16(__m256i __a, __m256i __b)
{
  // Horizontal pairwise add; lowered directly to VPHADDW, which
  // operates independently on each 128-bit half.
  __v16hi __x = (__v16hi)__a;
  __v16hi __y = (__v16hi)__b;
  return (__m256i)__builtin_ia32_phaddw256(__x, __y);
}
+/// Horizontally adds the adjacent pairs of 32-bit integers from two 256-bit
+/// vectors of [8 x i32] and returns the lower 32 bits of each sum in an
+/// element of the [8 x i32] result (overflow is ignored). Sums from \a __a
+/// are returned in the lower 64 bits of each 128-bit half of the result;
+/// sums from \a __b are returned in the upper 64 bits of each 128-bit half
+/// of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+31:j] := __a[j+31:j] + __a[j+63:j+32]
+/// result[j+63:j+32] := __a[j+95:j+64] + __a[j+127:j+96]
+/// result[j+95:j+64] := __b[j+31:j] + __b[j+63:j+32]
+/// result[j+127:j+96] := __b[j+95:j+64] + __b[j+127:j+96]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hadd_epi32(__m256i __a, __m256i __b)
{
  // Horizontal pairwise add; lowered directly to VPHADDD, which
  // operates independently on each 128-bit half.
  __v8si __x = (__v8si)__a;
  __v8si __y = (__v8si)__b;
  return (__m256i)__builtin_ia32_phaddd256(__x, __y);
}
+/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit
+/// vectors of [16 x i16] using signed saturation and returns each sum in
+/// an element of the [16 x i16] result. Sums from \a __a are returned in
+/// the lower 64 bits of each 128-bit half of the result; sums from \a __b
+/// are returned in the upper 64 bits of each 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+15:j] := SATURATE16(__a[j+15:j] + __a[j+31:j+16])
+/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] + __a[j+63:j+48])
+/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] + __a[j+95:j+80])
+/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] + __a[j+127:j+112])
+/// result[j+79:j+64] := SATURATE16(__b[j+15:j] + __b[j+31:j+16])
+/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] + __b[j+63:j+48])
+/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] + __b[j+95:j+80])
+/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] + __b[j+127:j+112])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hadds_epi16(__m256i __a, __m256i __b)
{
  // Horizontal pairwise add with signed saturation (VPHADDSW).
  __v16hi __x = (__v16hi)__a;
  __v16hi __y = (__v16hi)__b;
  return (__m256i)__builtin_ia32_phaddsw256(__x, __y);
}
+/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit
+/// vectors of [16 x i16] and returns the lower 16 bits of each difference
+/// in an element of the [16 x i16] result (overflow is ignored).
+/// Differences from \a __a are returned in the lower 64 bits of each
+/// 128-bit half of the result; differences from \a __b are returned in the
+/// upper 64 bits of each 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+15:j] := __a[j+15:j] - __a[j+31:j+16]
+/// result[j+31:j+16] := __a[j+47:j+32] - __a[j+63:j+48]
+/// result[j+47:j+32] := __a[j+79:j+64] - __a[j+95:j+80]
+/// result[j+63:j+48] := __a[j+111:j+96] - __a[j+127:j+112]
+/// result[j+79:j+64] := __b[j+15:j] - __b[j+31:j+16]
+/// result[j+95:j+80] := __b[j+47:j+32] - __b[j+63:j+48]
+/// result[j+111:j+96] := __b[j+79:j+64] - __b[j+95:j+80]
+/// result[j+127:j+112] := __b[j+111:j+96] - __b[j+127:j+112]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hsub_epi16(__m256i __a, __m256i __b)
{
  // Horizontal pairwise subtract; lowered directly to VPHSUBW, which
  // operates independently on each 128-bit half.
  __v16hi __x = (__v16hi)__a;
  __v16hi __y = (__v16hi)__b;
  return (__m256i)__builtin_ia32_phsubw256(__x, __y);
}
+/// Horizontally subtracts adjacent pairs of 32-bit integers from two 256-bit
+/// vectors of [8 x i32] and returns the lower 32 bits of each difference in
+/// an element of the [8 x i32] result (overflow is ignored). Differences
+/// from \a __a are returned in the lower 64 bits of each 128-bit half of
+/// the result; differences from \a __b are returned in the upper 64 bits
+/// of each 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+31:j] := __a[j+31:j] - __a[j+63:j+32]
+/// result[j+63:j+32] := __a[j+95:j+64] - __a[j+127:j+96]
+/// result[j+95:j+64] := __b[j+31:j] - __b[j+63:j+32]
+/// result[j+127:j+96] := __b[j+95:j+64] - __b[j+127:j+96]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hsub_epi32(__m256i __a, __m256i __b)
{
  // Horizontal pairwise subtract; lowered directly to VPHSUBD, which
  // operates independently on each 128-bit half.
  __v8si __x = (__v8si)__a;
  __v8si __y = (__v8si)__b;
  return (__m256i)__builtin_ia32_phsubd256(__x, __y);
}
+/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit
+/// vectors of [16 x i16] using signed saturation and returns each difference in
+/// an element of the [16 x i16] result. Differences from \a __a are
+/// returned in the lower 64 bits of each 128-bit half of the result;
+/// differences from \a __b are returned in the upper 64 bits of each
+/// 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+15:j] := SATURATE16(__a[j+15:j] - __a[j+31:j+16])
+/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] - __a[j+63:j+48])
+/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] - __a[j+95:j+80])
+/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] - __a[j+127:j+112])
+/// result[j+79:j+64] := SATURATE16(__b[j+15:j] - __b[j+31:j+16])
+/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] - __b[j+63:j+48])
+/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] - __b[j+95:j+80])
+/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] - __b[j+127:j+112])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hsubs_epi16(__m256i __a, __m256i __b)
{
  // Horizontal pairwise subtract with signed saturation (VPHSUBSW).
  __v16hi __x = (__v16hi)__a;
  __v16hi __y = (__v16hi)__b;
  return (__m256i)__builtin_ia32_phsubsw256(__x, __y);
}
+/// Multiplies each unsigned byte from the 256-bit integer vector in \a __a
+/// with the corresponding signed byte from the 256-bit integer vector in
+/// \a __b, forming signed 16-bit intermediate products. Adds adjacent
+/// pairs of those products using signed saturation to form 16-bit sums
+/// returned as elements of the [16 x i16] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// temp1 := __a[j+7:j] * __b[j+7:j]
+/// temp2 := __a[j+15:j+8] * __b[j+15:j+8]
+/// result[j+15:j] := SATURATE16(temp1 + temp2)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMADDUBSW instruction.
+///
+/// \param __a
+/// A 256-bit vector containing one of the source operands.
+/// \param __b
+/// A 256-bit vector containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maddubs_epi16(__m256i __a, __m256i __b)
{
  // Unsigned-by-signed byte multiply with saturating pairwise add,
  // lowered directly to VPMADDUBSW.
  __v32qi __x = (__v32qi)__a;
  __v32qi __y = (__v32qi)__b;
  return (__m256i)__builtin_ia32_pmaddubsw256(__x, __y);
}
+/// Multiplies corresponding 16-bit elements of two 256-bit vectors of
+/// [16 x i16], forming 32-bit intermediate products, and adds pairs of
+/// those products to form 32-bit sums returned as elements of the
+/// [8 x i32] result.
+///
+/// There is only one wraparound case: when all four of the 16-bit sources
+/// are \c 0x8000, the result will be \c 0x80000000.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// temp1 := __a[j+15:j] * __b[j+15:j]
+/// temp2 := __a[j+31:j+16] * __b[j+31:j+16]
+/// result[j+31:j] := temp1 + temp2
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMADDWD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_madd_epi16(__m256i __a, __m256i __b)
{
  // 16x16->32 multiply with pairwise add, lowered directly to VPMADDWD.
  __v16hi __x = (__v16hi)__a;
  __v16hi __y = (__v16hi)__b;
  return (__m256i)__builtin_ia32_pmaddwd256(__x, __y);
}
+/// Compares the corresponding signed bytes in the two 256-bit integer vectors
+/// in \a __a and \a __b and returns the larger of each pair in the
+/// corresponding byte of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi8(__m256i __a, __m256i __b)
{
  // Signed-byte view selects the signed elementwise maximum.
  __v32qs __x = (__v32qs)__a;
  __v32qs __y = (__v32qs)__b;
  return (__m256i)__builtin_elementwise_max(__x, __y);
}
+/// Compares the corresponding signed 16-bit integers in the two 256-bit
+/// vectors of [16 x i16] in \a __a and \a __b and returns the larger of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi16(__m256i __a, __m256i __b)
{
  // Signed 16-bit view selects the signed elementwise maximum.
  __v16hi __x = (__v16hi)__a;
  __v16hi __y = (__v16hi)__b;
  return (__m256i)__builtin_elementwise_max(__x, __y);
}
+/// Compares the corresponding signed 32-bit integers in the two 256-bit
+/// vectors of [8 x i32] in \a __a and \a __b and returns the larger of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXSD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi32(__m256i __a, __m256i __b)
{
  // Signed 32-bit view selects the signed elementwise maximum.
  __v8si __x = (__v8si)__a;
  __v8si __y = (__v8si)__b;
  return (__m256i)__builtin_elementwise_max(__x, __y);
}
+/// Compares the corresponding unsigned bytes in the two 256-bit integer
+/// vectors in \a __a and \a __b and returns the larger of each pair in
+/// the corresponding byte of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXUB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu8(__m256i __a, __m256i __b)
{
  // Unsigned-byte view selects the unsigned elementwise maximum.
  __v32qu __x = (__v32qu)__a;
  __v32qu __y = (__v32qu)__b;
  return (__m256i)__builtin_elementwise_max(__x, __y);
}
+/// Compares the corresponding unsigned 16-bit integers in the two 256-bit
+/// vectors of [16 x i16] in \a __a and \a __b and returns the larger of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXUW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b);
}
+/// Compares the corresponding unsigned 32-bit integers in the two 256-bit
+/// vectors of [8 x i32] in \a __a and \a __b and returns the larger of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXUD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b);
}
+/// Compares the corresponding signed bytes in the two 256-bit integer vectors
+/// in \a __a and \a __b and returns the smaller of each pair in the
+/// corresponding byte of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b);
}
+/// Compares the corresponding signed 16-bit integers in the two 256-bit
+/// vectors of [16 x i16] in \a __a and \a __b and returns the smaller of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b);
}
+/// Compares the corresponding signed 32-bit integers in the two 256-bit
+/// vectors of [8 x i32] in \a __a and \a __b and returns the smaller of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINSD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b);
}
+/// Compares the corresponding unsigned bytes in the two 256-bit integer
+/// vectors in \a __a and \a __b and returns the smaller of each pair in
+/// the corresponding byte of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINUB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b);
}
+/// Compares the corresponding unsigned 16-bit integers in the two 256-bit
+/// vectors of [16 x i16] in \a __a and \a __b and returns the smaller of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINUW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b);
}
+/// Compares the corresponding unsigned 32-bit integers in the two 256-bit
+/// vectors of [8 x i32] in \a __a and \a __b and returns the smaller of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINUD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu32(__m256i __a, __m256i __b)
{
@@ -328,6 +1313,26 @@ _mm256_movemask_epi8(__m256i __a)
return __builtin_ia32_pmovmskb256((__v32qi)__a);
}
+/// Sign-extends bytes from the 128-bit integer vector in \a __V and returns
+/// the 16-bit values in the corresponding elements of a 256-bit vector
+/// of [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*8
+/// k := i*16
+/// result[k+15:k] := SignExtend(__V[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBW instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [16 x i16] containing the sign-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi8_epi16(__m128i __V)
{
@@ -336,6 +1341,26 @@ _mm256_cvtepi8_epi16(__m128i __V)
return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi);
}
+/// Sign-extends bytes from the lower half of the 128-bit integer vector in
+/// \a __V and returns the 32-bit values in the corresponding elements of a
+/// 256-bit vector of [8 x i32].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*8
+/// k := i*32
+/// result[k+31:k] := SignExtend(__V[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBD instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [8 x i32] containing the sign-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi8_epi32(__m128i __V)
{
@@ -344,6 +1369,25 @@ _mm256_cvtepi8_epi32(__m128i __V)
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
}
+/// Sign-extends the first four bytes from the 128-bit integer vector in
+/// \a __V and returns the 64-bit values in the corresponding elements of a
+/// 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := SignExtend(__V[7:0])
+/// result[127:64] := SignExtend(__V[15:8])
+/// result[191:128] := SignExtend(__V[23:16])
+/// result[255:192] := SignExtend(__V[31:24])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBQ instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi8_epi64(__m128i __V)
{
@@ -352,357 +1396,1571 @@ _mm256_cvtepi8_epi64(__m128i __V)
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di);
}
+/// Sign-extends 16-bit elements from the 128-bit vector of [8 x i16] in
+/// \a __V and returns the 32-bit values in the corresponding elements of a
+/// 256-bit vector of [8 x i32].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// k := i*32
+/// result[k+31:k] := SignExtend(__V[j+15:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXWD instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [8 x i32] containing the sign-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi16_epi32(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);
}
+/// Sign-extends 16-bit elements from the lower half of the 128-bit vector of
+/// [8 x i16] in \a __V and returns the 64-bit values in the corresponding
+/// elements of a 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := SignExtend(__V[15:0])
+/// result[127:64] := SignExtend(__V[31:16])
+/// result[191:128] := SignExtend(__V[47:32])
+/// result[255:192] := SignExtend(__V[63:48])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXWQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi16_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di);
}
+/// Sign-extends 32-bit elements from the 128-bit vector of [4 x i32] in
+/// \a __V and returns the 64-bit values in the corresponding elements of a
+/// 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := SignExtend(__V[31:0])
+/// result[127:64] := SignExtend(__V[63:32])
+/// result[191:128] := SignExtend(__V[95:64])
+/// result[255:192] := SignExtend(__V[127:96])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXDQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [4 x i32] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi32_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v4si)__V, __v4di);
}
+/// Zero-extends bytes from the 128-bit integer vector in \a __V and returns
+/// the 16-bit values in the corresponding elements of a 256-bit vector
+/// of [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*8
+/// k := i*16
+/// result[k+15:k] := ZeroExtend(__V[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXBW instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [16 x i16] containing the zero-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu8_epi16(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi);
}
+/// Zero-extends bytes from the lower half of the 128-bit integer vector in
+/// \a __V and returns the 32-bit values in the corresponding elements of a
+/// 256-bit vector of [8 x i32].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*8
+/// k := i*32
+/// result[k+31:k] := ZeroExtend(__V[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXBD instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [8 x i32] containing the zero-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu8_epi32(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
}
+/// Zero-extends the first four bytes from the 128-bit integer vector in
+/// \a __V and returns the 64-bit values in the corresponding elements of a
+/// 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := ZeroExtend(__V[7:0])
+/// result[127:64] := ZeroExtend(__V[15:8])
+/// result[191:128] := ZeroExtend(__V[23:16])
+/// result[255:192] := ZeroExtend(__V[31:24])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXBQ instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu8_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);
}
+/// Zero-extends 16-bit elements from the 128-bit vector of [8 x i16] in
+/// \a __V and returns the 32-bit values in the corresponding elements of a
+/// 256-bit vector of [8 x i32].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// k := i*32
+/// result[k+31:k] := ZeroExtend(__V[j+15:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXWD instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [8 x i32] containing the zero-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu16_epi32(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);
}
+/// Zero-extends 16-bit elements from the lower half of the 128-bit vector of
+/// [8 x i16] in \a __V and returns the 64-bit values in the corresponding
+/// elements of a 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := ZeroExtend(__V[15:0])
+/// result[127:64] := ZeroExtend(__V[31:16])
+/// result[191:128] := ZeroExtend(__V[47:32])
+/// result[255:192] := ZeroExtend(__V[63:48])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXWQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu16_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di);
}
+/// Zero-extends 32-bit elements from the 128-bit vector of [4 x i32] in
+/// \a __V and returns the 64-bit values in the corresponding elements of a
+/// 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := ZeroExtend(__V[31:0])
+/// result[127:64] := ZeroExtend(__V[63:32])
+/// result[191:128] := ZeroExtend(__V[95:64])
+/// result[255:192] := ZeroExtend(__V[127:96])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXDQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [4 x i32] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu32_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v4su)__V, __v4di);
}
+/// Multiplies signed 32-bit integers from even-numbered elements of two
+/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the
+/// [4 x i64] result.
+///
+/// \code{.operation}
+/// result[63:0] := __a[31:0] * __b[31:0]
+/// result[127:64] := __a[95:64] * __b[95:64]
+/// result[191:128] := __a[159:128] * __b[159:128]
+/// result[255:192] := __a[223:192] * __b[223:192]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x i64] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mul_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);
}
+/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
+/// [16 x i16], truncates the 32-bit results to the most significant 18
+/// bits, rounds by adding 1, and returns bits [16:1] of each rounded
+/// product in the [16 x i16] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// temp := ((__a[j+15:j] * __b[j+15:j]) >> 14) + 1
+/// result[j+15:j] := temp[16:1]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULHRSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the rounded products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
}
+/// Multiplies unsigned 16-bit integer elements of two 256-bit vectors of
+/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the
+/// [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULHUW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mulhi_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
}
+/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
+/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the
+/// [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULHW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mulhi_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
}
+/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
+/// [16 x i16], and returns the lower 16 bits of each 32-bit product in the
+/// [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULLW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mullo_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hu)__a * (__v16hu)__b);
}
+/// Multiplies signed 32-bit integer elements of two 256-bit vectors of
+/// [8 x i32], and returns the lower 32 bits of each 64-bit product in the
+/// [8 x i32] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULLD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mullo_epi32 (__m256i __a, __m256i __b)
{
return (__m256i)((__v8su)__a * (__v8su)__b);
}
+/// Multiplies unsigned 32-bit integers from even-numbered elements of two
+/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the
+/// [4 x i64] result.
+///
+/// \code{.operation}
+/// result[63:0] := __a[31:0] * __b[31:0]
+/// result[127:64] := __a[95:64] * __b[95:64]
+/// result[191:128] := __a[159:128] * __b[159:128]
+/// result[255:192] := __a[223:192] * __b[223:192]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULUDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x i64] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mul_epu32(__m256i __a, __m256i __b)
{
return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
}
+/// Computes the bitwise OR of the 256-bit integer vectors in \a __a and
+/// \a __b.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPOR instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_or_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a | (__v4du)__b);
}
+/// Computes four sum of absolute difference (SAD) operations on sets of eight
+/// unsigned 8-bit integers from the 256-bit integer vectors \a __a and
+/// \a __b.
+///
+/// One SAD result is computed for each set of eight bytes from \a __a and
+/// eight bytes from \a __b. The zero-extended SAD value is returned in the
+/// corresponding 64-bit element of the result.
+///
+/// A single SAD operation takes the differences between the corresponding
+/// bytes of \a __a and \a __b, takes the absolute value of each difference,
+/// and sums these eight values to form one 16-bit result. This operation
+/// is repeated four times with successive sets of eight bytes.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// temp0 := ABS(__a[j+7:j] - __b[j+7:j])
+/// temp1 := ABS(__a[j+15:j+8] - __b[j+15:j+8])
+/// temp2 := ABS(__a[j+23:j+16] - __b[j+23:j+16])
+/// temp3 := ABS(__a[j+31:j+24] - __b[j+31:j+24])
+/// temp4 := ABS(__a[j+39:j+32] - __b[j+39:j+32])
+/// temp5 := ABS(__a[j+47:j+40] - __b[j+47:j+40])
+/// temp6 := ABS(__a[j+55:j+48] - __b[j+55:j+48])
+/// temp7 := ABS(__a[j+63:j+56] - __b[j+63:j+56])
+/// result[j+15:j] := temp0 + temp1 + temp2 + temp3 +
+/// temp4 + temp5 + temp6 + temp7
+/// result[j+63:j+16] := 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSADBW instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sad_epu8(__m256i __a, __m256i __b)
{
return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b);
}
+/// Shuffles 8-bit integers in the 256-bit integer vector \a __a according
+/// to control information in the 256-bit integer vector \a __b, and
+/// returns the 256-bit result. In effect there are two separate 128-bit
+/// shuffles in the lower and upper halves.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// IF __b[j+7] == 1
+/// result[j+7:j] := 0
+/// ELSE
+/// k := __b[j+3:j] * 8
+/// IF i > 15
+/// k := k + 128
+/// FI
+/// result[j+7:j] := __a[k+7:k]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSHUFB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing source values.
+/// \param __b
+/// A 256-bit integer vector containing control information to determine
+/// what goes into the corresponding byte of the result. If bit 7 of the
+/// control byte is 1, the result byte is 0; otherwise, bits 3:0 of the
+/// control byte specify the index (within the same 128-bit half) of \a __a
+/// to copy to the result byte.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shuffle_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);
}
+/// Shuffles 32-bit integers from the 256-bit vector of [8 x i32] in \a a
+/// according to control information in the integer literal \a imm, and
+/// returns the 256-bit result. In effect there are two parallel 128-bit
+/// shuffles in the lower and upper halves.
+///
+/// \code{.operation}
+/// FOR i := 0 to 3
+/// j := i*32
+/// k := (imm >> i*2)[1:0] * 32
+/// result[j+31:j] := a[k+31:k]
+/// result[128+j+31:128+j] := a[128+k+31:128+k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_shuffle_epi32(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSHUFD instruction.
+///
+/// \param a
+/// A 256-bit vector of [8 x i32] containing source values.
+/// \param imm
+/// An immediate 8-bit value specifying which elements to copy from \a a.
+/// \a imm[1:0] specifies the index in \a a for elements 0 and 4 of the
+/// result, \a imm[3:2] specifies the index for elements 1 and 5, and so
+/// forth.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
#define _mm256_shuffle_epi32(a, imm) \
((__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm)))
+/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] in \a a
+/// according to control information in the integer literal \a imm, and
+/// returns the 256-bit result. The upper 64 bits of each 128-bit half
+/// are shuffled in parallel; the lower 64 bits of each 128-bit half are
+/// copied from \a a unchanged.
+///
+/// \code{.operation}
+/// result[63:0] := a[63:0]
+/// result[191:128] := a[191:128]
+/// FOR i := 0 TO 3
+/// j := i * 16 + 64
+/// k := (imm >> i*2)[1:0] * 16 + 64
+/// result[j+15:j] := a[k+15:k]
+/// result[128+j+15:128+j] := a[128+k+15:128+k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_shufflehi_epi16(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSHUFHW instruction.
+///
+/// \param a
+/// A 256-bit vector of [16 x i16] containing source values.
+/// \param imm
+/// An immediate 8-bit value specifying which elements to copy from \a a.
+/// \a imm[1:0] specifies the index in \a a for elements 4 and 12 of the
+/// result, \a imm[3:2] specifies the index for elements 5 and 13, and so
+/// forth. Indexes are offset by 4 (so 0 means index 4, and so forth).
+/// \returns A 256-bit vector of [16 x i16] containing the result.
#define _mm256_shufflehi_epi16(a, imm) \
((__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm)))
+/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] \a a
+/// according to control information in the integer literal \a imm, and
+/// returns the 256-bit [16 x i16] result. The lower 64 bits of each
+/// 128-bit half are shuffled; the upper 64 bits of each 128-bit half are
+/// copied from \a a unchanged.
+///
+/// \code{.operation}
+/// result[127:64] := a[127:64]
+/// result[255:192] := a[255:192]
+/// FOR i := 0 TO 3
+/// j := i * 16
+/// k := (imm >> i*2)[1:0] * 16
+/// result[j+15:j] := a[k+15:k]
+/// result[128+j+15:128+j] := a[128+k+15:128+k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_shufflelo_epi16(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSHUFLW instruction.
+///
+/// \param a
+/// A 256-bit vector of [16 x i16] to use as a source of data for the
+/// result.
+/// \param imm
+/// An immediate 8-bit value specifying which elements to copy from \a a.
+/// \a imm[1:0] specifies the index in \a a for elements 0 and 8 of the
+/// result, \a imm[3:2] specifies the index for elements 1 and 9, and so
+/// forth.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
#define _mm256_shufflelo_epi16(a, imm) \
((__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm)))
+/// Sets each byte of the result to the corresponding byte of the 256-bit
+/// integer vector in \a __a, the negative of that byte, or zero, depending
+/// on whether the corresponding byte of the 256-bit integer vector in
+/// \a __b is greater than zero, less than zero, or equal to zero,
+/// respectively.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGNB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sign_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
}
+/// Sets each element of the result to the corresponding element of the
+/// 256-bit vector of [16 x i16] in \a __a, the negative of that element,
+/// or zero, depending on whether the corresponding element of the 256-bit
+/// vector of [16 x i16] in \a __b is greater than zero, less than zero, or
+/// equal to zero, respectively.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGNW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sign_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
}
+/// Sets each element of the result to the corresponding element of the
+/// 256-bit vector of [8 x i32] in \a __a, the negative of that element, or
+/// zero, depending on whether the corresponding element of the 256-bit
+/// vector of [8 x i32] in \a __b is greater than zero, less than zero, or
+/// equal to zero, respectively.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGND instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sign_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
}
+/// Shifts each 128-bit half of the 256-bit integer vector \a a left by
+/// \a imm bytes, shifting in zero bytes, and returns the result. If \a imm
+/// is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_slli_si256(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSLLDQ instruction.
+///
+/// \param a
+/// A 256-bit integer vector to be shifted.
+/// \param imm
+/// An unsigned immediate value specifying the shift count (in bytes).
+/// \returns A 256-bit integer vector containing the result.
#define _mm256_slli_si256(a, imm) \
((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
+/// Shifts each 128-bit half of the 256-bit integer vector \a a left by
+/// \a imm bytes, shifting in zero bytes, and returns the result. If \a imm
+/// is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_bslli_epi128(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSLLDQ instruction.
+///
+/// \param a
+/// A 256-bit integer vector to be shifted.
+/// \param imm
+/// An unsigned immediate value specifying the shift count (in bytes).
+/// \returns A 256-bit integer vector containing the result.
#define _mm256_bslli_epi128(a, imm) \
((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
/// left by \a __count bits, shifting in zero bits, and returns the result.
/// If \a __count is greater than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_slli_epi16(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);
}
/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
/// left by the number of bits specified by the lower 64 bits of \a __count,
/// shifting in zero bits, and returns the result. If \a __count is greater
/// than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sll_epi16(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
/// left by \a __count bits, shifting in zero bits, and returns the result.
/// If \a __count is greater than 31, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_slli_epi32(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
/// left by the number of bits given in the lower 64 bits of \a __count,
/// shifting in zero bits, and returns the result. If \a __count is greater
/// than 31, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sll_epi32(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count);
}
/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
/// left by \a __count bits, shifting in zero bits, and returns the result.
/// If \a __count is greater than 63, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_slli_epi64(__m256i __a, int __count)
{
return __builtin_ia32_psllqi256((__v4di)__a, __count);
}
/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
/// left by the number of bits given in the lower 64 bits of \a __count,
/// shifting in zero bits, and returns the result. If \a __count is greater
/// than 63, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sll_epi64(__m256i __a, __m128i __count)
{
return __builtin_ia32_psllq256((__v4di)__a, __count);
}
/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
/// right by \a __count bits, shifting in sign bits, and returns the result.
/// If \a __count is greater than 15, each element of the result is either
/// 0 or -1 according to the corresponding input sign bit.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRAW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srai_epi16(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);
}
/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
/// right by the number of bits given in the lower 64 bits of \a __count,
/// shifting in sign bits, and returns the result. If \a __count is greater
/// than 15, each element of the result is either 0 or -1 according to the
/// corresponding input sign bit.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRAW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sra_epi16(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
/// right by \a __count bits, shifting in sign bits, and returns the result.
/// If \a __count is greater than 31, each element of the result is either
/// 0 or -1 according to the corresponding input sign bit.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRAD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srai_epi32(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
/// right by the number of bits given in the lower 64 bits of \a __count,
/// shifting in sign bits, and returns the result. If \a __count is greater
/// than 31, each element of the result is either 0 or -1 according to the
/// corresponding input sign bit.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRAD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sra_epi32(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);
}
/// Shifts each 128-bit half of the 256-bit integer vector in \a a right by
/// \a imm bytes, shifting in zero bytes, and returns the result. If
/// \a imm is greater than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_srli_si256(__m256i a, const int imm);
/// \endcode
///
/// This intrinsic corresponds to the \c VPSRLDQ instruction.
///
/// \param a
/// A 256-bit integer vector to be shifted.
/// \param imm
/// An unsigned immediate value specifying the shift count (in bytes).
/// \returns A 256-bit integer vector containing the result.
#define _mm256_srli_si256(a, imm) \
((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm)))
/// Shifts each 128-bit half of the 256-bit integer vector in \a a right by
/// \a imm bytes, shifting in zero bytes, and returns the result. If
/// \a imm is greater than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_bsrli_epi128(__m256i a, const int imm);
/// \endcode
///
/// This intrinsic corresponds to the \c VPSRLDQ instruction.
///
/// \param a
/// A 256-bit integer vector to be shifted.
/// \param imm
/// An unsigned immediate value specifying the shift count (in bytes).
/// \returns A 256-bit integer vector containing the result.
#define _mm256_bsrli_epi128(a, imm) \
((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm)))
/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
/// right by \a __count bits, shifting in zero bits, and returns the result.
/// If \a __count is greater than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srli_epi16(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);
}
/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
/// right by the number of bits given in the lower 64 bits of \a __count,
/// shifting in zero bits, and returns the result. If \a __count is greater
/// than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srl_epi16(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
/// right by \a __count bits, shifting in zero bits, and returns the result.
/// If \a __count is greater than 31, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srli_epi32(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
/// right by the number of bits given in the lower 64 bits of \a __count,
/// shifting in zero bits, and returns the result. If \a __count is greater
/// than 31, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srl_epi32(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count);
}
/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
/// right by \a __count bits, shifting in zero bits, and returns the result.
/// If \a __count is greater than 63, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srli_epi64(__m256i __a, int __count)
{
return __builtin_ia32_psrlqi256((__v4di)__a, __count);
}
/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
/// right by the number of bits given in the lower 64 bits of \a __count,
/// shifting in zero bits, and returns the result. If \a __count is greater
/// than 63, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srl_epi64(__m256i __a, __m128i __count)
{
return __builtin_ia32_psrlq256((__v4di)__a, __count);
}
/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer
/// vectors. Returns the lower 8 bits of each difference in the
/// corresponding byte of the 256-bit integer vector result (overflow is
/// ignored).
///
/// \code{.operation}
/// FOR i := 0 TO 31
/// j := i*8
/// result[j+7:j] := __a[j+7:j] - __b[j+7:j]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBB instruction.
///
/// \param __a
/// A 256-bit integer vector containing the minuends.
/// \param __b
/// A 256-bit integer vector containing the subtrahends.
/// \returns A 256-bit integer vector containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi8(__m256i __a, __m256i __b)
{
return (__m256i)((__v32qu)__a - (__v32qu)__b);
}
/// Subtracts 16-bit integers from corresponding elements of two 256-bit
/// vectors of [16 x i16]. Returns the lower 16 bits of each difference in
/// the corresponding element of the [16 x i16] result (overflow is
/// ignored).
///
/// \code{.operation}
/// FOR i := 0 TO 15
/// j := i*16
/// result[j+15:j] := __a[j+15:j] - __b[j+15:j]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing the minuends.
/// \param __b
/// A 256-bit vector of [16 x i16] containing the subtrahends.
/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hu)__a - (__v16hu)__b);
}
/// Subtracts 32-bit integers from corresponding elements of two 256-bit
/// vectors of [8 x i32]. Returns the lower 32 bits of each difference in
/// the corresponding element of the [8 x i32] result (overflow is ignored).
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*32
/// result[j+31:j] := __a[j+31:j] - __b[j+31:j]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] containing the minuends.
/// \param __b
/// A 256-bit vector of [8 x i32] containing the subtrahends.
/// \returns A 256-bit vector of [8 x i32] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8su)__a - (__v8su)__b);
}
/// Subtracts 64-bit integers from corresponding elements of two 256-bit
/// vectors of [4 x i64]. Returns the lower 64 bits of each difference in
/// the corresponding element of the [4 x i64] result (overflow is ignored).
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*64
/// result[j+63:j] := __a[j+63:j] - __b[j+63:j]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] containing the minuends.
/// \param __b
/// A 256-bit vector of [4 x i64] containing the subtrahends.
/// \returns A 256-bit vector of [4 x i64] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a - (__v4du)__b);
}
/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer
/// vectors using signed saturation, and returns each difference in the
/// corresponding byte of the 256-bit integer vector result.
///
/// \code{.operation}
/// FOR i := 0 TO 31
/// j := i*8
/// result[j+7:j] := SATURATE8(__a[j+7:j] - __b[j+7:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBSB instruction.
///
/// \param __a
/// A 256-bit integer vector containing the minuends.
/// \param __b
/// A 256-bit integer vector containing the subtrahends.
/// \returns A 256-bit integer vector containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_sub_sat((__v32qs)__a, (__v32qs)__b);
}
/// Subtracts 16-bit integers from corresponding elements of two 256-bit
/// vectors of [16 x i16] using signed saturation, and returns each
/// difference in the corresponding element of the [16 x i16] result.
///
/// \code{.operation}
/// FOR i := 0 TO 15
/// j := i*16
/// result[j+15:j] := SATURATE16(__a[j+15:j] - __b[j+15:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBSW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing the minuends.
/// \param __b
/// A 256-bit vector of [16 x i16] containing the subtrahends.
/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_sub_sat((__v16hi)__a, (__v16hi)__b);
}
/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer
/// vectors using unsigned saturation, and returns each difference in the
/// corresponding byte of the 256-bit integer vector result.
///
/// \code{.operation}
/// FOR i := 0 TO 31
/// j := i*8
/// result[j+7:j] := SATURATE8U(__a[j+7:j] - __b[j+7:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBUSB instruction.
///
/// \param __a
/// A 256-bit integer vector containing the minuends.
/// \param __b
/// A 256-bit integer vector containing the subtrahends.
/// \returns A 256-bit integer vector containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epu8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_sub_sat((__v32qu)__a, (__v32qu)__b);
}
/// Subtracts 16-bit integers from corresponding elements of two 256-bit
/// vectors of [16 x i16] using unsigned saturation, and returns each
/// difference in the corresponding element of the [16 x i16] result.
///
/// \code{.operation}
/// FOR i := 0 TO 15
/// j := i*16
/// result[j+15:j] := SATURATE16U(__a[j+15:j] - __b[j+15:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBUSW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing the minuends.
/// \param __b
/// A 256-bit vector of [16 x i16] containing the subtrahends.
/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_sub_sat((__v16hu)__a, (__v16hu)__b);
}
/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer
/// vectors in \a __a and \a __b to form the 256-bit result. Specifically,
/// uses the upper 64 bits of each 128-bit half of \a __a and \a __b as
/// input; other bits in these parameters are ignored.
///
/// \code{.operation}
/// result[7:0] := __a[71:64]
/// result[15:8] := __b[71:64]
/// result[23:16] := __a[79:72]
/// result[31:24] := __b[79:72]
/// . . .
/// result[127:120] := __b[127:120]
/// result[135:128] := __a[199:192]
/// . . .
/// result[255:248] := __b[255:248]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKHBW instruction.
///
/// \param __a
/// A 256-bit integer vector used as the source for the even-numbered bytes
/// of the result.
/// \param __b
/// A 256-bit integer vector used as the source for the odd-numbered bytes
/// of the result.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
}
/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors
/// of [16 x i16] in \a __a and \a __b to return the resulting 256-bit
/// vector of [16 x i16]. Specifically, uses the upper 64 bits of each
/// 128-bit half of \a __a and \a __b as input; other bits in these
/// parameters are ignored.
///
/// \code{.operation}
/// result[15:0] := __a[79:64]
/// result[31:16] := __b[79:64]
/// result[47:32] := __a[95:80]
/// result[63:48] := __b[95:80]
/// . . .
/// result[127:112] := __b[127:112]
/// result[143:128] := __a[207:192]
/// . . .
/// result[255:240] := __b[255:240]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKHWD instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] used as the source for the even-numbered
/// elements of the result.
/// \param __b
/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
}
/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors
/// of [8 x i32] in \a __a and \a __b to return the resulting 256-bit vector
/// of [8 x i32]. Specifically, uses the upper 64 bits of each 128-bit half
/// of \a __a and \a __b as input; other bits in these parameters are
/// ignored.
///
/// \code{.operation}
/// result[31:0] := __a[95:64]
/// result[63:32] := __b[95:64]
/// result[95:64] := __a[127:96]
/// result[127:96] := __b[127:96]
/// result[159:128] := __a[223:192]
/// result[191:160] := __b[223:192]
/// result[223:192] := __a[255:224]
/// result[255:224] := __b[255:224]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKHDQ instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] used as the source for the even-numbered
/// elements of the result.
/// \param __b
/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
}
/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors
/// of [4 x i64] in \a __a and \a __b to return the resulting 256-bit vector
/// of [4 x i64]. Specifically, uses the upper 64 bits of each 128-bit half
/// of \a __a and \a __b as input; other bits in these parameters are
/// ignored.
///
/// \code{.operation}
/// result[63:0] := __a[127:64]
/// result[127:64] := __b[127:64]
/// result[191:128] := __a[255:192]
/// result[255:192] := __b[255:192]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKHQDQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] used as the source for the even-numbered
/// elements of the result.
/// \param __b
/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi64(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3);
}
/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer
/// vectors in \a __a and \a __b to form the 256-bit result. Specifically,
/// uses the lower 64 bits of each 128-bit half of \a __a and \a __b as
/// input; other bits in these parameters are ignored.
///
/// \code{.operation}
/// result[7:0] := __a[7:0]
/// result[15:8] := __b[7:0]
/// result[23:16] := __a[15:8]
/// result[31:24] := __b[15:8]
/// . . .
/// result[127:120] := __b[63:56]
/// result[135:128] := __a[135:128]
/// . . .
/// result[255:248] := __b[191:184]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKLBW instruction.
///
/// \param __a
/// A 256-bit integer vector used as the source for the even-numbered bytes
/// of the result.
/// \param __b
/// A 256-bit integer vector used as the source for the odd-numbered bytes
/// of the result.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
}
/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors
/// of [16 x i16] in \a __a and \a __b to return the resulting 256-bit
/// vector of [16 x i16]. Specifically, uses the lower 64 bits of each
/// 128-bit half of \a __a and \a __b as input; other bits in these
/// parameters are ignored.
///
/// \code{.operation}
/// result[15:0] := __a[15:0]
/// result[31:16] := __b[15:0]
/// result[47:32] := __a[31:16]
/// result[63:48] := __b[31:16]
/// . . .
/// result[127:112] := __b[63:48]
/// result[143:128] := __a[143:128]
/// . . .
/// result[255:240] := __b[191:176]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKLWD instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] used as the source for the even-numbered
/// elements of the result.
/// \param __b
/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
}
/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors
/// of [8 x i32] in \a __a and \a __b to return the resulting 256-bit vector
/// of [8 x i32]. Specifically, uses the lower 64 bits of each 128-bit half
/// of \a __a and \a __b as input; other bits in these parameters are
/// ignored.
///
/// \code{.operation}
/// result[31:0] := __a[31:0]
/// result[63:32] := __b[31:0]
/// result[95:64] := __a[63:32]
/// result[127:96] := __b[63:32]
/// result[159:128] := __a[159:128]
/// result[191:160] := __b[159:128]
/// result[223:192] := __a[191:160]
/// result[255:224] := __b[191:160]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKLDQ instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] used as the source for the even-numbered
/// elements of the result.
/// \param __b
/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
}
/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors
/// of [4 x i64] in \a __a and \a __b to return the resulting 256-bit vector
/// of [4 x i64]. Specifically, uses the lower 64 bits of each 128-bit half
/// of \a __a and \a __b as input; other bits in these parameters are
/// ignored.
///
/// \code{.operation}
/// result[63:0] := __a[63:0]
/// result[127:64] := __b[63:0]
/// result[191:128] := __a[191:128]
/// result[255:192] := __b[191:128]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKLQDQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] used as the source for the even-numbered
/// elements of the result.
/// \param __b
/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi64(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2);
}
/// Computes the bitwise XOR of the 256-bit integer vectors in \a __a and
/// \a __b.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPXOR instruction.
///
/// \param __a
/// A 256-bit integer vector.
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_xor_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a ^ (__v4du)__b);
}
+/// Loads the 256-bit integer vector from memory \a __V using a non-temporal
+/// memory hint and returns the vector. \a __V must be aligned on a 32-byte
+/// boundary.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVNTDQA instruction.
+///
+/// \param __V
+/// A pointer to the 32-byte aligned memory containing the vector to load.
+/// \returns A 256-bit integer vector loaded from memory.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_stream_load_si256(__m256i const *__V)
{
@@ -710,30 +2968,84 @@ _mm256_stream_load_si256(__m256i const *__V)
return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V);
}
/// Broadcasts the 32-bit floating-point value from the low element of the
/// 128-bit vector of [4 x float] in \a __X to all elements of the result's
/// 128-bit vector of [4 x float].
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VBROADCASTSS instruction.
///
/// \param __X
/// A 128-bit vector of [4 x float] whose low element will be broadcast.
/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_broadcastss_ps(__m128 __X)
{
return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
}
+/// Broadcasts the 64-bit floating-point value from the low element of the
+/// 128-bit vector of [2 x double] in \a __a to both elements of the
+/// result's 128-bit vector of [2 x double].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c MOVDDUP instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] whose low element will be broadcast.
+/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_broadcastsd_pd(__m128d __a)
{
return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
}
+/// Broadcasts the 32-bit floating-point value from the low element of the
+/// 128-bit vector of [4 x float] in \a __X to all elements of the
+/// result's 256-bit vector of [8 x float].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VBROADCASTSS instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x float] whose low element will be broadcast.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_broadcastss_ps(__m128 __X)
{
return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
+/// Broadcasts the 64-bit floating-point value from the low element of the
+/// 128-bit vector of [2 x double] in \a __X to all elements of the
+/// result's 256-bit vector of [4 x double].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VBROADCASTSD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x double] whose low element will be broadcast.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_broadcastsd_pd(__m128d __X)
{
return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
}
+/// Broadcasts the 128-bit integer data from \a __X to both the lower and
+/// upper halves of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VBROADCASTI128 instruction.
+///
+/// \param __X
+/// A 128-bit integer vector to be broadcast.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastsi128_si256(__m128i __X)
{
@@ -742,295 +3054,1688 @@ _mm256_broadcastsi128_si256(__m128i __X)
#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X)
+/// Merges 32-bit integer elements from either of the two 128-bit vectors of
+/// [4 x i32] in \a V1 or \a V2 to the result's 128-bit vector of [4 x i32],
+/// as specified by the immediate integer operand \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// IF M[i] == 0
+/// result[31+j:j] := V1[31+j:j]
+/// ELSE
+/// result[31+j:j] := V2[31+j:j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_blend_epi32(__m128i V1, __m128i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPBLENDD instruction.
+///
+/// \param V1
+/// A 128-bit vector of [4 x i32] containing source values.
+/// \param V2
+/// A 128-bit vector of [4 x i32] containing source values.
+/// \param M
+/// An immediate 8-bit integer operand, with bits [3:0] specifying the
+/// source for each element of the result. The position of the mask bit
+/// corresponds to the index of a copied value. When a mask bit is 0, the
+/// element is copied from \a V1; otherwise, it is copied from \a V2.
+/// \returns A 128-bit vector of [4 x i32] containing the result.
#define _mm_blend_epi32(V1, V2, M) \
((__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
(__v4si)(__m128i)(V2), (int)(M)))
+/// Merges 32-bit integer elements from either of the two 256-bit vectors of
+/// [8 x i32] in \a V1 or \a V2 to return a 256-bit vector of [8 x i32],
+/// as specified by the immediate integer operand \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// IF M[i] == 0
+/// result[31+j:j] := V1[31+j:j]
+/// ELSE
+/// result[31+j:j] := V2[31+j:j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_blend_epi32(__m256i V1, __m256i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPBLENDD instruction.
+///
+/// \param V1
+/// A 256-bit vector of [8 x i32] containing source values.
+/// \param V2
+/// A 256-bit vector of [8 x i32] containing source values.
+/// \param M
+/// An immediate 8-bit integer operand, with bits [7:0] specifying the
+/// source for each element of the result. The position of the mask bit
+/// corresponds to the index of a copied value. When a mask bit is 0, the
+/// element is copied from \a V1; otherwise, it is copied from \a V2.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
#define _mm256_blend_epi32(V1, V2, M) \
((__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \
(__v8si)(__m256i)(V2), (int)(M)))
+/// Broadcasts the low byte from the 128-bit integer vector in \a __X to all
+/// bytes of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTB instruction.
+///
+/// \param __X
+/// A 128-bit integer vector whose low byte will be broadcast.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastb_epi8(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
+/// Broadcasts the low element from the 128-bit vector of [8 x i16] in \a __X
+/// to all elements of the result's 256-bit vector of [16 x i16].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTW instruction.
+///
+/// \param __X
+/// A 128-bit vector of [8 x i16] whose low element will be broadcast.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastw_epi16(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
+/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \a __X
+/// to all elements of the result's 256-bit vector of [8 x i32].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] whose low element will be broadcast.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastd_epi32(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
+/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \a __X
+/// to all elements of the result's 256-bit vector of [4 x i64].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTQ instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x i64] whose low element will be broadcast.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastq_epi64(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0);
}
+/// Broadcasts the low byte from the 128-bit integer vector in \a __X to all
+/// bytes of the 128-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTB instruction.
+///
+/// \param __X
+/// A 128-bit integer vector whose low byte will be broadcast.
+/// \returns A 128-bit integer vector containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastb_epi8(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
+/// Broadcasts the low element from the 128-bit vector of [8 x i16] in
+/// \a __X to all elements of the result's 128-bit vector of [8 x i16].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTW instruction.
+///
+/// \param __X
+/// A 128-bit vector of [8 x i16] whose low element will be broadcast.
+/// \returns A 128-bit vector of [8 x i16] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastw_epi16(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
-
+/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \a __X
+/// to all elements of the result's 128-bit vector of [4 x i32].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] whose low element will be broadcast.
+/// \returns A 128-bit vector of [4 x i32] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastd_epi32(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
}
+/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \a __X
+/// to both elements of the result's 128-bit vector of [2 x i64].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTQ instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x i64] whose low element will be broadcast.
+/// \returns A 128-bit vector of [2 x i64] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastq_epi64(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0);
}
+/// Sets the result's 256-bit vector of [8 x i32] to copies of elements of the
+/// 256-bit vector of [8 x i32] in \a __a as specified by indexes in the
+/// elements of the 256-bit vector of [8 x i32] in \a __b.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// k := __b[j+2:j] * 32
+/// result[j+31:j] := __a[k+31:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing the source values.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing indexes of values to use from
+/// \a __a.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);
}
+/// Sets the result's 256-bit vector of [4 x double] to copies of elements of
+/// the 256-bit vector of [4 x double] in \a V as specified by the
+/// immediate value \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// k := (M >> i*2)[1:0] * 64
+/// result[j+63:j] := V[k+63:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_permute4x64_pd(__m256d V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMPD instruction.
+///
+/// \param V
+/// A 256-bit vector of [4 x double] containing the source values.
+/// \param M
+/// An immediate 8-bit value specifying which elements to copy from \a V.
+/// \a M[1:0] specifies the index in \a V for element 0 of the result,
+/// \a M[3:2] specifies the index for element 1, and so forth.
+/// \returns A 256-bit vector of [4 x double] containing the result.
#define _mm256_permute4x64_pd(V, M) \
((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M)))
+/// Sets the result's 256-bit vector of [8 x float] to copies of elements of
+/// the 256-bit vector of [8 x float] in \a __a as specified by indexes in
+/// the elements of the 256-bit vector of [8 x i32] in \a __b.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// k := __b[j+2:j] * 32
+/// result[j+31:j] := __a[k+31:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing the source values.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing indexes of values to use from
+/// \a __a.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
{
return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
}
+/// Sets the result's 256-bit vector of [4 x i64] to copies of elements
+/// of the 256-bit vector of [4 x i64] in \a V as specified by the
+/// immediate value \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// k := (M >> i*2)[1:0] * 64
+/// result[j+63:j] := V[k+63:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_permute4x64_epi64(__m256i V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMQ instruction.
+///
+/// \param V
+/// A 256-bit vector of [4 x i64] containing the source values.
+/// \param M
+/// An immediate 8-bit value specifying which elements to copy from \a V.
+/// \a M[1:0] specifies the index in \a V for element 0 of the result,
+/// \a M[3:2] specifies the index for element 1, and so forth.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
#define _mm256_permute4x64_epi64(V, M) \
((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M)))
+/// Sets each half of the 256-bit result either to zero or to one of the
+/// four possible 128-bit halves of the 256-bit vectors \a V1 and \a V2,
+/// as specified by the immediate value \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// k := M >> (i*4)
+/// IF k[3] == 0
+/// CASE (k[1:0]) OF
+/// 0: result[127+j:j] := V1[127:0]
+/// 1: result[127+j:j] := V1[255:128]
+/// 2: result[127+j:j] := V2[127:0]
+/// 3: result[127+j:j] := V2[255:128]
+/// ESAC
+/// ELSE
+/// result[127+j:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_permute2x128_si256(__m256i V1, __m256i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERM2I128 instruction.
+///
+/// \param V1
+/// A 256-bit integer vector containing source values.
+/// \param V2
+/// A 256-bit integer vector containing source values.
+/// \param M
+/// An immediate value specifying how to form the result. Bits [3:0]
+/// control the lower half of the result, bits [7:4] control the upper half.
+/// Within each 4-bit control value, if bit 3 is 1, the result is zero,
+/// otherwise bits [1:0] determine the source as follows. \n
+/// 0: the lower half of \a V1 \n
+/// 1: the upper half of \a V1 \n
+/// 2: the lower half of \a V2 \n
+/// 3: the upper half of \a V2
+/// \returns A 256-bit integer vector containing the result.
#define _mm256_permute2x128_si256(V1, V2, M) \
((__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M)))
+/// Extracts half of the 256-bit vector \a V to the 128-bit result. If bit 0
+/// of the immediate \a M is zero, extracts the lower half of the result;
+/// otherwise, extracts the upper half.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm256_extracti128_si256(__m256i V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VEXTRACTI128 instruction.
+///
+/// \param V
+/// A 256-bit integer vector containing the source values.
+/// \param M
+/// An immediate value specifying which half of \a V to extract.
+/// \returns A 128-bit integer vector containing the result.
#define _mm256_extracti128_si256(V, M) \
((__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M)))
+/// Copies the 256-bit vector \a V1 to the result, then overwrites half of the
+/// result with the 128-bit vector \a V2. If bit 0 of the immediate \a M
+/// is zero, overwrites the lower half of the result; otherwise,
+/// overwrites the upper half.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_inserti128_si256(__m256i V1, __m128i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VINSERTI128 instruction.
+///
+/// \param V1
+/// A 256-bit integer vector containing a source value.
+/// \param V2
+/// A 128-bit integer vector containing a source value.
+/// \param M
+/// An immediate value specifying where to put \a V2 in the result.
+/// \returns A 256-bit integer vector containing the result.
#define _mm256_inserti128_si256(V1, V2, M) \
((__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \
(__v2di)(__m128i)(V2), (int)(M)))
+/// Conditionally loads eight 32-bit integer elements from memory \a __X, if
+/// the most significant bit of the corresponding element in the mask
+/// \a __M is set; otherwise, sets that element of the result to zero.
+/// Returns the 256-bit [8 x i32] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// IF __M[j+31] == 1
+/// result[j+31:j] := Load32(__X+(i*4))
+/// ELSE
+/// result[j+31:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
+///
+/// \param __X
+/// A pointer to the memory used for loading values.
+/// \param __M
+/// A 256-bit vector of [8 x i32] containing the mask bits.
+/// \returns A 256-bit vector of [8 x i32] containing the loaded or zeroed
+/// elements.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskload_epi32(int const *__X, __m256i __M)
{
return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
}
+/// Conditionally loads four 64-bit integer elements from memory \a __X, if
+/// the most significant bit of the corresponding element in the mask
+/// \a __M is set; otherwise, sets that element of the result to zero.
+/// Returns the 256-bit [4 x i64] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// IF __M[j+63] == 1
+/// result[j+63:j] := Load64(__X+(i*8))
+/// ELSE
+/// result[j+63:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
+///
+/// \param __X
+/// A pointer to the memory used for loading values.
+/// \param __M
+/// A 256-bit vector of [4 x i64] containing the mask bits.
+/// \returns A 256-bit vector of [4 x i64] containing the loaded or zeroed
+/// elements.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskload_epi64(long long const *__X, __m256i __M)
{
return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M);
}
+/// Conditionally loads four 32-bit integer elements from memory \a __X, if
+/// the most significant bit of the corresponding element in the mask
+/// \a __M is set; otherwise, sets that element of the result to zero.
+/// Returns the 128-bit [4 x i32] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// IF __M[j+31] == 1
+/// result[j+31:j] := Load32(__X+(i*4))
+/// ELSE
+/// result[j+31:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
+///
+/// \param __X
+/// A pointer to the memory used for loading values.
+/// \param __M
+/// A 128-bit vector of [4 x i32] containing the mask bits.
+/// \returns A 128-bit vector of [4 x i32] containing the loaded or zeroed
+/// elements.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskload_epi32(int const *__X, __m128i __M)
{
return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
}
+/// Conditionally loads two 64-bit integer elements from memory \a __X, if
+/// the most significant bit of the corresponding element in the mask
+/// \a __M is set; otherwise, sets that element of the result to zero.
+/// Returns the 128-bit [2 x i64] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*64
+/// IF __M[j+63] == 1
+/// result[j+63:j] := Load64(__X+(i*8))
+/// ELSE
+/// result[j+63:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
+///
+/// \param __X
+/// A pointer to the memory used for loading values.
+/// \param __M
+/// A 128-bit vector of [2 x i64] containing the mask bits.
+/// \returns A 128-bit vector of [2 x i64] containing the loaded or zeroed
+/// elements.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskload_epi64(long long const *__X, __m128i __M)
{
return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
}
+/// Conditionally stores eight 32-bit integer elements from the 256-bit vector
+/// of [8 x i32] in \a __Y to memory \a __X, if the most significant bit of
+/// the corresponding element in the mask \a __M is set; otherwise, the
+/// memory element is unchanged.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// IF __M[j+31] == 1
+/// Store32(__X+(i*4), __Y[j+31:j])
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
+///
+/// \param __X
+/// A pointer to the memory used for storing values.
+/// \param __M
+/// A 256-bit vector of [8 x i32] containing the mask bits.
+/// \param __Y
+/// A 256-bit vector of [8 x i32] containing the values to store.
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
{
__builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
}
+/// Conditionally stores four 64-bit integer elements from the 256-bit vector
+/// of [4 x i64] in \a __Y to memory \a __X, if the most significant bit of
+/// the corresponding element in the mask \a __M is set; otherwise, the
+/// memory element is unchanged.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// IF __M[j+63] == 1
+/// Store64(__X+(i*8), __Y[j+63:j])
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
+///
+/// \param __X
+/// A pointer to the memory used for storing values.
+/// \param __M
+/// A 256-bit vector of [4 x i64] containing the mask bits.
+/// \param __Y
+/// A 256-bit vector of [4 x i64] containing the values to store.
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
{
__builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y);
}
+/// Conditionally stores four 32-bit integer elements from the 128-bit vector
+/// of [4 x i32] in \a __Y to memory \a __X, if the most significant bit of
+/// the corresponding element in the mask \a __M is set; otherwise, the
+/// memory element is unchanged.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// IF __M[j+31] == 1
+/// Store32(__X+(i*4), __Y[j+31:j])
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
+///
+/// \param __X
+/// A pointer to the memory used for storing values.
+/// \param __M
+/// A 128-bit vector of [4 x i32] containing the mask bits.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the values to store.
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
{
__builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
}
+/// Conditionally stores two 64-bit integer elements from the 128-bit vector
+/// of [2 x i64] in \a __Y to memory \a __X, if the most significant bit of
+/// the corresponding element in the mask \a __M is set; otherwise, the
+/// memory element is unchanged.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*64
+/// IF __M[j+63] == 1
+/// Store64(__X+(i*8), __Y[j+63:j])
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
+///
+/// \param __X
+/// A pointer to the memory used for storing values.
+/// \param __M
+/// A 128-bit vector of [2 x i64] containing the mask bits.
+/// \param __Y
+/// A 128-bit vector of [2 x i64] containing the values to store.
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
{
__builtin_ia32_maskstoreq(( __v2di *)__X, (__v2di)__M, (__v2di)__Y);
}
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X
+/// left by the number of bits given in the corresponding element of the
+/// 256-bit vector of [8 x i32] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLVD instruction.
+///
+/// \param __X
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sllv_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
}
+/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X
+/// left by the number of bits given in the corresponding element of the
+/// 128-bit vector of [4 x i32] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLVD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [4 x i32] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_sllv_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
}
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __X
+/// left by the number of bits given in the corresponding element of the
+/// 256-bit vector of [4 x i64] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 63, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLVQ instruction.
+///
+/// \param __X
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sllv_epi64(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);
}
+/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \a __X
+/// left by the number of bits given in the corresponding element of the
+/// 128-bit vector of [2 x i64] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 63, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLVQ instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x i64] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [2 x i64] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_sllv_epi64(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y);
}
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 256-bit vector of [8 x i32] in \a __Y, shifting in sign bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is 0 or -1 according to the sign bit
+/// for that element.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAVD instruction.
+///
+/// \param __X
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srav_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
}
+/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 128-bit vector of [4 x i32] in \a __Y, shifting in sign bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is 0 or -1 according to the sign bit
+/// for that element.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAVD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [4 x i32] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srav_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
}
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 256-bit vector of [8 x i32] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLVD instruction.
+///
+/// \param __X
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srlv_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
}
+/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 128-bit vector of [4 x i32] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLVD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [4 x i32] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srlv_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
}
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 256-bit vector of [4 x i64] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 63, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLVQ instruction.
+///
+/// \param __X
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srlv_epi64(__m256i __X, __m256i __Y)
{
  /* Per-element logical right shift; counts >= 64 yield zero. */
  __v4di __result = __builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);
  return (__m256i)__result;
}
+/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 128-bit vector of [2 x i64] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 63, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLVQ instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x i64] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [2 x i64] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srlv_epi64(__m128i __X, __m128i __Y)
{
  /* Per-element logical right shift; counts >= 64 yield zero. */
  __v2di __result = __builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y);
  return (__m128i)__result;
}
+/// Conditionally gathers two 64-bit floating-point values, either from the
+/// 128-bit vector of [2 x double] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
+/// of [2 x double] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*32
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128d _mm_mask_i32gather_pd(__m128d a, const double *m, __m128i i,
+/// __m128d mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPD instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x double] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
+/// the first two elements are used.
+/// \param mask
+/// A 128-bit vector of [2 x double] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x double] containing the gathered values.
/* The source vector (a) is a [2 x double]; cast it through __m128d — not
 * __m128i — matching the mask operand and every sibling gather macro. The
 * vector casts are bit-reinterpreting either way, so this is a type/
 * consistency fix, not a behavior change. */
#define _mm_mask_i32gather_pd(a, m, i, mask, s) \
  ((__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128d)(a), \
                                      (double const *)(m), \
                                      (__v4si)(__m128i)(i), \
                                      (__v2df)(__m128d)(mask), (s)))
+/// Conditionally gathers four 64-bit floating-point values, either from the
+/// 256-bit vector of [4 x double] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 256-bit vector
+/// of [4 x double] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*32
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_mask_i32gather_pd(__m256d a, const double *m, __m128i i,
+/// __m256d mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPD instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x double] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [4 x double] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x double] containing the gathered values.
/* Masked dword-indexed gather of four doubles (VGATHERDPD, 256-bit form). */
#define _mm256_mask_i32gather_pd(a, m, i, mask, s)                             \
  ((__m256d)__builtin_ia32_gatherd_pd256(                                      \
      (__v4df)(__m256d)(a), (double const *)(m), (__v4si)(__m128i)(i),         \
      (__v4df)(__m256d)(mask), (s)))
+/// Conditionally gathers two 64-bit floating-point values, either from the
+/// 128-bit vector of [2 x double] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
+/// of [2 x double] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*64
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128d _mm_mask_i64gather_pd(__m128d a, const double *m, __m128i i,
+/// __m128d mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPD instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x double] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [2 x double] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x double] containing the gathered values.
/* Masked qword-indexed gather of two doubles (VGATHERQPD, 128-bit form). */
#define _mm_mask_i64gather_pd(a, m, i, mask, s)                                \
  ((__m128d)__builtin_ia32_gatherq_pd(                                         \
      (__v2df)(__m128d)(a), (double const *)(m), (__v2di)(__m128i)(i),         \
      (__v2df)(__m128d)(mask), (s)))
+/// Conditionally gathers four 64-bit floating-point values, either from the
+/// 256-bit vector of [4 x double] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i. The 256-bit vector
+/// of [4 x double] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*64
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_mask_i64gather_pd(__m256d a, const double *m, __m256i i,
+/// __m256d mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPD instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x double] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [4 x double] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x double] containing the gathered values.
/* Masked qword-indexed gather of four doubles (VGATHERQPD, 256-bit form). */
#define _mm256_mask_i64gather_pd(a, m, i, mask, s)                             \
  ((__m256d)__builtin_ia32_gatherq_pd256(                                      \
      (__v4df)(__m256d)(a), (double const *)(m), (__v4di)(__m256i)(i),         \
      (__v4df)(__m256d)(mask), (s)))
+/// Conditionally gathers four 32-bit floating-point values, either from the
+/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
+/// of [4 x float] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*32
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm_mask_i32gather_ps(__m128 a, const float *m, __m128i i,
+/// __m128 mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPS instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x float] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
/* Masked dword-indexed gather of four floats (VGATHERDPS, 128-bit form). */
#define _mm_mask_i32gather_ps(a, m, i, mask, s)                                \
  ((__m128)__builtin_ia32_gatherd_ps(                                          \
      (__v4sf)(__m128)(a), (float const *)(m), (__v4si)(__m128i)(i),           \
      (__v4sf)(__m128)(mask), (s)))
+/// Conditionally gathers eight 32-bit floating-point values, either from the
+/// 256-bit vector of [8 x float] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [8 x i32] in \a i. The 256-bit vector
+/// of [8 x float] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 7
+/// j := element*32
+/// k := element*32
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256 _mm256_mask_i32gather_ps(__m256 a, const float *m, __m256i i,
+/// __m256 mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPS instruction.
+///
+/// \param a
+/// A 256-bit vector of [8 x float] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [8 x float] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [8 x float] containing the gathered values.
/* Masked dword-indexed gather of eight floats (VGATHERDPS, 256-bit form). */
#define _mm256_mask_i32gather_ps(a, m, i, mask, s)                             \
  ((__m256)__builtin_ia32_gatherd_ps256(                                       \
      (__v8sf)(__m256)(a), (float const *)(m), (__v8si)(__m256i)(i),           \
      (__v8sf)(__m256)(mask), (s)))
+/// Conditionally gathers two 32-bit floating-point values, either from the
+/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
+/// of [4 x float] in \a mask determines the source for the lower two
+/// elements. The upper two elements of the result are zeroed.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*32
+/// k := element*64
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// result[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm_mask_i64gather_ps(__m128 a, const float *m, __m128i i,
+/// __m128 mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPS instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float] used as the source when a mask bit is
+/// zero. Only the first two elements are used.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x float] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory. Only the first
+/// two elements are used.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
/* Masked qword-indexed gather of two floats (VGATHERQPS, 128-bit form);
 * the upper two result elements are zeroed by the instruction. */
#define _mm_mask_i64gather_ps(a, m, i, mask, s)                                \
  ((__m128)__builtin_ia32_gatherq_ps(                                          \
      (__v4sf)(__m128)(a), (float const *)(m), (__v2di)(__m128i)(i),           \
      (__v4sf)(__m128)(mask), (s)))
+/// Conditionally gathers four 32-bit floating-point values, either from the
+/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i. The 128-bit vector
+/// of [4 x float] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*64
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm256_mask_i64gather_ps(__m128 a, const float *m, __m256i i,
+/// __m128 mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPS instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x float] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
/* Masked qword-indexed gather of four floats (VGATHERQPS, 256-bit index). */
#define _mm256_mask_i64gather_ps(a, m, i, mask, s)                             \
  ((__m128)__builtin_ia32_gatherq_ps256(                                       \
      (__v4sf)(__m128)(a), (float const *)(m), (__v4di)(__m256i)(i),           \
      (__v4sf)(__m128)(mask), (s)))
+/// Conditionally gathers four 32-bit integer values, either from the
+/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
+/// of [4 x i32] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*32
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_mask_i32gather_epi32(__m128i a, const int *m, __m128i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDD instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x i32] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x i32] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
/* Masked dword-indexed gather of four i32 (VPGATHERDD, 128-bit form). */
#define _mm_mask_i32gather_epi32(a, m, i, mask, s)                             \
  ((__m128i)__builtin_ia32_gatherd_d(                                          \
      (__v4si)(__m128i)(a), (int const *)(m), (__v4si)(__m128i)(i),            \
      (__v4si)(__m128i)(mask), (s)))
+/// Conditionally gathers eight 32-bit integer values, either from the
+/// 256-bit vector of [8 x i32] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [8 x i32] in \a i. The 256-bit vector
+/// of [8 x i32] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 7
+/// j := element*32
+/// k := element*32
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_mask_i32gather_epi32(__m256i a, const int *m, __m256i i,
+/// __m256i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDD instruction.
+///
+/// \param a
+/// A 256-bit vector of [8 x i32] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [8 x i32] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [8 x i32] containing the gathered values.
/* Masked dword-indexed gather of eight i32 (VPGATHERDD, 256-bit form). */
#define _mm256_mask_i32gather_epi32(a, m, i, mask, s)                          \
  ((__m256i)__builtin_ia32_gatherd_d256(                                       \
      (__v8si)(__m256i)(a), (int const *)(m), (__v8si)(__m256i)(i),            \
      (__v8si)(__m256i)(mask), (s)))
+/// Conditionally gathers two 32-bit integer values, either from the
+/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
+/// of [4 x i32] in \a mask determines the source for the lower two
+/// elements. The upper two elements of the result are zeroed.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*32
+/// k := element*64
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// result[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_mask_i64gather_epi32(__m128i a, const int *m, __m128i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQD instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x i32] used as the source when a mask bit is
+/// zero. Only the first two elements are used.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x i32] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory. Only the first two elements
+/// are used.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
/* Masked qword-indexed gather of two i32 (VPGATHERQD, 128-bit form);
 * the upper two result elements are zeroed by the instruction. */
#define _mm_mask_i64gather_epi32(a, m, i, mask, s)                             \
  ((__m128i)__builtin_ia32_gatherq_d(                                          \
      (__v4si)(__m128i)(a), (int const *)(m), (__v2di)(__m128i)(i),            \
      (__v4si)(__m128i)(mask), (s)))
+/// Conditionally gathers four 32-bit integer values, either from the
+/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i. The 128-bit vector
+/// of [4 x i32] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*64
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm256_mask_i64gather_epi32(__m128i a, const int *m, __m256i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQD instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x i32] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x i32] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
/* Masked qword-indexed gather of four i32 (VPGATHERQD, 256-bit index). */
#define _mm256_mask_i64gather_epi32(a, m, i, mask, s)                          \
  ((__m128i)__builtin_ia32_gatherq_d256(                                       \
      (__v4si)(__m128i)(a), (int const *)(m), (__v4di)(__m256i)(i),            \
      (__v4si)(__m128i)(mask), (s)))
+/// Conditionally gathers two 64-bit integer values, either from the
+/// 128-bit vector of [2 x i64] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
+/// of [2 x i64] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*32
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_mask_i32gather_epi64(__m128i a, const long long *m, __m128i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x i64] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
+/// the first two elements are used.
+/// \param mask
+/// A 128-bit vector of [2 x i64] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
/* Masked dword-indexed gather of two i64 (VPGATHERDQ, 128-bit form). */
#define _mm_mask_i32gather_epi64(a, m, i, mask, s)                             \
  ((__m128i)__builtin_ia32_gatherd_q(                                          \
      (__v2di)(__m128i)(a), (long long const *)(m), (__v4si)(__m128i)(i),      \
      (__v2di)(__m128i)(mask), (s)))
+/// Conditionally gathers four 64-bit integer values, either from the
+/// 256-bit vector of [4 x i64] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 256-bit vector
+/// of [4 x i64] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*32
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_mask_i32gather_epi64(__m256i a, const long long *m,
+/// __m128i i, __m256i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x i64] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [4 x i64] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
/* Masked dword-indexed gather of four i64 (VPGATHERDQ, 256-bit form). */
#define _mm256_mask_i32gather_epi64(a, m, i, mask, s)                          \
  ((__m256i)__builtin_ia32_gatherd_q256(                                       \
      (__v4di)(__m256i)(a), (long long const *)(m), (__v4si)(__m128i)(i),      \
      (__v4di)(__m256i)(mask), (s)))
+/// Conditionally gathers two 64-bit integer values, either from the
+/// 128-bit vector of [2 x i64] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
+/// of [2 x i64] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*64
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_mask_i64gather_epi64(__m128i a, const long long *m, __m128i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x i64] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [2 x i64] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
/* Masked qword-indexed gather of two i64 (VPGATHERQQ, 128-bit form). */
#define _mm_mask_i64gather_epi64(a, m, i, mask, s)                             \
  ((__m128i)__builtin_ia32_gatherq_q(                                          \
      (__v2di)(__m128i)(a), (long long const *)(m), (__v2di)(__m128i)(i),      \
      (__v2di)(__m128i)(mask), (s)))
+/// Conditionally gathers four 64-bit integer values, either from the
+/// 256-bit vector of [4 x i64] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i. The 256-bit vector
+/// of [4 x i64] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*64
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_mask_i64gather_epi64(__m256i a, const long long *m,
+/// __m256i i, __m256i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x i64] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [4 x i64] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
/* Masked qword-indexed gather of four i64 (VPGATHERQQ, 256-bit form). */
#define _mm256_mask_i64gather_epi64(a, m, i, mask, s)                          \
  ((__m256i)__builtin_ia32_gatherq_q256(                                       \
      (__v4di)(__m256i)(a), (long long const *)(m), (__v4di)(__m256i)(i),      \
      (__v4di)(__m256i)(mask), (s)))
+/// Gathers two 64-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*32
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128d _mm_i32gather_pd(const double *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
+/// the first two elements are used.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x double] containing the gathered values.
#define _mm_i32gather_pd(m, i, s) \
((__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
(double const *)(m), \
@@ -1039,6 +4744,33 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_mm_setzero_pd()), \
(s)))
+/// Gathers four 64-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*32
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_i32gather_pd(const double *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x double] containing the gathered values.
#define _mm256_i32gather_pd(m, i, s) \
((__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
(double const *)(m), \
@@ -1048,6 +4780,33 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_CMP_EQ_OQ), \
(s)))
+/// Gathers two 64-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*64
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128d _mm_i64gather_pd(const double *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x double] containing the gathered values.
#define _mm_i64gather_pd(m, i, s) \
((__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
(double const *)(m), \
@@ -1056,6 +4815,33 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_mm_setzero_pd()), \
(s)))
+/// Gathers four 64-bit floating-point values from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*64
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_i64gather_pd(const double *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x double] containing the gathered values.
#define _mm256_i64gather_pd(m, i, s) \
((__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
(double const *)(m), \
@@ -1065,6 +4851,33 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_CMP_EQ_OQ), \
(s)))
+/// Gathers four 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*32
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm_i32gather_ps(const float *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPS instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm_i32gather_ps(m, i, s) \
((__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
(float const *)(m), \
@@ -1073,6 +4886,33 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_mm_setzero_ps()), \
(s)))
+/// Gathers eight 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 256-bit vector of [8 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 7
+/// j := element*32
+/// k := element*32
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256 _mm256_i32gather_ps(const float *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPS instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [8 x float] containing the gathered values.
#define _mm256_i32gather_ps(m, i, s) \
((__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
(float const *)(m), \
@@ -1082,6 +4922,35 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_CMP_EQ_OQ), \
(s)))
+/// Gathers two 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The upper two
+/// elements of the result are zeroed.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*32
+/// k := element*64
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// result[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm_i64gather_ps(const float *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPS instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm_i64gather_ps(m, i, s) \
((__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
(float const *)(m), \
@@ -1090,6 +4959,33 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_mm_setzero_ps()), \
(s)))
+/// Gathers four 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*64
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm256_i64gather_ps(const float *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPS instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm256_i64gather_ps(m, i, s) \
((__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
(float const *)(m), \
@@ -1098,44 +4994,263 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_mm_setzero_ps()), \
(s)))
+/// Gathers four 32-bit integer values from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*32
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_i32gather_epi32(const int *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm_i32gather_epi32(m, i, s) \
((__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
(int const *)(m), (__v4si)(__m128i)(i), \
(__v4si)_mm_set1_epi32(-1), (s)))
+/// Gathers eight 32-bit integer values from memory \a m using scaled
+/// indexes from the 256-bit vector of [8 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 7
+/// j := element*32
+/// k := element*32
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_i32gather_epi32(const int *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [8 x i32] containing the gathered values.
#define _mm256_i32gather_epi32(m, i, s) \
((__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
(int const *)(m), (__v8si)(__m256i)(i), \
(__v8si)_mm256_set1_epi32(-1), (s)))
+/// Gathers two 32-bit integer values from memory \a m using scaled indexes
+/// from the 128-bit vector of [2 x i64] in \a i. The upper two elements
+/// of the result are zeroed.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*32
+/// k := element*64
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// result[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_i64gather_epi32(const int *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm_i64gather_epi32(m, i, s) \
((__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
(int const *)(m), (__v2di)(__m128i)(i), \
(__v4si)_mm_set1_epi32(-1), (s)))
+/// Gathers four 32-bit integer values from memory \a m using scaled indexes
+/// from the 256-bit vector of [4 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*64
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm256_i64gather_epi32(const int *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm256_i64gather_epi32(m, i, s) \
((__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
(int const *)(m), (__v4di)(__m256i)(i), \
(__v4si)_mm_set1_epi32(-1), (s)))
+/// Gathers two 64-bit integer values from memory \a m using scaled indexes
+/// from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*32
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_i32gather_epi64(const long long *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
+/// the first two elements are used.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
#define _mm_i32gather_epi64(m, i, s) \
((__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
(long long const *)(m), \
(__v4si)(__m128i)(i), \
(__v2di)_mm_set1_epi64x(-1), (s)))
+/// Gathers four 64-bit integer values from memory \a m using scaled indexes
+/// from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*32
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_i32gather_epi64(const long long *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
#define _mm256_i32gather_epi64(m, i, s) \
((__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
(long long const *)(m), \
(__v4si)(__m128i)(i), \
(__v4di)_mm256_set1_epi64x(-1), (s)))
+/// Gathers two 64-bit integer values from memory \a m using scaled indexes
+/// from the 128-bit vector of [2 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*64
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_i64gather_epi64(const long long *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
#define _mm_i64gather_epi64(m, i, s) \
((__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
(long long const *)(m), \
(__v2di)(__m128i)(i), \
(__v2di)_mm_set1_epi64x(-1), (s)))
+/// Gathers four 64-bit integer values from memory \a m using scaled indexes
+/// from the 256-bit vector of [4 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*64
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_i64gather_epi64(const long long *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
#define _mm256_i64gather_epi64(m, i, s) \
((__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
(long long const *)(m), \
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
index b19d2fb90ff5..88a8cebbee30 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
@@ -397,14 +397,15 @@ _mm512_broadcastsd_pd(__m128d __A)
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd256_pd512(__m256d __a)
{
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
+ 1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_castps256_ps512(__m256 __a)
{
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7,
- -1, -1, -1, -1, -1, -1, -1, -1);
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
static __inline __m128d __DEFAULT_FN_ATTRS512
@@ -446,7 +447,10 @@ _mm512_castpd_si512 (__m512d __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd128_pd512 (__m128d __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
+ __m256d __B = __builtin_nondeterministic_value(__B);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3),
+ __B, 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512d __DEFAULT_FN_ATTRS512
@@ -464,19 +468,25 @@ _mm512_castps_si512 (__m512 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_castps128_ps512 (__m128 __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __m256 __B = __builtin_nondeterministic_value(__B);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7),
+ __B, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi128_si512 (__m128i __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
+ __m256i __B = __builtin_nondeterministic_value(__B);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3),
+ __B, 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi256_si512 (__m256i __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
+ return __builtin_shufflevector( __A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512 __DEFAULT_FN_ATTRS512
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h b/contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h
index 5cdc37fde629..d326586578bb 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h
@@ -192,22 +192,26 @@ _mm512_castph512_ph256(__m512h __a) {
static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_castph128_ph256(__m128h __a) {
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1,
- -1, -1, -1, -1, -1);
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a),
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512
_mm512_castph128_ph512(__m128h __a) {
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __m256h __b = __builtin_nondeterministic_value(__b);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a),
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
+ __b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512
_mm512_castph256_ph512(__m256h __a) {
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1);
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31);
}
/// Constructs a 256-bit floating-point vector of [16 x half] from a
diff --git a/contrib/llvm-project/clang/lib/Headers/avxintrin.h b/contrib/llvm-project/clang/lib/Headers/avxintrin.h
index ee31569c1623..94fac5e6c9da 100644
--- a/contrib/llvm-project/clang/lib/Headers/avxintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avxintrin.h
@@ -3017,8 +3017,11 @@ _mm256_zeroupper(void)
static __inline __m128 __DEFAULT_FN_ATTRS128
_mm_broadcast_ss(float const *__a)
{
- float __f = *__a;
- return __extension__ (__m128)(__v4sf){ __f, __f, __f, __f };
+ struct __mm_broadcast_ss_struct {
+ float __f;
+ } __attribute__((__packed__, __may_alias__));
+ float __f = ((const struct __mm_broadcast_ss_struct*)__a)->__f;
+ return __extension__ (__m128){ __f, __f, __f, __f };
}
/// Loads a scalar double-precision floating point value from the
@@ -3036,7 +3039,10 @@ _mm_broadcast_ss(float const *__a)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_sd(double const *__a)
{
- double __d = *__a;
+ struct __mm256_broadcast_sd_struct {
+ double __d;
+ } __attribute__((__packed__, __may_alias__));
+ double __d = ((const struct __mm256_broadcast_sd_struct*)__a)->__d;
return __extension__ (__m256d)(__v4df){ __d, __d, __d, __d };
}
@@ -3055,7 +3061,10 @@ _mm256_broadcast_sd(double const *__a)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ss(float const *__a)
{
- float __f = *__a;
+ struct __mm256_broadcast_ss_struct {
+ float __f;
+ } __attribute__((__packed__, __may_alias__));
+ float __f = ((const struct __mm256_broadcast_ss_struct*)__a)->__f;
return __extension__ (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
}
@@ -4499,7 +4508,8 @@ _mm256_castsi256_si128(__m256i __a)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castpd128_pd256(__m128d __a)
{
- return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1);
+ return __builtin_shufflevector(
+ (__v2df)__a, (__v2df)__builtin_nondeterministic_value(__a), 0, 1, 2, 3);
}
/// Constructs a 256-bit floating-point vector of [8 x float] from a
@@ -4520,7 +4530,9 @@ _mm256_castpd128_pd256(__m128d __a)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castps128_ps256(__m128 __a)
{
- return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1);
+ return __builtin_shufflevector((__v4sf)__a,
+ (__v4sf)__builtin_nondeterministic_value(__a),
+ 0, 1, 2, 3, 4, 5, 6, 7);
}
/// Constructs a 256-bit integer vector from a 128-bit integer vector.
@@ -4539,7 +4551,8 @@ _mm256_castps128_ps256(__m128 __a)
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castsi128_si256(__m128i __a)
{
- return __builtin_shufflevector((__v2di)__a, (__v2di)__a, 0, 1, -1, -1);
+ return __builtin_shufflevector(
+ (__v2di)__a, (__v2di)__builtin_nondeterministic_value(__a), 0, 1, 2, 3);
}
/// Constructs a 256-bit floating-point vector of [4 x double] from a
diff --git a/contrib/llvm-project/clang/lib/Headers/avxvnniint16intrin.h b/contrib/llvm-project/clang/lib/Headers/avxvnniint16intrin.h
new file mode 100644
index 000000000000..e4d342a8b45b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/avxvnniint16intrin.h
@@ -0,0 +1,473 @@
+/*===----------- avxvnniint16intrin.h - AVXVNNIINT16 intrinsics-------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error \
+ "Never use <avxvnniint16intrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __AVXVNNIINT16INTRIN_H
+#define __AVXVNNIINT16INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint16"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint16"), \
+ __min_vector_width__(256)))
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwsud_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUD instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x int].
+/// \param __A
+/// A 128-bit vector of [8 x short].
+/// \param __B
+/// A 128-bit vector of [8 x unsigned short].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsud_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwsud128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUD instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x int].
+/// \param __A
+/// A 256-bit vector of [16 x short].
+/// \param __B
+/// A 256-bit vector of [16 x unsigned short].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwsud256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwsuds_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUDS instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x int].
+/// \param __A
+/// A 128-bit vector of [8 x short].
+/// \param __B
+/// A 128-bit vector of [8 x unsigned short].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsuds_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwsuds128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUDS instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x int].
+/// \param __A
+/// A 256-bit vector of [16 x short].
+/// \param __B
+/// A 256-bit vector of [16 x unsigned short].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwsuds256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwusd_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUSD instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x int].
+/// \param __A
+/// A 128-bit vector of [8 x unsigned short].
+/// \param __B
+/// A 128-bit vector of [8 x short].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusd_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwusd128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUSD instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x int].
+/// \param __A
+/// A 256-bit vector of [16 x unsigned short].
+/// \param __B
+/// A 256-bit vector of [16 x short].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwusd256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwusds_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUSDS instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x int].
+/// \param __A
+/// A 128-bit vector of [8 x unsigned short].
+/// \param __B
+/// A 128-bit vector of [8 x short].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusds_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwusds128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwusds_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUSDS instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x int].
+/// \param __A
+/// A 256-bit vector of [16 x unsigned short].
+/// \param __B
+/// A 256-bit vector of [16 x short].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwusds_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwusds256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// unsigned 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwuud_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUUD instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x unsigned int].
+/// \param __A
+/// A 128-bit vector of [8 x unsigned short].
+/// \param __B
+/// A 128-bit vector of [8 x unsigned short].
+/// \returns
+/// A 128-bit vector of [4 x unsigned int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuud_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwuud128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// unsigned 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUUD instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x unsigned int].
+/// \param __A
+/// A 256-bit vector of [16 x unsigned short].
+/// \param __B
+/// A 256-bit vector of [16 x unsigned short].
+/// \returns
+/// A 256-bit vector of [8 x unsigned int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwuud256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// unsigned 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with unsigned saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwuuds_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUUDS instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x unsigned int].
+/// \param __A
+/// A 128-bit vector of [8 x unsigned short].
+/// \param __B
+/// A 128-bit vector of [8 x unsigned short].
+/// \returns
+/// A 128-bit vector of [4 x unsigned int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuuds_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwuuds128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// unsigned 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with unsigned saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUUDS instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x unsigned int].
+/// \param __A
+/// A 256-bit vector of [16 x unsigned short].
+/// \param __B
+/// A 256-bit vector of [16 x unsigned short].
+/// \returns
+/// A 256-bit vector of [8 x unsigned int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwuuds256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif // __AVXVNNIINT16INTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/bmi2intrin.h b/contrib/llvm-project/clang/lib/Headers/bmi2intrin.h
index 0b56aed5f4cb..f0a3343bef91 100644
--- a/contrib/llvm-project/clang/lib/Headers/bmi2intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/bmi2intrin.h
@@ -7,8 +7,8 @@
*===-----------------------------------------------------------------------===
*/
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
-#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
+#ifndef __IMMINTRIN_H
+#error "Never use <bmi2intrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __BMI2INTRIN_H
@@ -17,44 +17,228 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2")))
+/// Copies the unsigned 32-bit integer \a __X and zeroes the upper bits
+/// starting at bit number \a __Y.
+///
+/// \code{.operation}
+/// i := __Y[7:0]
+/// result := __X
+/// IF i < 32
+/// result[31:i] := 0
+/// FI
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c BZHI instruction.
+///
+/// \param __X
+/// The 32-bit source value to copy.
+/// \param __Y
+/// The lower 8 bits specify the bit number of the lowest bit to zero.
+/// \returns The partially zeroed 32-bit value.
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_bzhi_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_bzhi_si(__X, __Y);
}
+/// Deposit (scatter) low-order bits from the unsigned 32-bit integer \a __X
+/// into the 32-bit result, according to the mask in the unsigned 32-bit
+/// integer \a __Y. All other bits of the result are zero.
+///
+/// \code{.operation}
+/// i := 0
+/// result := 0
+/// FOR m := 0 TO 31
+/// IF __Y[m] == 1
+/// result[m] := __X[i]
+/// i := i + 1
+/// ENDIF
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c PDEP instruction.
+///
+/// \param __X
+/// The 32-bit source value to copy.
+/// \param __Y
+/// The 32-bit mask specifying where to deposit source bits.
+/// \returns The 32-bit result.
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_pdep_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_pdep_si(__X, __Y);
}
+/// Extract (gather) bits from the unsigned 32-bit integer \a __X into the
+/// low-order bits of the 32-bit result, according to the mask in the
+/// unsigned 32-bit integer \a __Y. All other bits of the result are zero.
+///
+/// \code{.operation}
+/// i := 0
+/// result := 0
+/// FOR m := 0 TO 31
+/// IF __Y[m] == 1
+/// result[i] := __X[m]
+/// i := i + 1
+/// ENDIF
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c PEXT instruction.
+///
+/// \param __X
+/// The 32-bit source value to copy.
+/// \param __Y
+/// The 32-bit mask specifying which source bits to extract.
+/// \returns The 32-bit result.
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_pext_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_pext_si(__X, __Y);
}
+/// Multiplies the unsigned 32-bit integers \a __X and \a __Y to form a
+/// 64-bit product. Stores the upper 32 bits of the product in the
+/// memory at \a __P and returns the lower 32 bits.
+///
+/// \code{.operation}
+/// Store32(__P, (__X * __Y)[63:32])
+/// result := (__X * __Y)[31:0]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c MULX instruction.
+///
+/// \param __X
+/// An unsigned 32-bit multiplicand.
+/// \param __Y
+/// An unsigned 32-bit multiplicand.
+/// \param __P
+/// A pointer to memory for storing the upper half of the product.
+/// \returns The lower half of the product.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mulx_u32(unsigned int __X, unsigned int __Y, unsigned int *__P)
+{
+ unsigned long long __res = (unsigned long long) __X * __Y;
+ *__P = (unsigned int)(__res >> 32);
+ return (unsigned int)__res;
+}
+
#ifdef __x86_64__
+/// Copies the unsigned 64-bit integer \a __X and zeroes the upper bits
+/// starting at bit number \a __Y.
+///
+/// \code{.operation}
+/// i := __Y[7:0]
+/// result := __X
+/// IF i < 64
+/// result[63:i] := 0
+/// FI
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c BZHI instruction.
+///
+/// \param __X
+/// The 64-bit source value to copy.
+/// \param __Y
+/// The lower 8 bits specify the bit number of the lowest bit to zero.
+/// \returns The partially zeroed 64-bit value.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_bzhi_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_bzhi_di(__X, __Y);
}
+/// Deposit (scatter) low-order bits from the unsigned 64-bit integer \a __X
+/// into the 64-bit result, according to the mask in the unsigned 64-bit
+/// integer \a __Y. All other bits of the result are zero.
+///
+/// \code{.operation}
+/// i := 0
+/// result := 0
+/// FOR m := 0 TO 63
+/// IF __Y[m] == 1
+/// result[m] := __X[i]
+/// i := i + 1
+/// ENDIF
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c PDEP instruction.
+///
+/// \param __X
+/// The 64-bit source value to copy.
+/// \param __Y
+/// The 64-bit mask specifying where to deposit source bits.
+/// \returns The 64-bit result.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_pdep_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_pdep_di(__X, __Y);
}
+/// Extract (gather) bits from the unsigned 64-bit integer \a __X into the
+/// low-order bits of the 64-bit result, according to the mask in the
+/// unsigned 64-bit integer \a __Y. All other bits of the result are zero.
+///
+/// \code{.operation}
+/// i := 0
+/// result := 0
+/// FOR m := 0 TO 63
+/// IF __Y[m] == 1
+/// result[i] := __X[m]
+/// i := i + 1
+/// ENDIF
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c PEXT instruction.
+///
+/// \param __X
+/// The 64-bit source value to copy.
+/// \param __Y
+/// The 64-bit mask specifying which source bits to extract.
+/// \returns The 64-bit result.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_pext_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_pext_di(__X, __Y);
}
+/// Multiplies the unsigned 64-bit integers \a __X and \a __Y to form a
+/// 128-bit product. Stores the upper 64 bits of the product to the
+/// memory addressed by \a __P and returns the lower 64 bits.
+///
+/// \code{.operation}
+/// Store64(__P, (__X * __Y)[127:64])
+/// result := (__X * __Y)[63:0]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c MULX instruction.
+///
+/// \param __X
+/// An unsigned 64-bit multiplicand.
+/// \param __Y
+/// An unsigned 64-bit multiplicand.
+/// \param __P
+/// A pointer to memory for storing the upper half of the product.
+/// \returns The lower half of the product.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_mulx_u64 (unsigned long long __X, unsigned long long __Y,
unsigned long long *__P)
@@ -64,17 +248,7 @@ _mulx_u64 (unsigned long long __X, unsigned long long __Y,
return (unsigned long long) __res;
}
-#else /* !__x86_64__ */
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mulx_u32 (unsigned int __X, unsigned int __Y, unsigned int *__P)
-{
- unsigned long long __res = (unsigned long long) __X * __Y;
- *__P = (unsigned int) (__res >> 32);
- return (unsigned int) __res;
-}
-
-#endif /* !__x86_64__ */
+#endif /* __x86_64__ */
#undef __DEFAULT_FN_ATTRS
diff --git a/contrib/llvm-project/clang/lib/Headers/clflushoptintrin.h b/contrib/llvm-project/clang/lib/Headers/clflushoptintrin.h
index 060eb36f30f9..ae0a0244c497 100644
--- a/contrib/llvm-project/clang/lib/Headers/clflushoptintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/clflushoptintrin.h
@@ -17,6 +17,15 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("clflushopt")))
+/// Invalidates all levels of the cache hierarchy and flushes modified data to
+/// memory for the cache line specified by the address \a __m.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c CLFLUSHOPT instruction.
+///
+/// \param __m
+/// An address within the cache line to flush and invalidate.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_clflushopt(void const * __m) {
__builtin_ia32_clflushopt(__m);
diff --git a/contrib/llvm-project/clang/lib/Headers/clzerointrin.h b/contrib/llvm-project/clang/lib/Headers/clzerointrin.h
index a180984a3f28..acccfe94ff31 100644
--- a/contrib/llvm-project/clang/lib/Headers/clzerointrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/clzerointrin.h
@@ -6,7 +6,7 @@
*
*===-----------------------------------------------------------------------===
*/
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#ifndef __X86INTRIN_H
#error "Never use <clzerointrin.h> directly; include <x86intrin.h> instead."
#endif
@@ -17,14 +17,16 @@
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("clzero")))
-/// Loads the cache line address and zero's out the cacheline
+/// Zeroes out the cache line for the address \a __line. This uses a
+/// non-temporal store. Calling \c _mm_sfence() afterward might be needed
+/// to enforce ordering.
///
-/// \headerfile <clzerointrin.h>
+/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> CLZERO </c> instruction.
+/// This intrinsic corresponds to the \c CLZERO instruction.
///
/// \param __line
-/// A pointer to a cacheline which needs to be zeroed out.
+/// An address within the cache line to zero out.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_clzero (void * __line)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/cpuid.h b/contrib/llvm-project/clang/lib/Headers/cpuid.h
index 1ad6853a97c9..454f74e92f85 100644
--- a/contrib/llvm-project/clang/lib/Headers/cpuid.h
+++ b/contrib/llvm-project/clang/lib/Headers/cpuid.h
@@ -328,4 +328,14 @@ static __inline int __get_cpuid_count (unsigned int __leaf,
return 1;
}
+// If MS extensions are enabled, __cpuidex is defined as a builtin which will
+// conflict with the __cpuidex definition below.
+#ifndef _MSC_EXTENSIONS
+static __inline void __cpuidex (int __cpu_info[4], int __leaf, int __subleaf)
+{
+ __cpuid_count(__leaf, __subleaf, __cpu_info[0], __cpu_info[1], __cpu_info[2],
+ __cpu_info[3]);
+}
+#endif
+
#endif /* __CPUID_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/shared_ptr_base.h b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/shared_ptr_base.h
new file mode 100644
index 000000000000..10028dd7bd9a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/shared_ptr_base.h
@@ -0,0 +1,9 @@
+// CUDA headers define __noinline__ which interferes with libstdc++'s use of
+// `__attribute((__noinline__))`. In order to avoid compilation error,
+// temporarily unset __noinline__ when we include affected libstdc++ header.
+
+#pragma push_macro("__noinline__")
+#undef __noinline__
+#include_next "bits/shared_ptr_base.h"
+
+#pragma pop_macro("__noinline__")
diff --git a/contrib/llvm-project/clang/lib/Headers/fmaintrin.h b/contrib/llvm-project/clang/lib/Headers/fmaintrin.h
index d889b7c5e270..ea832fac4f99 100644
--- a/contrib/llvm-project/clang/lib/Headers/fmaintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/fmaintrin.h
@@ -18,192 +18,756 @@
#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(256)))
+/// Computes a multiply-add of 128-bit vectors of [4 x float].
+/// For each element, computes <c> (__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADD213PS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the addend.
+/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
+/// Computes a multiply-add of 128-bit vectors of [2 x double].
+/// For each element, computes <c> (__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADD213PD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the addend.
+/// \returns A 128-bit [2 x double] vector containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
+/// Computes a scalar multiply-add of the single-precision values in the
+/// low 32 bits of 128-bit vectors of [4 x float].
+/// \code
+/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
+/// result[127:32] = __A[127:32]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADD213SS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand in the low
+/// 32 bits.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier in the low
+/// 32 bits.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the addend in the low
+/// 32 bits.
+/// \returns A 128-bit vector of [4 x float] containing the result in the low
+/// 32 bits and a copy of \a __A[127:32] in the upper 96 bits.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
+/// Computes a scalar multiply-add of the double-precision values in the
+/// low 64 bits of 128-bit vectors of [2 x double].
+/// \code
+/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
+/// result[127:64] = __A[127:64]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADD213SD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand in the low
+/// 64 bits.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier in the low
+/// 64 bits.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the addend in the low
+/// 64 bits.
+/// \returns A 128-bit vector of [2 x double] containing the result in the low
+/// 64 bits and a copy of \a __A[127:64] in the upper 64 bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
+/// Computes a multiply-subtract of 128-bit vectors of [4 x float].
+/// For each element, computes <c> (__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUB213PS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the subtrahend.
+/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
+/// Computes a multiply-subtract of 128-bit vectors of [2 x double].
+/// For each element, computes <c> (__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUB213PD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the subtrahend.
+/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
+/// Computes a scalar multiply-subtract of the single-precision values in
+/// the low 32 bits of 128-bit vectors of [4 x float].
+/// \code
+/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
+/// result[127:32] = __A[127:32]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUB213SS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand in the low
+/// 32 bits.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier in the low
+/// 32 bits.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the subtrahend in the low
+/// 32 bits.
+/// \returns A 128-bit vector of [4 x float] containing the result in the low
+/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
+/// Computes a scalar multiply-subtract of the double-precision values in
+/// the low 64 bits of 128-bit vectors of [2 x double].
+/// \code
+/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
+/// result[127:64] = __A[127:64]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUB213SD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand in the low
+/// 64 bits.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier in the low
+/// 64 bits.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the subtrahend in the low
+/// 64 bits.
+/// \returns A 128-bit vector of [2 x double] containing the result in the low
+/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
+/// Computes a negated multiply-add of 128-bit vectors of [4 x float].
+/// For each element, computes <c> -(__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMADD213PS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the addend.
+/// \returns A 128-bit [4 x float] vector containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
+/// Computes a negated multiply-add of 128-bit vectors of [2 x double].
+/// For each element, computes <c> -(__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMADD213PD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the addend.
+/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
}
+/// Computes a scalar negated multiply-add of the single-precision values in
+/// the low 32 bits of 128-bit vectors of [4 x float].
+/// \code
+/// result[31:0] = -(__A[31:0] * __B[31:0]) + __C[31:0]
+/// result[127:32] = __A[127:32]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMADD213SS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand in the low
+/// 32 bits.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier in the low
+/// 32 bits.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the addend in the low
+/// 32 bits.
+/// \returns A 128-bit vector of [4 x float] containing the result in the low
+/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C);
}
+/// Computes a scalar negated multiply-add of the double-precision values
+/// in the low 64 bits of 128-bit vectors of [2 x double].
+/// \code
+/// result[63:0] = -(__A[63:0] * __B[63:0]) + __C[63:0]
+/// result[127:64] = __A[127:64]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMADD213SD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand in the low
+/// 64 bits.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier in the low
+/// 64 bits.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the addend in the low
+/// 64 bits.
+/// \returns A 128-bit vector of [2 x double] containing the result in the low
+/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, (__v2df)__C);
}
+/// Computes a negated multiply-subtract of 128-bit vectors of [4 x float].
+/// For each element, computes <c> -(__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMSUB213PS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the subtrahend.
+/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
+/// Computes a negated multiply-subtract of 128-bit vectors of [2 x double].
+/// For each element, computes <c> -(__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMSUB213PD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the subtrahend.
+/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
+/// Computes a scalar negated multiply-subtract of the single-precision
+/// values in the low 32 bits of 128-bit vectors of [4 x float].
+/// \code
+/// result[31:0] = -(__A[31:0] * __B[31:0]) - __C[31:0]
+/// result[127:32] = __A[127:32]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMSUB213SS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand in the low
+/// 32 bits.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier in the low
+/// 32 bits.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the subtrahend in the low
+/// 32 bits.
+/// \returns A 128-bit vector of [4 x float] containing the result in the low
+/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C);
}
+/// Computes a scalar negated multiply-subtract of the double-precision
+/// values in the low 64 bits of 128-bit vectors of [2 x double].
+/// \code
+/// result[63:0] = -(__A[63:0] * __B[63:0]) - __C[63:0]
+/// result[127:64] = __A[127:64]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMSUB213SD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand in the low
+/// 64 bits.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier in the low
+/// 64 bits.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the subtrahend in the low
+/// 64 bits.
+/// \returns A 128-bit vector of [2 x double] containing the result in the low
+/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
{
// Low element: __A * (-__B) + (-__C) == -(__A * __B) - __C; the scalar builtin
// carries __A's upper bits through unchanged.
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, -(__v2df)__C);
}
+/// Computes a multiply with alternating add/subtract of 128-bit vectors of
+/// [4 x float].
+/// \code
+/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
+/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32]
+/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64]
+/// result[127:96] = (__A[127:96] * __B[127:96]) + __C[127:96]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADDSUB213PS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the addend/subtrahend.
+/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
// Builtin alternates per lane: even lanes subtract __C, odd lanes add __C.
return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
+/// Computes a multiply with alternating add/subtract of 128-bit vectors of
+/// [2 x double].
+/// \code
+/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
+/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADDSUB213PD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the addend/subtrahend.
+/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
// Builtin alternates per lane: even lanes subtract __C, odd lanes add __C.
return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
+/// Computes a multiply with alternating add/subtract of 128-bit vectors of
+/// [4 x float].
+/// \code
+/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
+/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32]
+/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64]
+/// result[127:96] = (__A[127:96] * __B[127:96]) - __C[127:96]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUBADD213PS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the addend/subtrahend.
+/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
// Negating __C flips the builtin's alternation: even lanes add __C, odd lanes
// subtract it.
return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
+/// Computes a multiply with alternating add/subtract of 128-bit vectors of
+/// [2 x double].
+/// \code
+/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
+/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUBADD213PD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the addend/subtrahend.
+/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
// Negating __C flips the builtin's alternation: even lanes add __C, odd lanes
// subtract it.
return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
+/// Computes a multiply-add of 256-bit vectors of [8 x float].
+/// For each element, computes <c> (__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADD213PS instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [8 x float] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [8 x float] containing the addend.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
// (__A * __B) + __C for each of the eight float elements.
return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
+/// Computes a multiply-add of 256-bit vectors of [4 x double].
+/// For each element, computes <c> (__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADD213PD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x double] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [4 x double] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [4 x double] containing the addend.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
// (__A * __B) + __C for each of the four double elements.
return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
}
+/// Computes a multiply-subtract of 256-bit vectors of [8 x float].
+/// For each element, computes <c> (__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUB213PS instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [8 x float] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [8 x float] containing the subtrahend.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
// Negate __C to compute (__A * __B) - __C per element.
return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
+/// Computes a multiply-subtract of 256-bit vectors of [4 x double].
+/// For each element, computes <c> (__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUB213PD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x double] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [4 x double] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [4 x double] containing the subtrahend.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
// Negate __C to compute (__A * __B) - __C per element.
return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
}
+/// Computes a negated multiply-add of 256-bit vectors of [8 x float].
+/// For each element, computes <c> -(__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMADD213PS instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [8 x float] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [8 x float] containing the addend.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
// Negate __A to compute -(__A * __B) + __C per element.
return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
+/// Computes a negated multiply-add of 256-bit vectors of [4 x double].
+/// For each element, computes <c> -(__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMADD213PD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x double] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [4 x double] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [4 x double] containing the addend.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
// Negate __A to compute -(__A * __B) + __C per element.
return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);
}
+/// Computes a negated multiply-subtract of 256-bit vectors of [8 x float].
+/// For each element, computes <c> -(__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMSUB213PS instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [8 x float] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [8 x float] containing the subtrahend.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
// Negate __A and __C to compute -(__A * __B) - __C per element.
return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
+/// Computes a negated multiply-subtract of 256-bit vectors of [4 x double].
+/// For each element, computes <c> -(__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMSUB213PD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x double] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [4 x double] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [4 x double] containing the subtrahend.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
// Negate __A and __C to compute -(__A * __B) - __C per element.
return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);
}
+/// Computes a multiply with alternating add/subtract of 256-bit vectors of
+/// [8 x float].
+/// \code
+/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
+/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32]
+/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64]
+/// result[127:96] = (__A[127:96] * __B[127:96]) + __C[127:96]
+/// result[159:128] = (__A[159:128] * __B[159:128]) - __C[159:128]
+/// result[191:160] = (__A[191:160] * __B[191:160]) + __C[191:160]
+/// result[223:192] = (__A[223:192] * __B[223:192]) - __C[223:192]
+/// result[255:224] = (__A[255:224] * __B[255:224]) + __C[255:224]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADDSUB213PS instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [8 x float] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [8 x float] containing the addend/subtrahend.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
// Builtin alternates per lane: even lanes subtract __C, odd lanes add __C.
return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
+/// Computes a multiply with alternating add/subtract of 256-bit vectors of
+/// [4 x double].
+/// \code
+/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
+/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64]
+/// result[191:128] = (__A[191:128] * __B[191:128]) - __C[191:128]
+/// result[255:192] = (__A[255:192] * __B[255:192]) + __C[255:192]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADDSUB213PD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x double] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [4 x double] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [4 x double] containing the addend/subtrahend.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
// Builtin alternates per lane: even lanes subtract __C, odd lanes add __C.
return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
}
+/// Computes a vector multiply with alternating add/subtract of 256-bit
+/// vectors of [8 x float].
+/// \code
+/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
+/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32]
+/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64]
+/// result[127:96] = (__A[127:96] * __B[127:96]) - __C[127:96]
+/// result[159:128] = (__A[159:128] * __B[159:128]) + __C[159:128]
+/// result[191:160] = (__A[191:160] * __B[191:160]) - __C[191:160]
+/// result[223:192] = (__A[223:192] * __B[223:192]) + __C[223:192]
+/// result[255:224] = (__A[255:224] * __B[255:224]) - __C[255:224]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUBADD213PS instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [8 x float] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [8 x float] containing the addend/subtrahend.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
// Negating __C flips the builtin's alternation: even lanes add __C, odd lanes
// subtract it.
return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
+/// Computes a vector multiply with alternating add/subtract of 256-bit
+/// vectors of [4 x double].
+/// \code
+/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
+/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64]
+/// result[191:128] = (__A[191:128] * __B[191:128]) + __C[191:128]
+/// result[255:192] = (__A[255:192] * __B[255:192]) - __C[255:192]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUBADD213PD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x double] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [4 x double] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [4 x double] containing the addend/subtrahend.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h
index d811a28a4335..1a34e1626e5a 100644
--- a/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h
+++ b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h
@@ -219,5 +219,262 @@ double3 trunc(double3);
__attribute__((clang_builtin_alias(__builtin_elementwise_trunc)))
double4 trunc(double4);
+// log builtins
+#ifdef __HLSL_ENABLE_16_BIT
+__attribute__((clang_builtin_alias(__builtin_elementwise_log))) half log(half);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log)))
+half2 log(half2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log)))
+half3 log(half3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log)))
+half4 log(half4);
+#endif
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_log))) float
+log(float);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log)))
+float2 log(float2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log)))
+float3 log(float3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log)))
+float4 log(float4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_log))) double
+log(double);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log)))
+double2 log(double2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log)))
+double3 log(double3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log)))
+double4 log(double4);
+
+// log2 builtins
+#ifdef __HLSL_ENABLE_16_BIT
+__attribute__((clang_builtin_alias(__builtin_elementwise_log2)))
+half log2(half);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log2)))
+half2 log2(half2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log2)))
+half3 log2(half3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log2)))
+half4 log2(half4);
+#endif
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) float
+log2(float);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log2)))
+float2 log2(float2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log2)))
+float3 log2(float3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log2)))
+float4 log2(float4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) double
+log2(double);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log2)))
+double2 log2(double2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log2)))
+double3 log2(double3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log2)))
+double4 log2(double4);
+
+// log10 builtins
+#ifdef __HLSL_ENABLE_16_BIT
+__attribute__((clang_builtin_alias(__builtin_elementwise_log10)))
+half log10(half);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log10)))
+half2 log10(half2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log10)))
+half3 log10(half3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log10)))
+half4 log10(half4);
+#endif
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) float
+log10(float);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log10)))
+float2 log10(float2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log10)))
+float3 log10(float3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log10)))
+float4 log10(float4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) double
+log10(double);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log10)))
+double2 log10(double2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log10)))
+double3 log10(double3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_log10)))
+double4 log10(double4);
+
+// max builtins
+#ifdef __HLSL_ENABLE_16_BIT
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+half max(half, half);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+half2 max(half2, half2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+half3 max(half3, half3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+half4 max(half4, half4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+int16_t max(int16_t, int16_t);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+int16_t2 max(int16_t2, int16_t2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+int16_t3 max(int16_t3, int16_t3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+int16_t4 max(int16_t4, int16_t4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+uint16_t max(uint16_t, uint16_t);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+uint16_t2 max(uint16_t2, uint16_t2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+uint16_t3 max(uint16_t3, uint16_t3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+uint16_t4 max(uint16_t4, uint16_t4);
+#endif
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_max))) int max(int,
+ int);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+int2 max(int2, int2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+int3 max(int3, int3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+int4 max(int4, int4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+uint max(uint, uint);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+uint2 max(uint2, uint2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+uint3 max(uint3, uint3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+uint4 max(uint4, uint4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+int64_t max(int64_t, int64_t);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+int64_t2 max(int64_t2, int64_t2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+int64_t3 max(int64_t3, int64_t3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+int64_t4 max(int64_t4, int64_t4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+uint64_t max(uint64_t, uint64_t);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+uint64_t2 max(uint64_t2, uint64_t2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+uint64_t3 max(uint64_t3, uint64_t3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+uint64_t4 max(uint64_t4, uint64_t4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_max))) float
+max(float, float);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+float2 max(float2, float2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+float3 max(float3, float3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+float4 max(float4, float4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_max))) double
+max(double, double);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+double2 max(double2, double2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+double3 max(double3, double3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_max)))
+double4 max(double4, double4);
+
+// min builtins
+#ifdef __HLSL_ENABLE_16_BIT
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+half min(half, half);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+half2 min(half2, half2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+half3 min(half3, half3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+half4 min(half4, half4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+int16_t min(int16_t, int16_t);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+int16_t2 min(int16_t2, int16_t2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+int16_t3 min(int16_t3, int16_t3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+int16_t4 min(int16_t4, int16_t4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+uint16_t min(uint16_t, uint16_t);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+uint16_t2 min(uint16_t2, uint16_t2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+uint16_t3 min(uint16_t3, uint16_t3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+uint16_t4 min(uint16_t4, uint16_t4);
+#endif
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_min))) int min(int,
+ int);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+int2 min(int2, int2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+int3 min(int3, int3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+int4 min(int4, int4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+uint min(uint, uint);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+uint2 min(uint2, uint2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+uint3 min(uint3, uint3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+uint4 min(uint4, uint4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+int64_t min(int64_t, int64_t);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+int64_t2 min(int64_t2, int64_t2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+int64_t3 min(int64_t3, int64_t3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+int64_t4 min(int64_t4, int64_t4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+uint64_t min(uint64_t, uint64_t);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+uint64_t2 min(uint64_t2, uint64_t2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+uint64_t3 min(uint64_t3, uint64_t3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+uint64_t4 min(uint64_t4, uint64_t4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_min))) float
+min(float, float);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+float2 min(float2, float2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+float3 min(float3, float3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+float4 min(float4, float4);
+
+__attribute__((clang_builtin_alias(__builtin_elementwise_min))) double
+min(double, double);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+double2 min(double2, double2);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+double3 min(double3, double3);
+__attribute__((clang_builtin_alias(__builtin_elementwise_min)))
+double4 min(double4, double4);
+
} // namespace hlsl
#endif //_HLSL_HLSL_INTRINSICS_H_
diff --git a/contrib/llvm-project/clang/lib/Headers/immintrin.h b/contrib/llvm-project/clang/lib/Headers/immintrin.h
index 0d2e8be6e486..642602be14e6 100644
--- a/contrib/llvm-project/clang/lib/Headers/immintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/immintrin.h
@@ -270,6 +270,26 @@
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SHA512__)
+#include <sha512intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SM3__)
+#include <sm3intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SM4__)
+#include <sm4intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVXVNNIINT16__)
+#include <avxvnniint16intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__RDPID__)
/// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).
///
@@ -284,30 +304,53 @@ _rdpid_u32(void) {
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__RDRND__)
+/// Generates a 16-bit hardware random value and stores it at \a __p.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDRAND </c> instruction.
+///
+/// \param __p
+/// A pointer to a 16-bit memory location to place the random value.
+/// \returns 1 if the value was successfully generated, 0 otherwise.
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand16_step(unsigned short *__p)
{
// The builtin stores the random value through __p and returns the success
// status: 1 if a value was generated, 0 otherwise.
return (int)__builtin_ia32_rdrand16_step(__p);
}
+/// Generates a 32-bit hardware random value and stores it at \a __p.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDRAND </c> instruction.
+///
+/// \param __p
+/// A pointer to a 32-bit memory location to place the random value.
+/// \returns 1 if the value was successfully generated, 0 otherwise.
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand32_step(unsigned int *__p)
{
// The builtin stores the random value through __p and returns the success
// status: 1 if a value was generated, 0 otherwise.
return (int)__builtin_ia32_rdrand32_step(__p);
}
-#ifdef __x86_64__
+/// Generates a 64-bit hardware random value and stores it at \a __p.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDRAND </c> instruction.
+///
+/// \param __p
+/// A pointer to a 64-bit memory location to place the random value.
+/// \returns 1 if the value was successfully generated, 0 otherwise.
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand64_step(unsigned long long *__p)
{
+#ifdef __x86_64__
return (int)__builtin_ia32_rdrand64_step(__p);
-}
#else
-// We need to emulate the functionality of 64-bit rdrand with 2 32-bit
-// rdrand instructions.
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-_rdrand64_step(unsigned long long *__p)
-{
+ // We need to emulate the functionality of 64-bit rdrand with 2 32-bit
+ // rdrand instructions.
unsigned int __lo, __hi;
unsigned int __res_lo = __builtin_ia32_rdrand32_step(&__lo);
unsigned int __res_hi = __builtin_ia32_rdrand32_step(&__hi);
@@ -318,55 +361,115 @@ _rdrand64_step(unsigned long long *__p)
*__p = 0;
return 0;
}
-}
#endif
+}
#endif /* __RDRND__ */
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__FSGSBASE__)
#ifdef __x86_64__
+/// Reads the FS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDFSBASE </c> instruction.
+///
+/// \returns The lower 32 bits of the FS base register.
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readfsbase_u32(void)
{
// Low 32 bits of the FS base, via the RDFSBASE builtin.
return __builtin_ia32_rdfsbase32();
}
+/// Reads the FS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDFSBASE </c> instruction.
+///
+/// \returns The contents of the FS base register.
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readfsbase_u64(void)
{
// Full 64-bit FS base, via the RDFSBASE builtin.
return __builtin_ia32_rdfsbase64();
}
+/// Reads the GS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDGSBASE </c> instruction.
+///
+/// \returns The lower 32 bits of the GS base register.
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readgsbase_u32(void)
{
// Low 32 bits of the GS base, via the RDGSBASE builtin.
return __builtin_ia32_rdgsbase32();
}
+/// Reads the GS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDGSBASE </c> instruction.
+///
+/// \returns The contents of the GS base register.
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readgsbase_u64(void)
{
// Full 64-bit GS base, via the RDGSBASE builtin.
return __builtin_ia32_rdgsbase64();
}
+/// Modifies the FS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> WRFSBASE </c> instruction.
+///
+/// \param __V
+/// Value to use for the lower 32 bits of the FS base register.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writefsbase_u32(unsigned int __V)
{
// Writes the low 32 bits of the FS base, via the WRFSBASE builtin.
__builtin_ia32_wrfsbase32(__V);
}
+/// Modifies the FS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> WRFSBASE </c> instruction.
+///
+/// \param __V
+/// Value to use for the FS base register.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writefsbase_u64(unsigned long long __V)
{
// Writes the full 64-bit FS base, via the WRFSBASE builtin.
__builtin_ia32_wrfsbase64(__V);
}
+/// Modifies the GS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> WRGSBASE </c> instruction.
+///
+/// \param __V
+/// Value to use for the lower 32 bits of the GS base register.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writegsbase_u32(unsigned int __V)
{
// Writes the low 32 bits of the GS base, via the WRGSBASE builtin.
__builtin_ia32_wrgsbase32(__V);
}
+/// Modifies the GS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> WRGSBASE </c> instruction.
+///
+/// \param __V
+/// Value to use for the GS base register.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writegsbase_u64(unsigned long long __V)
{
@@ -539,6 +642,11 @@ _storebe_i64(void * __P, long long __D) {
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AMX_COMPLEX__)
+#include <amxcomplexintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__AVX512VP2INTERSECT__)
#include <avx512vp2intersectintrin.h>
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/limits.h b/contrib/llvm-project/clang/lib/Headers/limits.h
index 32cc901b26be..354e031a9d7b 100644
--- a/contrib/llvm-project/clang/lib/Headers/limits.h
+++ b/contrib/llvm-project/clang/lib/Headers/limits.h
@@ -52,7 +52,11 @@
#define LONG_MIN (-__LONG_MAX__ -1L)
#define UCHAR_MAX (__SCHAR_MAX__*2 +1)
-#define USHRT_MAX (__SHRT_MAX__ *2 +1)
+#if __SHRT_WIDTH__ < __INT_WIDTH__
+#define USHRT_MAX (__SHRT_MAX__ * 2 + 1)
+#else
+#define USHRT_MAX (__SHRT_MAX__ * 2U + 1U)
+#endif
#define UINT_MAX (__INT_MAX__ *2U +1U)
#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/ctype.h b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/ctype.h
new file mode 100644
index 000000000000..e20b7bb58f43
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/ctype.h
@@ -0,0 +1,85 @@
+//===-- Wrapper for C standard ctype.h declarations on the GPU ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLANG_LLVM_LIBC_WRAPPERS_CTYPE_H__
+#define __CLANG_LLVM_LIBC_WRAPPERS_CTYPE_H__
+
+#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
+#error "This file is for GPU offloading compilation only"
+#endif
+
+#include_next <ctype.h>
+
+#if __has_include(<llvm-libc-decls/ctype.h>)
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define __LIBC_ATTRS __attribute__((device))
+#endif
+
+// The GNU headers like to provide these as macros, we need to undefine them so
+// they do not conflict with the following definitions for the GPU.
+
+#pragma push_macro("isalnum")
+#pragma push_macro("isalpha")
+#pragma push_macro("isblank")
+#pragma push_macro("iscntrl")
+#pragma push_macro("isdigit")
+#pragma push_macro("isgraph")
+#pragma push_macro("islower")
+#pragma push_macro("isprint")
+#pragma push_macro("ispunct")
+#pragma push_macro("isspace")
+#pragma push_macro("isupper")
+#pragma push_macro("isxdigit")
+#pragma push_macro("tolower")
+#pragma push_macro("toupper")
+
+#undef isalnum
+#undef isalpha
+#undef iscntrl
+#undef isdigit
+#undef islower
+#undef isgraph
+#undef isprint
+#undef ispunct
+#undef isspace
+#undef isupper
+#undef isblank
+#undef isxdigit
+#undef tolower
+#undef toupper
+
+#pragma omp begin declare target
+
+#include <llvm-libc-decls/ctype.h>
+
+#pragma omp end declare target
+
+// Restore the original macros when compiling on the host.
+#if !defined(__NVPTX__) && !defined(__AMDGPU__)
+#pragma pop_macro("isalnum")
+#pragma pop_macro("isalpha")
+#pragma pop_macro("isblank")
+#pragma pop_macro("iscntrl")
+#pragma pop_macro("isdigit")
+#pragma pop_macro("isgraph")
+#pragma pop_macro("islower")
+#pragma pop_macro("isprint")
+#pragma pop_macro("ispunct")
+#pragma pop_macro("isspace")
+#pragma pop_macro("isupper")
+#pragma pop_macro("isxdigit")
+#pragma pop_macro("tolower")
+#pragma pop_macro("toupper")
+#endif
+
+#undef __LIBC_ATTRS
+
+#endif
+
+#endif // __CLANG_LLVM_LIBC_WRAPPERS_CTYPE_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/inttypes.h b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/inttypes.h
new file mode 100644
index 000000000000..415f1e4b7bca
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/inttypes.h
@@ -0,0 +1,34 @@
+//===-- Wrapper for C standard inttypes.h declarations on the GPU ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLANG_LLVM_LIBC_WRAPPERS_INTTYPES_H__
+#define __CLANG_LLVM_LIBC_WRAPPERS_INTTYPES_H__
+
+#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
+#error "This file is for GPU offloading compilation only"
+#endif
+
+#include_next <inttypes.h>
+
+#if __has_include(<llvm-libc-decls/inttypes.h>)
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define __LIBC_ATTRS __attribute__((device))
+#endif
+
+#pragma omp begin declare target
+
+#include <llvm-libc-decls/inttypes.h>
+
+#pragma omp end declare target
+
+#undef __LIBC_ATTRS
+
+#endif
+
+#endif // __CLANG_LLVM_LIBC_WRAPPERS_INTTYPES_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/llvm-libc-decls/README.txt b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/llvm-libc-decls/README.txt
new file mode 100644
index 000000000000..e012cd9e2931
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/llvm-libc-decls/README.txt
@@ -0,0 +1,6 @@
+LLVM libc declarations
+======================
+
+This directory will be filled by the `libc` project with declarations that are
+available on the device. Each declaration will use the `__LIBC_ATTRS` attribute
+to control emission on the device side.
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdio.h b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdio.h
new file mode 100644
index 000000000000..51b0f0e33077
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdio.h
@@ -0,0 +1,34 @@
+//===-- Wrapper for C standard stdio.h declarations on the GPU ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLANG_LLVM_LIBC_WRAPPERS_STDIO_H__
+#define __CLANG_LLVM_LIBC_WRAPPERS_STDIO_H__
+
+#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
+#error "This file is for GPU offloading compilation only"
+#endif
+
+#include_next <stdio.h>
+
+#if __has_include(<llvm-libc-decls/stdio.h>)
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define __LIBC_ATTRS __attribute__((device))
+#endif
+
+#pragma omp begin declare target
+
+#include <llvm-libc-decls/stdio.h>
+
+#pragma omp end declare target
+
+#undef __LIBC_ATTRS
+
+#endif
+
+#endif // __CLANG_LLVM_LIBC_WRAPPERS_STDIO_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdlib.h b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdlib.h
new file mode 100644
index 000000000000..9cb2b4e64aa6
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdlib.h
@@ -0,0 +1,42 @@
+//===-- Wrapper for C standard stdlib.h declarations on the GPU -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLANG_LLVM_LIBC_WRAPPERS_STDLIB_H__
+#define __CLANG_LLVM_LIBC_WRAPPERS_STDLIB_H__
+
+#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
+#error "This file is for GPU offloading compilation only"
+#endif
+
+#include_next <stdlib.h>
+
+#if __has_include(<llvm-libc-decls/stdlib.h>)
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define __LIBC_ATTRS __attribute__((device))
+#endif
+
+#pragma omp begin declare target
+
+// The LLVM C library uses this type so we forward declare it.
+typedef void (*__atexithandler_t)(void);
+
+// Enforce ABI compatibility with the structs used by the LLVM C library.
+_Static_assert(__builtin_offsetof(div_t, quot) == 0, "ABI mismatch!");
+_Static_assert(__builtin_offsetof(ldiv_t, quot) == 0, "ABI mismatch!");
+_Static_assert(__builtin_offsetof(lldiv_t, quot) == 0, "ABI mismatch!");
+
+#include <llvm-libc-decls/stdlib.h>
+
+#pragma omp end declare target
+
+#undef __LIBC_ATTRS
+
+#endif
+
+#endif // __CLANG_LLVM_LIBC_WRAPPERS_STDLIB_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/string.h b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/string.h
new file mode 100644
index 000000000000..027c415c1d0f
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/string.h
@@ -0,0 +1,37 @@
+//===-- Wrapper for C standard string.h declarations on the GPU -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLANG_LLVM_LIBC_WRAPPERS_STRING_H__
+#define __CLANG_LLVM_LIBC_WRAPPERS_STRING_H__
+
+#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
+#error "This file is for GPU offloading compilation only"
+#endif
+
+// FIXME: The GNU headers provide C++ standard compliant headers when in C++
+// mode and the LLVM libc does not. We cannot enable memchr, strchr, strchrnul,
+// strpbrk, strrchr, strstr, or strcasestr until this is addressed.
+#include_next <string.h>
+
+#if __has_include(<llvm-libc-decls/string.h>)
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define __LIBC_ATTRS __attribute__((device))
+#endif
+
+#pragma omp begin declare target
+
+#include <llvm-libc-decls/string.h>
+
+#pragma omp end declare target
+
+#undef __LIBC_ATTRS
+
+#endif
+
+#endif // __CLANG_LLVM_LIBC_WRAPPERS_STRING_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/mwaitxintrin.h b/contrib/llvm-project/clang/lib/Headers/mwaitxintrin.h
index ed485380af79..65f427105b41 100644
--- a/contrib/llvm-project/clang/lib/Headers/mwaitxintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/mwaitxintrin.h
@@ -16,12 +16,41 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mwaitx")))
+
+/// Establishes a linear address memory range to be monitored and puts
+/// the processor in the monitor event pending state. Data stored in the
+/// monitored address range causes the processor to exit the pending state.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MONITORX instruction.
+///
+/// \param __p
+/// The memory range to be monitored. The size of the range is determined by
+/// CPUID function 0000_0005h.
+/// \param __extensions
+/// Optional extensions for the monitoring state.
+/// \param __hints
+/// Optional hints for the monitoring state.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_monitorx(void * __p, unsigned __extensions, unsigned __hints)
{
__builtin_ia32_monitorx(__p, __extensions, __hints);
}
+/// Used with the \c MONITORX instruction to wait while the processor is in
+/// the monitor event pending state. Data stored in the monitored address
+/// range, or an interrupt, causes the processor to exit the pending state.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MWAITX instruction.
+///
+/// \param __extensions
+/// Optional extensions for the monitoring state, which can vary by
+/// processor.
+/// \param __hints
+/// Optional hints for the monitoring state, which can vary by processor.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_mwaitx(unsigned __extensions, unsigned __hints, unsigned __clock)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h b/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
index fad2f9c0272b..af3deae892c7 100644
--- a/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
+++ b/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
@@ -474,6 +474,9 @@ typedef enum memory_order
#define CLK_HALF_FLOAT 0x10DD
#define CLK_FLOAT 0x10DE
#define CLK_UNORM_INT24 0x10DF
+#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0
+#define CLK_UNORM_INT_101010_2 0x10E0
+#endif // __OPENCL_C_VERSION__ >= CL_VERSION_3_0
// Channel order, numbering must be aligned with cl_channel_order in cl.h
//
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
index 279fb26fbaf7..d5b6846b0348 100644
--- a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
@@ -40,7 +40,6 @@ extern "C" {
// Import types which will be used by __clang_hip_libdevice_declares.h
#ifndef __cplusplus
-#include <stdbool.h>
#include <stdint.h>
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new
index 985ddc567f49..8bad3f19d625 100644
--- a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new
@@ -13,7 +13,7 @@
// which do not use nothrow_t are provided without the <new> header.
#include_next <new>
-#if defined(__NVPTX__) && defined(_OPENMP)
+#if (defined(__NVPTX__) || defined(__AMDGPU__)) && defined(_OPENMP)
#include <cstdlib>
diff --git a/contrib/llvm-project/clang/lib/Headers/pmmintrin.h b/contrib/llvm-project/clang/lib/Headers/pmmintrin.h
index ee660e95d274..203c0aa0f8c6 100644
--- a/contrib/llvm-project/clang/lib/Headers/pmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/pmmintrin.h
@@ -253,9 +253,12 @@ _mm_movedup_pd(__m128d __a)
/// the processor in the monitor event pending state. Data stored in the
/// monitored address range causes the processor to exit the pending state.
///
+/// The \c MONITOR instruction can be used in kernel mode, and in other modes
+/// if MSR <c> C001_0015h[MonMwaitUserEn] </c> is set.
+///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> MONITOR </c> instruction.
+/// This intrinsic corresponds to the \c MONITOR instruction.
///
/// \param __p
/// The memory range to be monitored. The size of the range is determined by
@@ -270,19 +273,22 @@ _mm_monitor(void const *__p, unsigned __extensions, unsigned __hints)
__builtin_ia32_monitor(__p, __extensions, __hints);
}
-/// Used with the MONITOR instruction to wait while the processor is in
+/// Used with the \c MONITOR instruction to wait while the processor is in
/// the monitor event pending state. Data stored in the monitored address
-/// range causes the processor to exit the pending state.
+/// range, or an interrupt, causes the processor to exit the pending state.
+///
+/// The \c MWAIT instruction can be used in kernel mode, and in other modes if
+/// MSR <c> C001_0015h[MonMwaitUserEn] </c> is set.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> MWAIT </c> instruction.
+/// This intrinsic corresponds to the \c MWAIT instruction.
///
/// \param __extensions
-/// Optional extensions for the monitoring state, which may vary by
+/// Optional extensions for the monitoring state, which can vary by
/// processor.
/// \param __hints
-/// Optional hints for the monitoring state, which may vary by processor.
+/// Optional hints for the monitoring state, which can vary by processor.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_mwait(unsigned __extensions, unsigned __hints)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h
index 0814ea5593ba..fc18ab9d43b1 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h
@@ -46,6 +46,7 @@
/* SSE2 */
typedef __vector double __v2df;
+typedef __vector float __v4f;
typedef __vector long long __v2di;
typedef __vector unsigned long long __v2du;
typedef __vector int __v4si;
@@ -951,7 +952,7 @@ extern __inline __m128d
_mm_cvtpi32_pd(__m64 __A) {
__v4si __temp;
__v2di __tmp2;
- __v2df __result;
+ __v4f __result;
__temp = (__v4si)vec_splats(__A);
__tmp2 = (__v2di)vec_unpackl(__temp);
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h
index 6fe6d2a157a5..349b395c4f00 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h
@@ -305,9 +305,9 @@ extern __inline int
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_epi16(__m128i __A, __m128i __B, const int __imm8) {
- __v16qi __charmask = vec_splats((signed char)__imm8);
+ __v16qu __charmask = vec_splats((unsigned char)__imm8);
__charmask = vec_gb(__charmask);
- __v8hu __shortmask = (__v8hu)vec_unpackh(__charmask);
+ __v8hu __shortmask = (__v8hu)vec_unpackh((__v16qi)__charmask);
#ifdef __BIG_ENDIAN__
__shortmask = vec_reve(__shortmask);
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/rdseedintrin.h b/contrib/llvm-project/clang/lib/Headers/rdseedintrin.h
index 405bc2451eb8..8a4fe093055b 100644
--- a/contrib/llvm-project/clang/lib/Headers/rdseedintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/rdseedintrin.h
@@ -7,8 +7,8 @@
*===-----------------------------------------------------------------------===
*/
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
-#error "Never use <rdseedintrin.h> directly; include <x86intrin.h> instead."
+#ifndef __IMMINTRIN_H
+#error "Never use <rdseedintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __RDSEEDINTRIN_H
@@ -17,12 +17,54 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed")))
+/// Stores a hardware-generated 16-bit random value in the memory at \a __p.
+///
+/// The random number generator complies with NIST SP800-90B and SP800-90C.
+///
+/// \code{.operation}
+/// IF HW_NRND_GEN.ready == 1
+/// Store16(__p, HW_NRND_GEN.data)
+/// result := 1
+/// ELSE
+/// Store16(__p, 0)
+/// result := 0
+/// END
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c RDSEED instruction.
+///
+/// \param __p
+/// Pointer to memory for storing the 16-bit random number.
+/// \returns 1 if a random number was generated, 0 if not.
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed16_step(unsigned short *__p)
{
return (int) __builtin_ia32_rdseed16_step(__p);
}
+/// Stores a hardware-generated 32-bit random value in the memory at \a __p.
+///
+/// The random number generator complies with NIST SP800-90B and SP800-90C.
+///
+/// \code{.operation}
+/// IF HW_NRND_GEN.ready == 1
+/// Store32(__p, HW_NRND_GEN.data)
+/// result := 1
+/// ELSE
+/// Store32(__p, 0)
+/// result := 0
+/// END
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c RDSEED instruction.
+///
+/// \param __p
+/// Pointer to memory for storing the 32-bit random number.
+/// \returns 1 if a random number was generated, 0 if not.
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed32_step(unsigned int *__p)
{
@@ -30,6 +72,27 @@ _rdseed32_step(unsigned int *__p)
}
#ifdef __x86_64__
+/// Stores a hardware-generated 64-bit random value in the memory at \a __p.
+///
+/// The random number generator complies with NIST SP800-90B and SP800-90C.
+///
+/// \code{.operation}
+/// IF HW_NRND_GEN.ready == 1
+/// Store64(__p, HW_NRND_GEN.data)
+/// result := 1
+/// ELSE
+/// Store64(__p, 0)
+/// result := 0
+/// END
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c RDSEED instruction.
+///
+/// \param __p
+/// Pointer to memory for storing the 64-bit random number.
+/// \returns 1 if a random number was generated, 0 if not.
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed64_step(unsigned long long *__p)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/riscv_ntlh.h b/contrib/llvm-project/clang/lib/Headers/riscv_ntlh.h
new file mode 100644
index 000000000000..9ce170920583
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/riscv_ntlh.h
@@ -0,0 +1,28 @@
+/*===---- riscv_ntlh.h - RISC-V NTLH intrinsics ----------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __RISCV_NTLH_H
+#define __RISCV_NTLH_H
+
+#ifndef __riscv_zihintntl
+#error "NTLH intrinsics require the NTLH extension."
+#endif
+
+enum {
+ __RISCV_NTLH_INNERMOST_PRIVATE = 2,
+ __RISCV_NTLH_ALL_PRIVATE,
+ __RISCV_NTLH_INNERMOST_SHARED,
+ __RISCV_NTLH_ALL
+};
+
+#define __riscv_ntl_load(PTR, DOMAIN) __builtin_riscv_ntl_load((PTR), (DOMAIN))
+#define __riscv_ntl_store(PTR, VAL, DOMAIN) \
+ __builtin_riscv_ntl_store((PTR), (VAL), (DOMAIN))
+
+#endif \ No newline at end of file
diff --git a/contrib/llvm-project/clang/lib/Headers/sha512intrin.h b/contrib/llvm-project/clang/lib/Headers/sha512intrin.h
new file mode 100644
index 000000000000..065ef5dac25a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/sha512intrin.h
@@ -0,0 +1,200 @@
+/*===--------------- sha512intrin.h - SHA512 intrinsics -----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <sha512intrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __SHA512INTRIN_H
+#define __SHA512INTRIN_H
+
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, __target__("sha512"), \
+ __min_vector_width__(256)))
+
+/// This intrinsic is one of the two SHA512 message scheduling instructions.
+/// The intrinsic performs an intermediate calculation for the next four
+/// SHA512 message qwords. The calculated results are stored in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_sha512msg1_epi64(__m256i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSHA512MSG1 instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x long long].
+/// \param __B
+/// A 128-bit vector of [2 x long long].
+/// \returns
+/// A 256-bit vector of [4 x long long].
+///
+/// \code{.operation}
+/// DEFINE ROR64(qword, n) {
+/// count := n % 64
+/// dest := (qword >> count) | (qword << (64 - count))
+/// RETURN dest
+/// }
+/// DEFINE SHR64(qword, n) {
+/// RETURN qword >> n
+/// }
+/// DEFINE s0(qword) {
+/// RETURN ROR64(qword,1) ^ ROR64(qword, 8) ^ SHR64(qword, 7)
+/// }
+/// W[4] := __B.qword[0]
+/// W[3] := __A.qword[3]
+/// W[2] := __A.qword[2]
+/// W[1] := __A.qword[1]
+/// W[0] := __A.qword[0]
+/// dst.qword[3] := W[3] + s0(W[4])
+/// dst.qword[2] := W[2] + s0(W[3])
+/// dst.qword[1] := W[1] + s0(W[2])
+/// dst.qword[0] := W[0] + s0(W[1])
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sha512msg1_epi64(__m256i __A, __m128i __B) {
+ return (__m256i)__builtin_ia32_vsha512msg1((__v4du)__A, (__v2du)__B);
+}
+
+/// This intrinsic is one of the two SHA512 message scheduling instructions.
+/// The intrinsic performs the final calculation for the next four SHA512
+/// message qwords. The calculated results are stored in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_sha512msg2_epi64(__m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSHA512MSG2 instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x long long].
+/// \param __B
+/// A 256-bit vector of [4 x long long].
+/// \returns
+/// A 256-bit vector of [4 x long long].
+///
+/// \code{.operation}
+/// DEFINE ROR64(qword, n) {
+/// count := n % 64
+/// dest := (qword >> count) | (qword << (64 - count))
+/// RETURN dest
+/// }
+/// DEFINE SHR64(qword, n) {
+/// RETURN qword >> n
+/// }
+/// DEFINE s1(qword) {
+/// RETURN ROR64(qword,19) ^ ROR64(qword, 61) ^ SHR64(qword, 6)
+/// }
+/// W[14] := __B.qword[2]
+/// W[15] := __B.qword[3]
+/// W[16] := __A.qword[0] + s1(W[14])
+/// W[17] := __A.qword[1] + s1(W[15])
+/// W[18] := __A.qword[2] + s1(W[16])
+/// W[19] := __A.qword[3] + s1(W[17])
+/// dst.qword[3] := W[19]
+/// dst.qword[2] := W[18]
+/// dst.qword[1] := W[17]
+/// dst.qword[0] := W[16]
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sha512msg2_epi64(__m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vsha512msg2((__v4du)__A, (__v4du)__B);
+}
+
+/// This intrinsic performs two rounds of SHA512 operation using initial SHA512
+/// state (C,D,G,H) from \a __A, an initial SHA512 state (A,B,E,F) from
+/// \a __B, and a pre-computed sum of the next two round message qwords and
+/// the corresponding round constants from \a __C (only the two lower qwords
+/// of the third operand). The updated SHA512 state (A,B,E,F) is written to
+/// \a dst, and \a dst can be used as the updated state (C,D,G,H) in later
+/// rounds.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_sha512rnds2_epi64(__m256i __A, __m256i __B, __m128i __C)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSHA512RNDS2 instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x long long].
+/// \param __B
+/// A 256-bit vector of [4 x long long].
+/// \param __C
+/// A 128-bit vector of [2 x long long].
+/// \returns
+/// A 256-bit vector of [4 x long long].
+///
+/// \code{.operation}
+/// DEFINE ROR64(qword, n) {
+/// count := n % 64
+/// dest := (qword >> count) | (qword << (64 - count))
+/// RETURN dest
+/// }
+/// DEFINE SHR64(qword, n) {
+/// RETURN qword >> n
+/// }
+/// DEFINE cap_sigma0(qword) {
+/// RETURN ROR64(qword,28) ^ ROR64(qword, 34) ^ ROR64(qword, 39)
+/// }
+/// DEFINE cap_sigma1(qword) {
+/// RETURN ROR64(qword,14) ^ ROR64(qword, 18) ^ ROR64(qword, 41)
+/// }
+/// DEFINE MAJ(a,b,c) {
+/// RETURN (a & b) ^ (a & c) ^ (b & c)
+/// }
+/// DEFINE CH(e,f,g) {
+/// RETURN (e & f) ^ (g & ~e)
+/// }
+/// A[0] := __B.qword[3]
+/// B[0] := __B.qword[2]
+/// C[0] := __A.qword[3]
+/// D[0] := __A.qword[2]
+/// E[0] := __B.qword[1]
+/// F[0] := __B.qword[0]
+/// G[0] := __A.qword[1]
+/// H[0] := __A.qword[0]
+/// WK[0]:= __C.qword[0]
+/// WK[1]:= __C.qword[1]
+/// FOR i := 0 to 1:
+/// A[i+1] := CH(E[i], F[i], G[i]) +
+/// cap_sigma1(E[i]) + WK[i] + H[i] +
+/// MAJ(A[i], B[i], C[i]) +
+/// cap_sigma0(A[i])
+/// B[i+1] := A[i]
+/// C[i+1] := B[i]
+/// D[i+1] := C[i]
+/// E[i+1] := CH(E[i], F[i], G[i]) +
+/// cap_sigma1(E[i]) + WK[i] + H[i] + D[i]
+/// F[i+1] := E[i]
+/// G[i+1] := F[i]
+/// H[i+1] := G[i]
+/// ENDFOR
+/// dst.qword[3] := A[2]
+/// dst.qword[2] := B[2]
+/// dst.qword[1] := E[2]
+/// dst.qword[0] := F[2]
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sha512rnds2_epi64(__m256i __A, __m256i __B, __m128i __C) {
+ return (__m256i)__builtin_ia32_vsha512rnds2((__v4du)__A, (__v4du)__B,
+ (__v2du)__C);
+}
+
+#undef __DEFAULT_FN_ATTRS256
+
+#endif // __SHA512INTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/shaintrin.h b/contrib/llvm-project/clang/lib/Headers/shaintrin.h
index 08b1fb1dc16a..232e1fa29823 100644
--- a/contrib/llvm-project/clang/lib/Headers/shaintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/shaintrin.h
@@ -17,39 +17,167 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha"), __min_vector_width__(128)))
+/// Performs four iterations of the inner loop of the SHA-1 message digest
+/// algorithm using the starting SHA-1 state (A, B, C, D) from the 128-bit
+/// vector of [4 x i32] in \a V1 and the next four 32-bit elements of the
+/// message from the 128-bit vector of [4 x i32] in \a V2. Note that the
+/// SHA-1 state variable E must have already been added to \a V2
+/// (\c _mm_sha1nexte_epu32() can perform this step). Returns the updated
+/// SHA-1 state (A, B, C, D) as a 128-bit vector of [4 x i32].
+///
+/// The SHA-1 algorithm has an inner loop of 80 iterations, twenty each
+/// with a different combining function and rounding constant. This
+/// intrinsic performs four iterations using a combining function and
+/// rounding constant selected by \a M[1:0].
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_sha1rnds4_epu32(__m128i V1, __m128i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c SHA1RNDS4 instruction.
+///
+/// \param V1
+/// A 128-bit vector of [4 x i32] containing the initial SHA-1 state.
+/// \param V2
+/// A 128-bit vector of [4 x i32] containing the next four elements of
+/// the message, plus SHA-1 state variable E.
+/// \param M
+/// An immediate value where bits [1:0] select among four possible
+/// combining functions and rounding constants (not specified here).
+/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 state.
#define _mm_sha1rnds4_epu32(V1, V2, M) \
__builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M))
+/// Calculates the SHA-1 state variable E from the SHA-1 state variables in
+/// the 128-bit vector of [4 x i32] in \a __X, adds that to the next set of
+/// four message elements in the 128-bit vector of [4 x i32] in \a __Y, and
+/// returns the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SHA1NEXTE instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] containing the current SHA-1 state.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the next four elements of the
+/// message.
+/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1
+/// values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_sha1nexte((__v4si)__X, (__v4si)__Y);
}
+/// Performs an intermediate calculation for deriving the next four SHA-1
+/// message elements using previous message elements from the 128-bit
+/// vectors of [4 x i32] in \a __X and \a __Y, and returns the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SHA1MSG1 instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] containing previous message elements.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing previous message elements.
+/// \returns A 128-bit vector of [4 x i32] containing the derived SHA-1
+/// elements.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha1msg1_epu32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_sha1msg1((__v4si)__X, (__v4si)__Y);
}
+/// Performs the final calculation for deriving the next four SHA-1 message
+/// elements using previous message elements from the 128-bit vectors of
+/// [4 x i32] in \a __X and \a __Y, and returns the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SHA1MSG2 instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] containing an intermediate result.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing previous message values.
+/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1
+/// values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha1msg2_epu32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_sha1msg2((__v4si)__X, (__v4si)__Y);
}
+/// Performs two rounds of SHA-256 operation using the following inputs: a
+/// starting SHA-256 state (C, D, G, H) from the 128-bit vector of
+/// [4 x i32] in \a __X; a starting SHA-256 state (A, B, E, F) from the
+/// 128-bit vector of [4 x i32] in \a __Y; and a pre-computed sum of the
+/// next two message elements (unsigned 32-bit integers) and corresponding
+/// rounding constants from the 128-bit vector of [4 x i32] in \a __Z.
+/// Returns the updated SHA-256 state (A, B, E, F) as a 128-bit vector of
+/// [4 x i32].
+///
+/// The SHA-256 algorithm has a core loop of 64 iterations. This intrinsic
+/// performs two of those iterations.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SHA256RNDS2 instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] containing part of the initial SHA-256
+/// state.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing part of the initial SHA-256
+/// state.
+/// \param __Z
+/// A 128-bit vector of [4 x i32] containing additional input to the
+/// SHA-256 operation.
+/// \returns A 128-bit vector of [4 x i32] with the updated SHA-256 state.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z)
{
return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__X, (__v4si)__Y, (__v4si)__Z);
}
+/// Performs an intermediate calculation for deriving the next four SHA-256
+/// message elements using previous message elements from the 128-bit
+/// vectors of [4 x i32] in \a __X and \a __Y, and returns the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SHA256MSG1 instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] containing previous message elements.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing previous message elements.
+/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-256
+/// values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha256msg1_epu32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_sha256msg1((__v4si)__X, (__v4si)__Y);
}
+/// Performs the final calculation for deriving the next four SHA-256 message
+/// elements using previous message elements from the 128-bit vectors of
+/// [4 x i32] in \a __X and \a __Y, and returns the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SHA256MSG2 instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] containing an intermediate result.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing previous message values.
+/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-256
+/// values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha256msg2_epu32(__m128i __X, __m128i __Y)
{
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysisKinds.td b/contrib/llvm-project/clang/lib/Headers/sifive_vector.h
index 20efd96b85fd..42d7224db614 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysisKinds.td
+++ b/contrib/llvm-project/clang/lib/Headers/sifive_vector.h
@@ -1,4 +1,4 @@
-//==--- DiagnosticAnalysisKinds.td - libanalysis diagnostics --------------===//
+//===----- sifive_vector.h - SiFive Vector definitions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,6 +6,11 @@
//
//===----------------------------------------------------------------------===//
-let Component = "Analysis" in {
+#ifndef _SIFIVE_VECTOR_H_
+#define _SIFIVE_VECTOR_H_
-}
+#include "riscv_vector.h"
+
+#pragma clang riscv intrinsic sifive_vector
+
+#endif //_SIFIVE_VECTOR_H_
diff --git a/contrib/llvm-project/clang/lib/Headers/sm3intrin.h b/contrib/llvm-project/clang/lib/Headers/sm3intrin.h
new file mode 100644
index 000000000000..8a3d8bc9ef01
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/sm3intrin.h
@@ -0,0 +1,238 @@
+/*===-------------------- sm3intrin.h - SM3 intrinsics ---------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <sm3intrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __SM3INTRIN_H
+#define __SM3INTRIN_H
+
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, __target__("sm3"), \
+ __min_vector_width__(128)))
+
+/// This intrinsic is one of the two SM3 message scheduling intrinsics. The
+/// intrinsic performs an initial calculation for the next four SM3 message
+/// words. The calculated results are stored in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_sm3msg1_epi32(__m128i __A, __m128i __B, __m128i __C)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSM3MSG1 instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x int].
+/// \param __B
+/// A 128-bit vector of [4 x int].
+/// \param __C
+/// A 128-bit vector of [4 x int].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32 - count))
+/// RETURN dest
+/// }
+/// DEFINE P1(x) {
+/// RETURN x ^ ROL32(x, 15) ^ ROL32(x, 23)
+/// }
+/// W[0] := __C.dword[0]
+/// W[1] := __C.dword[1]
+/// W[2] := __C.dword[2]
+/// W[3] := __C.dword[3]
+/// W[7] := __A.dword[0]
+/// W[8] := __A.dword[1]
+/// W[9] := __A.dword[2]
+/// W[10] := __A.dword[3]
+/// W[13] := __B.dword[0]
+/// W[14] := __B.dword[1]
+/// W[15] := __B.dword[2]
+/// TMP0 := W[7] ^ W[0] ^ ROL32(W[13], 15)
+/// TMP1 := W[8] ^ W[1] ^ ROL32(W[14], 15)
+/// TMP2 := W[9] ^ W[2] ^ ROL32(W[15], 15)
+/// TMP3 := W[10] ^ W[3]
+/// dst.dword[0] := P1(TMP0)
+/// dst.dword[1] := P1(TMP1)
+/// dst.dword[2] := P1(TMP2)
+/// dst.dword[3] := P1(TMP3)
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sm3msg1_epi32(__m128i __A,
+ __m128i __B,
+ __m128i __C) {
+ return (__m128i)__builtin_ia32_vsm3msg1((__v4su)__A, (__v4su)__B,
+ (__v4su)__C);
+}
+
+/// This intrinsic is one of the two SM3 message scheduling intrinsics. The
+/// intrinsic performs the final calculation for the next four SM3 message
+/// words. The calculated results are stored in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_sm3msg2_epi32(__m128i __A, __m128i __B, __m128i __C)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSM3MSG2 instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x int].
+/// \param __B
+/// A 128-bit vector of [4 x int].
+/// \param __C
+/// A 128-bit vector of [4 x int].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32-count))
+/// RETURN dest
+/// }
+/// WTMP[0] := __A.dword[0]
+/// WTMP[1] := __A.dword[1]
+/// WTMP[2] := __A.dword[2]
+/// WTMP[3] := __A.dword[3]
+/// W[3] := __B.dword[0]
+/// W[4] := __B.dword[1]
+/// W[5] := __B.dword[2]
+/// W[6] := __B.dword[3]
+/// W[10] := __C.dword[0]
+/// W[11] := __C.dword[1]
+/// W[12] := __C.dword[2]
+/// W[13] := __C.dword[3]
+/// W[16] := ROL32(W[3], 7) ^ W[10] ^ WTMP[0]
+/// W[17] := ROL32(W[4], 7) ^ W[11] ^ WTMP[1]
+/// W[18] := ROL32(W[5], 7) ^ W[12] ^ WTMP[2]
+/// W[19] := ROL32(W[6], 7) ^ W[13] ^ WTMP[3]
+/// W[19] := W[19] ^ ROL32(W[16], 6) ^ ROL32(W[16], 15) ^ ROL32(W[16], 30)
+/// dst.dword[0] := W[16]
+/// dst.dword[1] := W[17]
+/// dst.dword[2] := W[18]
+/// dst.dword[3] := W[19]
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sm3msg2_epi32(__m128i __A,
+ __m128i __B,
+ __m128i __C) {
+ return (__m128i)__builtin_ia32_vsm3msg2((__v4su)__A, (__v4su)__B,
+ (__v4su)__C);
+}
+
+/// This intrinsic performs two rounds of SM3 operation using initial SM3 state
+/// (C, D, G, H) from \a __A, an initial SM3 states (A, B, E, F)
+/// from \a __B and a pre-computed words from the \a __C. \a __A with
+/// initial SM3 state of (C, D, G, H) assumes input of non-rotated left
+/// variables from previous state. The updated SM3 state (A, B, E, F) is
+/// written to \a __A. The \a imm8 should contain the even round number
+/// for the first of the two rounds computed by this instruction. The
+/// computation masks the \a imm8 value by AND'ing it with 0x3E so that only
+/// even round numbers from 0 through 62 are used for this operation. The
+/// calculated results are stored in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_sm3rnds2_epi32(__m128i __A, __m128i __B, __m128i __C, const int
+/// imm8) \endcode
+///
+/// This intrinsic corresponds to the \c VSM3RNDS2 instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x int].
+/// \param __B
+/// A 128-bit vector of [4 x int].
+/// \param __C
+/// A 128-bit vector of [4 x int].
+/// \param imm8
+/// An 8-bit constant integer.
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32-count))
+/// RETURN dest
+/// }
+/// DEFINE P0(dword) {
+/// RETURN dword ^ ROL32(dword, 9) ^ ROL32(dword, 17)
+/// }
+/// DEFINE FF(x,y,z, round){
+/// IF round < 16
+/// RETURN (x ^ y ^ z)
+/// ELSE
+/// RETURN (x & y) | (x & z) | (y & z)
+/// FI
+/// }
+/// DEFINE GG(x, y, z, round){
+/// IF round < 16
+/// RETURN (x ^ y ^ z)
+/// ELSE
+/// RETURN (x & y) | (~x & z)
+/// FI
+/// }
+/// A[0] := __B.dword[3]
+/// B[0] := __B.dword[2]
+/// C[0] := __A.dword[3]
+/// D[0] := __A.dword[2]
+/// E[0] := __B.dword[1]
+/// F[0] := __B.dword[0]
+/// G[0] := __A.dword[1]
+/// H[0] := __A.dword[0]
+/// W[0] := __C.dword[0]
+/// W[1] := __C.dword[1]
+/// W[4] := __C.dword[2]
+/// W[5] := __C.dword[3]
+/// C[0] := ROL32(C[0], 9)
+/// D[0] := ROL32(D[0], 9)
+/// G[0] := ROL32(G[0], 19)
+/// H[0] := ROL32(H[0], 19)
+/// ROUND := imm8 & 0x3E
+/// IF ROUND < 16
+/// CONST := 0x79CC4519
+/// ELSE
+/// CONST := 0x7A879D8A
+/// FI
+/// CONST := ROL32(CONST,ROUND)
+/// FOR i:= 0 to 1
+/// S1 := ROL32((ROL32(A[i], 12) + E[i] + CONST), 7)
+/// S2 := S1 ^ ROL32(A[i], 12)
+/// T1 := FF(A[i], B[i], C[i], ROUND) + D[i] + S2 + (W[i] ^ W[i+4])
+/// T2 := GG(E[i], F[i], G[i], ROUND) + H[i] + S1 + W[i]
+/// D[i+1] := C[i]
+/// C[i+1] := ROL32(B[i],9)
+/// B[i+1] := A[i]
+/// A[i+1] := T1
+/// H[i+1] := G[i]
+/// G[i+1] := ROL32(F[i], 19)
+/// F[i+1] := E[i]
+/// E[i+1] := P0(T2)
+/// CONST := ROL32(CONST, 1)
+/// ENDFOR
+/// dst.dword[3] := A[2]
+/// dst.dword[2] := B[2]
+/// dst.dword[1] := E[2]
+/// dst.dword[0] := F[2]
+/// dst[MAX:128] := 0
+/// \endcode
+#define _mm_sm3rnds2_epi32(A, B, C, D) \
+ (__m128i) __builtin_ia32_vsm3rnds2((__v4su)A, (__v4su)B, (__v4su)C, (int)D)
+
+#undef __DEFAULT_FN_ATTRS128
+
+#endif // __SM3INTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/sm4intrin.h b/contrib/llvm-project/clang/lib/Headers/sm4intrin.h
new file mode 100644
index 000000000000..47aeec46a6fc
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/sm4intrin.h
@@ -0,0 +1,269 @@
+/*===--------------- sm4intrin.h - SM4 intrinsics -----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <sm4intrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __SM4INTRIN_H
+#define __SM4INTRIN_H
+
+/// This intrinsic performs four rounds of SM4 key expansion. The intrinsic
+/// operates on independent 128-bit lanes. The calculated results are
+/// stored in \a dst.
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_sm4key4_epi32(__m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSM4KEY4 instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x int].
+/// \param __B
+/// A 128-bit vector of [4 x int].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32-count))
+/// RETURN dest
+/// }
+/// DEFINE SBOX_BYTE(dword, i) {
+/// RETURN sbox[dword.byte[i]]
+/// }
+/// DEFINE lower_t(dword) {
+/// tmp.byte[0] := SBOX_BYTE(dword, 0)
+/// tmp.byte[1] := SBOX_BYTE(dword, 1)
+/// tmp.byte[2] := SBOX_BYTE(dword, 2)
+/// tmp.byte[3] := SBOX_BYTE(dword, 3)
+/// RETURN tmp
+/// }
+/// DEFINE L_KEY(dword) {
+/// RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23)
+/// }
+/// DEFINE T_KEY(dword) {
+/// RETURN L_KEY(lower_t(dword))
+/// }
+/// DEFINE F_KEY(X0, X1, X2, X3, round_key) {
+/// RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key)
+/// }
+/// FOR i:= 0 to 0
+/// P[0] := __B.xmm[i].dword[0]
+/// P[1] := __B.xmm[i].dword[1]
+/// P[2] := __B.xmm[i].dword[2]
+/// P[3] := __B.xmm[i].dword[3]
+/// C[0] := F_KEY(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])
+/// C[1] := F_KEY(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])
+/// C[2] := F_KEY(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])
+/// C[3] := F_KEY(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])
+/// DEST.xmm[i].dword[0] := C[0]
+/// DEST.xmm[i].dword[1] := C[1]
+/// DEST.xmm[i].dword[2] := C[2]
+/// DEST.xmm[i].dword[3] := C[3]
+/// ENDFOR
+/// DEST[MAX:128] := 0
+/// \endcode
+#define _mm_sm4key4_epi32(A, B) \
+ (__m128i) __builtin_ia32_vsm4key4128((__v4su)A, (__v4su)B)
+
+/// This intrinsic performs four rounds of SM4 key expansion. The intrinsic
+/// operates on independent 128-bit lanes. The calculated results are
+/// stored in \a dst.
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_sm4key4_epi32(__m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSM4KEY4 instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x int].
+/// \param __B
+/// A 256-bit vector of [8 x int].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32-count))
+/// RETURN dest
+/// }
+/// DEFINE SBOX_BYTE(dword, i) {
+/// RETURN sbox[dword.byte[i]]
+/// }
+/// DEFINE lower_t(dword) {
+/// tmp.byte[0] := SBOX_BYTE(dword, 0)
+/// tmp.byte[1] := SBOX_BYTE(dword, 1)
+/// tmp.byte[2] := SBOX_BYTE(dword, 2)
+/// tmp.byte[3] := SBOX_BYTE(dword, 3)
+/// RETURN tmp
+/// }
+/// DEFINE L_KEY(dword) {
+/// RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23)
+/// }
+/// DEFINE T_KEY(dword) {
+/// RETURN L_KEY(lower_t(dword))
+/// }
+/// DEFINE F_KEY(X0, X1, X2, X3, round_key) {
+/// RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key)
+/// }
+/// FOR i:= 0 to 1
+/// P[0] := __B.xmm[i].dword[0]
+/// P[1] := __B.xmm[i].dword[1]
+/// P[2] := __B.xmm[i].dword[2]
+/// P[3] := __B.xmm[i].dword[3]
+/// C[0] := F_KEY(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])
+/// C[1] := F_KEY(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])
+/// C[2] := F_KEY(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])
+/// C[3] := F_KEY(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])
+/// DEST.xmm[i].dword[0] := C[0]
+/// DEST.xmm[i].dword[1] := C[1]
+/// DEST.xmm[i].dword[2] := C[2]
+/// DEST.xmm[i].dword[3] := C[3]
+/// ENDFOR
+/// DEST[MAX:256] := 0
+/// \endcode
+#define _mm256_sm4key4_epi32(A, B) \
+ (__m256i) __builtin_ia32_vsm4key4256((__v8su)A, (__v8su)B)
+
+/// This intrinsic performs four rounds of SM4 encryption. The intrinsic
+/// operates on independent 128-bit lanes. The calculated results are
+/// stored in \a dst.
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_sm4rnds4_epi32(__m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSM4RNDS4 instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x int].
+/// \param __B
+/// A 128-bit vector of [4 x int].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32-count))
+/// RETURN dest
+/// }
+/// DEFINE lower_t(dword) {
+/// tmp.byte[0] := SBOX_BYTE(dword, 0)
+/// tmp.byte[1] := SBOX_BYTE(dword, 1)
+/// tmp.byte[2] := SBOX_BYTE(dword, 2)
+/// tmp.byte[3] := SBOX_BYTE(dword, 3)
+/// RETURN tmp
+/// }
+/// DEFINE L_RND(dword) {
+/// tmp := dword
+/// tmp := tmp ^ ROL32(dword, 2)
+/// tmp := tmp ^ ROL32(dword, 10)
+/// tmp := tmp ^ ROL32(dword, 18)
+/// tmp := tmp ^ ROL32(dword, 24)
+/// RETURN tmp
+/// }
+/// DEFINE T_RND(dword) {
+/// RETURN L_RND(lower_t(dword))
+/// }
+/// DEFINE F_RND(X0, X1, X2, X3, round_key) {
+/// RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key)
+/// }
+/// FOR i:= 0 to 0
+/// P[0] := __B.xmm[i].dword[0]
+/// P[1] := __B.xmm[i].dword[1]
+/// P[2] := __B.xmm[i].dword[2]
+/// P[3] := __B.xmm[i].dword[3]
+/// C[0] := F_RND(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])
+/// C[1] := F_RND(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])
+/// C[2] := F_RND(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])
+/// C[3] := F_RND(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])
+/// DEST.xmm[i].dword[0] := C[0]
+/// DEST.xmm[i].dword[1] := C[1]
+/// DEST.xmm[i].dword[2] := C[2]
+/// DEST.xmm[i].dword[3] := C[3]
+/// ENDFOR
+/// DEST[MAX:128] := 0
+/// \endcode
+#define _mm_sm4rnds4_epi32(A, B) \
+ (__m128i) __builtin_ia32_vsm4rnds4128((__v4su)A, (__v4su)B)
+
+/// This intrinsic performs four rounds of SM4 encryption. The intrinsic
+/// operates on independent 128-bit lanes. The calculated results are
+/// stored in \a dst.
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_sm4rnds4_epi32(__m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSM4RNDS4 instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x int].
+/// \param __B
+/// A 256-bit vector of [8 x int].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32-count))
+/// RETURN dest
+/// }
+/// DEFINE lower_t(dword) {
+/// tmp.byte[0] := SBOX_BYTE(dword, 0)
+/// tmp.byte[1] := SBOX_BYTE(dword, 1)
+/// tmp.byte[2] := SBOX_BYTE(dword, 2)
+/// tmp.byte[3] := SBOX_BYTE(dword, 3)
+/// RETURN tmp
+/// }
+/// DEFINE L_RND(dword) {
+/// tmp := dword
+/// tmp := tmp ^ ROL32(dword, 2)
+/// tmp := tmp ^ ROL32(dword, 10)
+/// tmp := tmp ^ ROL32(dword, 18)
+/// tmp := tmp ^ ROL32(dword, 24)
+/// RETURN tmp
+/// }
+/// DEFINE T_RND(dword) {
+/// RETURN L_RND(lower_t(dword))
+/// }
+/// DEFINE F_RND(X0, X1, X2, X3, round_key) {
+/// RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key)
+/// }
+/// FOR i:= 0 to 1
+/// P[0] := __B.xmm[i].dword[0]
+/// P[1] := __B.xmm[i].dword[1]
+/// P[2] := __B.xmm[i].dword[2]
+/// P[3] := __B.xmm[i].dword[3]
+/// C[0] := F_RND(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])
+/// C[1] := F_RND(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])
+/// C[2] := F_RND(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])
+/// C[3] := F_RND(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])
+/// DEST.xmm[i].dword[0] := C[0]
+/// DEST.xmm[i].dword[1] := C[1]
+/// DEST.xmm[i].dword[2] := C[2]
+/// DEST.xmm[i].dword[3] := C[3]
+/// ENDFOR
+/// DEST[MAX:256] := 0
+/// \endcode
+#define _mm256_sm4rnds4_epi32(A, B) \
+ (__m256i) __builtin_ia32_vsm4rnds4256((__v8su)A, (__v8su)B)
+
+#endif // __SM4INTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/stdalign.h b/contrib/llvm-project/clang/lib/Headers/stdalign.h
index 6ad25db4539a..8ae6e658dd0a 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdalign.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdalign.h
@@ -10,6 +10,10 @@
#ifndef __STDALIGN_H
#define __STDALIGN_H
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if defined(__cplusplus) || \
+ (defined(__STDC_VERSION__) && __STDC_VERSION__ < 202000L)
#ifndef __cplusplus
#define alignas _Alignas
#define alignof _Alignof
@@ -17,5 +21,6 @@
#define __alignas_is_defined 1
#define __alignof_is_defined 1
+#endif /* __STDC_VERSION__ */
#endif /* __STDALIGN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/stdatomic.h b/contrib/llvm-project/clang/lib/Headers/stdatomic.h
index 0f893beea6ca..aed33d4333e8 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdatomic.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdatomic.h
@@ -45,9 +45,16 @@ extern "C" {
#define ATOMIC_POINTER_LOCK_FREE __CLANG_ATOMIC_POINTER_LOCK_FREE
/* 7.17.2 Initialization */
-
+/* FIXME: This is using the placeholder dates Clang produces for these macros
+ in C2x mode; switch to the correct values once they've been published. */
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ < 202000L) || \
+ defined(__cplusplus)
+/* ATOMIC_VAR_INIT was removed in C2x, but still remains in C++23. */
#define ATOMIC_VAR_INIT(value) (value)
-#if ((defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201710L) || \
+#endif
+
+#if ((defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201710L && \
+ __STDC_VERSION__ < 202000L) || \
(defined(__cplusplus) && __cplusplus >= 202002L)) && \
!defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS)
/* ATOMIC_VAR_INIT was deprecated in C17 and C++20. */
diff --git a/contrib/llvm-project/clang/lib/Headers/stddef.h b/contrib/llvm-project/clang/lib/Headers/stddef.h
index 42815176dcd0..539541f0ed41 100644
--- a/contrib/llvm-project/clang/lib/Headers/stddef.h
+++ b/contrib/llvm-project/clang/lib/Headers/stddef.h
@@ -103,6 +103,11 @@ using ::std::nullptr_t;
typedef typeof(nullptr) nullptr_t;
#endif /* defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L */
+#if defined(__need_STDDEF_H_misc) && defined(__STDC_VERSION__) && \
+ __STDC_VERSION__ >= 202000L
+#define unreachable() __builtin_unreachable()
+#endif /* defined(__need_STDDEF_H_misc) && >= C23 */
+
#if defined(__need_STDDEF_H_misc)
#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
(defined(__cplusplus) && __cplusplus >= 201103L)
diff --git a/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h b/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h
index f93de129f957..2327bec52522 100644
--- a/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h
+++ b/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h
@@ -961,17 +961,17 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_popcnt(v128_t __a) {
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a,
uint32_t __b) {
- return (v128_t)((__i8x16)__a << __b);
+ return (v128_t)((__i8x16)__a << (__b & 0x7));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a,
uint32_t __b) {
- return (v128_t)((__i8x16)__a >> __b);
+ return (v128_t)((__i8x16)__a >> (__b & 0x7));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a,
uint32_t __b) {
- return (v128_t)((__u8x16)__a >> __b);
+ return (v128_t)((__u8x16)__a >> (__b & 0x7));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a,
@@ -1047,17 +1047,17 @@ static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i16x8_bitmask(v128_t __a) {
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a,
uint32_t __b) {
- return (v128_t)((__i16x8)__a << __b);
+ return (v128_t)((__i16x8)__a << (__b & 0xF));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shr(v128_t __a,
uint32_t __b) {
- return (v128_t)((__i16x8)__a >> __b);
+ return (v128_t)((__i16x8)__a >> (__b & 0xF));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_shr(v128_t __a,
uint32_t __b) {
- return (v128_t)((__u16x8)__a >> __b);
+ return (v128_t)((__u16x8)__a >> (__b & 0xF));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a,
@@ -1138,17 +1138,17 @@ static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i32x4_bitmask(v128_t __a) {
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a,
uint32_t __b) {
- return (v128_t)((__i32x4)__a << __b);
+ return (v128_t)((__i32x4)__a << (__b & 0x1F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shr(v128_t __a,
uint32_t __b) {
- return (v128_t)((__i32x4)__a >> __b);
+ return (v128_t)((__i32x4)__a >> (__b & 0x1F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_shr(v128_t __a,
uint32_t __b) {
- return (v128_t)((__u32x4)__a >> __b);
+ return (v128_t)((__u32x4)__a >> (__b & 0x1F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_add(v128_t __a,
@@ -1209,17 +1209,17 @@ static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i64x2_bitmask(v128_t __a) {
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a,
uint32_t __b) {
- return (v128_t)((__i64x2)__a << (int64_t)__b);
+ return (v128_t)((__i64x2)__a << ((int64_t)__b & 0x3F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shr(v128_t __a,
uint32_t __b) {
- return (v128_t)((__i64x2)__a >> (int64_t)__b);
+ return (v128_t)((__i64x2)__a >> ((int64_t)__b & 0x3F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_shr(v128_t __a,
uint32_t __b) {
- return (v128_t)((__u64x2)__a >> (int64_t)__b);
+ return (v128_t)((__u64x2)__a >> ((int64_t)__b & 0x3F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_add(v128_t __a,
@@ -1760,6 +1760,126 @@ wasm_u64x2_load_32x2(const void *__mem) {
__DEPRECATED_WASM_MACRO("wasm_v64x2_shuffle", "wasm_i64x2_shuffle") \
wasm_i64x2_shuffle(__a, __b, __c0, __c1)
+// Relaxed SIMD intrinsics
+
+#define __RELAXED_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("relaxed-simd"), \
+ __min_vector_width__(128)))
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_f32x4_relaxed_madd(v128_t __a, v128_t __b, v128_t __c) {
+ return (v128_t)__builtin_wasm_relaxed_madd_f32x4((__f32x4)__a, (__f32x4)__b,
+ (__f32x4)__c);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_f32x4_relaxed_nmadd(v128_t __a, v128_t __b, v128_t __c) {
+ return (v128_t)__builtin_wasm_relaxed_nmadd_f32x4((__f32x4)__a, (__f32x4)__b,
+ (__f32x4)__c);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_f64x2_relaxed_madd(v128_t __a, v128_t __b, v128_t __c) {
+ return (v128_t)__builtin_wasm_relaxed_madd_f64x2((__f64x2)__a, (__f64x2)__b,
+ (__f64x2)__c);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_f64x2_relaxed_nmadd(v128_t __a, v128_t __b, v128_t __c) {
+ return (v128_t)__builtin_wasm_relaxed_nmadd_f64x2((__f64x2)__a, (__f64x2)__b,
+ (__f64x2)__c);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i8x16_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {
+ return (v128_t)__builtin_wasm_relaxed_laneselect_i8x16(
+ (__i8x16)__a, (__i8x16)__b, (__i8x16)__m);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i16x8_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {
+ return (v128_t)__builtin_wasm_relaxed_laneselect_i16x8(
+ (__i16x8)__a, (__i16x8)__b, (__i16x8)__m);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i32x4_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {
+ return (v128_t)__builtin_wasm_relaxed_laneselect_i32x4(
+ (__i32x4)__a, (__i32x4)__b, (__i32x4)__m);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i64x2_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {
+ return (v128_t)__builtin_wasm_relaxed_laneselect_i64x2(
+ (__i64x2)__a, (__i64x2)__b, (__i64x2)__m);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i8x16_relaxed_swizzle(v128_t __a, v128_t __s) {
+ return (v128_t)__builtin_wasm_relaxed_swizzle_i8x16((__i8x16)__a,
+ (__i8x16)__s);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_relaxed_min_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_relaxed_max_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_relaxed_min_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_relaxed_max_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i32x4_relaxed_trunc_f32x4(v128_t __a) {
+ return (v128_t)__builtin_wasm_relaxed_trunc_s_i32x4_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_u32x4_relaxed_trunc_f32x4(v128_t __a) {
+ return (v128_t)__builtin_wasm_relaxed_trunc_u_i32x4_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i32x4_relaxed_trunc_f64x2_zero(v128_t __a) {
+ return (v128_t)__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2((__f64x2)__a);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_u32x4_relaxed_trunc_f64x2_zero(v128_t __a) {
+ return (v128_t)__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2((__f64x2)__a);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i16x8_relaxed_q15mulr(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_relaxed_q15mulr_s_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i16x8_relaxed_dot_i8x16_i7x16(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_relaxed_dot_i8x16_i7x16_s_i16x8((__i8x16)__a,
+ (__i8x16)__b);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i32x4_relaxed_dot_i8x16_i7x16_add(v128_t __a, v128_t __b, v128_t __c) {
+ return (v128_t)__builtin_wasm_relaxed_dot_i8x16_i7x16_add_s_i32x4(
+ (__i8x16)__a, (__i8x16)__b, (__i32x4)__c);
+}
+
+// Deprecated intrinsics
+
static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_swizzle")
wasm_v8x16_swizzle(v128_t __a, v128_t __b) {
return wasm_i8x16_swizzle(__a, __b);
diff --git a/contrib/llvm-project/clang/lib/Headers/xsavecintrin.h b/contrib/llvm-project/clang/lib/Headers/xsavecintrin.h
index 5524947fa98e..1f2d001207e7 100644
--- a/contrib/llvm-project/clang/lib/Headers/xsavecintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/xsavecintrin.h
@@ -17,12 +17,62 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec")))
+/// Performs a full or partial save of processor state to the memory at
+/// \a __p. The exact state saved depends on the 64-bit mask \a __m and
+/// processor control register \c XCR0.
+///
+/// \code{.operation}
+/// mask[62:0] := __m[62:0] AND XCR0[62:0]
+/// FOR i := 0 TO 62
+/// IF mask[i] == 1
+/// CASE (i) OF
+/// 0: save X87 FPU state
+/// 1: save SSE state
+/// DEFAULT: __p.Ext_Save_Area[i] := ProcessorState[i]
+/// FI
+/// ENDFOR
+/// __p.Header.XSTATE_BV[62:0] := INIT_FUNCTION(mask[62:0])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c XSAVEC instruction.
+///
+/// \param __p
+/// Pointer to the save area; must be 64-byte aligned.
+/// \param __m
+/// A 64-bit mask indicating what state should be saved.
static __inline__ void __DEFAULT_FN_ATTRS
_xsavec(void *__p, unsigned long long __m) {
__builtin_ia32_xsavec(__p, __m);
}
#ifdef __x86_64__
+/// Performs a full or partial save of processor state to the memory at
+/// \a __p. The exact state saved depends on the 64-bit mask \a __m and
+/// processor control register \c XCR0.
+///
+/// \code{.operation}
+/// mask[62:0] := __m[62:0] AND XCR0[62:0]
+/// FOR i := 0 TO 62
+/// IF mask[i] == 1
+/// CASE (i) OF
+/// 0: save X87 FPU state
+/// 1: save SSE state
+/// DEFAULT: __p.Ext_Save_Area[i] := ProcessorState[i]
+/// FI
+/// ENDFOR
+/// __p.Header.XSTATE_BV[62:0] := INIT_FUNCTION(mask[62:0])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c XSAVEC64 instruction.
+///
+/// \param __p
+/// Pointer to the save area; must be 64-byte aligned.
+/// \param __m
+/// A 64-bit mask indicating what state should be saved.
static __inline__ void __DEFAULT_FN_ATTRS
_xsavec64(void *__p, unsigned long long __m) {
__builtin_ia32_xsavec64(__p, __m);
diff --git a/contrib/llvm-project/clang/lib/Index/IndexBody.cpp b/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
index 8b8235c13302..e5f1764550ff 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
@@ -203,9 +203,12 @@ public:
bool VisitDesignatedInitExpr(DesignatedInitExpr *E) {
for (DesignatedInitExpr::Designator &D : llvm::reverse(E->designators())) {
- if (D.isFieldDesignator() && D.getField())
- return IndexCtx.handleReference(D.getField(), D.getFieldLoc(), Parent,
- ParentDC, SymbolRoleSet(), {}, E);
+ if (D.isFieldDesignator()) {
+ if (const FieldDecl *FD = D.getFieldDecl()) {
+ return IndexCtx.handleReference(FD, D.getFieldLoc(), Parent,
+ ParentDC, SymbolRoleSet(), {}, E);
+ }
+ }
}
return true;
}
@@ -417,10 +420,13 @@ public:
auto visitSyntacticDesignatedInitExpr = [&](DesignatedInitExpr *E) -> bool {
for (DesignatedInitExpr::Designator &D : llvm::reverse(E->designators())) {
- if (D.isFieldDesignator() && D.getField())
- return IndexCtx.handleReference(D.getField(), D.getFieldLoc(),
- Parent, ParentDC, SymbolRoleSet(),
- {}, E);
+ if (D.isFieldDesignator()) {
+ if (const FieldDecl *FD = D.getFieldDecl()) {
+ return IndexCtx.handleReference(FD, D.getFieldLoc(), Parent,
+ ParentDC, SymbolRoleSet(),
+ /*Relations=*/{}, E);
+ }
+ }
}
return true;
};
diff --git a/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp b/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
index 882e02836d4f..1c04aa17d53f 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
@@ -705,6 +705,7 @@ public:
IndexCtx.handleReference(C->getNamedConcept(), C->getConceptNameLoc(),
Parent, TTP->getLexicalDeclContext());
} else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(TP)) {
+ IndexCtx.indexTypeSourceInfo(NTTP->getTypeSourceInfo(), Parent);
if (NTTP->hasDefaultArgument())
IndexCtx.indexBody(NTTP->getDefaultArgument(), Parent);
} else if (const auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(TP)) {
diff --git a/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp b/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
index a40c218a3c43..d7316538f606 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
@@ -71,7 +71,6 @@ bool index::isFunctionLocalSymbol(const Decl *D) {
return true;
case VisibleNoLinkage:
case UniqueExternalLinkage:
- case ModuleInternalLinkage:
llvm_unreachable("Not a sema linkage");
case ModuleLinkage:
case ExternalLinkage:
diff --git a/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp b/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
index d41c54348ac8..b10028a526ed 100644
--- a/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
+++ b/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
@@ -226,6 +226,11 @@ void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) {
if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
return;
+ if (D->getType().isNull()) {
+ IgnoreResults = true;
+ return;
+ }
+
const unsigned StartSize = Buf.size();
VisitDeclContext(D->getDeclContext());
if (Buf.size() == StartSize)
@@ -744,6 +749,8 @@ void USRGenerator::VisitType(QualType T) {
case BuiltinType::Id: \
Out << "@BT@" << Name; break;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
case BuiltinType::ShortAccum:
Out << "@BT@ShortAccum"; break;
case BuiltinType::Accum:
@@ -1139,6 +1146,15 @@ bool clang::index::generateUSRForDecl(const Decl *D,
// C++'s operator new function, can have invalid locations but it is fine to
// create USRs that can identify them.
+ // Check if the declaration has explicit external USR specified.
+ auto *CD = D->getCanonicalDecl();
+ if (auto *ExternalSymAttr = CD->getAttr<ExternalSourceSymbolAttr>()) {
+ if (!ExternalSymAttr->getUSR().empty()) {
+ llvm::raw_svector_ostream Out(Buf);
+ Out << ExternalSymAttr->getUSR();
+ return false;
+ }
+ }
USRGenerator UG(&D->getASTContext(), Buf);
UG.Visit(D);
return UG.ignoreResults();
diff --git a/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.cpp b/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.cpp
new file mode 100644
index 000000000000..8e39af6abf9d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.cpp
@@ -0,0 +1,176 @@
+//===---------- DeviceOffload.cpp - Device Offloading------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements offloading to CUDA devices.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DeviceOffload.h"
+
+#include "clang/Basic/TargetOptions.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/Frontend/CompilerInstance.h"
+
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace clang {
+
+IncrementalCUDADeviceParser::IncrementalCUDADeviceParser(
+ Interpreter &Interp, std::unique_ptr<CompilerInstance> Instance,
+ IncrementalParser &HostParser, llvm::LLVMContext &LLVMCtx,
+ llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS,
+ llvm::Error &Err)
+ : IncrementalParser(Interp, std::move(Instance), LLVMCtx, Err),
+ HostParser(HostParser), VFS(FS) {
+ if (Err)
+ return;
+ StringRef Arch = CI->getTargetOpts().CPU;
+ if (!Arch.starts_with("sm_") || Arch.substr(3).getAsInteger(10, SMVersion)) {
+ Err = llvm::joinErrors(std::move(Err), llvm::make_error<llvm::StringError>(
+ "Invalid CUDA architecture",
+ llvm::inconvertibleErrorCode()));
+ return;
+ }
+}
+
+llvm::Expected<PartialTranslationUnit &>
+IncrementalCUDADeviceParser::Parse(llvm::StringRef Input) {
+ auto PTU = IncrementalParser::Parse(Input);
+ if (!PTU)
+ return PTU.takeError();
+
+ auto PTX = GeneratePTX();
+ if (!PTX)
+ return PTX.takeError();
+
+ auto Err = GenerateFatbinary();
+ if (Err)
+ return std::move(Err);
+
+ std::string FatbinFileName =
+ "/incr_module_" + std::to_string(PTUs.size()) + ".fatbin";
+ VFS->addFile(FatbinFileName, 0,
+ llvm::MemoryBuffer::getMemBuffer(
+ llvm::StringRef(FatbinContent.data(), FatbinContent.size()),
+ "", false));
+
+ HostParser.getCI()->getCodeGenOpts().CudaGpuBinaryFileName = FatbinFileName;
+
+ FatbinContent.clear();
+
+ return PTU;
+}
+
+llvm::Expected<llvm::StringRef> IncrementalCUDADeviceParser::GeneratePTX() {
+ auto &PTU = PTUs.back();
+ std::string Error;
+
+ const llvm::Target *Target = llvm::TargetRegistry::lookupTarget(
+ PTU.TheModule->getTargetTriple(), Error);
+ if (!Target)
+ return llvm::make_error<llvm::StringError>(std::move(Error),
+ std::error_code());
+ llvm::TargetOptions TO = llvm::TargetOptions();
+ llvm::TargetMachine *TargetMachine = Target->createTargetMachine(
+ PTU.TheModule->getTargetTriple(), getCI()->getTargetOpts().CPU, "", TO,
+ llvm::Reloc::Model::PIC_);
+ PTU.TheModule->setDataLayout(TargetMachine->createDataLayout());
+
+ PTXCode.clear();
+ llvm::raw_svector_ostream dest(PTXCode);
+
+ llvm::legacy::PassManager PM;
+ if (TargetMachine->addPassesToEmitFile(PM, dest, nullptr,
+ llvm::CGFT_AssemblyFile)) {
+ return llvm::make_error<llvm::StringError>(
+ "NVPTX backend cannot produce PTX code.",
+ llvm::inconvertibleErrorCode());
+ }
+
+ if (!PM.run(*PTU.TheModule))
+ return llvm::make_error<llvm::StringError>("Failed to emit PTX code.",
+ llvm::inconvertibleErrorCode());
+
+ PTXCode += '\0';
+ while (PTXCode.size() % 8)
+ PTXCode += '\0';
+ return PTXCode.str();
+}
+
+llvm::Error IncrementalCUDADeviceParser::GenerateFatbinary() {
+ enum FatBinFlags {
+ AddressSize64 = 0x01,
+ HasDebugInfo = 0x02,
+ ProducerCuda = 0x04,
+ HostLinux = 0x10,
+ HostMac = 0x20,
+ HostWindows = 0x40
+ };
+
+ struct FatBinInnerHeader {
+ uint16_t Kind; // 0x00
+ uint16_t unknown02; // 0x02
+ uint32_t HeaderSize; // 0x04
+ uint32_t DataSize; // 0x08
+ uint32_t unknown0c; // 0x0c
+ uint32_t CompressedSize; // 0x10
+ uint32_t SubHeaderSize; // 0x14
+ uint16_t VersionMinor; // 0x18
+ uint16_t VersionMajor; // 0x1a
+ uint32_t CudaArch; // 0x1c
+ uint32_t unknown20; // 0x20
+ uint32_t unknown24; // 0x24
+ uint32_t Flags; // 0x28
+ uint32_t unknown2c; // 0x2c
+ uint32_t unknown30; // 0x30
+ uint32_t unknown34; // 0x34
+ uint32_t UncompressedSize; // 0x38
+ uint32_t unknown3c; // 0x3c
+ uint32_t unknown40; // 0x40
+ uint32_t unknown44; // 0x44
+ FatBinInnerHeader(uint32_t DataSize, uint32_t CudaArch, uint32_t Flags)
+ : Kind(1 /*PTX*/), unknown02(0x0101), HeaderSize(sizeof(*this)),
+ DataSize(DataSize), unknown0c(0), CompressedSize(0),
+ SubHeaderSize(HeaderSize - 8), VersionMinor(2), VersionMajor(4),
+ CudaArch(CudaArch), unknown20(0), unknown24(0), Flags(Flags),
+ unknown2c(0), unknown30(0), unknown34(0), UncompressedSize(0),
+ unknown3c(0), unknown40(0), unknown44(0) {}
+ };
+
+ struct FatBinHeader {
+ uint32_t Magic; // 0x00
+ uint16_t Version; // 0x04
+ uint16_t HeaderSize; // 0x06
+ uint32_t DataSize; // 0x08
+ uint32_t unknown0c; // 0x0c
+ public:
+ FatBinHeader(uint32_t DataSize)
+ : Magic(0xba55ed50), Version(1), HeaderSize(sizeof(*this)),
+ DataSize(DataSize), unknown0c(0) {}
+ };
+
+ FatBinHeader OuterHeader(sizeof(FatBinInnerHeader) + PTXCode.size());
+ FatbinContent.append((char *)&OuterHeader,
+ ((char *)&OuterHeader) + OuterHeader.HeaderSize);
+
+ FatBinInnerHeader InnerHeader(PTXCode.size(), SMVersion,
+ FatBinFlags::AddressSize64 |
+ FatBinFlags::HostLinux);
+ FatbinContent.append((char *)&InnerHeader,
+ ((char *)&InnerHeader) + InnerHeader.HeaderSize);
+
+ FatbinContent.append(PTXCode.begin(), PTXCode.end());
+
+ return llvm::Error::success();
+}
+
+IncrementalCUDADeviceParser::~IncrementalCUDADeviceParser() {}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.h b/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.h
new file mode 100644
index 000000000000..ce4f218c94c7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.h
@@ -0,0 +1,51 @@
+//===----------- DeviceOffload.h - Device Offloading ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements classes required for offloading to CUDA devices.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_INTERPRETER_DEVICE_OFFLOAD_H
+#define LLVM_CLANG_LIB_INTERPRETER_DEVICE_OFFLOAD_H
+
+#include "IncrementalParser.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/VirtualFileSystem.h"
+
+namespace clang {
+
+class IncrementalCUDADeviceParser : public IncrementalParser {
+public:
+ IncrementalCUDADeviceParser(
+ Interpreter &Interp, std::unique_ptr<CompilerInstance> Instance,
+ IncrementalParser &HostParser, llvm::LLVMContext &LLVMCtx,
+ llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> VFS,
+ llvm::Error &Err);
+
+ llvm::Expected<PartialTranslationUnit &>
+ Parse(llvm::StringRef Input) override;
+
+ // Generate PTX for the last PTU
+ llvm::Expected<llvm::StringRef> GeneratePTX();
+
+ // Generate fatbinary contents in memory
+ llvm::Error GenerateFatbinary();
+
+ ~IncrementalCUDADeviceParser();
+
+protected:
+ IncrementalParser &HostParser;
+ int SMVersion;
+ llvm::SmallString<1024> PTXCode;
+ llvm::SmallVector<char, 1024> FatbinContent;
+ llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> VFS;
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_INTERPRETER_DEVICE_OFFLOAD_H
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp
index 37d230b61f76..3f8d60630de4 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp
@@ -21,11 +21,18 @@
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/LLJIT.h"
#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/TargetSelect.h"
+// Force linking some of the runtimes that helps attaching to a debugger.
+LLVM_ATTRIBUTE_USED void linkComponents() {
+ llvm::errs() << (void *)&llvm_orc_registerJITLoaderGDBWrapper
+ << (void *)&llvm_orc_registerJITLoaderGDBAllocAction;
+}
+
namespace clang {
IncrementalExecutor::IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC,
@@ -37,21 +44,17 @@ IncrementalExecutor::IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC,
auto JTMB = JITTargetMachineBuilder(TI.getTriple());
JTMB.addFeatures(TI.getTargetOpts().Features);
- if (auto JitOrErr = LLJITBuilder().setJITTargetMachineBuilder(JTMB).create())
+ LLJITBuilder Builder;
+ Builder.setJITTargetMachineBuilder(JTMB);
+ // Enable debugging of JIT'd code (only works on JITLink for ELF and MachO).
+ Builder.setEnableDebuggerSupport(true);
+
+ if (auto JitOrErr = Builder.create())
Jit = std::move(*JitOrErr);
else {
Err = JitOrErr.takeError();
return;
}
-
- const char Pref = Jit->getDataLayout().getGlobalPrefix();
- // Discover symbols from the process as a fallback.
- if (auto PSGOrErr = DynamicLibrarySearchGenerator::GetForCurrentProcess(Pref))
- Jit->getMainJITDylib().addGenerator(std::move(*PSGOrErr));
- else {
- Err = PSGOrErr.takeError();
- return;
- }
}
IncrementalExecutor::~IncrementalExecutor() {}
@@ -86,7 +89,7 @@ llvm::Error IncrementalExecutor::runCtors() const {
return Jit->initialize(Jit->getMainJITDylib());
}
-llvm::Expected<llvm::JITTargetAddress>
+llvm::Expected<llvm::orc::ExecutorAddr>
IncrementalExecutor::getSymbolAddress(llvm::StringRef Name,
SymbolNameKind NameKind) const {
auto Sym = (NameKind == LinkerName) ? Jit->lookupLinkerMangled(Name)
@@ -94,7 +97,7 @@ IncrementalExecutor::getSymbolAddress(llvm::StringRef Name,
if (!Sym)
return Sym.takeError();
- return Sym->getValue();
+ return Sym;
}
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
index 54d37c76326b..dd0a210a0614 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include <memory>
@@ -51,9 +52,10 @@ public:
llvm::Error removeModule(PartialTranslationUnit &PTU);
llvm::Error runCtors() const;
llvm::Error cleanUp();
- llvm::Expected<llvm::JITTargetAddress>
+ llvm::Expected<llvm::orc::ExecutorAddr>
getSymbolAddress(llvm::StringRef Name, SymbolNameKind NameKind) const;
- llvm::orc::LLJIT *getExecutionEngine() const { return Jit.get(); }
+
+ llvm::orc::LLJIT &GetExecutionEngine() { return *Jit; }
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
index 373e2844b4e4..9e5cf358700b 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
@@ -11,7 +11,6 @@
//===----------------------------------------------------------------------===//
#include "IncrementalParser.h"
-
#include "clang/AST/DeclContextInternals.h"
#include "clang/CodeGen/BackendUtil.h"
#include "clang/CodeGen/CodeGenAction.h"
@@ -19,9 +18,9 @@
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendAction.h"
#include "clang/FrontendTool/Utils.h"
+#include "clang/Interpreter/Interpreter.h"
#include "clang/Parse/Parser.h"
#include "clang/Sema/Sema.h"
-
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/Error.h"
@@ -31,6 +30,79 @@
namespace clang {
+class IncrementalASTConsumer final : public ASTConsumer {
+ Interpreter &Interp;
+ std::unique_ptr<ASTConsumer> Consumer;
+
+public:
+ IncrementalASTConsumer(Interpreter &InterpRef, std::unique_ptr<ASTConsumer> C)
+ : Interp(InterpRef), Consumer(std::move(C)) {}
+
+ bool HandleTopLevelDecl(DeclGroupRef DGR) override final {
+ if (DGR.isNull())
+ return true;
+ if (!Consumer)
+ return true;
+
+ for (Decl *D : DGR)
+ if (auto *TSD = llvm::dyn_cast<TopLevelStmtDecl>(D);
+ TSD && TSD->isSemiMissing())
+ TSD->setStmt(Interp.SynthesizeExpr(cast<Expr>(TSD->getStmt())));
+
+ return Consumer->HandleTopLevelDecl(DGR);
+ }
+ void HandleTranslationUnit(ASTContext &Ctx) override final {
+ Consumer->HandleTranslationUnit(Ctx);
+ }
+ void HandleInlineFunctionDefinition(FunctionDecl *D) override final {
+ Consumer->HandleInlineFunctionDefinition(D);
+ }
+ void HandleInterestingDecl(DeclGroupRef D) override final {
+ Consumer->HandleInterestingDecl(D);
+ }
+ void HandleTagDeclDefinition(TagDecl *D) override final {
+ Consumer->HandleTagDeclDefinition(D);
+ }
+ void HandleTagDeclRequiredDefinition(const TagDecl *D) override final {
+ Consumer->HandleTagDeclRequiredDefinition(D);
+ }
+ void HandleCXXImplicitFunctionInstantiation(FunctionDecl *D) override final {
+ Consumer->HandleCXXImplicitFunctionInstantiation(D);
+ }
+ void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override final {
+ Consumer->HandleTopLevelDeclInObjCContainer(D);
+ }
+ void HandleImplicitImportDecl(ImportDecl *D) override final {
+ Consumer->HandleImplicitImportDecl(D);
+ }
+ void CompleteTentativeDefinition(VarDecl *D) override final {
+ Consumer->CompleteTentativeDefinition(D);
+ }
+ void CompleteExternalDeclaration(VarDecl *D) override final {
+ Consumer->CompleteExternalDeclaration(D);
+ }
+ void AssignInheritanceModel(CXXRecordDecl *RD) override final {
+ Consumer->AssignInheritanceModel(RD);
+ }
+ void HandleCXXStaticMemberVarInstantiation(VarDecl *D) override final {
+ Consumer->HandleCXXStaticMemberVarInstantiation(D);
+ }
+ void HandleVTable(CXXRecordDecl *RD) override final {
+ Consumer->HandleVTable(RD);
+ }
+ ASTMutationListener *GetASTMutationListener() override final {
+ return Consumer->GetASTMutationListener();
+ }
+ ASTDeserializationListener *GetASTDeserializationListener() override final {
+ return Consumer->GetASTDeserializationListener();
+ }
+ void PrintStats() override final { Consumer->PrintStats(); }
+ bool shouldSkipFunctionBody(Decl *D) override final {
+ return Consumer->shouldSkipFunctionBody(D);
+ }
+ static bool classof(const clang::ASTConsumer *) { return true; }
+};
+
/// A custom action enabling the incremental processing functionality.
///
/// The usual \p FrontendAction expects one call to ExecuteAction and once it
@@ -122,7 +194,17 @@ public:
}
};
-IncrementalParser::IncrementalParser(std::unique_ptr<CompilerInstance> Instance,
+CodeGenerator *IncrementalParser::getCodeGen() const {
+ FrontendAction *WrappedAct = Act->getWrapped();
+ if (!WrappedAct->hasIRSupport())
+ return nullptr;
+ return static_cast<CodeGenAction *>(WrappedAct)->getCodeGenerator();
+}
+
+IncrementalParser::IncrementalParser() {}
+
+IncrementalParser::IncrementalParser(Interpreter &Interp,
+ std::unique_ptr<CompilerInstance> Instance,
llvm::LLVMContext &LLVMCtx,
llvm::Error &Err)
: CI(std::move(Instance)) {
@@ -131,10 +213,28 @@ IncrementalParser::IncrementalParser(std::unique_ptr<CompilerInstance> Instance,
if (Err)
return;
CI->ExecuteAction(*Act);
+ std::unique_ptr<ASTConsumer> IncrConsumer =
+ std::make_unique<IncrementalASTConsumer>(Interp, CI->takeASTConsumer());
+ CI->setASTConsumer(std::move(IncrConsumer));
Consumer = &CI->getASTConsumer();
P.reset(
new Parser(CI->getPreprocessor(), CI->getSema(), /*SkipBodies=*/false));
P->Initialize();
+
+ // An initial PTU is needed as CUDA includes some headers automatically
+ auto PTU = ParseOrWrapTopLevelDecl();
+ if (auto E = PTU.takeError()) {
+ consumeError(std::move(E)); // FIXME
+ return; // PTU.takeError();
+ }
+
+ if (CodeGenerator *CG = getCodeGen()) {
+ std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
+ CG->StartModule("incr_module_" + std::to_string(PTUs.size()),
+ M->getContext());
+ PTU->TheModule = std::move(M);
+ assert(PTU->TheModule && "Failed to create initial PTU");
+ }
}
IncrementalParser::~IncrementalParser() {
@@ -158,8 +258,8 @@ IncrementalParser::ParseOrWrapTopLevelDecl() {
LastPTU.TUPart = C.getTranslationUnitDecl();
// Skip previous eof due to last incremental input.
- if (P->getCurToken().is(tok::eof)) {
- P->ConsumeToken();
+ if (P->getCurToken().is(tok::annot_repl_input_end)) {
+ P->ConsumeAnyToken();
// FIXME: Clang does not call ExitScope on finalizing the regular TU, we
// might want to do that around HandleEndOfTranslationUnit.
P->ExitScope();
@@ -205,14 +305,6 @@ IncrementalParser::ParseOrWrapTopLevelDecl() {
return LastPTU;
}
-static CodeGenerator *getCodeGen(FrontendAction *Act) {
- IncrementalAction *IncrAct = static_cast<IncrementalAction *>(Act);
- FrontendAction *WrappedAct = IncrAct->getWrapped();
- if (!WrappedAct->hasIRSupport())
- return nullptr;
- return static_cast<CodeGenAction *>(WrappedAct)->getCodeGenerator();
-}
-
llvm::Expected<PartialTranslationUnit &>
IncrementalParser::Parse(llvm::StringRef input) {
Preprocessor &PP = CI->getPreprocessor();
@@ -259,25 +351,30 @@ IncrementalParser::Parse(llvm::StringRef input) {
Token Tok;
do {
PP.Lex(Tok);
- } while (Tok.isNot(tok::eof));
+ } while (Tok.isNot(tok::annot_repl_input_end));
+ } else {
+ Token AssertTok;
+ PP.Lex(AssertTok);
+ assert(AssertTok.is(tok::annot_repl_input_end) &&
+ "Lexer must be EOF when starting incremental parse!");
}
- Token AssertTok;
- PP.Lex(AssertTok);
- assert(AssertTok.is(tok::eof) &&
- "Lexer must be EOF when starting incremental parse!");
-
- if (CodeGenerator *CG = getCodeGen(Act.get())) {
- std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
- CG->StartModule("incr_module_" + std::to_string(PTUs.size()),
- M->getContext());
-
+ if (std::unique_ptr<llvm::Module> M = GenModule())
PTU->TheModule = std::move(M);
- }
return PTU;
}
+std::unique_ptr<llvm::Module> IncrementalParser::GenModule() {
+ static unsigned ID = 0;
+ if (CodeGenerator *CG = getCodeGen()) {
+ std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
+ CG->StartModule("incr_module_" + std::to_string(ID++), M->getContext());
+ return M;
+ }
+ return nullptr;
+}
+
void IncrementalParser::CleanUpPTU(PartialTranslationUnit &PTU) {
TranslationUnitDecl *MostRecentTU = PTU.TUPart;
TranslationUnitDecl *FirstTU = MostRecentTU->getFirstDecl();
@@ -297,7 +394,7 @@ void IncrementalParser::CleanUpPTU(PartialTranslationUnit &PTU) {
}
llvm::StringRef IncrementalParser::GetMangledName(GlobalDecl GD) const {
- CodeGenerator *CG = getCodeGen(Act.get());
+ CodeGenerator *CG = getCodeGen();
assert(CG);
return CG->GetMangledName(GD);
}
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
index 8e45d6b5931b..def5750d1667 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
@@ -16,7 +16,6 @@
#include "clang/Interpreter/PartialTranslationUnit.h"
#include "clang/AST/GlobalDecl.h"
-
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
@@ -29,14 +28,16 @@ class LLVMContext;
namespace clang {
class ASTConsumer;
+class CodeGenerator;
class CompilerInstance;
class IncrementalAction;
+class Interpreter;
class Parser;
-
/// Provides support for incremental compilation. Keeps track of the state
/// changes between the subsequent incremental input.
///
class IncrementalParser {
+protected:
/// Long-lived, incremental parsing action.
std::unique_ptr<IncrementalAction> Act;
@@ -56,17 +57,21 @@ class IncrementalParser {
/// of code.
std::list<PartialTranslationUnit> PTUs;
+ IncrementalParser();
+
public:
- IncrementalParser(std::unique_ptr<CompilerInstance> Instance,
+ IncrementalParser(Interpreter &Interp,
+ std::unique_ptr<CompilerInstance> Instance,
llvm::LLVMContext &LLVMCtx, llvm::Error &Err);
- ~IncrementalParser();
+ virtual ~IncrementalParser();
- const CompilerInstance *getCI() const { return CI.get(); }
+ CompilerInstance *getCI() { return CI.get(); }
+ CodeGenerator *getCodeGen() const;
/// Parses incremental input by creating an in-memory file.
///\returns a \c PartialTranslationUnit which holds information about the
/// \c TranslationUnitDecl and \c llvm::Module corresponding to the input.
- llvm::Expected<PartialTranslationUnit &> Parse(llvm::StringRef Input);
+ virtual llvm::Expected<PartialTranslationUnit &> Parse(llvm::StringRef Input);
/// Uses the CodeGenModule mangled name cache and avoids recomputing.
///\returns the mangled name of a \c GD.
@@ -76,6 +81,8 @@ public:
std::list<PartialTranslationUnit> &getPTUs() { return PTUs; }
+ std::unique_ptr<llvm::Module> GenModule();
+
private:
llvm::Expected<PartialTranslationUnit &> ParseOrWrapTopLevelDecl();
};
diff --git a/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp b/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp
index a6f5fdc6eefc..4e1045298537 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp
@@ -13,11 +13,17 @@
#include "clang/Interpreter/Interpreter.h"
+#include "DeviceOffload.h"
#include "IncrementalExecutor.h"
#include "IncrementalParser.h"
+#include "InterpreterUtils.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/CodeGenAction.h"
#include "clang/CodeGen/ModuleBuilder.h"
#include "clang/CodeGen/ObjectFilePCHContainerOperations.h"
#include "clang/Driver/Compilation.h"
@@ -27,12 +33,16 @@
#include "clang/Driver/Tool.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "clang/Interpreter/Value.h"
#include "clang/Lex/PreprocessorOptions.h"
-
+#include "clang/Sema/Lookup.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Errc.h"
-#include "llvm/Support/Host.h"
-
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang;
// FIXME: Figure out how to unify with namespace init_convenience from
@@ -138,7 +148,6 @@ IncrementalCompilerBuilder::create(std::vector<const char *> &ClangArgv) {
// action and use other actions in incremental mode.
// FIXME: Print proper driver diagnostics if the driver flags are wrong.
// We do C++ by default; append right after argv[0] if no "-x" given
- ClangArgv.insert(ClangArgv.end(), "-xc++");
ClangArgv.insert(ClangArgv.end(), "-Xclang");
ClangArgv.insert(ClangArgv.end(), "-fincremental-extensions");
ClangArgv.insert(ClangArgv.end(), "-c");
@@ -171,12 +180,60 @@ IncrementalCompilerBuilder::create(std::vector<const char *> &ClangArgv) {
return CreateCI(**ErrOrCC1Args);
}
+llvm::Expected<std::unique_ptr<CompilerInstance>>
+IncrementalCompilerBuilder::CreateCpp() {
+ std::vector<const char *> Argv;
+ Argv.reserve(5 + 1 + UserArgs.size());
+ Argv.push_back("-xc++");
+ Argv.insert(Argv.end(), UserArgs.begin(), UserArgs.end());
+
+ return IncrementalCompilerBuilder::create(Argv);
+}
+
+llvm::Expected<std::unique_ptr<CompilerInstance>>
+IncrementalCompilerBuilder::createCuda(bool device) {
+ std::vector<const char *> Argv;
+ Argv.reserve(5 + 4 + UserArgs.size());
+
+ Argv.push_back("-xcuda");
+ if (device)
+ Argv.push_back("--cuda-device-only");
+ else
+ Argv.push_back("--cuda-host-only");
+
+ std::string SDKPathArg = "--cuda-path=";
+ if (!CudaSDKPath.empty()) {
+ SDKPathArg += CudaSDKPath;
+ Argv.push_back(SDKPathArg.c_str());
+ }
+
+ std::string ArchArg = "--offload-arch=";
+ if (!OffloadArch.empty()) {
+ ArchArg += OffloadArch;
+ Argv.push_back(ArchArg.c_str());
+ }
+
+ Argv.insert(Argv.end(), UserArgs.begin(), UserArgs.end());
+
+ return IncrementalCompilerBuilder::create(Argv);
+}
+
+llvm::Expected<std::unique_ptr<CompilerInstance>>
+IncrementalCompilerBuilder::CreateCudaDevice() {
+ return IncrementalCompilerBuilder::createCuda(true);
+}
+
+llvm::Expected<std::unique_ptr<CompilerInstance>>
+IncrementalCompilerBuilder::CreateCudaHost() {
+ return IncrementalCompilerBuilder::createCuda(false);
+}
+
Interpreter::Interpreter(std::unique_ptr<CompilerInstance> CI,
llvm::Error &Err) {
llvm::ErrorAsOutParameter EAO(&Err);
auto LLVMCtx = std::make_unique<llvm::LLVMContext>();
TSCtx = std::make_unique<llvm::orc::ThreadSafeContext>(std::move(LLVMCtx));
- IncrParser = std::make_unique<IncrementalParser>(std::move(CI),
+ IncrParser = std::make_unique<IncrementalParser>(*this, std::move(CI),
*TSCtx->getContext(), Err);
}
@@ -189,6 +246,29 @@ Interpreter::~Interpreter() {
}
}
+// These better to put in a runtime header but we can't. This is because we
+// can't find the precise resource directory in unittests so we have to hard
+// code them.
+const char *const Runtimes = R"(
+ void* operator new(__SIZE_TYPE__, void* __p) noexcept;
+ void *__clang_Interpreter_SetValueWithAlloc(void*, void*, void*);
+ void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*);
+ void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, void*);
+ void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, float);
+ void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, double);
+ void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, long double);
+ void __clang_Interpreter_SetValueNoAlloc(void*,void*,void*,unsigned long long);
+ template <class T, class = T (*)() /*disable for arrays*/>
+ void __clang_Interpreter_SetValueCopyArr(T* Src, void* Placement, unsigned long Size) {
+ for (auto Idx = 0; Idx < Size; ++Idx)
+ new ((void*)(((T*)Placement) + Idx)) T(Src[Idx]);
+ }
+ template <class T, unsigned long N>
+ void __clang_Interpreter_SetValueCopyArr(const T (*Src)[N], void* Placement, unsigned long Size) {
+ __clang_Interpreter_SetValueCopyArr(Src[0], Placement, Size);
+ }
+)";
+
llvm::Expected<std::unique_ptr<Interpreter>>
Interpreter::create(std::unique_ptr<CompilerInstance> CI) {
llvm::Error Err = llvm::Error::success();
@@ -196,32 +276,105 @@ Interpreter::create(std::unique_ptr<CompilerInstance> CI) {
std::unique_ptr<Interpreter>(new Interpreter(std::move(CI), Err));
if (Err)
return std::move(Err);
+ auto PTU = Interp->Parse(Runtimes);
+ if (!PTU)
+ return PTU.takeError();
+
+ Interp->ValuePrintingInfo.resize(3);
+ // FIXME: This is a ugly hack. Undo command checks its availability by looking
+ // at the size of the PTU list. However we have parsed something in the
+ // beginning of the REPL so we have to mark them as 'Irrevocable'.
+ Interp->InitPTUSize = Interp->IncrParser->getPTUs().size();
return std::move(Interp);
}
+llvm::Expected<std::unique_ptr<Interpreter>>
+Interpreter::createWithCUDA(std::unique_ptr<CompilerInstance> CI,
+ std::unique_ptr<CompilerInstance> DCI) {
+ // avoid writing fat binary to disk using an in-memory virtual file system
+ llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> IMVFS =
+ std::make_unique<llvm::vfs::InMemoryFileSystem>();
+ llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayVFS =
+ std::make_unique<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem());
+ OverlayVFS->pushOverlay(IMVFS);
+ CI->createFileManager(OverlayVFS);
+
+ auto Interp = Interpreter::create(std::move(CI));
+ if (auto E = Interp.takeError())
+ return std::move(E);
+
+ llvm::Error Err = llvm::Error::success();
+ auto DeviceParser = std::make_unique<IncrementalCUDADeviceParser>(
+ **Interp, std::move(DCI), *(*Interp)->IncrParser.get(),
+ *(*Interp)->TSCtx->getContext(), IMVFS, Err);
+ if (Err)
+ return std::move(Err);
+
+ (*Interp)->DeviceParser = std::move(DeviceParser);
+
+ return Interp;
+}
+
const CompilerInstance *Interpreter::getCompilerInstance() const {
return IncrParser->getCI();
}
-const llvm::orc::LLJIT *Interpreter::getExecutionEngine() const {
- if (IncrExecutor)
- return IncrExecutor->getExecutionEngine();
- return nullptr;
+llvm::Expected<llvm::orc::LLJIT &> Interpreter::getExecutionEngine() {
+ if (!IncrExecutor) {
+ if (auto Err = CreateExecutor())
+ return std::move(Err);
+ }
+
+ return IncrExecutor->GetExecutionEngine();
+}
+
+ASTContext &Interpreter::getASTContext() {
+ return getCompilerInstance()->getASTContext();
+}
+
+const ASTContext &Interpreter::getASTContext() const {
+ return getCompilerInstance()->getASTContext();
+}
+
+size_t Interpreter::getEffectivePTUSize() const {
+ std::list<PartialTranslationUnit> &PTUs = IncrParser->getPTUs();
+ assert(PTUs.size() >= InitPTUSize && "empty PTU list?");
+ return PTUs.size() - InitPTUSize;
}
llvm::Expected<PartialTranslationUnit &>
Interpreter::Parse(llvm::StringRef Code) {
+ // If we have a device parser, parse it first.
+ // The generated code will be included in the host compilation
+ if (DeviceParser) {
+ auto DevicePTU = DeviceParser->Parse(Code);
+ if (auto E = DevicePTU.takeError())
+ return std::move(E);
+ }
+
+ // Tell the interpreter to silently ignore unused expressions, since value
+ // printing could trigger them.
+ getCompilerInstance()->getDiagnostics().setSeverity(
+ clang::diag::warn_unused_expr, diag::Severity::Ignored, SourceLocation());
return IncrParser->Parse(Code);
}
+llvm::Error Interpreter::CreateExecutor() {
+ const clang::TargetInfo &TI =
+ getCompilerInstance()->getASTContext().getTargetInfo();
+ llvm::Error Err = llvm::Error::success();
+ auto Executor = std::make_unique<IncrementalExecutor>(*TSCtx, Err, TI);
+ if (!Err)
+ IncrExecutor = std::move(Executor);
+
+ return Err;
+}
+
llvm::Error Interpreter::Execute(PartialTranslationUnit &T) {
assert(T.TheModule);
if (!IncrExecutor) {
- const clang::TargetInfo &TI =
- getCompilerInstance()->getASTContext().getTargetInfo();
- llvm::Error Err = llvm::Error::success();
- IncrExecutor = std::make_unique<IncrementalExecutor>(*TSCtx, Err, TI);
-
+ auto Err = CreateExecutor();
if (Err)
return Err;
}
@@ -235,7 +388,26 @@ llvm::Error Interpreter::Execute(PartialTranslationUnit &T) {
return llvm::Error::success();
}
-llvm::Expected<llvm::JITTargetAddress>
+llvm::Error Interpreter::ParseAndExecute(llvm::StringRef Code, Value *V) {
+
+ auto PTU = Parse(Code);
+ if (!PTU)
+ return PTU.takeError();
+ if (PTU->TheModule)
+ if (llvm::Error Err = Execute(*PTU))
+ return Err;
+
+ if (LastValue.isValid()) {
+ if (!V) {
+ LastValue.dump();
+ LastValue.clear();
+ } else
+ *V = std::move(LastValue);
+ }
+ return llvm::Error::success();
+}
+
+llvm::Expected<llvm::orc::ExecutorAddr>
Interpreter::getSymbolAddress(GlobalDecl GD) const {
if (!IncrExecutor)
return llvm::make_error<llvm::StringError>("Operation failed. "
@@ -245,7 +417,7 @@ Interpreter::getSymbolAddress(GlobalDecl GD) const {
return getSymbolAddress(MangledName);
}
-llvm::Expected<llvm::JITTargetAddress>
+llvm::Expected<llvm::orc::ExecutorAddr>
Interpreter::getSymbolAddress(llvm::StringRef IRName) const {
if (!IncrExecutor)
return llvm::make_error<llvm::StringError>("Operation failed. "
@@ -255,7 +427,7 @@ Interpreter::getSymbolAddress(llvm::StringRef IRName) const {
return IncrExecutor->getSymbolAddress(IRName, IncrementalExecutor::IRName);
}
-llvm::Expected<llvm::JITTargetAddress>
+llvm::Expected<llvm::orc::ExecutorAddr>
Interpreter::getSymbolAddressFromLinkerName(llvm::StringRef Name) const {
if (!IncrExecutor)
return llvm::make_error<llvm::StringError>("Operation failed. "
@@ -268,7 +440,7 @@ Interpreter::getSymbolAddressFromLinkerName(llvm::StringRef Name) const {
llvm::Error Interpreter::Undo(unsigned N) {
std::list<PartialTranslationUnit> &PTUs = IncrParser->getPTUs();
- if (N > PTUs.size())
+ if (N > getEffectivePTUSize())
return llvm::make_error<llvm::StringError>("Operation failed. "
"Too many undos",
std::error_code());
@@ -283,3 +455,359 @@ llvm::Error Interpreter::Undo(unsigned N) {
}
return llvm::Error::success();
}
+
+llvm::Error Interpreter::LoadDynamicLibrary(const char *name) {
+ auto EE = getExecutionEngine();
+ if (!EE)
+ return EE.takeError();
+
+ auto &DL = EE->getDataLayout();
+
+ if (auto DLSG = llvm::orc::DynamicLibrarySearchGenerator::Load(
+ name, DL.getGlobalPrefix()))
+ EE->getMainJITDylib().addGenerator(std::move(*DLSG));
+ else
+ return DLSG.takeError();
+
+ return llvm::Error::success();
+}
+
+llvm::Expected<llvm::orc::ExecutorAddr>
+Interpreter::CompileDtorCall(CXXRecordDecl *CXXRD) {
+ assert(CXXRD && "Cannot compile a destructor for a nullptr");
+ if (auto Dtor = Dtors.find(CXXRD); Dtor != Dtors.end())
+ return Dtor->getSecond();
+
+ if (CXXRD->hasIrrelevantDestructor())
+ return llvm::orc::ExecutorAddr{};
+
+ CXXDestructorDecl *DtorRD =
+ getCompilerInstance()->getSema().LookupDestructor(CXXRD);
+
+ llvm::StringRef Name =
+ IncrParser->GetMangledName(GlobalDecl(DtorRD, Dtor_Base));
+ auto AddrOrErr = getSymbolAddress(Name);
+ if (!AddrOrErr)
+ return AddrOrErr.takeError();
+
+ Dtors[CXXRD] = *AddrOrErr;
+ return AddrOrErr;
+}
+
+static constexpr llvm::StringRef MagicRuntimeInterface[] = {
+ "__clang_Interpreter_SetValueNoAlloc",
+ "__clang_Interpreter_SetValueWithAlloc",
+ "__clang_Interpreter_SetValueCopyArr"};
+
+bool Interpreter::FindRuntimeInterface() {
+ if (llvm::all_of(ValuePrintingInfo, [](Expr *E) { return E != nullptr; }))
+ return true;
+
+ Sema &S = getCompilerInstance()->getSema();
+ ASTContext &Ctx = S.getASTContext();
+
+ auto LookupInterface = [&](Expr *&Interface, llvm::StringRef Name) {
+ LookupResult R(S, &Ctx.Idents.get(Name), SourceLocation(),
+ Sema::LookupOrdinaryName, Sema::ForVisibleRedeclaration);
+ S.LookupQualifiedName(R, Ctx.getTranslationUnitDecl());
+ if (R.empty())
+ return false;
+
+ CXXScopeSpec CSS;
+ Interface = S.BuildDeclarationNameExpr(CSS, R, /*ADL=*/false).get();
+ return true;
+ };
+
+ if (!LookupInterface(ValuePrintingInfo[NoAlloc],
+ MagicRuntimeInterface[NoAlloc]))
+ return false;
+ if (!LookupInterface(ValuePrintingInfo[WithAlloc],
+ MagicRuntimeInterface[WithAlloc]))
+ return false;
+ if (!LookupInterface(ValuePrintingInfo[CopyArray],
+ MagicRuntimeInterface[CopyArray]))
+ return false;
+ return true;
+}
+
+namespace {
+
+class RuntimeInterfaceBuilder
+ : public TypeVisitor<RuntimeInterfaceBuilder, Interpreter::InterfaceKind> {
+ clang::Interpreter &Interp;
+ ASTContext &Ctx;
+ Sema &S;
+ Expr *E;
+ llvm::SmallVector<Expr *, 3> Args;
+
+public:
+ RuntimeInterfaceBuilder(clang::Interpreter &In, ASTContext &C, Sema &SemaRef,
+ Expr *VE, ArrayRef<Expr *> FixedArgs)
+ : Interp(In), Ctx(C), S(SemaRef), E(VE) {
+ // The Interpreter* parameter and the out parameter `OutVal`.
+ for (Expr *E : FixedArgs)
+ Args.push_back(E);
+
+ // Get rid of ExprWithCleanups.
+ if (auto *EWC = llvm::dyn_cast_if_present<ExprWithCleanups>(E))
+ E = EWC->getSubExpr();
+ }
+
+ ExprResult getCall() {
+ QualType Ty = E->getType();
+ QualType DesugaredTy = Ty.getDesugaredType(Ctx);
+
+ // For lvalue struct, we treat it as a reference.
+ if (DesugaredTy->isRecordType() && E->isLValue()) {
+ DesugaredTy = Ctx.getLValueReferenceType(DesugaredTy);
+ Ty = Ctx.getLValueReferenceType(Ty);
+ }
+
+ Expr *TypeArg =
+ CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)Ty.getAsOpaquePtr());
+ // The QualType parameter `OpaqueType`, represented as `void*`.
+ Args.push_back(TypeArg);
+
+ // We push the last parameter based on the type of the Expr. Note we need
+ // special care for rvalue struct.
+ Interpreter::InterfaceKind Kind = Visit(&*DesugaredTy);
+ switch (Kind) {
+ case Interpreter::InterfaceKind::WithAlloc:
+ case Interpreter::InterfaceKind::CopyArray: {
+ // __clang_Interpreter_SetValueWithAlloc.
+ ExprResult AllocCall = S.ActOnCallExpr(
+ /*Scope=*/nullptr,
+ Interp.getValuePrintingInfo()[Interpreter::InterfaceKind::WithAlloc],
+ E->getBeginLoc(), Args, E->getEndLoc());
+ assert(!AllocCall.isInvalid() && "Can't create runtime interface call!");
+
+ TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ty, SourceLocation());
+
+ // Force CodeGen to emit destructor.
+ if (auto *RD = Ty->getAsCXXRecordDecl()) {
+ auto *Dtor = S.LookupDestructor(RD);
+ Dtor->addAttr(UsedAttr::CreateImplicit(Ctx));
+ Interp.getCompilerInstance()->getASTConsumer().HandleTopLevelDecl(
+ DeclGroupRef(Dtor));
+ }
+
+ // __clang_Interpreter_SetValueCopyArr.
+ if (Kind == Interpreter::InterfaceKind::CopyArray) {
+ const auto *ConstantArrTy =
+ cast<ConstantArrayType>(DesugaredTy.getTypePtr());
+ size_t ArrSize = Ctx.getConstantArrayElementCount(ConstantArrTy);
+ Expr *ArrSizeExpr = IntegerLiteralExpr(Ctx, ArrSize);
+ Expr *Args[] = {E, AllocCall.get(), ArrSizeExpr};
+ return S.ActOnCallExpr(
+ /*Scope *=*/nullptr,
+ Interp
+ .getValuePrintingInfo()[Interpreter::InterfaceKind::CopyArray],
+ SourceLocation(), Args, SourceLocation());
+ }
+ Expr *Args[] = {AllocCall.get()};
+ ExprResult CXXNewCall = S.BuildCXXNew(
+ E->getSourceRange(),
+ /*UseGlobal=*/true, /*PlacementLParen=*/SourceLocation(), Args,
+ /*PlacementRParen=*/SourceLocation(),
+ /*TypeIdParens=*/SourceRange(), TSI->getType(), TSI, std::nullopt,
+ E->getSourceRange(), E);
+
+ assert(!CXXNewCall.isInvalid() &&
+ "Can't create runtime placement new call!");
+
+ return S.ActOnFinishFullExpr(CXXNewCall.get(),
+ /*DiscardedValue=*/false);
+ }
+ // __clang_Interpreter_SetValueNoAlloc.
+ case Interpreter::InterfaceKind::NoAlloc: {
+ return S.ActOnCallExpr(
+ /*Scope=*/nullptr,
+ Interp.getValuePrintingInfo()[Interpreter::InterfaceKind::NoAlloc],
+ E->getBeginLoc(), Args, E->getEndLoc());
+ }
+ }
+ llvm_unreachable("Unhandled Interpreter::InterfaceKind");
+ }
+
+ Interpreter::InterfaceKind VisitRecordType(const RecordType *Ty) {
+ return Interpreter::InterfaceKind::WithAlloc;
+ }
+
+ Interpreter::InterfaceKind
+ VisitMemberPointerType(const MemberPointerType *Ty) {
+ return Interpreter::InterfaceKind::WithAlloc;
+ }
+
+ Interpreter::InterfaceKind
+ VisitConstantArrayType(const ConstantArrayType *Ty) {
+ return Interpreter::InterfaceKind::CopyArray;
+ }
+
+ Interpreter::InterfaceKind
+ VisitFunctionProtoType(const FunctionProtoType *Ty) {
+ HandlePtrType(Ty);
+ return Interpreter::InterfaceKind::NoAlloc;
+ }
+
+ Interpreter::InterfaceKind VisitPointerType(const PointerType *Ty) {
+ HandlePtrType(Ty);
+ return Interpreter::InterfaceKind::NoAlloc;
+ }
+
+ Interpreter::InterfaceKind VisitReferenceType(const ReferenceType *Ty) {
+ ExprResult AddrOfE = S.CreateBuiltinUnaryOp(SourceLocation(), UO_AddrOf, E);
+ assert(!AddrOfE.isInvalid() && "Can not create unary expression");
+ Args.push_back(AddrOfE.get());
+ return Interpreter::InterfaceKind::NoAlloc;
+ }
+
+ Interpreter::InterfaceKind VisitBuiltinType(const BuiltinType *Ty) {
+ if (Ty->isNullPtrType())
+ Args.push_back(E);
+ else if (Ty->isFloatingType())
+ Args.push_back(E);
+ else if (Ty->isIntegralOrEnumerationType())
+ HandleIntegralOrEnumType(Ty);
+ else if (Ty->isVoidType()) {
+ // Do we need to still run `E`?
+ }
+
+ return Interpreter::InterfaceKind::NoAlloc;
+ }
+
+ Interpreter::InterfaceKind VisitEnumType(const EnumType *Ty) {
+ HandleIntegralOrEnumType(Ty);
+ return Interpreter::InterfaceKind::NoAlloc;
+ }
+
+private:
+ // Force cast these types to uint64 to reduce the number of overloads of
+ // `__clang_Interpreter_SetValueNoAlloc`.
+ void HandleIntegralOrEnumType(const Type *Ty) {
+ TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ctx.UnsignedLongLongTy);
+ ExprResult CastedExpr =
+ S.BuildCStyleCastExpr(SourceLocation(), TSI, SourceLocation(), E);
+ assert(!CastedExpr.isInvalid() && "Cannot create cstyle cast expr");
+ Args.push_back(CastedExpr.get());
+ }
+
+ void HandlePtrType(const Type *Ty) {
+ TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ctx.VoidPtrTy);
+ ExprResult CastedExpr =
+ S.BuildCStyleCastExpr(SourceLocation(), TSI, SourceLocation(), E);
+ assert(!CastedExpr.isInvalid() && "Can not create cstyle cast expression");
+ Args.push_back(CastedExpr.get());
+ }
+};
+} // namespace
+
+// This synthesizes a call expression to a special
+// function that is responsible for generating the Value.
+// In general, we transform:
+// clang-repl> x
+// To:
+// // 1. If x is a built-in type like int, float.
+// __clang_Interpreter_SetValueNoAlloc(ThisInterp, OpaqueValue, xQualType, x);
+// // 2. If x is a struct, and a lvalue.
+// __clang_Interpreter_SetValueNoAlloc(ThisInterp, OpaqueValue, xQualType,
+// &x);
+// // 3. If x is a struct, but a rvalue.
+// new (__clang_Interpreter_SetValueWithAlloc(ThisInterp, OpaqueValue,
+// xQualType)) (x);
+
+Expr *Interpreter::SynthesizeExpr(Expr *E) {
+ Sema &S = getCompilerInstance()->getSema();
+ ASTContext &Ctx = S.getASTContext();
+
+ if (!FindRuntimeInterface())
+ llvm_unreachable("We can't find the runtime iterface for pretty print!");
+
+ // Create parameter `ThisInterp`.
+ auto *ThisInterp = CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)this);
+
+ // Create parameter `OutVal`.
+ auto *OutValue = CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)&LastValue);
+
+ // Build `__clang_Interpreter_SetValue*` call.
+ RuntimeInterfaceBuilder Builder(*this, Ctx, S, E, {ThisInterp, OutValue});
+
+ ExprResult Result = Builder.getCall();
+ // It could fail, like printing an array type in C. (not supported)
+ if (Result.isInvalid())
+ return E;
+ return Result.get();
+}
+
+// Temporary rvalue struct that needs special care.
+REPL_EXTERNAL_VISIBILITY void *
+__clang_Interpreter_SetValueWithAlloc(void *This, void *OutVal,
+ void *OpaqueType) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+ return VRef.getPtr();
+}
+
+// Pointers, lvalue struct that can take as a reference.
+REPL_EXTERNAL_VISIBILITY void
+__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
+ void *Val) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+ VRef.setPtr(Val);
+}
+
+REPL_EXTERNAL_VISIBILITY void
+__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal,
+ void *OpaqueType) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+}
+
+static void SetValueDataBasedOnQualType(Value &V, unsigned long long Data) {
+ QualType QT = V.getType();
+ if (const auto *ET = QT->getAs<EnumType>())
+ QT = ET->getDecl()->getIntegerType();
+
+ switch (QT->castAs<BuiltinType>()->getKind()) {
+ default:
+ llvm_unreachable("unknown type kind!");
+#define X(type, name) \
+ case BuiltinType::name: \
+ V.set##name(Data); \
+ break;
+ REPL_BUILTIN_TYPES
+#undef X
+ }
+}
+
+REPL_EXTERNAL_VISIBILITY void
+__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
+ unsigned long long Val) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+ SetValueDataBasedOnQualType(VRef, Val);
+}
+
+REPL_EXTERNAL_VISIBILITY void
+__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
+ float Val) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+ VRef.setFloat(Val);
+}
+
+REPL_EXTERNAL_VISIBILITY void
+__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
+ double Val) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+ VRef.setDouble(Val);
+}
+
+REPL_EXTERNAL_VISIBILITY void
+__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
+ long double Val) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+ VRef.setLongDouble(Val);
+}
diff --git a/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.cpp b/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.cpp
new file mode 100644
index 000000000000..c19cf6aa3156
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.cpp
@@ -0,0 +1,111 @@
+//===--- InterpreterUtils.cpp - Incremental Utils --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements some common utils used in the incremental library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "InterpreterUtils.h"
+
+namespace clang {
+
+IntegerLiteral *IntegerLiteralExpr(ASTContext &C, uint64_t Val) {
+ return IntegerLiteral::Create(C, llvm::APSInt::getUnsigned(Val),
+ C.UnsignedLongLongTy, SourceLocation());
+}
+
+Expr *CStyleCastPtrExpr(Sema &S, QualType Ty, Expr *E) {
+ ASTContext &Ctx = S.getASTContext();
+ if (!Ty->isPointerType())
+ Ty = Ctx.getPointerType(Ty);
+
+ TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ty, SourceLocation());
+ Expr *Result =
+ S.BuildCStyleCastExpr(SourceLocation(), TSI, SourceLocation(), E).get();
+ assert(Result && "Cannot create CStyleCastPtrExpr");
+ return Result;
+}
+
+Expr *CStyleCastPtrExpr(Sema &S, QualType Ty, uintptr_t Ptr) {
+ ASTContext &Ctx = S.getASTContext();
+ return CStyleCastPtrExpr(S, Ty, IntegerLiteralExpr(Ctx, (uint64_t)Ptr));
+}
+
+Sema::DeclGroupPtrTy CreateDGPtrFrom(Sema &S, Decl *D) {
+ SmallVector<Decl *, 1> DeclsInGroup;
+ DeclsInGroup.push_back(D);
+ Sema::DeclGroupPtrTy DeclGroupPtr = S.BuildDeclaratorGroup(DeclsInGroup);
+ return DeclGroupPtr;
+}
+
+NamespaceDecl *LookupNamespace(Sema &S, llvm::StringRef Name,
+ const DeclContext *Within) {
+ DeclarationName DName = &S.Context.Idents.get(Name);
+ LookupResult R(S, DName, SourceLocation(),
+ Sema::LookupNestedNameSpecifierName);
+ R.suppressDiagnostics();
+ if (!Within)
+ S.LookupName(R, S.TUScope);
+ else {
+ if (const auto *TD = dyn_cast<clang::TagDecl>(Within);
+ TD && !TD->getDefinition())
+ // No definition, no lookup result.
+ return nullptr;
+
+ S.LookupQualifiedName(R, const_cast<DeclContext *>(Within));
+ }
+
+ if (R.empty())
+ return nullptr;
+
+ R.resolveKind();
+
+ return dyn_cast<NamespaceDecl>(R.getFoundDecl());
+}
+
+NamedDecl *LookupNamed(Sema &S, llvm::StringRef Name,
+ const DeclContext *Within) {
+ DeclarationName DName = &S.Context.Idents.get(Name);
+ LookupResult R(S, DName, SourceLocation(), Sema::LookupOrdinaryName,
+ Sema::ForVisibleRedeclaration);
+
+ R.suppressDiagnostics();
+
+ if (!Within)
+ S.LookupName(R, S.TUScope);
+ else {
+ const DeclContext *PrimaryWithin = nullptr;
+ if (const auto *TD = dyn_cast<TagDecl>(Within))
+ PrimaryWithin = llvm::dyn_cast_or_null<DeclContext>(TD->getDefinition());
+ else
+ PrimaryWithin = Within->getPrimaryContext();
+
+ // No definition, no lookup result.
+ if (!PrimaryWithin)
+ return nullptr;
+
+ S.LookupQualifiedName(R, const_cast<DeclContext *>(PrimaryWithin));
+ }
+
+ if (R.empty())
+ return nullptr;
+ R.resolveKind();
+
+ if (R.isSingleResult())
+ return llvm::dyn_cast<NamedDecl>(R.getFoundDecl());
+
+ return nullptr;
+}
+
+std::string GetFullTypeName(ASTContext &Ctx, QualType QT) {
+ PrintingPolicy Policy(Ctx.getPrintingPolicy());
+ Policy.SuppressScope = false;
+ Policy.AnonymousTagLocations = false;
+ return QT.getAsString(Policy);
+}
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.h b/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.h
new file mode 100644
index 000000000000..8df158c17d49
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.h
@@ -0,0 +1,54 @@
+//===--- InterpreterUtils.h - Incremental Utils --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements some common utils used in the incremental library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INTERPRETER_UTILS_H
+#define LLVM_CLANG_INTERPRETER_UTILS_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/CodeGen/ObjectFilePCHContainerOperations.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Job.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "clang/Lex/PreprocessorOptions.h"
+
+#include "clang/Sema/Lookup.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/TargetParser/Host.h"
+
+namespace clang {
+IntegerLiteral *IntegerLiteralExpr(ASTContext &C, uint64_t Val);
+
+Expr *CStyleCastPtrExpr(Sema &S, QualType Ty, Expr *E);
+
+Expr *CStyleCastPtrExpr(Sema &S, QualType Ty, uintptr_t Ptr);
+
+Sema::DeclGroupPtrTy CreateDGPtrFrom(Sema &S, Decl *D);
+
+NamespaceDecl *LookupNamespace(Sema &S, llvm::StringRef Name,
+ const DeclContext *Within = nullptr);
+
+NamedDecl *LookupNamed(Sema &S, llvm::StringRef Name,
+ const DeclContext *Within);
+
+std::string GetFullTypeName(ASTContext &Ctx, QualType QT);
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Interpreter/Value.cpp b/contrib/llvm-project/clang/lib/Interpreter/Value.cpp
new file mode 100644
index 000000000000..6d0eaf1b82e1
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Interpreter/Value.cpp
@@ -0,0 +1,266 @@
+//===--- Value.cpp - Incremental Compilation and Execution ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the class that is used to represent a value in incremental
+// C++.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Interpreter/Value.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+#include "clang/Interpreter/Interpreter.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_os_ostream.h"
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+using namespace clang;
+
+namespace {
+
+// This is an internal buffer maintained by Value, used to hold temporaries.
+class ValueStorage {
+public:
+ using DtorFunc = void (*)(void *);
+
+ static unsigned char *CreatePayload(void *DtorF, size_t AllocSize,
+ size_t ElementsSize) {
+ if (AllocSize < sizeof(Canary))
+ AllocSize = sizeof(Canary);
+ unsigned char *Buf =
+ new unsigned char[ValueStorage::getPayloadOffset() + AllocSize];
+ ValueStorage *VS = new (Buf) ValueStorage(DtorF, AllocSize, ElementsSize);
+ std::memcpy(VS->getPayload(), Canary, sizeof(Canary));
+ return VS->getPayload();
+ }
+
+ unsigned char *getPayload() { return Storage; }
+ const unsigned char *getPayload() const { return Storage; }
+
+ static unsigned getPayloadOffset() {
+ static ValueStorage Dummy(nullptr, 0, 0);
+ return Dummy.getPayload() - reinterpret_cast<unsigned char *>(&Dummy);
+ }
+
+ static ValueStorage *getFromPayload(void *Payload) {
+ ValueStorage *R = reinterpret_cast<ValueStorage *>(
+ (unsigned char *)Payload - getPayloadOffset());
+ return R;
+ }
+
+ void Retain() { ++RefCnt; }
+
+ void Release() {
+ assert(RefCnt > 0 && "Can't release if reference count is already zero");
+ if (--RefCnt == 0) {
+ // We have a non-trivial dtor.
+ if (Dtor && IsAlive()) {
+ assert(Elements && "We at least should have 1 element in Value");
+ size_t Stride = AllocSize / Elements;
+ for (size_t Idx = 0; Idx < Elements; ++Idx)
+ (*Dtor)(getPayload() + Idx * Stride);
+ }
+ delete[] reinterpret_cast<unsigned char *>(this);
+ }
+ }
+
+ // Check whether the storage is valid by validating the canary bits.
+ // If someone accidentally writes invalid bits into the storage, the canary
+ // will be changed first, and `IsAlive` will then return false.
+ bool IsAlive() const {
+ return std::memcmp(getPayload(), Canary, sizeof(Canary)) != 0;
+ }
+
+private:
+ ValueStorage(void *DtorF, size_t AllocSize, size_t ElementsNum)
+ : RefCnt(1), Dtor(reinterpret_cast<DtorFunc>(DtorF)),
+ AllocSize(AllocSize), Elements(ElementsNum) {}
+
+ mutable unsigned RefCnt;
+ DtorFunc Dtor = nullptr;
+ size_t AllocSize = 0;
+ size_t Elements = 0;
+ unsigned char Storage[1];
+
+ // These are canary bits used to detect whether the storage has been
+ // damaged.
+ static constexpr unsigned char Canary[8] = {0x4c, 0x37, 0xad, 0x8f,
+ 0x2d, 0x23, 0x95, 0x91};
+};
+} // namespace
+
+static Value::Kind ConvertQualTypeToKind(const ASTContext &Ctx, QualType QT) {
+ if (Ctx.hasSameType(QT, Ctx.VoidTy))
+ return Value::K_Void;
+
+ if (const auto *ET = QT->getAs<EnumType>())
+ QT = ET->getDecl()->getIntegerType();
+
+ const auto *BT = QT->getAs<BuiltinType>();
+ if (!BT || BT->isNullPtrType())
+ return Value::K_PtrOrObj;
+
+ switch (QT->castAs<BuiltinType>()->getKind()) {
+ default:
+ assert(false && "Type not supported");
+ return Value::K_Unspecified;
+#define X(type, name) \
+ case BuiltinType::name: \
+ return Value::K_##name;
+ REPL_BUILTIN_TYPES
+#undef X
+ }
+}
+
+Value::Value(Interpreter *In, void *Ty) : Interp(In), OpaqueType(Ty) {
+ setKind(ConvertQualTypeToKind(getASTContext(), getType()));
+ if (ValueKind == K_PtrOrObj) {
+ QualType Canon = getType().getCanonicalType();
+ if ((Canon->isPointerType() || Canon->isObjectType() ||
+ Canon->isReferenceType()) &&
+ (Canon->isRecordType() || Canon->isConstantArrayType() ||
+ Canon->isMemberPointerType())) {
+ IsManuallyAlloc = true;
+ // Compile dtor function.
+ Interpreter &Interp = getInterpreter();
+ void *DtorF = nullptr;
+ size_t ElementsSize = 1;
+ QualType DtorTy = getType();
+
+ if (const auto *ArrTy =
+ llvm::dyn_cast<ConstantArrayType>(DtorTy.getTypePtr())) {
+ DtorTy = ArrTy->getElementType();
+ llvm::APInt ArrSize(sizeof(size_t) * 8, 1);
+ do {
+ ArrSize *= ArrTy->getSize();
+ ArrTy = llvm::dyn_cast<ConstantArrayType>(
+ ArrTy->getElementType().getTypePtr());
+ } while (ArrTy);
+ ElementsSize = static_cast<size_t>(ArrSize.getZExtValue());
+ }
+ if (const auto *RT = DtorTy->getAs<RecordType>()) {
+ if (CXXRecordDecl *CXXRD =
+ llvm::dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (llvm::Expected<llvm::orc::ExecutorAddr> Addr =
+ Interp.CompileDtorCall(CXXRD))
+ DtorF = reinterpret_cast<void *>(Addr->getValue());
+ else
+ llvm::logAllUnhandledErrors(Addr.takeError(), llvm::errs());
+ }
+ }
+
+ size_t AllocSize =
+ getASTContext().getTypeSizeInChars(getType()).getQuantity();
+ unsigned char *Payload =
+ ValueStorage::CreatePayload(DtorF, AllocSize, ElementsSize);
+ setPtr((void *)Payload);
+ }
+ }
+}
+
+Value::Value(const Value &RHS)
+ : Interp(RHS.Interp), OpaqueType(RHS.OpaqueType), Data(RHS.Data),
+ ValueKind(RHS.ValueKind), IsManuallyAlloc(RHS.IsManuallyAlloc) {
+ if (IsManuallyAlloc)
+ ValueStorage::getFromPayload(getPtr())->Retain();
+}
+
+Value::Value(Value &&RHS) noexcept {
+ Interp = std::exchange(RHS.Interp, nullptr);
+ OpaqueType = std::exchange(RHS.OpaqueType, nullptr);
+ Data = RHS.Data;
+ ValueKind = std::exchange(RHS.ValueKind, K_Unspecified);
+ IsManuallyAlloc = std::exchange(RHS.IsManuallyAlloc, false);
+
+ if (IsManuallyAlloc)
+ ValueStorage::getFromPayload(getPtr())->Release();
+}
+
+Value &Value::operator=(const Value &RHS) {
+ if (IsManuallyAlloc)
+ ValueStorage::getFromPayload(getPtr())->Release();
+
+ Interp = RHS.Interp;
+ OpaqueType = RHS.OpaqueType;
+ Data = RHS.Data;
+ ValueKind = RHS.ValueKind;
+ IsManuallyAlloc = RHS.IsManuallyAlloc;
+
+ if (IsManuallyAlloc)
+ ValueStorage::getFromPayload(getPtr())->Retain();
+
+ return *this;
+}
+
+Value &Value::operator=(Value &&RHS) noexcept {
+ if (IsManuallyAlloc)
+ ValueStorage::getFromPayload(getPtr())->Release();
+
+ Interp = std::exchange(RHS.Interp, nullptr);
+ OpaqueType = std::exchange(RHS.OpaqueType, nullptr);
+ ValueKind = std::exchange(RHS.ValueKind, K_Unspecified);
+ IsManuallyAlloc = std::exchange(RHS.IsManuallyAlloc, false);
+
+ Data = RHS.Data;
+
+ return *this;
+}
+
+void Value::clear() {
+ if (IsManuallyAlloc)
+ ValueStorage::getFromPayload(getPtr())->Release();
+ ValueKind = K_Unspecified;
+ OpaqueType = nullptr;
+ Interp = nullptr;
+ IsManuallyAlloc = false;
+}
+
+Value::~Value() { clear(); }
+
+void *Value::getPtr() const {
+ assert(ValueKind == K_PtrOrObj);
+ return Data.m_Ptr;
+}
+
+QualType Value::getType() const {
+ return QualType::getFromOpaquePtr(OpaqueType);
+}
+
+Interpreter &Value::getInterpreter() {
+ assert(Interp != nullptr &&
+ "Can't get interpreter from a default constructed value");
+ return *Interp;
+}
+
+const Interpreter &Value::getInterpreter() const {
+ assert(Interp != nullptr &&
+ "Can't get interpreter from a default constructed value");
+ return *Interp;
+}
+
+ASTContext &Value::getASTContext() { return getInterpreter().getASTContext(); }
+
+const ASTContext &Value::getASTContext() const {
+ return getInterpreter().getASTContext();
+}
+
+void Value::dump() const { print(llvm::outs()); }
+
+void Value::printType(llvm::raw_ostream &Out) const {
+ Out << "Not implement yet.\n";
+}
+void Value::printData(llvm::raw_ostream &Out) const {
+ Out << "Not implement yet.\n";
+}
+void Value::print(llvm::raw_ostream &Out) const {
+ assert(OpaqueType != nullptr && "Can't print default Value");
+ Out << "Not implement yet.\n";
+}
diff --git a/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp b/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp
index 0adbaa36bf7c..2bd2c5f8388c 100644
--- a/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp
@@ -19,6 +19,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Pragma.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
@@ -72,6 +73,8 @@ struct Scanner {
// Set the lexer to use 'tok::at' for '@', instead of 'tok::unknown'.
LangOpts.ObjC = true;
LangOpts.LineComment = true;
+ // FIXME: we do not enable C11 or C++11, so we are missing u/u8/U"" and
+ // R"()" literals.
return LangOpts;
}
@@ -91,6 +94,10 @@ private:
void skipLine(const char *&First, const char *const End);
void skipDirective(StringRef Name, const char *&First, const char *const End);
+ /// Returns the spelling of a string literal or identifier after performing
+ /// any processing needed to handle \c clang::Token::NeedsCleaning.
+ StringRef cleanStringIfNeeded(const dependency_directives_scan::Token &Tok);
+
/// Lexes next token and if it is identifier returns its string, otherwise
/// it skips the current line and returns \p std::nullopt.
///
@@ -112,6 +119,22 @@ private:
const char *&First,
const char *const End);
+ /// Lexes next token and returns true iff it matches the kind \p K.
+ /// Otherwise it skips the current line and returns false.
+ ///
+ /// In any case (whatever the token kind) \p First and the \p Lexer will
+ /// advance beyond the token.
+ [[nodiscard]] bool isNextTokenOrSkipLine(tok::TokenKind K, const char *&First,
+ const char *const End);
+
+ /// Lexes next token and if it is string literal, returns its string.
+ /// Otherwise, it skips the current line and returns \p std::nullopt.
+ ///
+ /// In any case (whatever the token kind) \p First and the \p Lexer will
+ /// advance beyond the token.
+ [[nodiscard]] std::optional<StringRef>
+ tryLexStringLiteralOrSkipLine(const char *&First, const char *const End);
+
[[nodiscard]] bool scanImpl(const char *First, const char *const End);
[[nodiscard]] bool lexPPLine(const char *&First, const char *const End);
[[nodiscard]] bool lexAt(const char *&First, const char *const End);
@@ -119,6 +142,7 @@ private:
[[nodiscard]] bool lexDefine(const char *HashLoc, const char *&First,
const char *const End);
[[nodiscard]] bool lexPragma(const char *&First, const char *const End);
+ [[nodiscard]] bool lex_Pragma(const char *&First, const char *const End);
[[nodiscard]] bool lexEndif(const char *&First, const char *const End);
[[nodiscard]] bool lexDefault(DirectiveKind Kind, const char *&First,
const char *const End);
@@ -525,15 +549,8 @@ void Scanner::lexPPDirectiveBody(const char *&First, const char *const End) {
}
}
-[[nodiscard]] std::optional<StringRef>
-Scanner::tryLexIdentifierOrSkipLine(const char *&First, const char *const End) {
- const dependency_directives_scan::Token &Tok = lexToken(First, End);
- if (Tok.isNot(tok::raw_identifier)) {
- if (!Tok.is(tok::eod))
- skipLine(First, End);
- return std::nullopt;
- }
-
+StringRef
+Scanner::cleanStringIfNeeded(const dependency_directives_scan::Token &Tok) {
bool NeedsCleaning = Tok.Flags & clang::Token::NeedsCleaning;
if (LLVM_LIKELY(!NeedsCleaning))
return Input.slice(Tok.Offset, Tok.getEnd());
@@ -541,6 +558,9 @@ Scanner::tryLexIdentifierOrSkipLine(const char *&First, const char *const End) {
SmallString<64> Spelling;
Spelling.resize(Tok.Length);
+ // FIXME: C++11 raw string literals need special handling (see getSpellingSlow
+ // in the Lexer). Currently we cannot see them due to our LangOpts.
+
unsigned SpellingLength = 0;
const char *BufPtr = Input.begin() + Tok.Offset;
const char *AfterIdent = Input.begin() + Tok.getEnd();
@@ -555,6 +575,18 @@ Scanner::tryLexIdentifierOrSkipLine(const char *&First, const char *const End) {
.first->first();
}
+std::optional<StringRef>
+Scanner::tryLexIdentifierOrSkipLine(const char *&First, const char *const End) {
+ const dependency_directives_scan::Token &Tok = lexToken(First, End);
+ if (Tok.isNot(tok::raw_identifier)) {
+ if (!Tok.is(tok::eod))
+ skipLine(First, End);
+ return std::nullopt;
+ }
+
+ return cleanStringIfNeeded(Tok);
+}
+
StringRef Scanner::lexIdentifier(const char *&First, const char *const End) {
std::optional<StringRef> Id = tryLexIdentifierOrSkipLine(First, End);
assert(Id && "expected identifier token");
@@ -572,6 +604,28 @@ bool Scanner::isNextIdentifierOrSkipLine(StringRef Id, const char *&First,
return false;
}
+bool Scanner::isNextTokenOrSkipLine(tok::TokenKind K, const char *&First,
+ const char *const End) {
+ const dependency_directives_scan::Token &Tok = lexToken(First, End);
+ if (Tok.is(K))
+ return true;
+ skipLine(First, End);
+ return false;
+}
+
+std::optional<StringRef>
+Scanner::tryLexStringLiteralOrSkipLine(const char *&First,
+ const char *const End) {
+ const dependency_directives_scan::Token &Tok = lexToken(First, End);
+ if (!tok::isStringLiteral(Tok.Kind)) {
+ if (!Tok.is(tok::eod))
+ skipLine(First, End);
+ return std::nullopt;
+ }
+
+ return cleanStringIfNeeded(Tok);
+}
+
bool Scanner::lexAt(const char *&First, const char *const End) {
// Handle "@import".
@@ -629,6 +683,41 @@ bool Scanner::lexModule(const char *&First, const char *const End) {
return lexModuleDirectiveBody(Kind, First, End);
}
+bool Scanner::lex_Pragma(const char *&First, const char *const End) {
+ if (!isNextTokenOrSkipLine(tok::l_paren, First, End))
+ return false;
+
+ std::optional<StringRef> Str = tryLexStringLiteralOrSkipLine(First, End);
+
+ if (!Str || !isNextTokenOrSkipLine(tok::r_paren, First, End))
+ return false;
+
+ SmallString<64> Buffer(*Str);
+ prepare_PragmaString(Buffer);
+
+ // Use a new scanner instance since the tokens will be inside the allocated
+ // string. We should already have captured all the relevant tokens in the
+ // current scanner.
+ SmallVector<dependency_directives_scan::Token> DiscardTokens;
+ const char *Begin = Buffer.c_str();
+ Scanner PragmaScanner{StringRef(Begin, Buffer.size()), DiscardTokens, Diags,
+ InputSourceLoc};
+
+ PragmaScanner.TheLexer.setParsingPreprocessorDirective(true);
+ if (PragmaScanner.lexPragma(Begin, Buffer.end()))
+ return true;
+
+ DirectiveKind K = PragmaScanner.topDirective();
+ if (K == pp_none) {
+ skipLine(First, End);
+ return false;
+ }
+
+ assert(Begin == Buffer.end());
+ pushDirective(K);
+ return false;
+}
+
bool Scanner::lexPragma(const char *&First, const char *const End) {
std::optional<StringRef> FoundId = tryLexIdentifierOrSkipLine(First, End);
if (!FoundId)
@@ -652,9 +741,22 @@ bool Scanner::lexPragma(const char *&First, const char *const End) {
return false;
}
- // #pragma clang.
- if (!isNextIdentifierOrSkipLine("module", First, End))
+ FoundId = tryLexIdentifierOrSkipLine(First, End);
+ if (!FoundId)
return false;
+ Id = *FoundId;
+
+ // #pragma clang system_header
+ if (Id == "system_header") {
+ lexPPDirectiveBody(First, End);
+ pushDirective(pp_pragma_system_header);
+ return false;
+ }
+
+ if (Id != "module") {
+ skipLine(First, End);
+ return false;
+ }
// #pragma clang module.
if (!isNextIdentifierOrSkipLine("import", First, End))
@@ -700,6 +802,7 @@ static bool isStartOfRelevantLine(char First) {
case 'i':
case 'e':
case 'm':
+ case '_':
return true;
}
return false;
@@ -736,6 +839,12 @@ bool Scanner::lexPPLine(const char *&First, const char *const End) {
if (*First == 'i' || *First == 'e' || *First == 'm')
return lexModule(First, End);
+ if (*First == '_') {
+ if (isNextIdentifierOrSkipLine("_Pragma", First, End))
+ return lex_Pragma(First, End);
+ return false;
+ }
+
// Handle preprocessing directives.
TheLexer.setParsingPreprocessorDirective(true);
diff --git a/contrib/llvm-project/clang/lib/Lex/HeaderMap.cpp b/contrib/llvm-project/clang/lib/Lex/HeaderMap.cpp
index bb50a4eef65c..da0b8898f690 100644
--- a/contrib/llvm-project/clang/lib/Lex/HeaderMap.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/HeaderMap.cpp
@@ -77,8 +77,8 @@ bool HeaderMapImpl::checkHeader(const llvm::MemoryBuffer &File,
if (Header->Magic == HMAP_HeaderMagicNumber &&
Header->Version == HMAP_HeaderVersion)
NeedsByteSwap = false;
- else if (Header->Magic == llvm::ByteSwap_32(HMAP_HeaderMagicNumber) &&
- Header->Version == llvm::ByteSwap_16(HMAP_HeaderVersion))
+ else if (Header->Magic == llvm::byteswap<uint32_t>(HMAP_HeaderMagicNumber) &&
+ Header->Version == llvm::byteswap<uint16_t>(HMAP_HeaderVersion))
NeedsByteSwap = true; // Mixed endianness headermap.
else
return false; // Not a header map.
@@ -113,7 +113,7 @@ StringRef HeaderMapImpl::getFileName() const {
unsigned HeaderMapImpl::getEndianAdjustedWord(unsigned X) const {
if (!NeedsBSwap) return X;
- return llvm::ByteSwap_32(X);
+ return llvm::byteswap<uint32_t>(X);
}
/// getHeader - Return a reference to the file header, in unbyte-swapped form.
diff --git a/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
index 074c147ba3c5..a5e8b028b25e 100644
--- a/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
@@ -311,7 +311,7 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
// SearchName rather than ModuleName, to permit finding private modules
// named FooPrivate in buggy frameworks named Foo.
SmallString<128> FrameworkDirName;
- FrameworkDirName += Dir.getFrameworkDir()->getName();
+ FrameworkDirName += Dir.getFrameworkDirRef()->getName();
llvm::sys::path::append(FrameworkDirName, SearchName + ".framework");
if (auto FrameworkDir =
FileMgr.getOptionalDirectoryRef(FrameworkDirName)) {
@@ -345,7 +345,7 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
// Search for a module map in a subdirectory with the same name as the
// module.
SmallString<128> NestedModuleMapDirName;
- NestedModuleMapDirName = Dir.getDir()->getName();
+ NestedModuleMapDirName = Dir.getDirRef()->getName();
llvm::sys::path::append(NestedModuleMapDirName, ModuleName);
if (loadModuleMapFile(NestedModuleMapDirName, IsSystem,
/*IsFramework*/false) == LMM_NewlyLoaded){
@@ -378,15 +378,17 @@ void HeaderSearch::indexInitialHeaderMaps() {
llvm::StringMap<unsigned, llvm::BumpPtrAllocator> Index(SearchDirs.size());
// Iterate over all filename keys and associate them with the index i.
- unsigned i = 0;
- for (; i != SearchDirs.size(); ++i) {
+ for (unsigned i = 0; i != SearchDirs.size(); ++i) {
auto &Dir = SearchDirs[i];
// We're concerned with only the initial contiguous run of header
// maps within SearchDirs, which can be 99% of SearchDirs when
// SearchDirs.size() is ~10000.
- if (!Dir.isHeaderMap())
+ if (!Dir.isHeaderMap()) {
+ SearchDirHeaderMapIndex = std::move(Index);
+ FirstNonHeaderMapSearchDirIdx = i;
break;
+ }
// Give earlier keys precedence over identical later keys.
auto Callback = [&](StringRef Filename) {
@@ -394,9 +396,6 @@ void HeaderSearch::indexInitialHeaderMaps() {
};
Dir.getHeaderMap()->forEachKey(Callback);
}
-
- SearchDirHeaderMapIndex = std::move(Index);
- FirstNonHeaderMapSearchDirIdx = i;
}
//===----------------------------------------------------------------------===//
@@ -406,11 +405,10 @@ void HeaderSearch::indexInitialHeaderMaps() {
/// getName - Return the directory or filename corresponding to this lookup
/// object.
StringRef DirectoryLookup::getName() const {
- // FIXME: Use the name from \c DirectoryEntryRef.
if (isNormalDir())
- return getDir()->getName();
+ return getDirRef()->getName();
if (isFramework())
- return getFrameworkDir()->getName();
+ return getFrameworkDirRef()->getName();
assert(isHeaderMap() && "Unknown DirectoryLookup");
return getHeaderMap()->getFileName();
}
@@ -438,8 +436,8 @@ OptionalFileEntryRef HeaderSearch::getFileAndSuggestModule(
// If there is a module that corresponds to this header, suggest it.
if (!findUsableModuleForHeader(
- &File->getFileEntry(), Dir ? Dir : File->getFileEntry().getDir(),
- RequestingModule, SuggestedModule, IsSystemHeaderDir))
+ *File, Dir ? Dir : File->getFileEntry().getDir(), RequestingModule,
+ SuggestedModule, IsSystemHeaderDir))
return std::nullopt;
return *File;
@@ -492,7 +490,8 @@ OptionalFileEntryRef DirectoryLookup::LookupFile(
IsInHeaderMap = true;
- auto FixupSearchPath = [&]() {
+ auto FixupSearchPathAndFindUsableModule =
+ [&](FileEntryRef File) -> OptionalFileEntryRef {
if (SearchPath) {
StringRef SearchPathRef(getName());
SearchPath->clear();
@@ -502,6 +501,12 @@ OptionalFileEntryRef DirectoryLookup::LookupFile(
RelativePath->clear();
RelativePath->append(Filename.begin(), Filename.end());
}
+ if (!HS.findUsableModuleForHeader(File, File.getFileEntry().getDir(),
+ RequestingModule, SuggestedModule,
+ isSystemHeaderDirectory())) {
+ return std::nullopt;
+ }
+ return File;
};
// Check if the headermap maps the filename to a framework include
@@ -514,8 +519,7 @@ OptionalFileEntryRef DirectoryLookup::LookupFile(
}
if (auto Res = HS.getFileMgr().getOptionalFileRef(Dest, OpenFile)) {
- FixupSearchPath();
- return *Res;
+ return FixupSearchPathAndFindUsableModule(*Res);
}
// Header maps need to be marked as used whenever the filename matches.
@@ -686,7 +690,7 @@ OptionalFileEntryRef DirectoryLookup::DoFrameworkLookup(
// If we found the header and are allowed to suggest a module, do so now.
if (File && needModuleLookup(RequestingModule, SuggestedModule)) {
// Find the framework in which this header occurs.
- StringRef FrameworkPath = File->getFileEntry().getDir()->getName();
+ StringRef FrameworkPath = File->getDir().getName();
bool FoundFramework = false;
do {
// Determine whether this directory exists.
@@ -709,14 +713,13 @@ OptionalFileEntryRef DirectoryLookup::DoFrameworkLookup(
bool IsSystem = getDirCharacteristic() != SrcMgr::C_User;
if (FoundFramework) {
- if (!HS.findUsableModuleForFrameworkHeader(
- &File->getFileEntry(), FrameworkPath, RequestingModule,
- SuggestedModule, IsSystem))
+ if (!HS.findUsableModuleForFrameworkHeader(*File, FrameworkPath,
+ RequestingModule,
+ SuggestedModule, IsSystem))
return std::nullopt;
} else {
- if (!HS.findUsableModuleForHeader(&File->getFileEntry(), getDir(),
- RequestingModule, SuggestedModule,
- IsSystem))
+ if (!HS.findUsableModuleForHeader(*File, getDir(), RequestingModule,
+ SuggestedModule, IsSystem))
return std::nullopt;
}
}
@@ -858,7 +861,7 @@ diagnoseFrameworkInclude(DiagnosticsEngine &Diags, SourceLocation IncludeLoc,
OptionalFileEntryRef HeaderSearch::LookupFile(
StringRef Filename, SourceLocation IncludeLoc, bool isAngled,
ConstSearchDirIterator FromDir, ConstSearchDirIterator *CurDirArg,
- ArrayRef<std::pair<const FileEntry *, const DirectoryEntry *>> Includers,
+ ArrayRef<std::pair<const FileEntry *, DirectoryEntryRef>> Includers,
SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule,
bool *IsMapped, bool *IsFrameworkFound, bool SkipCache,
@@ -913,7 +916,7 @@ OptionalFileEntryRef HeaderSearch::LookupFile(
// Concatenate the requested file onto the directory.
// FIXME: Portability. Filename concatenation should be in sys::Path.
- TmpDir = IncluderAndDir.second->getName();
+ TmpDir = IncluderAndDir.second.getName();
TmpDir.push_back('/');
TmpDir.append(Filename.begin(), Filename.end());
@@ -952,7 +955,7 @@ OptionalFileEntryRef HeaderSearch::LookupFile(
ToHFI.Framework = Framework;
if (SearchPath) {
- StringRef SearchPathRef(IncluderAndDir.second->getName());
+ StringRef SearchPathRef(IncluderAndDir.second.getName());
SearchPath->clear();
SearchPath->append(SearchPathRef.begin(), SearchPathRef.end());
}
@@ -962,7 +965,7 @@ OptionalFileEntryRef HeaderSearch::LookupFile(
}
if (First) {
diagnoseFrameworkInclude(Diags, IncludeLoc,
- IncluderAndDir.second->getName(), Filename,
+ IncluderAndDir.second.getName(), Filename,
&FE->getFileEntry());
return FE;
}
@@ -1005,7 +1008,8 @@ OptionalFileEntryRef HeaderSearch::LookupFile(
ConstSearchDirIterator NextIt = std::next(It);
if (!SkipCache) {
- if (CacheLookup.StartIt == NextIt) {
+ if (CacheLookup.StartIt == NextIt &&
+ CacheLookup.RequestingModule == RequestingModule) {
// HIT: Skip querying potentially lots of directories for this lookup.
if (CacheLookup.HitIt)
It = CacheLookup.HitIt;
@@ -1018,7 +1022,7 @@ OptionalFileEntryRef HeaderSearch::LookupFile(
// MISS: This is the first query, or the previous query didn't match
// our search start. We will fill in our found location below, so prime
// the start point value.
- CacheLookup.reset(/*NewStartIt=*/NextIt);
+ CacheLookup.reset(RequestingModule, /*NewStartIt=*/NextIt);
if (It == search_dir_begin() && FirstNonHeaderMapSearchDirIdx > 0) {
// Handle cold misses of user includes in the presence of many header
@@ -1033,8 +1037,9 @@ OptionalFileEntryRef HeaderSearch::LookupFile(
It = search_dir_nth(Iter->second);
}
}
- } else
- CacheLookup.reset(/*NewStartIt=*/NextIt);
+ } else {
+ CacheLookup.reset(RequestingModule, /*NewStartIt=*/NextIt);
+ }
SmallString<64> MappedName;
@@ -1117,7 +1122,7 @@ OptionalFileEntryRef HeaderSearch::LookupFile(
bool FoundByHeaderMap = !IsMapped ? false : *IsMapped;
if (!Includers.empty())
diagnoseFrameworkInclude(
- Diags, IncludeLoc, Includers.front().second->getName(), Filename,
+ Diags, IncludeLoc, Includers.front().second.getName(), Filename,
&File->getFileEntry(), isAngled, FoundByHeaderMap);
// Remember this location for the next lookup we do.
@@ -1275,7 +1280,7 @@ OptionalFileEntryRef HeaderSearch::LookupSubframeworkHeader(
getFileInfo(&File->getFileEntry()).DirInfo = DirInfo;
FrameworkName.pop_back(); // remove the trailing '/'
- if (!findUsableModuleForFrameworkHeader(&File->getFileEntry(), FrameworkName,
+ if (!findUsableModuleForFrameworkHeader(*File, FrameworkName,
RequestingModule, SuggestedModule,
/*IsSystem*/ false))
return std::nullopt;
@@ -1371,7 +1376,7 @@ HeaderSearch::getExistingFileInfo(const FileEntry *FE,
return HFI;
}
-bool HeaderSearch::isFileMultipleIncludeGuarded(const FileEntry *File) {
+bool HeaderSearch::isFileMultipleIncludeGuarded(const FileEntry *File) const {
// Check if we've entered this file and found an include guard or #pragma
// once. Note that we dor't check for #import, because that's not a property
// of the file itself.
@@ -1555,7 +1560,7 @@ bool HeaderSearch::hasModuleMap(StringRef FileName,
}
ModuleMap::KnownHeader
-HeaderSearch::findModuleForHeader(const FileEntry *File, bool AllowTextual,
+HeaderSearch::findModuleForHeader(FileEntryRef File, bool AllowTextual,
bool AllowExcluded) const {
if (ExternalSource) {
// Make sure the external source has handled header info about this file,
@@ -1566,7 +1571,7 @@ HeaderSearch::findModuleForHeader(const FileEntry *File, bool AllowTextual,
}
ArrayRef<ModuleMap::KnownHeader>
-HeaderSearch::findAllModulesForHeader(const FileEntry *File) const {
+HeaderSearch::findAllModulesForHeader(FileEntryRef File) const {
if (ExternalSource) {
// Make sure the external source has handled header info about this file,
// which includes whether the file is part of a module.
@@ -1575,7 +1580,17 @@ HeaderSearch::findAllModulesForHeader(const FileEntry *File) const {
return ModMap.findAllModulesForHeader(File);
}
-static bool suggestModule(HeaderSearch &HS, const FileEntry *File,
+ArrayRef<ModuleMap::KnownHeader>
+HeaderSearch::findResolvedModulesForHeader(const FileEntry *File) const {
+ if (ExternalSource) {
+ // Make sure the external source has handled header info about this file,
+ // which includes whether the file is part of a module.
+ (void)getExistingFileInfo(File);
+ }
+ return ModMap.findResolvedModulesForHeader(File);
+}
+
+static bool suggestModule(HeaderSearch &HS, FileEntryRef File,
Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule) {
ModuleMap::KnownHeader Module =
@@ -1611,18 +1626,18 @@ static bool suggestModule(HeaderSearch &HS, const FileEntry *File,
}
bool HeaderSearch::findUsableModuleForHeader(
- const FileEntry *File, const DirectoryEntry *Root, Module *RequestingModule,
+ FileEntryRef File, const DirectoryEntry *Root, Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule, bool IsSystemHeaderDir) {
- if (File && needModuleLookup(RequestingModule, SuggestedModule)) {
+ if (needModuleLookup(RequestingModule, SuggestedModule)) {
// If there is a module that corresponds to this header, suggest it.
- hasModuleMap(File->getName(), Root, IsSystemHeaderDir);
+ hasModuleMap(File.getName(), Root, IsSystemHeaderDir);
return suggestModule(*this, File, RequestingModule, SuggestedModule);
}
return true;
}
bool HeaderSearch::findUsableModuleForFrameworkHeader(
- const FileEntry *File, StringRef FrameworkName, Module *RequestingModule,
+ FileEntryRef File, StringRef FrameworkName, Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule, bool IsSystemFramework) {
// If we're supposed to suggest a module, look for one now.
if (needModuleLookup(RequestingModule, SuggestedModule)) {
@@ -1648,10 +1663,10 @@ bool HeaderSearch::findUsableModuleForFrameworkHeader(
return true;
}
-static const FileEntry *getPrivateModuleMap(const FileEntry *File,
+static const FileEntry *getPrivateModuleMap(FileEntryRef File,
FileManager &FileMgr) {
- StringRef Filename = llvm::sys::path::filename(File->getName());
- SmallString<128> PrivateFilename(File->getDir()->getName());
+ StringRef Filename = llvm::sys::path::filename(File.getName());
+ SmallString<128> PrivateFilename(File.getDir().getName());
if (Filename == "module.map")
llvm::sys::path::append(PrivateFilename, "module_private.map");
else if (Filename == "module.modulemap")
@@ -1663,7 +1678,7 @@ static const FileEntry *getPrivateModuleMap(const FileEntry *File,
return nullptr;
}
-bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem,
+bool HeaderSearch::loadModuleMapFile(FileEntryRef File, bool IsSystem,
FileID ID, unsigned *Offset,
StringRef OriginalModuleMapFile) {
// Find the directory for the module. For frameworks, that may require going
@@ -1682,9 +1697,7 @@ bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem,
Dir = FakeFile.getDir();
}
} else {
- // TODO: Replace with `Dir = File.getDir()` when `File` is switched to
- // `FileEntryRef`.
- Dir = FileMgr.getOptionalDirectoryRef(File->getDir()->getName());
+ Dir = File.getDir();
}
assert(Dir && "parent must exist");
@@ -1713,11 +1726,9 @@ bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem,
}
HeaderSearch::LoadModuleMapResult
-HeaderSearch::loadModuleMapFileImpl(const FileEntry *File, bool IsSystem,
+HeaderSearch::loadModuleMapFileImpl(FileEntryRef File, bool IsSystem,
DirectoryEntryRef Dir, FileID ID,
unsigned *Offset) {
- assert(File && "expected FileEntry");
-
// Check whether we've already loaded this module map, and mark it as being
// loaded in case we recursively try to load it from itself.
auto AddResult = LoadedModuleMaps.insert(std::make_pair(File, true));
@@ -1741,42 +1752,39 @@ HeaderSearch::loadModuleMapFileImpl(const FileEntry *File, bool IsSystem,
return LMM_NewlyLoaded;
}
-const FileEntry *
-HeaderSearch::lookupModuleMapFile(const DirectoryEntry *Dir, bool IsFramework) {
+OptionalFileEntryRef
+HeaderSearch::lookupModuleMapFile(DirectoryEntryRef Dir, bool IsFramework) {
if (!HSOpts->ImplicitModuleMaps)
- return nullptr;
+ return std::nullopt;
// For frameworks, the preferred spelling is Modules/module.modulemap, but
// module.map at the framework root is also accepted.
- SmallString<128> ModuleMapFileName(Dir->getName());
+ SmallString<128> ModuleMapFileName(Dir.getName());
if (IsFramework)
llvm::sys::path::append(ModuleMapFileName, "Modules");
llvm::sys::path::append(ModuleMapFileName, "module.modulemap");
- if (auto F = FileMgr.getFile(ModuleMapFileName))
+ if (auto F = FileMgr.getOptionalFileRef(ModuleMapFileName))
return *F;
// Continue to allow module.map
- ModuleMapFileName = Dir->getName();
+ ModuleMapFileName = Dir.getName();
llvm::sys::path::append(ModuleMapFileName, "module.map");
- if (auto F = FileMgr.getFile(ModuleMapFileName))
+ if (auto F = FileMgr.getOptionalFileRef(ModuleMapFileName))
return *F;
// For frameworks, allow to have a private module map with a preferred
// spelling when a public module map is absent.
if (IsFramework) {
- ModuleMapFileName = Dir->getName();
+ ModuleMapFileName = Dir.getName();
llvm::sys::path::append(ModuleMapFileName, "Modules",
"module.private.modulemap");
- if (auto F = FileMgr.getFile(ModuleMapFileName))
+ if (auto F = FileMgr.getOptionalFileRef(ModuleMapFileName))
return *F;
}
- return nullptr;
+ return std::nullopt;
}
Module *HeaderSearch::loadFrameworkModule(StringRef Name, DirectoryEntryRef Dir,
bool IsSystem) {
- if (Module *Module = ModMap.findModule(Name))
- return Module;
-
// Try to load a module map file.
switch (loadModuleMapFile(Dir, IsSystem, /*IsFramework*/true)) {
case LMM_InvalidModuleMap:
@@ -1785,10 +1793,10 @@ Module *HeaderSearch::loadFrameworkModule(StringRef Name, DirectoryEntryRef Dir,
ModMap.inferFrameworkModule(Dir, IsSystem, /*Parent=*/nullptr);
break;
- case LMM_AlreadyLoaded:
case LMM_NoDirectory:
return nullptr;
+ case LMM_AlreadyLoaded:
case LMM_NewlyLoaded:
break;
}
@@ -1812,9 +1820,10 @@ HeaderSearch::loadModuleMapFile(DirectoryEntryRef Dir, bool IsSystem,
if (KnownDir != DirectoryHasModuleMap.end())
return KnownDir->second ? LMM_AlreadyLoaded : LMM_InvalidModuleMap;
- if (const FileEntry *ModuleMapFile = lookupModuleMapFile(Dir, IsFramework)) {
+ if (OptionalFileEntryRef ModuleMapFile =
+ lookupModuleMapFile(Dir, IsFramework)) {
LoadModuleMapResult Result =
- loadModuleMapFileImpl(ModuleMapFile, IsSystem, Dir);
+ loadModuleMapFileImpl(*ModuleMapFile, IsSystem, Dir);
// Add Dir explicitly in case ModuleMapFile is in a subdirectory.
// E.g. Foo.framework/Modules/module.modulemap
// ^Dir ^ModuleMapFile
@@ -1837,7 +1846,7 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
if (DL.isFramework()) {
std::error_code EC;
SmallString<128> DirNative;
- llvm::sys::path::native(DL.getFrameworkDir()->getName(), DirNative);
+ llvm::sys::path::native(DL.getFrameworkDirRef()->getName(), DirNative);
// Search each of the ".framework" directories to load them as modules.
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
@@ -1900,13 +1909,15 @@ void HeaderSearch::loadSubdirectoryModuleMaps(DirectoryLookup &SearchDir) {
return;
std::error_code EC;
- SmallString<128> Dir = SearchDir.getDir()->getName();
+ SmallString<128> Dir = SearchDir.getDirRef()->getName();
FileMgr.makeAbsolutePath(Dir);
SmallString<128> DirNative;
llvm::sys::path::native(Dir, DirNative);
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
+ if (Dir->type() == llvm::sys::fs::file_type::regular_file)
+ continue;
bool IsFramework = llvm::sys::path::extension(Dir->path()) == ".framework";
if (IsFramework == SearchDir.isFramework())
loadModuleMapFile(Dir->path(), SearchDir.isSystemHeaderDirectory(),
@@ -1917,7 +1928,7 @@ void HeaderSearch::loadSubdirectoryModuleMaps(DirectoryLookup &SearchDir) {
}
std::string HeaderSearch::suggestPathToFileForDiagnostics(
- const FileEntry *File, llvm::StringRef MainFile, bool *IsSystem) {
+ const FileEntry *File, llvm::StringRef MainFile, bool *IsSystem) const {
// FIXME: We assume that the path name currently cached in the FileEntry is
// the most appropriate one for this analysis (and that it's spelled the
// same way as the corresponding header search path).
@@ -1927,9 +1938,9 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
std::string HeaderSearch::suggestPathToFileForDiagnostics(
llvm::StringRef File, llvm::StringRef WorkingDir, llvm::StringRef MainFile,
- bool *IsSystem) {
+ bool *IsSystem) const {
using namespace llvm::sys;
-
+
llvm::SmallString<32> FilePath = File;
// remove_dots switches to backslashes on windows as a side-effect!
// We always want to suggest forward slashes for includes.
@@ -1983,14 +1994,14 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
bool BestPrefixIsFramework = false;
for (const DirectoryLookup &DL : search_dir_range()) {
if (DL.isNormalDir()) {
- StringRef Dir = DL.getDir()->getName();
+ StringRef Dir = DL.getDirRef()->getName();
if (CheckDir(Dir)) {
if (IsSystem)
*IsSystem = BestPrefixLength && isSystem(DL.getDirCharacteristic());
BestPrefixIsFramework = false;
}
} else if (DL.isFramework()) {
- StringRef Dir = DL.getFrameworkDir()->getName();
+ StringRef Dir = DL.getFrameworkDirRef()->getName();
if (CheckDir(Dir)) {
if (IsSystem)
*IsSystem = BestPrefixLength && isSystem(DL.getDirCharacteristic());
diff --git a/contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp b/contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp
index d4465565718e..41382d7cb3fc 100644
--- a/contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp
@@ -21,11 +21,11 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <optional>
using namespace clang;
@@ -233,8 +233,6 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
switch (os) {
case llvm::Triple::CloudABI:
case llvm::Triple::NaCl:
- case llvm::Triple::PS4:
- case llvm::Triple::PS5:
case llvm::Triple::ELFIAMCU:
break;
case llvm::Triple::Win32:
@@ -339,31 +337,6 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
case llvm::Triple::NaCl:
case llvm::Triple::ELFIAMCU:
break;
- case llvm::Triple::PS4:
- case llvm::Triple::PS5: {
- // <isysroot> gets prepended later in AddPath().
- std::string BaseSDKPath;
- if (!HasSysroot) {
- const char *EnvVar = (os == llvm::Triple::PS4) ? "SCE_ORBIS_SDK_DIR"
- : "SCE_PROSPERO_SDK_DIR";
- const char *envValue = getenv(EnvVar);
- if (envValue)
- BaseSDKPath = envValue;
- else {
- // HSOpts.ResourceDir variable contains the location of Clang's
- // resource files.
- // Assuming that Clang is configured for PS4 without
- // --with-clang-resource-dir option, the location of Clang's resource
- // files is <SDK_DIR>/host_tools/lib/clang
- SmallString<128> P = StringRef(HSOpts.ResourceDir);
- llvm::sys::path::append(P, "../../..");
- BaseSDKPath = std::string(P.str());
- }
- }
- AddPath(BaseSDKPath + "/target/include", System, false);
- AddPath(BaseSDKPath + "/target/include_common", System, false);
- break;
- }
default:
AddPath("/usr/include", ExternCSystem, false);
break;
@@ -412,6 +385,8 @@ bool InitHeaderSearch::ShouldAddDefaultIncludePaths(
case llvm::Triple::FreeBSD:
case llvm::Triple::NetBSD:
case llvm::Triple::OpenBSD:
+ case llvm::Triple::PS4:
+ case llvm::Triple::PS5:
case llvm::Triple::Fuchsia:
case llvm::Triple::Hurd:
case llvm::Triple::Linux:
@@ -677,7 +652,7 @@ void clang::ApplyHeaderSearchOptions(HeaderSearch &HS,
// Set up the builtin include directory in the module map.
SmallString<128> P = StringRef(HSOpts.ResourceDir);
llvm::sys::path::append(P, "include");
- if (auto Dir = HS.getFileMgr().getDirectory(P))
+ if (auto Dir = HS.getFileMgr().getOptionalDirectoryRef(P))
HS.getModuleMap().setBuiltinIncludeDir(*Dir);
}
diff --git a/contrib/llvm-project/clang/lib/Lex/Lexer.cpp b/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
index d49d9e9e4b14..3637f8420462 100644
--- a/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
@@ -2279,7 +2279,7 @@ void Lexer::codeCompleteIncludedFile(const char *PathStart,
++CompletionPoint;
if (Next == (IsAngled ? '>' : '"'))
break;
- if (llvm::is_contained(SlashChars, Next))
+ if (SlashChars.contains(Next))
break;
}
@@ -2785,7 +2785,7 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
// Adjust the pointer to point directly after the first slash. It's
// not necessary to set C here, it will be overwritten at the end of
// the outer loop.
- CurPtr += llvm::countTrailingZeros<unsigned>(cmp) + 1;
+ CurPtr += llvm::countr_zero<unsigned>(cmp) + 1;
goto FoundSlash;
}
CurPtr += 16;
@@ -3348,8 +3348,8 @@ std::optional<uint32_t> Lexer::tryReadNumericUCN(const char *&StartPtr,
}
if (Delimited && PP) {
- Diag(SlashLoc, PP->getLangOpts().CPlusPlus2b
- ? diag::warn_cxx2b_delimited_escape_sequence
+ Diag(SlashLoc, PP->getLangOpts().CPlusPlus23
+ ? diag::warn_cxx23_delimited_escape_sequence
: diag::ext_delimited_escape_sequence)
<< /*delimited*/ 0 << (PP->getLangOpts().CPlusPlus ? 1 : 0);
}
@@ -3436,8 +3436,8 @@ std::optional<uint32_t> Lexer::tryReadNamedUCN(const char *&StartPtr,
}
if (Diagnose && Match)
- Diag(SlashLoc, PP->getLangOpts().CPlusPlus2b
- ? diag::warn_cxx2b_delimited_escape_sequence
+ Diag(SlashLoc, PP->getLangOpts().CPlusPlus23
+ ? diag::warn_cxx23_delimited_escape_sequence
: diag::ext_delimited_escape_sequence)
<< /*named*/ 1 << (PP->getLangOpts().CPlusPlus ? 1 : 0);
@@ -3484,9 +3484,14 @@ uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
if (LangOpts.AsmPreprocessor)
return CodePoint;
- // C99 6.4.3p2: A universal character name shall not specify a character whose
- // short identifier is less than 00A0 other than 0024 ($), 0040 (@), or
- // 0060 (`), nor one in the range D800 through DFFF inclusive.)
+ // C2x 6.4.3p2: A universal character name shall not designate a code point
+ // where the hexadecimal value is:
+ // - in the range D800 through DFFF inclusive; or
+ // - greater than 10FFFF.
+ // A universal-character-name outside the c-char-sequence of a character
+ // constant, or the s-char-sequence of a string-literal shall not designate
+ // a control character or a character in the basic character set.
+
// C++11 [lex.charset]p2: If the hexadecimal value for a
// universal-character-name corresponds to a surrogate code point (in the
// range 0xD800-0xDFFF, inclusive), the program is ill-formed. Additionally,
@@ -3496,9 +3501,6 @@ uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
// ranges 0x00-0x1F or 0x7F-0x9F, both inclusive) or to a character in the
// basic source character set, the program is ill-formed.
if (CodePoint < 0xA0) {
- if (CodePoint == 0x24 || CodePoint == 0x40 || CodePoint == 0x60)
- return CodePoint;
-
// We don't use isLexingRawMode() here because we need to warn about bad
// UCNs even when skipping preprocessing tokens in a #if block.
if (Result && PP) {
@@ -4222,9 +4224,7 @@ LexStart:
if (LangOpts.Digraphs && Char == '>') {
Kind = tok::r_square; // ':>' -> ']'
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
- } else if ((LangOpts.CPlusPlus ||
- LangOpts.DoubleSquareBracketAttributes) &&
- Char == ':') {
+ } else if (Char == ':') {
Kind = tok::coloncolon;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else {
@@ -4361,11 +4361,9 @@ HandleDirective:
FormTokenWithChars(Result, CurPtr, tok::hash);
PP->HandleDirective(Result);
- if (PP->hadModuleLoaderFatalFailure()) {
+ if (PP->hadModuleLoaderFatalFailure())
// With a fatal failure in the module loader, we abort parsing.
- assert(Result.is(tok::eof) && "Preprocessor did not set tok:eof");
return true;
- }
// We parsed the directive; lex a token with the new state.
return false;
@@ -4443,8 +4441,7 @@ bool Lexer::LexDependencyDirectiveToken(Token &Result) {
Result.setLiteralData(TokPtr);
return true;
}
- if (Result.is(tok::colon) &&
- (LangOpts.CPlusPlus || LangOpts.DoubleSquareBracketAttributes)) {
+ if (Result.is(tok::colon)) {
// Convert consecutive colons to 'tok::coloncolon'.
if (*BufferPtr == ':') {
assert(DepDirectives.front().Tokens[NextDepDirectiveTokenIndex].is(
@@ -4482,6 +4479,7 @@ bool Lexer::LexDependencyDirectiveTokenWhileSkipping(Token &Result) {
case pp_pragma_push_macro:
case pp_pragma_pop_macro:
case pp_pragma_include_alias:
+ case pp_pragma_system_header:
case pp_include_next:
case decl_at_import:
case cxx_module_decl:
diff --git a/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
index 421a85336043..3b9913ac8ba4 100644
--- a/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
@@ -87,6 +87,24 @@ static DiagnosticBuilder Diag(DiagnosticsEngine *Diags,
MakeCharSourceRange(Features, TokLoc, TokBegin, TokRangeBegin, TokRangeEnd);
}
+static bool IsEscapeValidInUnevaluatedStringLiteral(char Escape) {
+ switch (Escape) {
+ case '\'':
+ case '"':
+ case '?':
+ case '\\':
+ case 'a':
+ case 'b':
+ case 'f':
+ case 'n':
+ case 'r':
+ case 't':
+ case 'v':
+ return true;
+ }
+ return false;
+}
+
/// ProcessCharEscape - Parse a standard C escape sequence, which can occur in
/// either a character or a string literal.
static unsigned ProcessCharEscape(const char *ThisTokBegin,
@@ -94,7 +112,8 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
const char *ThisTokEnd, bool &HadError,
FullSourceLoc Loc, unsigned CharWidth,
DiagnosticsEngine *Diags,
- const LangOptions &Features) {
+ const LangOptions &Features,
+ StringLiteralEvalMethod EvalMethod) {
const char *EscapeBegin = ThisTokBuf;
bool Delimited = false;
bool EndDelimiterFound = false;
@@ -105,6 +124,7 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
// We know that this character can't be off the end of the buffer, because
// that would have been \", which would not have been the end of string.
unsigned ResultChar = *ThisTokBuf++;
+ char Escape = ResultChar;
switch (ResultChar) {
// These map to themselves.
case '\\': case '\'': case '"': case '?': break;
@@ -263,7 +283,8 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
ThisTokBuf++;
continue;
}
- if (ResultChar & 0x020000000)
+ // Check if one of the top three bits is set before shifting them out.
+ if (ResultChar & 0xE0000000)
Overflow = true;
ResultChar <<= 3;
@@ -311,12 +332,18 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
<< tok::r_brace;
else if (!HadError) {
Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
- Features.CPlusPlus2b ? diag::warn_cxx2b_delimited_escape_sequence
+ Features.CPlusPlus23 ? diag::warn_cxx23_delimited_escape_sequence
: diag::ext_delimited_escape_sequence)
<< /*delimited*/ 0 << (Features.CPlusPlus ? 1 : 0);
}
}
+ if (EvalMethod == StringLiteralEvalMethod::Unevaluated &&
+ !IsEscapeValidInUnevaluatedStringLiteral(Escape)) {
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_unevaluated_string_invalid_escape_sequence)
+ << StringRef(EscapeBegin, ThisTokBuf - EscapeBegin);
+ }
return ResultChar;
}
@@ -613,22 +640,28 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
return false;
}
- // C++11 allows UCNs that refer to control characters and basic source
- // characters inside character and string literals
+ // C2x and C++11 allow UCNs that refer to control characters
+ // and basic source characters inside character and string literals
if (UcnVal < 0xa0 &&
- (UcnVal != 0x24 && UcnVal != 0x40 && UcnVal != 0x60)) { // $, @, `
- bool IsError = (!Features.CPlusPlus11 || !in_char_string_literal);
+ // $, @, ` are allowed in all language modes
+ (UcnVal != 0x24 && UcnVal != 0x40 && UcnVal != 0x60)) {
+ bool IsError =
+ (!(Features.CPlusPlus11 || Features.C2x) || !in_char_string_literal);
if (Diags) {
char BasicSCSChar = UcnVal;
if (UcnVal >= 0x20 && UcnVal < 0x7f)
Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
- IsError ? diag::err_ucn_escape_basic_scs :
- diag::warn_cxx98_compat_literal_ucn_escape_basic_scs)
+ IsError ? diag::err_ucn_escape_basic_scs
+ : Features.CPlusPlus
+ ? diag::warn_cxx98_compat_literal_ucn_escape_basic_scs
+ : diag::warn_c2x_compat_literal_ucn_escape_basic_scs)
<< StringRef(&BasicSCSChar, 1);
else
Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
- IsError ? diag::err_ucn_control_character :
- diag::warn_cxx98_compat_literal_ucn_control_character);
+ IsError ? diag::err_ucn_control_character
+ : Features.CPlusPlus
+ ? diag::warn_cxx98_compat_literal_ucn_control_character
+ : diag::warn_c2x_compat_literal_ucn_control_character);
}
if (IsError)
return false;
@@ -640,7 +673,7 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
if ((IsDelimitedEscapeSequence || IsNamedEscapeSequence) && Diags)
Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
- Features.CPlusPlus2b ? diag::warn_cxx2b_delimited_escape_sequence
+ Features.CPlusPlus23 ? diag::warn_cxx23_delimited_escape_sequence
: diag::ext_delimited_escape_sequence)
<< (IsNamedEscapeSequence ? 1 : 0) << (Features.CPlusPlus ? 1 : 0);
@@ -948,7 +981,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
// ToDo: more precise check for CUDA.
// TODO: AMDGPU might also support it in the future.
if ((Target.hasFloat16Type() || LangOpts.CUDA ||
- (LangOpts.OpenMPIsDevice && Target.getTriple().isNVPTX())) &&
+ (LangOpts.OpenMPIsTargetDevice && Target.getTriple().isNVPTX())) &&
s + 2 < ThisTokEnd && s[1] == '1' && s[2] == '6') {
s += 2; // success, eat up 2 characters.
isFloat16 = true;
@@ -1726,9 +1759,10 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
}
unsigned CharWidth = getCharWidth(Kind, PP.getTargetInfo());
uint64_t result =
- ProcessCharEscape(TokBegin, begin, end, HadError,
- FullSourceLoc(Loc,PP.getSourceManager()),
- CharWidth, &PP.getDiagnostics(), PP.getLangOpts());
+ ProcessCharEscape(TokBegin, begin, end, HadError,
+ FullSourceLoc(Loc, PP.getSourceManager()), CharWidth,
+ &PP.getDiagnostics(), PP.getLangOpts(),
+ StringLiteralEvalMethod::Evaluated);
*buffer_begin++ = result;
}
@@ -1757,7 +1791,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
LitVal = 0;
for (size_t i = 0; i < NumCharsSoFar; ++i) {
// check for enough leading zeros to shift into
- multi_char_too_long |= (LitVal.countLeadingZeros() < 8);
+ multi_char_too_long |= (LitVal.countl_zero() < 8);
LitVal <<= 8;
LitVal = LitVal + (codepoint_buffer[i] & 0xFF);
}
@@ -1836,13 +1870,14 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
/// hex-digit hex-digit hex-digit hex-digit
/// \endverbatim
///
-StringLiteralParser::
-StringLiteralParser(ArrayRef<Token> StringToks,
- Preprocessor &PP)
- : SM(PP.getSourceManager()), Features(PP.getLangOpts()),
- Target(PP.getTargetInfo()), Diags(&PP.getDiagnostics()),
- MaxTokenLength(0), SizeBound(0), CharByteWidth(0), Kind(tok::unknown),
- ResultPtr(ResultBuf.data()), hadError(false), Pascal(false) {
+StringLiteralParser::StringLiteralParser(ArrayRef<Token> StringToks,
+ Preprocessor &PP,
+ StringLiteralEvalMethod EvalMethod)
+ : SM(PP.getSourceManager()), Features(PP.getLangOpts()),
+ Target(PP.getTargetInfo()), Diags(&PP.getDiagnostics()),
+ MaxTokenLength(0), SizeBound(0), CharByteWidth(0), Kind(tok::unknown),
+ ResultPtr(ResultBuf.data()), EvalMethod(EvalMethod), hadError(false),
+ Pascal(false) {
init(StringToks);
}
@@ -1859,35 +1894,38 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
assert(!StringToks.empty() && "expected at least one token");
MaxTokenLength = StringToks[0].getLength();
assert(StringToks[0].getLength() >= 2 && "literal token is invalid!");
- SizeBound = StringToks[0].getLength()-2; // -2 for "".
- Kind = StringToks[0].getKind();
-
+ SizeBound = StringToks[0].getLength() - 2; // -2 for "".
hadError = false;
- // Implement Translation Phase #6: concatenation of string literals
+ // Determines the kind of string from the prefix
+ Kind = tok::string_literal;
+
/// (C99 5.1.1.2p1). The common case is only one string fragment.
- for (unsigned i = 1; i != StringToks.size(); ++i) {
- if (StringToks[i].getLength() < 2)
- return DiagnoseLexingError(StringToks[i].getLocation());
+ for (const Token &Tok : StringToks) {
+ if (Tok.getLength() < 2)
+ return DiagnoseLexingError(Tok.getLocation());
// The string could be shorter than this if it needs cleaning, but this is a
// reasonable bound, which is all we need.
- assert(StringToks[i].getLength() >= 2 && "literal token is invalid!");
- SizeBound += StringToks[i].getLength()-2; // -2 for "".
+ assert(Tok.getLength() >= 2 && "literal token is invalid!");
+ SizeBound += Tok.getLength() - 2; // -2 for "".
// Remember maximum string piece length.
- if (StringToks[i].getLength() > MaxTokenLength)
- MaxTokenLength = StringToks[i].getLength();
+ if (Tok.getLength() > MaxTokenLength)
+ MaxTokenLength = Tok.getLength();
// Remember if we see any wide or utf-8/16/32 strings.
// Also check for illegal concatenations.
- if (StringToks[i].isNot(Kind) && StringToks[i].isNot(tok::string_literal)) {
+ if (isUnevaluated() && Tok.getKind() != tok::string_literal) {
+ if (Diags)
+ Diags->Report(Tok.getLocation(), diag::err_unevaluated_string_prefix);
+ hadError = true;
+ } else if (Tok.isNot(Kind) && Tok.isNot(tok::string_literal)) {
if (isOrdinary()) {
- Kind = StringToks[i].getKind();
+ Kind = Tok.getKind();
} else {
if (Diags)
- Diags->Report(StringToks[i].getLocation(),
- diag::err_unsupported_string_concat);
+ Diags->Report(Tok.getLocation(), diag::err_unsupported_string_concat);
hadError = true;
}
}
@@ -1965,13 +2003,18 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
// result of a concatenation involving at least one user-defined-string-
// literal, all the participating user-defined-string-literals shall
// have the same ud-suffix.
- if (UDSuffixBuf != UDSuffix) {
+ bool UnevaluatedStringHasUDL = isUnevaluated() && !UDSuffix.empty();
+ if (UDSuffixBuf != UDSuffix || UnevaluatedStringHasUDL) {
if (Diags) {
SourceLocation TokLoc = StringToks[i].getLocation();
- Diags->Report(TokLoc, diag::err_string_concat_mixed_suffix)
- << UDSuffixBuf << UDSuffix
- << SourceRange(UDSuffixTokLoc, UDSuffixTokLoc)
- << SourceRange(TokLoc, TokLoc);
+ if (UnevaluatedStringHasUDL) {
+ Diags->Report(TokLoc, diag::err_unevaluated_string_udl)
+ << SourceRange(TokLoc, TokLoc);
+ } else {
+ Diags->Report(TokLoc, diag::err_string_concat_mixed_suffix)
+ << UDSuffixBuf << UDSuffix
+ << SourceRange(UDSuffixTokLoc, UDSuffixTokLoc);
+ }
}
hadError = true;
}
@@ -2043,8 +2086,9 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
++ThisTokBuf; // skip "
// Check if this is a pascal string
- if (Features.PascalStrings && ThisTokBuf + 1 != ThisTokEnd &&
- ThisTokBuf[0] == '\\' && ThisTokBuf[1] == 'p') {
+ if (!isUnevaluated() && Features.PascalStrings &&
+ ThisTokBuf + 1 != ThisTokEnd && ThisTokBuf[0] == '\\' &&
+ ThisTokBuf[1] == 'p') {
// If the \p sequence is found in the first token, we have a pascal string
// Otherwise, if we already have a pascal string, ignore the first \p
@@ -2080,9 +2124,9 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
}
// Otherwise, this is a non-UCN escape character. Process it.
unsigned ResultChar =
- ProcessCharEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, hadError,
- FullSourceLoc(StringToks[i].getLocation(), SM),
- CharByteWidth*8, Diags, Features);
+ ProcessCharEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, hadError,
+ FullSourceLoc(StringToks[i].getLocation(), SM),
+ CharByteWidth * 8, Diags, Features, EvalMethod);
if (CharByteWidth == 4) {
// FIXME: Make the type of the result buffer correct instead of
@@ -2104,6 +2148,8 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
}
}
+ assert((!Pascal || !isUnevaluated()) &&
+ "Pascal string in unevaluated context");
if (Pascal) {
if (CharByteWidth == 4) {
// FIXME: Make the type of the result buffer correct instead of
@@ -2277,8 +2323,8 @@ unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
ByteNo -= Len;
} else {
ProcessCharEscape(SpellingStart, SpellingPtr, SpellingEnd, HadError,
- FullSourceLoc(Tok.getLocation(), SM),
- CharByteWidth*8, Diags, Features);
+ FullSourceLoc(Tok.getLocation(), SM), CharByteWidth * 8,
+ Diags, Features, StringLiteralEvalMethod::Evaluated);
--ByteNo;
}
assert(!HadError && "This method isn't valid on erroneous strings");
diff --git a/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
index ee2cca4e0814..5a1b0a918caa 100644
--- a/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
@@ -181,7 +181,7 @@ OptionalFileEntryRef ModuleMap::findHeader(
Module *M, const Module::UnresolvedHeaderDirective &Header,
SmallVectorImpl<char> &RelativePathName, bool &NeedsFramework) {
// Search for the header file within the module's home directory.
- auto *Directory = M->Directory;
+ auto Directory = M->Directory;
SmallString<128> FullPathName(Directory->getName());
auto GetFile = [&](StringRef Filename) -> OptionalFileEntryRef {
@@ -266,7 +266,8 @@ void ModuleMap::resolveHeader(Module *Mod,
<< UmbrellaMod->getFullModuleName();
else
// Record this umbrella header.
- setUmbrellaHeader(Mod, *File, Header.FileName, RelativePathName.str());
+ setUmbrellaHeaderAsWritten(Mod, *File, Header.FileName,
+ RelativePathName.str());
} else {
Module::Header H = {Header.FileName, std::string(RelativePathName.str()),
*File};
@@ -408,29 +409,27 @@ ModuleMap::findKnownHeader(const FileEntry *File) {
return Known;
}
-ModuleMap::KnownHeader
-ModuleMap::findHeaderInUmbrellaDirs(const FileEntry *File,
- SmallVectorImpl<const DirectoryEntry *> &IntermediateDirs) {
+ModuleMap::KnownHeader ModuleMap::findHeaderInUmbrellaDirs(
+ FileEntryRef File, SmallVectorImpl<DirectoryEntryRef> &IntermediateDirs) {
if (UmbrellaDirs.empty())
return {};
- const DirectoryEntry *Dir = File->getDir();
- assert(Dir && "file in no directory");
+ OptionalDirectoryEntryRef Dir = File.getDir();
// Note: as an egregious but useful hack we use the real path here, because
// frameworks moving from top-level frameworks to embedded frameworks tend
// to be symlinked from the top-level location to the embedded location,
// and we need to resolve lookups as if we had found the embedded location.
- StringRef DirName = SourceMgr.getFileManager().getCanonicalName(Dir);
+ StringRef DirName = SourceMgr.getFileManager().getCanonicalName(*Dir);
// Keep walking up the directory hierarchy, looking for a directory with
// an umbrella header.
do {
- auto KnownDir = UmbrellaDirs.find(Dir);
+ auto KnownDir = UmbrellaDirs.find(*Dir);
if (KnownDir != UmbrellaDirs.end())
return KnownHeader(KnownDir->second, NormalHeader);
- IntermediateDirs.push_back(Dir);
+ IntermediateDirs.push_back(*Dir);
// Retrieve our parent path.
DirName = llvm::sys::path::parent_path(DirName);
@@ -438,10 +437,7 @@ ModuleMap::findHeaderInUmbrellaDirs(const FileEntry *File,
break;
// Resolve the parent path to a directory entry.
- if (auto DirEntry = SourceMgr.getFileManager().getDirectory(DirName))
- Dir = *DirEntry;
- else
- Dir = nullptr;
+ Dir = SourceMgr.getFileManager().getOptionalDirectoryRef(DirName);
} while (Dir);
return {};
}
@@ -528,8 +524,9 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
// We have found a module, but we don't use it.
if (NotUsed) {
- Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module)
- << RequestingModule->getTopLevelModule()->Name << Filename;
+ Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module_indirect)
+ << RequestingModule->getTopLevelModule()->Name << Filename
+ << NotUsed->Name;
return;
}
@@ -580,7 +577,7 @@ static bool isBetterKnownHeader(const ModuleMap::KnownHeader &New,
return false;
}
-ModuleMap::KnownHeader ModuleMap::findModuleForHeader(const FileEntry *File,
+ModuleMap::KnownHeader ModuleMap::findModuleForHeader(FileEntryRef File,
bool AllowTextual,
bool AllowExcluded) {
auto MakeResult = [&](ModuleMap::KnownHeader R) -> ModuleMap::KnownHeader {
@@ -610,10 +607,10 @@ ModuleMap::KnownHeader ModuleMap::findModuleForHeader(const FileEntry *File,
}
ModuleMap::KnownHeader
-ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
+ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(FileEntryRef File) {
assert(!Headers.count(File) && "already have a module for this header");
- SmallVector<const DirectoryEntry *, 2> SkippedDirs;
+ SmallVector<DirectoryEntryRef, 2> SkippedDirs;
KnownHeader H = findHeaderInUmbrellaDirs(File, SkippedDirs);
if (H) {
Module *Result = H.getModule();
@@ -621,7 +618,7 @@ ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
// Search up the module stack until we find a module with an umbrella
// directory.
Module *UmbrellaModule = Result;
- while (!UmbrellaModule->getUmbrellaDir() && UmbrellaModule->Parent)
+ while (!UmbrellaModule->getEffectiveUmbrellaDir() && UmbrellaModule->Parent)
UmbrellaModule = UmbrellaModule->Parent;
if (UmbrellaModule->InferSubmodules) {
@@ -633,11 +630,11 @@ ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
// the actual header is located.
bool Explicit = UmbrellaModule->InferExplicitSubmodules;
- for (const DirectoryEntry *SkippedDir : llvm::reverse(SkippedDirs)) {
+ for (DirectoryEntryRef SkippedDir : llvm::reverse(SkippedDirs)) {
// Find or create the module that corresponds to this directory name.
SmallString<32> NameBuf;
StringRef Name = sanitizeFilenameAsIdentifier(
- llvm::sys::path::stem(SkippedDir->getName()), NameBuf);
+ llvm::sys::path::stem(SkippedDir.getName()), NameBuf);
Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
Explicit).first;
InferredModuleAllowedBy[Result] = UmbrellaModuleMap;
@@ -655,7 +652,7 @@ ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
// Infer a submodule with the same name as this header file.
SmallString<32> NameBuf;
StringRef Name = sanitizeFilenameAsIdentifier(
- llvm::sys::path::stem(File->getName()), NameBuf);
+ llvm::sys::path::stem(File.getName()), NameBuf);
Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
Explicit).first;
InferredModuleAllowedBy[Result] = UmbrellaModuleMap;
@@ -682,7 +679,7 @@ ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
}
ArrayRef<ModuleMap::KnownHeader>
-ModuleMap::findAllModulesForHeader(const FileEntry *File) {
+ModuleMap::findAllModulesForHeader(FileEntryRef File) {
HeadersMap::iterator Known = findKnownHeader(File);
if (Known != Headers.end())
return Known->second;
@@ -703,13 +700,12 @@ ModuleMap::findResolvedModulesForHeader(const FileEntry *File) const {
return It->second;
}
-bool ModuleMap::isHeaderInUnavailableModule(const FileEntry *Header) const {
+bool ModuleMap::isHeaderInUnavailableModule(FileEntryRef Header) const {
return isHeaderUnavailableInModule(Header, nullptr);
}
-bool
-ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
- const Module *RequestingModule) const {
+bool ModuleMap::isHeaderUnavailableInModule(
+ FileEntryRef Header, const Module *RequestingModule) const {
resolveHeaderDirectives(Header);
HeadersMap::const_iterator Known = Headers.find(Header);
if (Known != Headers.end()) {
@@ -737,8 +733,8 @@ ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
return true;
}
- const DirectoryEntry *Dir = Header->getDir();
- SmallVector<const DirectoryEntry *, 2> SkippedDirs;
+ OptionalDirectoryEntryRef Dir = Header.getDir();
+ SmallVector<DirectoryEntryRef, 2> SkippedDirs;
StringRef DirName = Dir->getName();
auto IsUnavailable = [&](const Module *M) {
@@ -749,8 +745,7 @@ ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
// Keep walking up the directory hierarchy, looking for a directory with
// an umbrella header.
do {
- llvm::DenseMap<const DirectoryEntry *, Module *>::const_iterator KnownDir
- = UmbrellaDirs.find(Dir);
+ auto KnownDir = UmbrellaDirs.find(*Dir);
if (KnownDir != UmbrellaDirs.end()) {
Module *Found = KnownDir->second;
if (IsUnavailable(Found))
@@ -759,15 +754,16 @@ ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
// Search up the module stack until we find a module with an umbrella
// directory.
Module *UmbrellaModule = Found;
- while (!UmbrellaModule->getUmbrellaDir() && UmbrellaModule->Parent)
+ while (!UmbrellaModule->getEffectiveUmbrellaDir() &&
+ UmbrellaModule->Parent)
UmbrellaModule = UmbrellaModule->Parent;
if (UmbrellaModule->InferSubmodules) {
- for (const DirectoryEntry *SkippedDir : llvm::reverse(SkippedDirs)) {
+ for (DirectoryEntryRef SkippedDir : llvm::reverse(SkippedDirs)) {
// Find or create the module that corresponds to this directory name.
SmallString<32> NameBuf;
StringRef Name = sanitizeFilenameAsIdentifier(
- llvm::sys::path::stem(SkippedDir->getName()), NameBuf);
+ llvm::sys::path::stem(SkippedDir.getName()), NameBuf);
Found = lookupModuleQualified(Name, Found);
if (!Found)
return false;
@@ -778,7 +774,7 @@ ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
// Infer a submodule with the same name as this header file.
SmallString<32> NameBuf;
StringRef Name = sanitizeFilenameAsIdentifier(
- llvm::sys::path::stem(Header->getName()),
+ llvm::sys::path::stem(Header.getName()),
NameBuf);
Found = lookupModuleQualified(Name, Found);
if (!Found)
@@ -788,7 +784,7 @@ ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
return IsUnavailable(Found);
}
- SkippedDirs.push_back(Dir);
+ SkippedDirs.push_back(*Dir);
// Retrieve our parent path.
DirName = llvm::sys::path::parent_path(DirName);
@@ -796,10 +792,7 @@ ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
break;
// Resolve the parent path to a directory entry.
- if (auto DirEntry = SourceMgr.getFileManager().getDirectory(DirName))
- Dir = *DirEntry;
- else
- Dir = nullptr;
+ Dir = SourceMgr.getFileManager().getOptionalDirectoryRef(DirName);
} while (Dir);
return false;
@@ -854,7 +847,7 @@ Module *ModuleMap::createGlobalModuleFragmentForModuleUnit(SourceLocation Loc,
Module *Parent) {
auto *Result = new Module("<global>", Loc, Parent, /*IsFramework*/ false,
/*IsExplicit*/ true, NumCreatedModules++);
- Result->Kind = Module::GlobalModuleFragment;
+ Result->Kind = Module::ExplicitGlobalModuleFragment;
// If the created module isn't owned by a parent, send it to PendingSubmodules
// to wait for its parent.
if (!Result->Parent)
@@ -862,6 +855,21 @@ Module *ModuleMap::createGlobalModuleFragmentForModuleUnit(SourceLocation Loc,
return Result;
}
+Module *ModuleMap::createImplicitGlobalModuleFragmentForModuleUnit(
+ SourceLocation Loc, bool IsExported, Module *Parent) {
+ assert(Parent && "We should only create an implicit global module fragment "
+ "in a module purview");
+ // Note: Here the `IsExplicit` parameter refers to the semantics in clang
+ // modules. All the non-explicit submodules in clang modules will be exported
+ // too. Here we simplify the implementation by using the concept.
+ auto *Result = new Module(IsExported ? "<exported implicit global>"
+ : "<implicit global>",
+ Loc, Parent, /*IsFramework*/ false,
+ /*IsExplicit*/ !IsExported, NumCreatedModules++);
+ Result->Kind = Module::ImplicitGlobalModuleFragment;
+ return Result;
+}
+
Module *
ModuleMap::createPrivateModuleFragmentForInterfaceUnit(Module *Parent,
SourceLocation Loc) {
@@ -872,23 +880,30 @@ ModuleMap::createPrivateModuleFragmentForInterfaceUnit(Module *Parent,
return Result;
}
-Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
- StringRef Name) {
- assert(LangOpts.CurrentModule == Name && "module name mismatch");
- assert(!Modules[Name] && "redefining existing module");
-
+Module *ModuleMap::createModuleUnitWithKind(SourceLocation Loc, StringRef Name,
+ Module::ModuleKind Kind) {
auto *Result =
new Module(Name, Loc, nullptr, /*IsFramework*/ false,
/*IsExplicit*/ false, NumCreatedModules++);
- Result->Kind = Module::ModuleInterfaceUnit;
- Modules[Name] = SourceModule = Result;
+ Result->Kind = Kind;
- // Reparent the current global module fragment as a submodule of this module.
+ // Reparent any current global module fragment as a submodule of this module.
for (auto &Submodule : PendingSubmodules) {
Submodule->setParent(Result);
Submodule.release(); // now owned by parent
}
PendingSubmodules.clear();
+ return Result;
+}
+
+Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
+ StringRef Name) {
+ assert(LangOpts.CurrentModule == Name && "module name mismatch");
+ assert(!Modules[Name] && "redefining existing module");
+
+ auto *Result =
+ createModuleUnitWithKind(Loc, Name, Module::ModuleInterfaceUnit);
+ Modules[Name] = SourceModule = Result;
// Mark the main source file as being within the newly-created module so that
// declarations and macros are properly visibility-restricted to it.
@@ -899,6 +914,30 @@ Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
return Result;
}
+Module *ModuleMap::createModuleForImplementationUnit(SourceLocation Loc,
+ StringRef Name) {
+ assert(LangOpts.CurrentModule == Name && "module name mismatch");
+ // The interface for this implementation must exist and be loaded.
+ assert(Modules[Name] && Modules[Name]->Kind == Module::ModuleInterfaceUnit &&
+ "creating implementation module without an interface");
+
+ // Create an entry in the modules map to own the implementation unit module.
+ // User module names must not start with a period (so that this cannot clash
+ // with any legal user-defined module name).
+ StringRef IName = ".ImplementationUnit";
+ assert(!Modules[IName] && "multiple implementation units?");
+
+ auto *Result =
+ createModuleUnitWithKind(Loc, Name, Module::ModuleImplementationUnit);
+ Modules[IName] = SourceModule = Result;
+
+ // Check that the main file is present.
+ assert(SourceMgr.getFileEntryForID(SourceMgr.getMainFileID()) &&
+ "no input file for module implementation");
+
+ return Result;
+}
+
Module *ModuleMap::createHeaderUnit(SourceLocation Loc, StringRef Name,
Module::Header H) {
assert(LangOpts.CurrentModule == Name && "module name mismatch");
@@ -914,37 +953,23 @@ Module *ModuleMap::createHeaderUnit(SourceLocation Loc, StringRef Name,
/// For a framework module, infer the framework against which we
/// should link.
-static void inferFrameworkLink(Module *Mod, const DirectoryEntry *FrameworkDir,
- FileManager &FileMgr) {
+static void inferFrameworkLink(Module *Mod) {
assert(Mod->IsFramework && "Can only infer linking for framework modules");
assert(!Mod->isSubFramework() &&
"Can only infer linking for top-level frameworks");
- SmallString<128> LibName;
- LibName += FrameworkDir->getName();
- llvm::sys::path::append(LibName, Mod->Name);
-
- // The library name of a framework has more than one possible extension since
- // the introduction of the text-based dynamic library format. We need to check
- // for both before we give up.
- for (const char *extension : {"", ".tbd"}) {
- llvm::sys::path::replace_extension(LibName, extension);
- if (FileMgr.getFile(LibName)) {
- Mod->LinkLibraries.push_back(Module::LinkLibrary(Mod->Name,
- /*IsFramework=*/true));
- return;
- }
- }
+ Mod->LinkLibraries.push_back(Module::LinkLibrary(Mod->Name,
+ /*IsFramework=*/true));
}
-Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
+Module *ModuleMap::inferFrameworkModule(DirectoryEntryRef FrameworkDir,
bool IsSystem, Module *Parent) {
Attributes Attrs;
Attrs.IsSystem = IsSystem;
return inferFrameworkModule(FrameworkDir, Attrs, Parent);
}
-Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
+Module *ModuleMap::inferFrameworkModule(DirectoryEntryRef FrameworkDir,
Attributes Attrs, Module *Parent) {
// Note: as an egregious but useful hack we use the real path here, because
// we might be looking at an embedded framework that symlinks out to a
@@ -975,7 +1000,7 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
if (llvm::sys::path::has_parent_path(FrameworkDirName)) {
// Figure out the parent path.
StringRef Parent = llvm::sys::path::parent_path(FrameworkDirName);
- if (auto ParentDir = FileMgr.getDirectory(Parent)) {
+ if (auto ParentDir = FileMgr.getOptionalDirectoryRef(Parent)) {
// Check whether we have already looked into the parent directory
// for a module map.
llvm::DenseMap<const DirectoryEntry *, InferredDirectory>::const_iterator
@@ -984,9 +1009,9 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
// We haven't looked here before. Load a module map, if there is
// one.
bool IsFrameworkDir = Parent.endswith(".framework");
- if (const FileEntry *ModMapFile =
- HeaderInfo.lookupModuleMapFile(*ParentDir, IsFrameworkDir)) {
- parseModuleMapFile(ModMapFile, Attrs.IsSystem, *ParentDir);
+ if (OptionalFileEntryRef ModMapFile =
+ HeaderInfo.lookupModuleMapFile(*ParentDir, IsFrameworkDir)) {
+ parseModuleMapFile(*ModMapFile, Attrs.IsSystem, *ParentDir);
inferred = InferredDirectories.find(*ParentDir);
}
@@ -1022,7 +1047,7 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
}
// Look for an umbrella header.
- SmallString<128> UmbrellaName = StringRef(FrameworkDir->getName());
+ SmallString<128> UmbrellaName = FrameworkDir.getName();
llvm::sys::path::append(UmbrellaName, "Headers", ModuleName + ".h");
auto UmbrellaHeader = FileMgr.getOptionalFileRef(UmbrellaName);
@@ -1056,7 +1081,8 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
RelativePath = llvm::sys::path::relative_path(RelativePath);
// umbrella header "umbrella-header-name"
- setUmbrellaHeader(Result, *UmbrellaHeader, ModuleName + ".h", RelativePath);
+ setUmbrellaHeaderAsWritten(Result, *UmbrellaHeader, ModuleName + ".h",
+ RelativePath);
// export *
Result->Exports.push_back(Module::ExportDecl(nullptr, true));
@@ -1067,8 +1093,7 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
// Look for subframeworks.
std::error_code EC;
- SmallString<128> SubframeworksDirName
- = StringRef(FrameworkDir->getName());
+ SmallString<128> SubframeworksDirName = FrameworkDir.getName();
llvm::sys::path::append(SubframeworksDirName, "Frameworks");
llvm::sys::path::native(SubframeworksDirName);
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
@@ -1079,8 +1104,7 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
if (!StringRef(Dir->path()).endswith(".framework"))
continue;
- if (auto SubframeworkDir =
- FileMgr.getDirectory(Dir->path())) {
+ if (auto SubframeworkDir = FileMgr.getOptionalDirectoryRef(Dir->path())) {
// Note: as an egregious but useful hack, we use the real path here and
// check whether it is actually a subdirectory of the parent directory.
// This will not be the case if the 'subframework' is actually a symlink
@@ -1113,9 +1137,8 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
// If the module is a top-level framework, automatically link against the
// framework.
- if (!Result->isSubFramework()) {
- inferFrameworkLink(Result, FrameworkDir, FileMgr);
- }
+ if (!Result->isSubFramework())
+ inferFrameworkLink(Result);
return Result;
}
@@ -1135,11 +1158,11 @@ Module *ModuleMap::createShadowedModule(StringRef Name, bool IsFramework,
return Result;
}
-void ModuleMap::setUmbrellaHeader(
+void ModuleMap::setUmbrellaHeaderAsWritten(
Module *Mod, FileEntryRef UmbrellaHeader, const Twine &NameAsWritten,
const Twine &PathRelativeToRootModuleDirectory) {
Headers[UmbrellaHeader].push_back(KnownHeader(Mod, NormalHeader));
- Mod->Umbrella = &UmbrellaHeader.getMapEntry();
+ Mod->Umbrella = UmbrellaHeader;
Mod->UmbrellaAsWritten = NameAsWritten.str();
Mod->UmbrellaRelativeToRootModuleDirectory =
PathRelativeToRootModuleDirectory.str();
@@ -1147,12 +1170,12 @@ void ModuleMap::setUmbrellaHeader(
// Notify callbacks that we just added a new header.
for (const auto &Cb : Callbacks)
- Cb->moduleMapAddUmbrellaHeader(&SourceMgr.getFileManager(), UmbrellaHeader);
+ Cb->moduleMapAddUmbrellaHeader(UmbrellaHeader);
}
-void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir,
- const Twine &NameAsWritten,
- const Twine &PathRelativeToRootModuleDirectory) {
+void ModuleMap::setUmbrellaDirAsWritten(
+ Module *Mod, DirectoryEntryRef UmbrellaDir, const Twine &NameAsWritten,
+ const Twine &PathRelativeToRootModuleDirectory) {
Mod->Umbrella = UmbrellaDir;
Mod->UmbrellaAsWritten = NameAsWritten.str();
Mod->UmbrellaRelativeToRootModuleDirectory =
@@ -1256,7 +1279,7 @@ void ModuleMap::addHeader(Module *Mod, Module::Header Header,
// Notify callbacks that we just added a new header.
for (const auto &Cb : Callbacks)
- Cb->moduleMapAddHeader(Header.Entry->getName());
+ Cb->moduleMapAddHeader(Header.Entry.getName());
}
OptionalFileEntryRef
@@ -1296,24 +1319,14 @@ ModuleMap::canonicalizeModuleMapPath(SmallVectorImpl<char> &Path) {
}
FileManager &FM = SourceMgr.getFileManager();
- auto DirEntry = FM.getDirectory(Dir.empty() ? "." : Dir);
+ auto DirEntry = FM.getDirectoryRef(Dir.empty() ? "." : Dir);
if (!DirEntry)
- return DirEntry.getError();
+ return llvm::errorToErrorCode(DirEntry.takeError());
// Canonicalize the directory.
StringRef CanonicalDir = FM.getCanonicalName(*DirEntry);
- if (CanonicalDir != Dir) {
- auto CanonicalDirEntry = FM.getDirectory(CanonicalDir);
- // Only use the canonicalized path if it resolves to the same entry as the
- // original. This is not true if there's a VFS overlay on top of a FS where
- // the directory is a symlink. The overlay would not remap the target path
- // of the symlink to the same directory entry in that case.
- if (CanonicalDirEntry && *CanonicalDirEntry == *DirEntry) {
- bool Done = llvm::sys::path::replace_path_prefix(Path, Dir, CanonicalDir);
- (void)Done;
- assert(Done && "Path should always start with Dir");
- }
- }
+ if (CanonicalDir != Dir)
+ llvm::sys::path::replace_path_prefix(Path, Dir, CanonicalDir);
// In theory, the filename component should also be canonicalized if it
// on a case-insensitive filesystem. However, the extra canonicalization is
@@ -1485,7 +1498,7 @@ namespace clang {
/// The directory that file names in this module map file should
/// be resolved relative to.
- const DirectoryEntry *Directory;
+ DirectoryEntryRef Directory;
/// Whether this module map is in a system header directory.
bool IsSystem;
@@ -1551,7 +1564,7 @@ namespace clang {
explicit ModuleMapParser(Lexer &L, SourceManager &SourceMgr,
const TargetInfo *Target, DiagnosticsEngine &Diags,
ModuleMap &Map, const FileEntry *ModuleMapFile,
- const DirectoryEntry *Directory, bool IsSystem)
+ DirectoryEntryRef Directory, bool IsSystem)
: L(L), SourceMgr(SourceMgr), Target(Target), Diags(Diags), Map(Map),
ModuleMapFile(ModuleMapFile), Directory(Directory),
IsSystem(IsSystem) {
@@ -2003,10 +2016,28 @@ void ModuleMapParser::parseModuleDecl() {
Module *ShadowingModule = nullptr;
if (Module *Existing = Map.lookupModuleQualified(ModuleName, ActiveModule)) {
// We might see a (re)definition of a module that we already have a
- // definition for in two cases:
+ // definition for in four cases:
// - If we loaded one definition from an AST file and we've just found a
// corresponding definition in a module map file, or
- bool LoadedFromASTFile = Existing->DefinitionLoc.isInvalid();
+ bool LoadedFromASTFile = Existing->IsFromModuleFile;
+ // - If we previously inferred this module from different module map file.
+ bool Inferred = Existing->IsInferred;
+ // - If we're building a framework that vends a module map, we might've
+ // previously seen the one in intermediate products and now the system
+ // one.
+ // FIXME: If we're parsing module map file that looks like this:
+ // framework module FW { ... }
+ // module FW.Sub { ... }
+ // We can't check the framework qualifier, since it's not attached to
+ // the definition of Sub. Checking that qualifier on \c Existing is
+ // not correct either, since we might've previously seen:
+ // module FW { ... }
+ // module FW.Sub { ... }
+ // We should enforce consistency of redefinitions so that we can rely
+ // that \c Existing is part of a framework iff the redefinition of FW
+ // we have just skipped had it too. Once we do that, stop checking
+ // the local framework qualifier and only rely on \c Existing.
+ bool PartOfFramework = Framework || Existing->isPartOfFramework();
// - If we're building a (preprocessed) module and we've just loaded the
// module map file from which it was created.
bool ParsedAsMainInput =
@@ -2014,7 +2045,8 @@ void ModuleMapParser::parseModuleDecl() {
Map.LangOpts.CurrentModule == ModuleName &&
SourceMgr.getDecomposedLoc(ModuleNameLoc).first !=
SourceMgr.getDecomposedLoc(Existing->DefinitionLoc).first;
- if (!ActiveModule && (LoadedFromASTFile || ParsedAsMainInput)) {
+ if (LoadedFromASTFile || Inferred || PartOfFramework || ParsedAsMainInput) {
+ ActiveModule = PreviousActiveModule;
// Skip the module definition.
skipUntil(MMToken::RBrace);
if (Tok.is(MMToken::RBrace))
@@ -2169,9 +2201,8 @@ void ModuleMapParser::parseModuleDecl() {
// If the active module is a top-level framework, and there are no link
// libraries, automatically link against the framework.
if (ActiveModule->IsFramework && !ActiveModule->isSubFramework() &&
- ActiveModule->LinkLibraries.empty()) {
- inferFrameworkLink(ActiveModule, Directory, SourceMgr.getFileManager());
- }
+ ActiveModule->LinkLibraries.empty())
+ inferFrameworkLink(ActiveModule);
// If the module meets all requirements but is still unavailable, mark the
// whole tree as unavailable to prevent it from building.
@@ -2222,16 +2253,16 @@ void ModuleMapParser::parseExternModuleDecl() {
StringRef FileNameRef = FileName;
SmallString<128> ModuleMapFileName;
if (llvm::sys::path::is_relative(FileNameRef)) {
- ModuleMapFileName += Directory->getName();
+ ModuleMapFileName += Directory.getName();
llvm::sys::path::append(ModuleMapFileName, FileName);
FileNameRef = ModuleMapFileName;
}
- if (auto File = SourceMgr.getFileManager().getFile(FileNameRef))
+ if (auto File = SourceMgr.getFileManager().getOptionalFileRef(FileNameRef))
Map.parseModuleMapFile(
*File, IsSystem,
Map.HeaderInfo.getHeaderSearchOpts().ModuleMapFileHomeIsCwd
? Directory
- : (*File)->getDir(),
+ : File->getDir(),
FileID(), nullptr, ExternLoc);
}
@@ -2444,7 +2475,7 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
bool NeedsFramework = false;
Map.addUnresolvedHeader(ActiveModule, std::move(Header), NeedsFramework);
- if (NeedsFramework && ActiveModule)
+ if (NeedsFramework)
Diags.Report(CurrModuleDeclLoc, diag::note_mmap_add_framework_keyword)
<< ActiveModule->getFullModuleName()
<< FixItHint::CreateReplacement(CurrModuleDeclLoc, "framework module");
@@ -2481,16 +2512,14 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
}
// Look for this file.
- const DirectoryEntry *Dir = nullptr;
+ OptionalDirectoryEntryRef Dir;
if (llvm::sys::path::is_absolute(DirName)) {
- if (auto D = SourceMgr.getFileManager().getDirectory(DirName))
- Dir = *D;
+ Dir = SourceMgr.getFileManager().getOptionalDirectoryRef(DirName);
} else {
SmallString<128> PathName;
- PathName = Directory->getName();
+ PathName = Directory.getName();
llvm::sys::path::append(PathName, DirName);
- if (auto D = SourceMgr.getFileManager().getDirectory(PathName))
- Dir = *D;
+ Dir = SourceMgr.getFileManager().getOptionalDirectoryRef(PathName);
}
if (!Dir) {
@@ -2511,7 +2540,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
for (llvm::vfs::recursive_directory_iterator I(FS, Dir->getName(), EC), E;
I != E && !EC; I.increment(EC)) {
if (auto FE = SourceMgr.getFileManager().getOptionalFileRef(I->path())) {
- Module::Header Header = {"", std::string(I->path()), FE};
+ Module::Header Header = {"", std::string(I->path()), *FE};
Headers.push_back(std::move(Header));
}
}
@@ -2524,7 +2553,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
return;
}
- if (Module *OwningModule = Map.UmbrellaDirs[Dir]) {
+ if (Module *OwningModule = Map.UmbrellaDirs[*Dir]) {
Diags.Report(UmbrellaLoc, diag::err_mmap_umbrella_clash)
<< OwningModule->getFullModuleName();
HadError = true;
@@ -2532,7 +2561,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
}
// Record this umbrella directory.
- Map.setUmbrellaDir(ActiveModule, Dir, DirNameAsWritten, DirName);
+ Map.setUmbrellaDirAsWritten(ActiveModule, *Dir, DirNameAsWritten, DirName);
}
/// Parse a module export declaration.
@@ -2796,7 +2825,7 @@ void ModuleMapParser::parseInferredModuleDecl(bool Framework, bool Explicit) {
if (ActiveModule) {
// Inferred modules must have umbrella directories.
if (!Failed && ActiveModule->IsAvailable &&
- !ActiveModule->getUmbrellaDir()) {
+ !ActiveModule->getEffectiveUmbrellaDir()) {
Diags.Report(StarLoc, diag::err_mmap_inferred_no_umbrella);
Failed = true;
}
@@ -3050,7 +3079,7 @@ bool ModuleMapParser::parseModuleMapFile() {
}
bool ModuleMap::parseModuleMapFile(const FileEntry *File, bool IsSystem,
- const DirectoryEntry *Dir, FileID ID,
+ DirectoryEntryRef Dir, FileID ID,
unsigned *Offset,
SourceLocation ExternModuleLoc) {
assert(Target && "Missing target information");
diff --git a/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
index 6ae513dea878..f133a50dd2ab 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
@@ -109,52 +109,76 @@ enum PPElifDiag {
PED_Elifndef
};
+static bool isFeatureTestMacro(StringRef MacroName) {
+ // list from:
+ // * https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_macros.html
+ // * https://docs.microsoft.com/en-us/cpp/c-runtime-library/security-features-in-the-crt?view=msvc-160
+ // * man 7 feature_test_macros
+ // The list must be sorted for correct binary search.
+ static constexpr StringRef ReservedMacro[] = {
+ "_ATFILE_SOURCE",
+ "_BSD_SOURCE",
+ "_CRT_NONSTDC_NO_WARNINGS",
+ "_CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES",
+ "_CRT_SECURE_NO_WARNINGS",
+ "_FILE_OFFSET_BITS",
+ "_FORTIFY_SOURCE",
+ "_GLIBCXX_ASSERTIONS",
+ "_GLIBCXX_CONCEPT_CHECKS",
+ "_GLIBCXX_DEBUG",
+ "_GLIBCXX_DEBUG_PEDANTIC",
+ "_GLIBCXX_PARALLEL",
+ "_GLIBCXX_PARALLEL_ASSERTIONS",
+ "_GLIBCXX_SANITIZE_VECTOR",
+ "_GLIBCXX_USE_CXX11_ABI",
+ "_GLIBCXX_USE_DEPRECATED",
+ "_GNU_SOURCE",
+ "_ISOC11_SOURCE",
+ "_ISOC95_SOURCE",
+ "_ISOC99_SOURCE",
+ "_LARGEFILE64_SOURCE",
+ "_POSIX_C_SOURCE",
+ "_REENTRANT",
+ "_SVID_SOURCE",
+ "_THREAD_SAFE",
+ "_XOPEN_SOURCE",
+ "_XOPEN_SOURCE_EXTENDED",
+ "__STDCPP_WANT_MATH_SPEC_FUNCS__",
+ "__STDC_FORMAT_MACROS",
+ };
+ return std::binary_search(std::begin(ReservedMacro), std::end(ReservedMacro),
+ MacroName);
+}
+
+static bool isLanguageDefinedBuiltin(const SourceManager &SourceMgr,
+ const MacroInfo *MI,
+ const StringRef MacroName) {
+ // If this is a macro with special handling (like __LINE__) then it's language
+ // defined.
+ if (MI->isBuiltinMacro())
+ return true;
+ // Builtin macros are defined in the builtin file
+ if (!SourceMgr.isWrittenInBuiltinFile(MI->getDefinitionLoc()))
+ return false;
+ // C defines macros starting with __STDC, and C++ defines macros starting with
+ // __STDCPP
+ if (MacroName.startswith("__STDC"))
+ return true;
+ // C++ defines the __cplusplus macro
+ if (MacroName == "__cplusplus")
+ return true;
+ // C++ defines various feature-test macros starting with __cpp
+ if (MacroName.startswith("__cpp"))
+ return true;
+ // Anything else isn't language-defined
+ return false;
+}
+
static MacroDiag shouldWarnOnMacroDef(Preprocessor &PP, IdentifierInfo *II) {
const LangOptions &Lang = PP.getLangOpts();
- if (isReservedInAllContexts(II->isReserved(Lang))) {
- // list from:
- // - https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_macros.html
- // - https://docs.microsoft.com/en-us/cpp/c-runtime-library/security-features-in-the-crt?view=msvc-160
- // - man 7 feature_test_macros
- // The list must be sorted for correct binary search.
- static constexpr StringRef ReservedMacro[] = {
- "_ATFILE_SOURCE",
- "_BSD_SOURCE",
- "_CRT_NONSTDC_NO_WARNINGS",
- "_CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES",
- "_CRT_SECURE_NO_WARNINGS",
- "_FILE_OFFSET_BITS",
- "_FORTIFY_SOURCE",
- "_GLIBCXX_ASSERTIONS",
- "_GLIBCXX_CONCEPT_CHECKS",
- "_GLIBCXX_DEBUG",
- "_GLIBCXX_DEBUG_PEDANTIC",
- "_GLIBCXX_PARALLEL",
- "_GLIBCXX_PARALLEL_ASSERTIONS",
- "_GLIBCXX_SANITIZE_VECTOR",
- "_GLIBCXX_USE_CXX11_ABI",
- "_GLIBCXX_USE_DEPRECATED",
- "_GNU_SOURCE",
- "_ISOC11_SOURCE",
- "_ISOC95_SOURCE",
- "_ISOC99_SOURCE",
- "_LARGEFILE64_SOURCE",
- "_POSIX_C_SOURCE",
- "_REENTRANT",
- "_SVID_SOURCE",
- "_THREAD_SAFE",
- "_XOPEN_SOURCE",
- "_XOPEN_SOURCE_EXTENDED",
- "__STDCPP_WANT_MATH_SPEC_FUNCS__",
- "__STDC_FORMAT_MACROS",
- };
- if (std::binary_search(std::begin(ReservedMacro), std::end(ReservedMacro),
- II->getName()))
- return MD_NoWarn;
-
- return MD_ReservedMacro;
- }
StringRef Text = II->getName();
+ if (isReservedInAllContexts(II->isReserved(Lang)))
+ return isFeatureTestMacro(Text) ? MD_NoWarn : MD_ReservedMacro;
if (II->isKeyword(Lang))
return MD_KeywordDef;
if (Lang.CPlusPlus11 && (Text.equals("override") || Text.equals("final")))
@@ -319,15 +343,6 @@ bool Preprocessor::CheckMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
return Diag(MacroNameTok, diag::err_defined_macro_name);
}
- if (isDefineUndef == MU_Undef) {
- auto *MI = getMacroInfo(II);
- if (MI && MI->isBuiltinMacro()) {
- // Warn if undefining "__LINE__" and other builtins, per C99 6.10.8/4
- // and C++ [cpp.predefined]p4], but allow it as an extension.
- Diag(MacroNameTok, diag::ext_pp_undef_builtin_macro);
- }
- }
-
// If defining/undefining reserved identifier or a keyword, we need to issue
// a warning.
SourceLocation MacroNameLoc = MacroNameTok.getLocation();
@@ -434,7 +449,7 @@ void Preprocessor::SuggestTypoedDirective(const Token &Tok,
std::vector<StringRef> Candidates = {
"if", "ifdef", "ifndef", "elif", "else", "endif"
};
- if (LangOpts.C2x || LangOpts.CPlusPlus2b)
+ if (LangOpts.C2x || LangOpts.CPlusPlus23)
Candidates.insert(Candidates.end(), {"elifdef", "elifndef"});
if (std::optional<StringRef> Sugg = findSimilarStr(Directive, Candidates)) {
@@ -745,12 +760,12 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
if (!CondInfo.WasSkipping)
SkippingRangeState.endLexPass(Hashptr);
- // Warn if using `#elifdef` & `#elifndef` in not C2x & C++2b mode even
+ // Warn if using `#elifdef` & `#elifndef` in not C2x & C++23 mode even
// if this branch is in a skipping block.
unsigned DiagID;
if (LangOpts.CPlusPlus)
- DiagID = LangOpts.CPlusPlus2b ? diag::warn_cxx2b_compat_pp_directive
- : diag::ext_cxx2b_pp_directive;
+ DiagID = LangOpts.CPlusPlus23 ? diag::warn_cxx23_compat_pp_directive
+ : diag::ext_cxx23_pp_directive;
else
DiagID = LangOpts.C2x ? diag::warn_c2x_compat_pp_directive
: diag::ext_c2x_pp_directive;
@@ -842,10 +857,10 @@ Module *Preprocessor::getModuleForLocation(SourceLocation Loc,
// Try to determine the module of the include directive.
// FIXME: Look into directly passing the FileEntry from LookupFile instead.
FileID IDOfIncl = SourceMgr.getFileID(SourceMgr.getExpansionLoc(Loc));
- if (const FileEntry *EntryOfIncl = SourceMgr.getFileEntryForID(IDOfIncl)) {
+ if (auto EntryOfIncl = SourceMgr.getFileEntryRefForID(IDOfIncl)) {
// The include comes from an included file.
return HeaderInfo.getModuleMap()
- .findModuleForHeader(EntryOfIncl, AllowTextual)
+ .findModuleForHeader(*EntryOfIncl, AllowTextual)
.getModule();
}
}
@@ -870,7 +885,7 @@ Preprocessor::getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
auto &SM = getSourceManager();
while (!Loc.isInvalid() && !SM.isInMainFile(Loc)) {
auto ID = SM.getFileID(SM.getExpansionLoc(Loc));
- auto *FE = SM.getFileEntryForID(ID);
+ auto FE = SM.getFileEntryRefForID(ID);
if (!FE)
break;
@@ -880,7 +895,7 @@ Preprocessor::getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
SourceMgr.isInSystemHeader(Loc));
bool InPrivateHeader = false;
- for (auto Header : HeaderInfo.findAllModulesForHeader(FE)) {
+ for (auto Header : HeaderInfo.findAllModulesForHeader(*FE)) {
if (!Header.isAccessibleFrom(IncM)) {
// It's in a private header; we can't #include it.
// FIXME: If there's a public header in some module that re-exports it,
@@ -902,14 +917,13 @@ Preprocessor::getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
// If we have a module import syntax, we shouldn't include a header to
// make a particular module visible. Let the caller know they should
// suggest an import instead.
- if (getLangOpts().ObjC || getLangOpts().CPlusPlusModules ||
- getLangOpts().ModulesTS)
+ if (getLangOpts().ObjC || getLangOpts().CPlusPlusModules)
return nullptr;
// If this is an accessible, non-textual header of M's top-level module
// that transitively includes the given location and makes the
// corresponding module visible, this is the thing to #include.
- return FE;
+ return *FE;
}
// FIXME: If we're bailing out due to a private header, we shouldn't suggest
@@ -920,8 +934,8 @@ Preprocessor::getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
// If the header is includable and has an include guard, assume the
// intended way to expose its contents is by #include, not by importing a
// module that transitively includes it.
- if (getHeaderSearchInfo().isFileMultipleIncludeGuarded(FE))
- return FE;
+ if (getHeaderSearchInfo().isFileMultipleIncludeGuarded(*FE))
+ return *FE;
Loc = SM.getIncludeLoc(ID);
}
@@ -945,12 +959,11 @@ OptionalFileEntryRef Preprocessor::LookupFile(
// If the header lookup mechanism may be relative to the current inclusion
// stack, record the parent #includes.
- SmallVector<std::pair<const FileEntry *, const DirectoryEntry *>, 16>
- Includers;
+ SmallVector<std::pair<const FileEntry *, DirectoryEntryRef>, 16> Includers;
bool BuildSystemModule = false;
if (!FromDir && !FromFile) {
FileID FID = getCurrentFileLexer()->getFileID();
- const FileEntry *FileEnt = SourceMgr.getFileEntryForID(FID);
+ OptionalFileEntryRef FileEnt = SourceMgr.getFileEntryRefForID(FID);
// If there is no file entry associated with this file, it must be the
// predefines buffer or the module includes buffer. Any other file is not
@@ -966,13 +979,15 @@ OptionalFileEntryRef Preprocessor::LookupFile(
// map file.
if (!FileEnt) {
if (FID == SourceMgr.getMainFileID() && MainFileDir) {
- Includers.push_back(std::make_pair(nullptr, MainFileDir));
+ Includers.push_back(std::make_pair(nullptr, *MainFileDir));
BuildSystemModule = getCurrentModule()->IsSystem;
- } else if ((FileEnt =
- SourceMgr.getFileEntryForID(SourceMgr.getMainFileID())))
- Includers.push_back(std::make_pair(FileEnt, *FileMgr.getDirectory(".")));
+ } else if ((FileEnt = SourceMgr.getFileEntryRefForID(
+ SourceMgr.getMainFileID()))) {
+ auto CWD = FileMgr.getOptionalDirectoryRef(".");
+ Includers.push_back(std::make_pair(*FileEnt, *CWD));
+ }
} else {
- Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
+ Includers.push_back(std::make_pair(*FileEnt, FileEnt->getDir()));
}
// MSVC searches the current include stack from top to bottom for
@@ -982,7 +997,7 @@ OptionalFileEntryRef Preprocessor::LookupFile(
for (IncludeStackInfo &ISEntry : llvm::reverse(IncludeMacroStack)) {
if (IsFileLexer(ISEntry))
if ((FileEnt = ISEntry.ThePPLexer->getFileEntry()))
- Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
+ Includers.push_back(std::make_pair(*FileEnt, FileEnt->getDir()));
}
}
}
@@ -1178,6 +1193,10 @@ void Preprocessor::HandleDirective(Token &Result) {
switch (Result.getKind()) {
case tok::eod:
+ // Ignore the null directive with regards to the multiple-include
+ // optimization, i.e. allow the null directive to appear outside of the
+ // include guard and still enable the multiple-include optimization.
+ CurPPLexer->MIOpt.SetReadToken(ReadAnyTokensBeforeDirective);
return; // null directive.
case tok::code_completion:
setCodeCompletionReached();
@@ -1186,8 +1205,12 @@ void Preprocessor::HandleDirective(Token &Result) {
CurPPLexer->getConditionalStackDepth() > 0);
return;
case tok::numeric_constant: // # 7 GNU line marker directive.
- if (getLangOpts().AsmPreprocessor)
- break; // # 4 is not a preprocessor directive in .S files.
+ // In a .S file "# 4" may be a comment so don't treat it as a preprocessor
+ // directive. However do permit it in the predefines file, as we use line
+ // markers to mark the builtin macros as being in a system header.
+ if (getLangOpts().AsmPreprocessor &&
+ SourceMgr.getFileID(SavedHash.getLocation()) != getPredefinesFileID())
+ break;
return HandleDigitDirective(Result);
default:
IdentifierInfo *II = Result.getIdentifierInfo();
@@ -1249,10 +1272,10 @@ void Preprocessor::HandleDirective(Token &Result) {
case tok::pp_warning:
if (LangOpts.CPlusPlus)
- Diag(Result, LangOpts.CPlusPlus2b
- ? diag::warn_cxx2b_compat_warning_directive
+ Diag(Result, LangOpts.CPlusPlus23
+ ? diag::warn_cxx23_compat_warning_directive
: diag::ext_pp_warning_directive)
- << /*C++2b*/ 1;
+ << /*C++23*/ 1;
else
Diag(Result, LangOpts.C2x ? diag::warn_c2x_compat_warning_directive
: diag::ext_pp_warning_directive)
@@ -2640,7 +2663,7 @@ bool Preprocessor::ReadMacroParameterList(MacroInfo *MI, Token &Tok) {
SmallVector<IdentifierInfo*, 32> Parameters;
while (true) {
- LexUnexpandedToken(Tok);
+ LexUnexpandedNonComment(Tok);
switch (Tok.getKind()) {
case tok::r_paren:
// Found the end of the parameter list.
@@ -2661,7 +2684,7 @@ bool Preprocessor::ReadMacroParameterList(MacroInfo *MI, Token &Tok) {
}
// Lex the token after the identifier.
- LexUnexpandedToken(Tok);
+ LexUnexpandedNonComment(Tok);
if (Tok.isNot(tok::r_paren)) {
Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
return true;
@@ -2695,7 +2718,7 @@ bool Preprocessor::ReadMacroParameterList(MacroInfo *MI, Token &Tok) {
Parameters.push_back(II);
// Lex the token after the identifier.
- LexUnexpandedToken(Tok);
+ LexUnexpandedNonComment(Tok);
switch (Tok.getKind()) {
default: // #define X(A B
@@ -2711,7 +2734,7 @@ bool Preprocessor::ReadMacroParameterList(MacroInfo *MI, Token &Tok) {
Diag(Tok, diag::ext_named_variadic_macro);
// Lex the token after the identifier.
- LexUnexpandedToken(Tok);
+ LexUnexpandedNonComment(Tok);
if (Tok.isNot(tok::r_paren)) {
Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
return true;
@@ -3005,6 +3028,12 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
MI->setTokens(Tokens, BP);
return MI;
}
+
+static bool isObjCProtectedMacro(const IdentifierInfo *II) {
+ return II->isStr("__strong") || II->isStr("__weak") ||
+ II->isStr("__unsafe_unretained") || II->isStr("__autoreleasing");
+}
+
/// HandleDefineDirective - Implements \#define. This consumes the entire macro
/// line then lets the caller lex the next real token.
void Preprocessor::HandleDefineDirective(
@@ -3076,15 +3105,9 @@ void Preprocessor::HandleDefineDirective(
// In Objective-C, ignore attempts to directly redefine the builtin
// definitions of the ownership qualifiers. It's still possible to
// #undef them.
- auto isObjCProtectedMacro = [](const IdentifierInfo *II) -> bool {
- return II->isStr("__strong") ||
- II->isStr("__weak") ||
- II->isStr("__unsafe_unretained") ||
- II->isStr("__autoreleasing");
- };
- if (getLangOpts().ObjC &&
- SourceMgr.getFileID(OtherMI->getDefinitionLoc())
- == getPredefinesFileID() &&
+ if (getLangOpts().ObjC &&
+ SourceMgr.getFileID(OtherMI->getDefinitionLoc()) ==
+ getPredefinesFileID() &&
isObjCProtectedMacro(MacroNameTok.getIdentifierInfo())) {
// Warn if it changes the tokens.
if ((!getDiagnostics().getSuppressSystemWarnings() ||
@@ -3108,7 +3131,7 @@ void Preprocessor::HandleDefineDirective(
// Warn if defining "__LINE__" and other builtins, per C99 6.10.8/4 and
// C++ [cpp.predefined]p4, but allow it as an extension.
- if (OtherMI->isBuiltinMacro())
+ if (isLanguageDefinedBuiltin(SourceMgr, OtherMI, II->getName()))
Diag(MacroNameTok, diag::ext_pp_redef_builtin_macro);
// Macros must be identical. This means all tokens and whitespace
// separation must be the same. C99 6.10.3p2.
@@ -3188,6 +3211,11 @@ void Preprocessor::HandleUndefDirective() {
if (!MI->isUsed() && MI->isWarnIfUnused())
Diag(MI->getDefinitionLoc(), diag::pp_macro_not_used);
+ // Warn if undefining "__LINE__" and other builtins, per C99 6.10.8/4 and
+ // C++ [cpp.predefined]p4, but allow it as an extension.
+ if (isLanguageDefinedBuiltin(SourceMgr, MI, II->getName()))
+ Diag(MacroNameTok, diag::ext_pp_undef_builtin_macro);
+
if (MI->isWarnIfUnused())
WarnUnusedMacroLocs.erase(MI->getDefinitionLoc());
@@ -3416,14 +3444,14 @@ void Preprocessor::HandleElifFamilyDirective(Token &ElifToken,
: PED_Elifndef;
++NumElse;
- // Warn if using `#elifdef` & `#elifndef` in not C2x & C++2b mode.
+ // Warn if using `#elifdef` & `#elifndef` in not C2x & C++23 mode.
switch (DirKind) {
case PED_Elifdef:
case PED_Elifndef:
unsigned DiagID;
if (LangOpts.CPlusPlus)
- DiagID = LangOpts.CPlusPlus2b ? diag::warn_cxx2b_compat_pp_directive
- : diag::ext_cxx2b_pp_directive;
+ DiagID = LangOpts.CPlusPlus23 ? diag::warn_cxx23_compat_pp_directive
+ : diag::ext_cxx23_pp_directive;
else
DiagID = LangOpts.C2x ? diag::warn_c2x_compat_pp_directive
: diag::ext_c2x_pp_directive;
diff --git a/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp b/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
index aa411cfc5f2c..7c41dd510d2d 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
@@ -44,7 +44,7 @@ namespace {
/// conditional and the source range covered by it.
class PPValue {
SourceRange Range;
- IdentifierInfo *II;
+ IdentifierInfo *II = nullptr;
public:
llvm::APSInt Val;
@@ -323,13 +323,13 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
PP.Diag(PeekTok, diag::ext_c99_longlong);
}
- // 'z/uz' literals are a C++2b feature.
+ // 'z/uz' literals are a C++23 feature.
if (Literal.isSizeT)
PP.Diag(PeekTok, PP.getLangOpts().CPlusPlus
- ? PP.getLangOpts().CPlusPlus2b
+ ? PP.getLangOpts().CPlusPlus23
? diag::warn_cxx20_compat_size_t_suffix
- : diag::ext_cxx2b_size_t_suffix
- : diag::err_cxx2b_size_t_suffix);
+ : diag::ext_cxx23_size_t_suffix
+ : diag::err_cxx23_size_t_suffix);
// 'wb/uwb' literals are a C2x feature. We explicitly do not support the
// suffix in C++ as an extension because a library-based UDL that resolves
diff --git a/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
index 66168467ecf5..ab005381adfa 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
@@ -223,18 +223,17 @@ void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks,
/// Compute the relative path that names the given file relative to
/// the given directory.
static void computeRelativePath(FileManager &FM, const DirectoryEntry *Dir,
- const FileEntry *File,
- SmallString<128> &Result) {
+ FileEntryRef File, SmallString<128> &Result) {
Result.clear();
- StringRef FilePath = File->getDir()->getName();
+ StringRef FilePath = File.getDir().getName();
StringRef Path = FilePath;
while (!Path.empty()) {
if (auto CurDir = FM.getDirectory(Path)) {
if (*CurDir == Dir) {
Result = FilePath.substr(Path.size());
llvm::sys::path::append(Result,
- llvm::sys::path::filename(File->getName()));
+ llvm::sys::path::filename(File.getName()));
return;
}
}
@@ -242,7 +241,7 @@ static void computeRelativePath(FileManager &FM, const DirectoryEntry *Dir,
Path = llvm::sys::path::parent_path(Path);
}
- Result = File->getName();
+ Result = File.getName();
}
void Preprocessor::PropagateLineStartLeadingSpaceInfo(Token &Result) {
@@ -282,23 +281,24 @@ const char *Preprocessor::getCurLexerEndPos() {
static void collectAllSubModulesWithUmbrellaHeader(
const Module &Mod, SmallVectorImpl<const Module *> &SubMods) {
- if (Mod.getUmbrellaHeader())
+ if (Mod.getUmbrellaHeaderAsWritten())
SubMods.push_back(&Mod);
for (auto *M : Mod.submodules())
collectAllSubModulesWithUmbrellaHeader(*M, SubMods);
}
void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
- const Module::Header &UmbrellaHeader = Mod.getUmbrellaHeader();
- assert(UmbrellaHeader.Entry && "Module must use umbrella header");
- const FileID &File = SourceMgr.translateFile(UmbrellaHeader.Entry);
+ std::optional<Module::Header> UmbrellaHeader =
+ Mod.getUmbrellaHeaderAsWritten();
+ assert(UmbrellaHeader && "Module must use umbrella header");
+ const FileID &File = SourceMgr.translateFile(UmbrellaHeader->Entry);
SourceLocation ExpectedHeadersLoc = SourceMgr.getLocForEndOfFile(File);
if (getDiagnostics().isIgnored(diag::warn_uncovered_module_header,
ExpectedHeadersLoc))
return;
ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap();
- const DirectoryEntry *Dir = Mod.getUmbrellaDir().Entry;
+ OptionalDirectoryEntryRef Dir = Mod.getEffectiveUmbrellaDir();
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
std::error_code EC;
for (llvm::vfs::recursive_directory_iterator Entry(FS, Dir->getName(), EC),
@@ -313,12 +313,12 @@ void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
.Default(false))
continue;
- if (auto Header = getFileManager().getFile(Entry->path()))
+ if (auto Header = getFileManager().getOptionalFileRef(Entry->path()))
if (!getSourceManager().hasFileInfo(*Header)) {
if (!ModMap.isHeaderInUnavailableModule(*Header)) {
// Find the relative path that would access this header.
SmallString<128> RelativePath;
- computeRelativePath(FileMgr, Dir, *Header, RelativePath);
+ computeRelativePath(FileMgr, *Dir, *Header, RelativePath);
Diag(ExpectedHeadersLoc, diag::warn_uncovered_module_header)
<< Mod.getFullModuleName() << RelativePath;
}
@@ -333,6 +333,15 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
assert(!CurTokenLexer &&
"Ending a file when currently in a macro!");
+ SourceLocation UnclosedSafeBufferOptOutLoc;
+
+ if (IncludeMacroStack.empty() &&
+ isPPInSafeBufferOptOutRegion(UnclosedSafeBufferOptOutLoc)) {
+ // To warn if a "-Wunsafe-buffer-usage" opt-out region is still open by the
+ // end of a file.
+ Diag(UnclosedSafeBufferOptOutLoc,
+ diag::err_pp_unclosed_pragma_unsafe_buffer_usage);
+ }
// If we have an unclosed module region from a pragma at the end of a
// module, complain and close it now.
const bool LeavingSubmodule = CurLexer && CurLexerSubmodule;
@@ -526,13 +535,19 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
return LeavingSubmodule;
}
}
-
// If this is the end of the main file, form an EOF token.
assert(CurLexer && "Got EOF but no current lexer set!");
const char *EndPos = getCurLexerEndPos();
Result.startToken();
CurLexer->BufferPtr = EndPos;
- CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
+
+ if (isIncrementalProcessingEnabled()) {
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::annot_repl_input_end);
+ Result.setAnnotationEndLoc(Result.getLocation());
+ Result.setAnnotationValue(nullptr);
+ } else {
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
+ }
if (isCodeCompletionEnabled()) {
// Inserting the code-completion point increases the source buffer by 1,
diff --git a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
index 76d0d53ed31d..71d38e59707a 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
@@ -87,7 +87,7 @@ void Preprocessor::appendMacroDirective(IdentifierInfo *II, MacroDirective *MD){
// Set up the identifier as having associated macro history.
II->setHasMacroDefinition(true);
- if (!MD->isDefined() && LeafModuleMacros.find(II) == LeafModuleMacros.end())
+ if (!MD->isDefined() && !LeafModuleMacros.contains(II))
II->setHasMacroDefinition(false);
if (II->isFromAST())
II->setChangedSinceDeserialization();
@@ -125,7 +125,7 @@ void Preprocessor::setLoadedMacroDirective(IdentifierInfo *II,
// Setup the identifier as having associated macro history.
II->setHasMacroDefinition(true);
- if (!MD->isDefined() && LeafModuleMacros.find(II) == LeafModuleMacros.end())
+ if (!MD->isDefined() && !LeafModuleMacros.contains(II))
II->setHasMacroDefinition(false);
}
@@ -1559,17 +1559,11 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// __FILE_NAME__ is a Clang-specific extension that expands to the
// the last part of __FILE__.
if (II == Ident__FILE_NAME__) {
- // Try to get the last path component, failing that return the original
- // presumed location.
- StringRef PLFileName = llvm::sys::path::filename(PLoc.getFilename());
- if (PLFileName != "")
- FN += PLFileName;
- else
- FN += PLoc.getFilename();
+ processPathToFileName(FN, PLoc, getLangOpts(), getTargetInfo());
} else {
FN += PLoc.getFilename();
+ processPathForFileMacro(FN, getLangOpts(), getTargetInfo());
}
- processPathForFileMacro(FN, getLangOpts(), getTargetInfo());
Lexer::Stringify(FN);
OS << '"' << FN << '"';
}
@@ -1875,7 +1869,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
if (!Tok.isAnnotation() && Tok.getIdentifierInfo())
Tok.setKind(tok::identifier);
else if (Tok.is(tok::string_literal) && !Tok.hasUDSuffix()) {
- StringLiteralParser Literal(Tok, *this);
+ StringLiteralParser Literal(Tok, *this,
+ StringLiteralEvalMethod::Unevaluated);
if (Literal.hadError)
return;
@@ -1974,3 +1969,16 @@ void Preprocessor::processPathForFileMacro(SmallVectorImpl<char> &Path,
llvm::sys::path::remove_dots(Path, false, llvm::sys::path::Style::posix);
}
}
+
+void Preprocessor::processPathToFileName(SmallVectorImpl<char> &FileName,
+ const PresumedLoc &PLoc,
+ const LangOptions &LangOpts,
+ const TargetInfo &TI) {
+ // Try to get the last path component, failing that return the original
+ // presumed location.
+ StringRef PLFileName = llvm::sys::path::filename(PLoc.getFilename());
+ if (PLFileName.empty())
+ PLFileName = PLoc.getFilename();
+ FileName.append(PLFileName.begin(), PLFileName.end());
+ processPathForFileMacro(FileName, LangOpts, TI);
+}
diff --git a/contrib/llvm-project/clang/lib/Lex/Pragma.cpp b/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
index 4da9d1603770..85543ed3f987 100644
--- a/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
@@ -262,17 +262,48 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
SourceLocation RParenLoc = Tok.getLocation();
bool Invalid = false;
- std::string StrVal = getSpelling(StrTok, &Invalid);
+ SmallString<64> StrVal;
+ StrVal.resize(StrTok.getLength());
+ StringRef StrValRef = getSpelling(StrTok, StrVal, &Invalid);
if (Invalid) {
Diag(PragmaLoc, diag::err__Pragma_malformed);
return;
}
- // The _Pragma is lexically sound. Destringize according to C11 6.10.9.1:
- // "The string literal is destringized by deleting any encoding prefix,
- // deleting the leading and trailing double-quotes, replacing each escape
- // sequence \" by a double-quote, and replacing each escape sequence \\ by a
- // single backslash."
+ assert(StrValRef.size() <= StrVal.size());
+
+ // If the token was spelled somewhere else, copy it.
+ if (StrValRef.begin() != StrVal.begin())
+ StrVal.assign(StrValRef);
+ // Truncate if necessary.
+ else if (StrValRef.size() != StrVal.size())
+ StrVal.resize(StrValRef.size());
+
+ // The _Pragma is lexically sound. Destringize according to C11 6.10.9.1.
+ prepare_PragmaString(StrVal);
+
+ // Plop the string (including the newline and trailing null) into a buffer
+ // where we can lex it.
+ Token TmpTok;
+ TmpTok.startToken();
+ CreateString(StrVal, TmpTok);
+ SourceLocation TokLoc = TmpTok.getLocation();
+
+ // Make and enter a lexer object so that we lex and expand the tokens just
+ // like any others.
+ Lexer *TL = Lexer::Create_PragmaLexer(TokLoc, PragmaLoc, RParenLoc,
+ StrVal.size(), *this);
+
+ EnterSourceFileWithLexer(TL, nullptr);
+
+ // With everything set up, lex this as a #pragma directive.
+ HandlePragmaDirective({PIK__Pragma, PragmaLoc});
+
+ // Finally, return whatever came after the pragma directive.
+ return Lex(Tok);
+}
+
+void clang::prepare_PragmaString(SmallVectorImpl<char> &StrVal) {
if (StrVal[0] == 'L' || StrVal[0] == 'U' ||
(StrVal[0] == 'u' && StrVal[1] != '8'))
StrVal.erase(StrVal.begin());
@@ -296,8 +327,8 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
// Remove 'R " d-char-sequence' and 'd-char-sequence "'. We'll replace the
// parens below.
- StrVal.erase(0, 2 + NumDChars);
- StrVal.erase(StrVal.size() - 1 - NumDChars);
+ StrVal.erase(StrVal.begin(), StrVal.begin() + 2 + NumDChars);
+ StrVal.erase(StrVal.end() - 1 - NumDChars, StrVal.end());
} else {
assert(StrVal[0] == '"' && StrVal[StrVal.size()-1] == '"' &&
"Invalid string token!");
@@ -319,27 +350,7 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
StrVal[0] = ' ';
// Replace the terminating quote with a \n.
- StrVal[StrVal.size()-1] = '\n';
-
- // Plop the string (including the newline and trailing null) into a buffer
- // where we can lex it.
- Token TmpTok;
- TmpTok.startToken();
- CreateString(StrVal, TmpTok);
- SourceLocation TokLoc = TmpTok.getLocation();
-
- // Make and enter a lexer object so that we lex and expand the tokens just
- // like any others.
- Lexer *TL = Lexer::Create_PragmaLexer(TokLoc, PragmaLoc, RParenLoc,
- StrVal.size(), *this);
-
- EnterSourceFileWithLexer(TL, nullptr);
-
- // With everything set up, lex this as a #pragma directive.
- HandlePragmaDirective({PIK__Pragma, PragmaLoc});
-
- // Finally, return whatever came after the pragma directive.
- return Lex(Tok);
+ StrVal[StrVal.size() - 1] = '\n';
}
/// HandleMicrosoft__pragma - Like Handle_Pragma except the pragma text
@@ -1066,28 +1077,19 @@ struct PragmaDebugHandler : public PragmaHandler {
PP.EnterToken(Crasher, /*IsReinject*/ false);
}
} else if (II->isStr("dump")) {
- Token Identifier;
- PP.LexUnexpandedToken(Identifier);
- if (auto *DumpII = Identifier.getIdentifierInfo()) {
- Token DumpAnnot;
- DumpAnnot.startToken();
- DumpAnnot.setKind(tok::annot_pragma_dump);
- DumpAnnot.setAnnotationRange(
- SourceRange(Tok.getLocation(), Identifier.getLocation()));
- DumpAnnot.setAnnotationValue(DumpII);
- PP.DiscardUntilEndOfDirective();
- PP.EnterToken(DumpAnnot, /*IsReinject*/false);
- } else {
- PP.Diag(Identifier, diag::warn_pragma_debug_missing_argument)
- << II->getName();
- }
+ Token DumpAnnot;
+ DumpAnnot.startToken();
+ DumpAnnot.setKind(tok::annot_pragma_dump);
+ DumpAnnot.setAnnotationRange(SourceRange(Tok.getLocation()));
+ PP.EnterToken(DumpAnnot, /*IsReinject*/false);
} else if (II->isStr("diag_mapping")) {
Token DiagName;
PP.LexUnexpandedToken(DiagName);
if (DiagName.is(tok::eod))
PP.getDiagnostics().dump();
else if (DiagName.is(tok::string_literal) && !DiagName.hasUDSuffix()) {
- StringLiteralParser Literal(DiagName, PP);
+ StringLiteralParser Literal(DiagName, PP,
+ StringLiteralEvalMethod::Unevaluated);
if (Literal.hadError)
return;
PP.getDiagnostics().dump(Literal.GetString());
@@ -1243,6 +1245,32 @@ struct PragmaDebugHandler : public PragmaHandler {
#endif
};
+struct PragmaUnsafeBufferUsageHandler : public PragmaHandler {
+ PragmaUnsafeBufferUsageHandler() : PragmaHandler("unsafe_buffer_usage") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &FirstToken) override {
+ Token Tok;
+
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok, diag::err_pp_pragma_unsafe_buffer_usage_syntax);
+ return;
+ }
+
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SourceLocation Loc = Tok.getLocation();
+
+ if (II->isStr("begin")) {
+ if (PP.enterOrExitSafeBufferOptOutRegion(true, Loc))
+ PP.Diag(Loc, diag::err_pp_double_begin_pragma_unsafe_buffer_usage);
+ } else if (II->isStr("end")) {
+ if (PP.enterOrExitSafeBufferOptOutRegion(false, Loc))
+ PP.Diag(Loc, diag::err_pp_unmatched_end_begin_pragma_unsafe_buffer_usage);
+ } else
+ PP.Diag(Tok, diag::err_pp_pragma_unsafe_buffer_usage_syntax);
+ }
+};
+
/// PragmaDiagnosticHandler - e.g. '\#pragma GCC diagnostic ignored "-Wformat"'
struct PragmaDiagnosticHandler : public PragmaHandler {
private:
@@ -1264,16 +1292,26 @@ public:
IdentifierInfo *II = Tok.getIdentifierInfo();
PPCallbacks *Callbacks = PP.getPPCallbacks();
+ // Get the next token, which is either an EOD or a string literal. We lex
+ // it now so that we can early return if the previous token was push or pop.
+ PP.LexUnexpandedToken(Tok);
+
if (II->isStr("pop")) {
if (!PP.getDiagnostics().popMappings(DiagLoc))
PP.Diag(Tok, diag::warn_pragma_diagnostic_cannot_pop);
else if (Callbacks)
Callbacks->PragmaDiagnosticPop(DiagLoc, Namespace);
+
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_diagnostic_invalid_token);
return;
} else if (II->isStr("push")) {
PP.getDiagnostics().pushMappings(DiagLoc);
if (Callbacks)
Callbacks->PragmaDiagnosticPush(DiagLoc, Namespace);
+
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_diagnostic_invalid_token);
return;
}
@@ -1289,9 +1327,8 @@ public:
return;
}
- PP.LexUnexpandedToken(Tok);
+ // At this point, we expect a string literal.
SourceLocation StringLoc = Tok.getLocation();
-
std::string WarningName;
if (!PP.FinishLexStringLiteral(Tok, WarningName, "pragma diagnostic",
/*AllowMacroExpansion=*/false))
@@ -2128,6 +2165,9 @@ void Preprocessor::RegisterBuiltinPragmas() {
ModuleHandler->AddPragma(new PragmaModuleBuildHandler());
ModuleHandler->AddPragma(new PragmaModuleLoadHandler());
+ // Safe Buffers pragmas
+ AddPragmaHandler("clang", new PragmaUnsafeBufferUsageHandler);
+
// Add region pragmas.
AddPragmaHandler(new PragmaRegionHandler("region"));
AddPragmaHandler(new PragmaRegionHandler("endregion"));
diff --git a/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp b/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp
index 85eb57f61611..aab6a2bed89d 100644
--- a/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp
@@ -381,12 +381,7 @@ PreprocessingRecord::getLoadedPreprocessedEntity(unsigned Index) {
MacroDefinitionRecord *
PreprocessingRecord::findMacroDefinition(const MacroInfo *MI) {
- llvm::DenseMap<const MacroInfo *, MacroDefinitionRecord *>::iterator Pos =
- MacroDefinitions.find(MI);
- if (Pos == MacroDefinitions.end())
- return nullptr;
-
- return Pos->second;
+ return MacroDefinitions.lookup(MI);
}
void PreprocessingRecord::addMacroExpansion(const Token &Id,
diff --git a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
index 0d411abf8f1c..8de78a13930e 100644
--- a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
@@ -860,7 +860,7 @@ bool Preprocessor::HandleIdentifier(Token &Identifier) {
// keyword when we're in a caching lexer, because caching lexers only get
// used in contexts where import declarations are disallowed.
//
- // Likewise if this is the C++ Modules TS import keyword.
+ // Likewise if this is the standard C++ import keyword.
if (((LastTokenWasAt && II.isModulesImport()) ||
Identifier.is(tok::kw_import)) &&
!InMacroArgs && !DisableMacroExpansion &&
@@ -1274,7 +1274,7 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
// If we're expecting a '.' or a ';', and we got a '.', then wait until we
// see the next identifier. (We can also see a '[[' that begins an
- // attribute-specifier-seq here under the C++ Modules TS.)
+ // attribute-specifier-seq here under the Standard C++ Modules.)
if (!ModuleImportExpectsIdentifier && Result.getKind() == tok::period) {
ModuleImportExpectsIdentifier = true;
CurLexerKind = CLK_LexAfterModuleImport;
@@ -1299,12 +1299,12 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
SemiLoc = Suffix.back().getLocation();
}
- // Under the Modules TS, the dot is just part of the module name, and not
- // a real hierarchy separator. Flatten such module names now.
+ // Under the standard C++ Modules, the dot is just part of the module name,
+ // and not a real hierarchy separator. Flatten such module names now.
//
// FIXME: Is this the right level to be performing this transformation?
std::string FlatModuleName;
- if (getLangOpts().ModulesTS || getLangOpts().CPlusPlusModules) {
+ if (getLangOpts().CPlusPlusModules) {
for (auto &Piece : NamedModuleImportPath) {
// If the FlatModuleName ends with colon, it implies it is a partition.
if (!FlatModuleName.empty() && FlatModuleName.back() != ':')
@@ -1486,6 +1486,75 @@ void Preprocessor::emitFinalMacroWarning(const Token &Identifier,
Diag(*A.FinalAnnotationLoc, diag::note_pp_macro_annotation) << 2;
}
+bool Preprocessor::isSafeBufferOptOut(const SourceManager &SourceMgr,
+ const SourceLocation &Loc) const {
+ // Try to find a region in `SafeBufferOptOutMap` where `Loc` is in:
+ auto FirstRegionEndingAfterLoc = llvm::partition_point(
+ SafeBufferOptOutMap,
+ [&SourceMgr,
+ &Loc](const std::pair<SourceLocation, SourceLocation> &Region) {
+ return SourceMgr.isBeforeInTranslationUnit(Region.second, Loc);
+ });
+
+ if (FirstRegionEndingAfterLoc != SafeBufferOptOutMap.end()) {
+ // To test if the start location of the found region precedes `Loc`:
+ return SourceMgr.isBeforeInTranslationUnit(FirstRegionEndingAfterLoc->first,
+ Loc);
+ }
+ // If we do not find a region whose end location passes `Loc`, we want to
+ // check if the current region is still open:
+ if (!SafeBufferOptOutMap.empty() &&
+ SafeBufferOptOutMap.back().first == SafeBufferOptOutMap.back().second)
+ return SourceMgr.isBeforeInTranslationUnit(SafeBufferOptOutMap.back().first,
+ Loc);
+ return false;
+}
+
+bool Preprocessor::enterOrExitSafeBufferOptOutRegion(
+ bool isEnter, const SourceLocation &Loc) {
+ if (isEnter) {
+ if (isPPInSafeBufferOptOutRegion())
+ return true; // invalid enter action
+ InSafeBufferOptOutRegion = true;
+ CurrentSafeBufferOptOutStart = Loc;
+
+ // To set the start location of a new region:
+
+ if (!SafeBufferOptOutMap.empty()) {
+ [[maybe_unused]] auto *PrevRegion = &SafeBufferOptOutMap.back();
+ assert(PrevRegion->first != PrevRegion->second &&
+ "Shall not begin a safe buffer opt-out region before closing the "
+ "previous one.");
+ }
+  // If the start location equals the end location, we call the region an
+  // open region or an unclosed region (i.e., the end location has not been
+  // set yet).
+ SafeBufferOptOutMap.emplace_back(Loc, Loc);
+ } else {
+ if (!isPPInSafeBufferOptOutRegion())
+      return true; // invalid exit action
+ InSafeBufferOptOutRegion = false;
+
+ // To set the end location of the current open region:
+
+ assert(!SafeBufferOptOutMap.empty() &&
+ "Misordered safe buffer opt-out regions");
+ auto *CurrRegion = &SafeBufferOptOutMap.back();
+ assert(CurrRegion->first == CurrRegion->second &&
+ "Set end location to a closed safe buffer opt-out region");
+ CurrRegion->second = Loc;
+ }
+ return false;
+}
+
+bool Preprocessor::isPPInSafeBufferOptOutRegion() {
+ return InSafeBufferOptOutRegion;
+}
+bool Preprocessor::isPPInSafeBufferOptOutRegion(SourceLocation &StartLoc) {
+ StartLoc = CurrentSafeBufferOptOutStart;
+ return InSafeBufferOptOutRegion;
+}
+
ModuleLoader::~ModuleLoader() = default;
CommentHandler::~CommentHandler() = default;
diff --git a/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp b/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
index ebe7dd66c118..856d5682727f 100644
--- a/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
@@ -500,8 +500,7 @@ void TokenLexer::ExpandFunctionArguments() {
// the first token in a __VA_OPT__ after a ##, delete the ##.
assert(VCtx.isInVAOpt() && "should only happen inside a __VA_OPT__");
VCtx.hasPlaceholderAfterHashhashAtStart();
- }
- if (RParenAfter)
+ } else if (RParenAfter)
VCtx.hasPlaceholderBeforeRParen();
}
continue;
@@ -567,7 +566,7 @@ void TokenLexer::ExpandFunctionArguments() {
continue;
}
- if (RParenAfter)
+ if (RParenAfter && !NonEmptyPasteBefore)
VCtx.hasPlaceholderBeforeRParen();
// If this is on the RHS of a paste operator, we've already copied the
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp b/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp
index f442b6213836..77ab3b556da5 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp
@@ -18,6 +18,7 @@
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/Sema/TemplateInstCallback.h"
@@ -172,27 +173,6 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
for (Decl *D : S.WeakTopLevelDecls())
Consumer->HandleTopLevelDecl(DeclGroupRef(D));
- // For C++20 modules, the codegen for module initializers needs to be altered
- // and to be able to use a name based on the module name.
-
- // At this point, we should know if we are building a non-header C++20 module.
- if (S.getLangOpts().CPlusPlusModules) {
- // If we are building the module from source, then the top level module
- // will be here.
- Module *CodegenModule = S.getCurrentModule();
- bool Interface = true;
- if (CodegenModule)
- // We only use module initializers for importable module (including
- // partition implementation units).
- Interface = S.currentModuleIsInterface();
- else if (S.getLangOpts().isCompilingModuleInterface())
- // If we are building the module from a PCM file, then the module can be
- // found here.
- CodegenModule = S.getPreprocessor().getCurrentModule();
-
- if (Interface && CodegenModule)
- S.getASTContext().setModuleForCodeGen(CodegenModule);
- }
Consumer->HandleTranslationUnit(S.getASTContext());
// Finalize the template instantiation observer chain.
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
index 3a7f5426d4a7..4951eb9aa280 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -10,12 +10,14 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Scope.h"
+
using namespace clang;
/// ParseCXXInlineMethodDef - We parsed and verified that the specified
@@ -748,7 +750,7 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
}
ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, nullptr,
- nullptr, SourceLocation(), ParsedAttr::AS_GNU,
+ nullptr, SourceLocation(), ParsedAttr::Form::GNU(),
nullptr);
if (HasFunScope)
@@ -757,7 +759,7 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
// If there are multiple decls, then the decl cannot be within the
// function scope.
ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, nullptr,
- nullptr, SourceLocation(), ParsedAttr::AS_GNU,
+ nullptr, SourceLocation(), ParsedAttr::Form::GNU(),
nullptr);
}
} else {
@@ -836,6 +838,7 @@ bool Parser::ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
case tok::annot_module_begin:
case tok::annot_module_end:
case tok::annot_module_include:
+ case tok::annot_repl_input_end:
// Ran out of tokens.
return false;
@@ -1242,6 +1245,7 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
case tok::annot_module_begin:
case tok::annot_module_end:
case tok::annot_module_include:
+ case tok::annot_repl_input_end:
// Ran out of tokens.
return false;
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
index e6812ac72c88..43b2a32cce71 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
@@ -21,6 +21,7 @@
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
@@ -56,6 +57,18 @@ TypeResult Parser::ParseTypeName(SourceRange *Range, DeclaratorContext Context,
if (OwnedType)
*OwnedType = DS.isTypeSpecOwned() ? DS.getRepAsDecl() : nullptr;
+ // Move declspec attributes to ParsedAttributes
+ if (Attrs) {
+ llvm::SmallVector<ParsedAttr *, 1> ToBeMoved;
+ for (ParsedAttr &AL : DS.getAttributes()) {
+ if (AL.isDeclspecAttribute())
+ ToBeMoved.push_back(&AL);
+ }
+
+ for (ParsedAttr *AL : ToBeMoved)
+ Attrs->takeOneFrom(DS.getAttributes(), AL);
+ }
+
// Parse the abstract-declarator, if present.
Declarator DeclaratorInfo(DS, ParsedAttributesView::none(), Context);
ParseDeclarator(DeclaratorInfo);
@@ -202,14 +215,14 @@ void Parser::ParseGNUAttributes(ParsedAttributes &Attrs,
if (Tok.isNot(tok::l_paren)) {
Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_GNU);
+ ParsedAttr::Form::GNU());
continue;
}
// Handle "parameterized" attributes
if (!LateAttrs || !isAttributeLateParsed(*AttrName)) {
ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, &EndLoc, nullptr,
- SourceLocation(), ParsedAttr::AS_GNU, D);
+ SourceLocation(), ParsedAttr::Form::GNU(), D);
continue;
}
@@ -335,7 +348,7 @@ void Parser::ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
ParsedAttributes &Attrs,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax) {
+ ParsedAttr::Form Form) {
BalancedDelimiterTracker Parens(*this, tok::l_paren);
Parens.consumeOpen();
@@ -352,16 +365,16 @@ void Parser::ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
if (T.isUsable())
Attrs.addNewTypeAttr(&AttrName,
SourceRange(AttrNameLoc, Parens.getCloseLocation()),
- ScopeName, ScopeLoc, T.get(), Syntax);
+ ScopeName, ScopeLoc, T.get(), Form);
else
Attrs.addNew(&AttrName, SourceRange(AttrNameLoc, Parens.getCloseLocation()),
- ScopeName, ScopeLoc, nullptr, 0, Syntax);
+ ScopeName, ScopeLoc, nullptr, 0, Form);
}
unsigned Parser::ParseAttributeArgsCommon(
IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
// Ignore the left paren location for now.
ConsumeParen();
@@ -380,7 +393,7 @@ unsigned Parser::ParseAttributeArgsCommon(
bool IsIdentifierArg = AttributeHasVariadicIdentifierArg ||
attributeHasIdentifierArg(*AttrName);
ParsedAttr::Kind AttrKind =
- ParsedAttr::getParsedKind(AttrName, ScopeName, Syntax);
+ ParsedAttr::getParsedKind(AttrName, ScopeName, Form.getSyntax());
// If we don't know how to parse this attribute, but this is the only
// token in this argument, assume it's meant to be an identifier.
@@ -481,10 +494,10 @@ unsigned Parser::ParseAttributeArgsCommon(
if (AttributeIsTypeArgAttr && !TheParsedType.get().isNull()) {
Attrs.addNewTypeAttr(AttrName, SourceRange(AttrNameLoc, RParen),
- ScopeName, ScopeLoc, TheParsedType, Syntax);
+ ScopeName, ScopeLoc, TheParsedType, Form);
} else {
Attrs.addNew(AttrName, SourceRange(AttrLoc, RParen), ScopeName, ScopeLoc,
- ArgExprs.data(), ArgExprs.size(), Syntax);
+ ArgExprs.data(), ArgExprs.size(), Form);
}
}
@@ -499,36 +512,36 @@ unsigned Parser::ParseAttributeArgsCommon(
void Parser::ParseGNUAttributeArgs(
IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D) {
+ SourceLocation ScopeLoc, ParsedAttr::Form Form, Declarator *D) {
assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
ParsedAttr::Kind AttrKind =
- ParsedAttr::getParsedKind(AttrName, ScopeName, Syntax);
+ ParsedAttr::getParsedKind(AttrName, ScopeName, Form.getSyntax());
if (AttrKind == ParsedAttr::AT_Availability) {
ParseAvailabilityAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax);
+ ScopeLoc, Form);
return;
} else if (AttrKind == ParsedAttr::AT_ExternalSourceSymbol) {
ParseExternalSourceSymbolAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
return;
} else if (AttrKind == ParsedAttr::AT_ObjCBridgeRelated) {
ParseObjCBridgeRelatedAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
return;
} else if (AttrKind == ParsedAttr::AT_SwiftNewType) {
ParseSwiftNewTypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax);
+ ScopeLoc, Form);
return;
} else if (AttrKind == ParsedAttr::AT_TypeTagForDatatype) {
ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
return;
} else if (attributeIsTypeArgAttr(*AttrName)) {
ParseAttributeWithTypeArg(*AttrName, AttrNameLoc, Attrs, ScopeName,
- ScopeLoc, Syntax);
+ ScopeLoc, Form);
return;
}
@@ -548,41 +561,41 @@ void Parser::ParseGNUAttributeArgs(
}
ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax);
+ ScopeLoc, Form);
}
unsigned Parser::ParseClangAttributeArgs(
IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
ParsedAttr::Kind AttrKind =
- ParsedAttr::getParsedKind(AttrName, ScopeName, Syntax);
+ ParsedAttr::getParsedKind(AttrName, ScopeName, Form.getSyntax());
switch (AttrKind) {
default:
return ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
case ParsedAttr::AT_ExternalSourceSymbol:
ParseExternalSourceSymbolAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
break;
case ParsedAttr::AT_Availability:
ParseAvailabilityAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax);
+ ScopeLoc, Form);
break;
case ParsedAttr::AT_ObjCBridgeRelated:
ParseObjCBridgeRelatedAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
break;
case ParsedAttr::AT_SwiftNewType:
ParseSwiftNewTypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax);
+ ScopeLoc, Form);
break;
case ParsedAttr::AT_TypeTagForDatatype:
ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
break;
}
return !Attrs.empty() ? Attrs.begin()->getNumArgs() : 0;
@@ -714,14 +727,14 @@ bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
if (!HasInvalidAccessor)
Attrs.addNewPropertyAttr(AttrName, AttrNameLoc, nullptr, SourceLocation(),
AccessorNames[AK_Get], AccessorNames[AK_Put],
- ParsedAttr::AS_Declspec);
+ ParsedAttr::Form::Declspec());
T.skipToEnd();
return !HasInvalidAccessor;
}
unsigned NumArgs =
ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, nullptr, nullptr,
- SourceLocation(), ParsedAttr::AS_Declspec);
+ SourceLocation(), ParsedAttr::Form::Declspec());
// If this attribute's args were parsed, and it was expected to have
// arguments but none were provided, emit a diagnostic.
@@ -804,7 +817,7 @@ void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs) {
if (!AttrHandled)
Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Declspec);
+ ParsedAttr::Form::Declspec());
}
T.consumeClose();
EndLoc = T.getCloseLocation();
@@ -816,7 +829,8 @@ void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs) {
void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
// Treat these like attributes
while (true) {
- switch (Tok.getKind()) {
+ auto Kind = Tok.getKind();
+ switch (Kind) {
case tok::kw___fastcall:
case tok::kw___stdcall:
case tok::kw___thiscall:
@@ -831,7 +845,7 @@ void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ Kind);
break;
}
default:
@@ -840,6 +854,22 @@ void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
}
}
+void Parser::ParseWebAssemblyFuncrefTypeAttribute(ParsedAttributes &attrs) {
+ assert(Tok.is(tok::kw___funcref));
+ SourceLocation StartLoc = Tok.getLocation();
+ if (!getTargetInfo().getTriple().isWasm()) {
+ ConsumeToken();
+ Diag(StartLoc, diag::err_wasm_funcref_not_wasm);
+ return;
+ }
+
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.addNew(AttrName, AttrNameLoc, /*ScopeName=*/nullptr,
+ /*ScopeLoc=*/SourceLocation{}, /*Args=*/nullptr, /*numArgs=*/0,
+ tok::kw___funcref);
+}
+
void Parser::DiagnoseAndSkipExtendedMicrosoftTypeAttributes() {
SourceLocation StartLoc = Tok.getLocation();
SourceLocation EndLoc = SkipExtendedMicrosoftTypeAttributes();
@@ -882,7 +912,7 @@ void Parser::ParseBorlandTypeAttributes(ParsedAttributes &attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ tok::kw___pascal);
}
}
@@ -892,7 +922,7 @@ void Parser::ParseOpenCLKernelAttributes(ParsedAttributes &attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ tok::kw___kernel);
}
}
@@ -901,7 +931,7 @@ void Parser::ParseCUDAFunctionAttributes(ParsedAttributes &attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ tok::kw___noinline__);
}
}
@@ -909,7 +939,7 @@ void Parser::ParseOpenCLQualifiers(ParsedAttributes &Attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = Tok.getLocation();
Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ Tok.getKind());
}
bool Parser::isHLSLQualifier(const Token &Tok) const {
@@ -918,15 +948,16 @@ bool Parser::isHLSLQualifier(const Token &Tok) const {
void Parser::ParseHLSLQualifiers(ParsedAttributes &Attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ auto Kind = Tok.getKind();
SourceLocation AttrNameLoc = ConsumeToken();
- Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0, Kind);
}
void Parser::ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs) {
// Treat these like attributes, even though they're type specifiers.
while (true) {
- switch (Tok.getKind()) {
+ auto Kind = Tok.getKind();
+ switch (Kind) {
case tok::kw__Nonnull:
case tok::kw__Nullable:
case tok::kw__Nullable_result:
@@ -937,7 +968,7 @@ void Parser::ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs) {
Diag(AttrNameLoc, diag::ext_nullability)
<< AttrName;
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ Kind);
break;
}
default:
@@ -1094,13 +1125,10 @@ VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
/// 'replacement' '=' <string>
/// opt-message:
/// 'message' '=' <string>
-void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
- SourceLocation AvailabilityLoc,
- ParsedAttributes &attrs,
- SourceLocation *endLoc,
- IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax) {
+void Parser::ParseAvailabilityAttribute(
+ IdentifierInfo &Availability, SourceLocation AvailabilityLoc,
+ ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
enum { Introduced, Deprecated, Obsoleted, Unknown };
AvailabilityChange Changes[Unknown];
ExprResult MessageExpr, ReplacementExpr;
@@ -1306,14 +1334,10 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
// Record this attribute
attrs.addNew(&Availability,
- SourceRange(AvailabilityLoc, T.getCloseLocation()),
- ScopeName, ScopeLoc,
- Platform,
- Changes[Introduced],
- Changes[Deprecated],
- Changes[Obsoleted],
- UnavailableLoc, MessageExpr.get(),
- Syntax, StrictLoc, ReplacementExpr.get());
+ SourceRange(AvailabilityLoc, T.getCloseLocation()), ScopeName,
+ ScopeLoc, Platform, Changes[Introduced], Changes[Deprecated],
+ Changes[Obsoleted], UnavailableLoc, MessageExpr.get(), Form,
+ StrictLoc, ReplacementExpr.get());
}
/// Parse the contents of the "external_source_symbol" attribute.
@@ -1328,11 +1352,12 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
/// keyword-arg:
/// 'language' '=' <string>
/// 'defined_in' '=' <string>
+/// 'USR' '=' <string>
/// 'generated_declaration'
void Parser::ParseExternalSourceSymbolAttribute(
IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
// Opening '('.
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.expectAndConsume())
@@ -1343,6 +1368,7 @@ void Parser::ParseExternalSourceSymbolAttribute(
Ident_language = PP.getIdentifierInfo("language");
Ident_defined_in = PP.getIdentifierInfo("defined_in");
Ident_generated_declaration = PP.getIdentifierInfo("generated_declaration");
+ Ident_USR = PP.getIdentifierInfo("USR");
}
ExprResult Language;
@@ -1350,6 +1376,8 @@ void Parser::ParseExternalSourceSymbolAttribute(
ExprResult DefinedInExpr;
bool HasDefinedIn = false;
IdentifierLoc *GeneratedDeclaration = nullptr;
+ ExprResult USR;
+ bool HasUSR = false;
// Parse the language/defined_in/generated_declaration keywords
do {
@@ -1371,7 +1399,8 @@ void Parser::ParseExternalSourceSymbolAttribute(
continue;
}
- if (Keyword != Ident_language && Keyword != Ident_defined_in) {
+ if (Keyword != Ident_language && Keyword != Ident_defined_in &&
+ Keyword != Ident_USR) {
Diag(Tok, diag::err_external_source_symbol_expected_keyword);
SkipUntil(tok::r_paren, StopAtSemi);
return;
@@ -1384,16 +1413,22 @@ void Parser::ParseExternalSourceSymbolAttribute(
return;
}
- bool HadLanguage = HasLanguage, HadDefinedIn = HasDefinedIn;
+ bool HadLanguage = HasLanguage, HadDefinedIn = HasDefinedIn,
+ HadUSR = HasUSR;
if (Keyword == Ident_language)
HasLanguage = true;
+ else if (Keyword == Ident_USR)
+ HasUSR = true;
else
HasDefinedIn = true;
if (Tok.isNot(tok::string_literal)) {
Diag(Tok, diag::err_expected_string_literal)
<< /*Source='external_source_symbol attribute'*/ 3
- << /*language | source container*/ (Keyword != Ident_language);
+ << /*language | source container | USR*/ (
+ Keyword == Ident_language
+ ? 0
+ : (Keyword == Ident_defined_in ? 1 : 2));
SkipUntil(tok::comma, tok::r_paren, StopAtSemi | StopBeforeMatch);
continue;
}
@@ -1405,6 +1440,14 @@ void Parser::ParseExternalSourceSymbolAttribute(
continue;
}
Language = ParseStringLiteralExpression();
+ } else if (Keyword == Ident_USR) {
+ if (HadUSR) {
+ Diag(KeywordLoc, diag::err_external_source_symbol_duplicate_clause)
+ << Keyword;
+ ParseStringLiteralExpression();
+ continue;
+ }
+ USR = ParseStringLiteralExpression();
} else {
assert(Keyword == Ident_defined_in && "Invalid clause keyword!");
if (HadDefinedIn) {
@@ -1423,10 +1466,10 @@ void Parser::ParseExternalSourceSymbolAttribute(
if (EndLoc)
*EndLoc = T.getCloseLocation();
- ArgsUnion Args[] = {Language.get(), DefinedInExpr.get(),
- GeneratedDeclaration};
+ ArgsUnion Args[] = {Language.get(), DefinedInExpr.get(), GeneratedDeclaration,
+ USR.get()};
Attrs.addNew(&ExternalSourceSymbol, SourceRange(Loc, T.getCloseLocation()),
- ScopeName, ScopeLoc, Args, std::size(Args), Syntax);
+ ScopeName, ScopeLoc, Args, std::size(Args), Form);
}
/// Parse the contents of the "objc_bridge_related" attribute.
@@ -1443,7 +1486,7 @@ void Parser::ParseExternalSourceSymbolAttribute(
void Parser::ParseObjCBridgeRelatedAttribute(
IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
// Opening '('.
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.consumeOpen()) {
@@ -1506,13 +1549,13 @@ void Parser::ParseObjCBridgeRelatedAttribute(
Attrs.addNew(&ObjCBridgeRelated,
SourceRange(ObjCBridgeRelatedLoc, T.getCloseLocation()),
ScopeName, ScopeLoc, RelatedClass, ClassMethod, InstanceMethod,
- Syntax);
+ Form);
}
void Parser::ParseSwiftNewTypeAttribute(
IdentifierInfo &AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
BalancedDelimiterTracker T(*this, tok::l_paren);
// Opening '('
@@ -1547,16 +1590,13 @@ void Parser::ParseSwiftNewTypeAttribute(
ArgsUnion Args[] = {SwiftType};
Attrs.addNew(&AttrName, SourceRange(AttrNameLoc, T.getCloseLocation()),
- ScopeName, ScopeLoc, Args, std::size(Args), Syntax);
+ ScopeName, ScopeLoc, Args, std::size(Args), Form);
}
-void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
- SourceLocation AttrNameLoc,
- ParsedAttributes &Attrs,
- SourceLocation *EndLoc,
- IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax) {
+void Parser::ParseTypeTagForDatatypeAttribute(
+ IdentifierInfo &AttrName, SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -1605,7 +1645,7 @@ void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
if (!T.consumeClose()) {
Attrs.addNewTypeTagForDatatype(&AttrName, AttrNameLoc, ScopeName, ScopeLoc,
ArgumentKind, MatchingCType.get(),
- LayoutCompatible, MustBeNull, Syntax);
+ LayoutCompatible, MustBeNull, Form);
}
if (EndLoc)
@@ -1653,30 +1693,43 @@ bool Parser::DiagnoseProhibitedCXX11Attribute() {
void Parser::DiagnoseMisplacedCXX11Attribute(ParsedAttributes &Attrs,
SourceLocation CorrectLocation) {
assert((Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
- Tok.is(tok::kw_alignas));
+ Tok.is(tok::kw_alignas) || Tok.isRegularKeywordAttribute());
// Consume the attributes.
+ auto Keyword =
+ Tok.isRegularKeywordAttribute() ? Tok.getIdentifierInfo() : nullptr;
SourceLocation Loc = Tok.getLocation();
ParseCXX11Attributes(Attrs);
CharSourceRange AttrRange(SourceRange(Loc, Attrs.Range.getEnd()), true);
// FIXME: use err_attributes_misplaced
- Diag(Loc, diag::err_attributes_not_allowed)
- << FixItHint::CreateInsertionFromRange(CorrectLocation, AttrRange)
- << FixItHint::CreateRemoval(AttrRange);
+ (Keyword ? Diag(Loc, diag::err_keyword_not_allowed) << Keyword
+ : Diag(Loc, diag::err_attributes_not_allowed))
+ << FixItHint::CreateInsertionFromRange(CorrectLocation, AttrRange)
+ << FixItHint::CreateRemoval(AttrRange);
}
void Parser::DiagnoseProhibitedAttributes(
- const SourceRange &Range, const SourceLocation CorrectLocation) {
+ const ParsedAttributesView &Attrs, const SourceLocation CorrectLocation) {
+ auto *FirstAttr = Attrs.empty() ? nullptr : &Attrs.front();
if (CorrectLocation.isValid()) {
- CharSourceRange AttrRange(Range, true);
- Diag(CorrectLocation, diag::err_attributes_misplaced)
+ CharSourceRange AttrRange(Attrs.Range, true);
+ (FirstAttr && FirstAttr->isRegularKeywordAttribute()
+ ? Diag(CorrectLocation, diag::err_keyword_misplaced) << FirstAttr
+ : Diag(CorrectLocation, diag::err_attributes_misplaced))
<< FixItHint::CreateInsertionFromRange(CorrectLocation, AttrRange)
<< FixItHint::CreateRemoval(AttrRange);
- } else
- Diag(Range.getBegin(), diag::err_attributes_not_allowed) << Range;
+ } else {
+ const SourceRange &Range = Attrs.Range;
+ (FirstAttr && FirstAttr->isRegularKeywordAttribute()
+ ? Diag(Range.getBegin(), diag::err_keyword_not_allowed) << FirstAttr
+ : Diag(Range.getBegin(), diag::err_attributes_not_allowed))
+ << Range;
+ }
}
-void Parser::ProhibitCXX11Attributes(ParsedAttributes &Attrs, unsigned DiagID,
+void Parser::ProhibitCXX11Attributes(ParsedAttributes &Attrs,
+ unsigned AttrDiagID,
+ unsigned KeywordDiagID,
bool DiagnoseEmptyAttrs,
bool WarnOnUnknownAttrs) {
@@ -1696,13 +1749,18 @@ void Parser::ProhibitCXX11Attributes(ParsedAttributes &Attrs, unsigned DiagID,
// The attribute range starts with [[, but is empty. So this must
// be [[]], which we are supposed to diagnose because
// DiagnoseEmptyAttrs is true.
- Diag(Attrs.Range.getBegin(), DiagID) << Attrs.Range;
+ Diag(Attrs.Range.getBegin(), AttrDiagID) << Attrs.Range;
return;
}
}
}
for (const ParsedAttr &AL : Attrs) {
+ if (AL.isRegularKeywordAttribute()) {
+ Diag(AL.getLoc(), KeywordDiagID) << AL;
+ AL.setInvalid();
+ continue;
+ }
if (!AL.isCXX11Attribute() && !AL.isC2xAttribute())
continue;
if (AL.getKind() == ParsedAttr::UnknownAttribute) {
@@ -1710,7 +1768,7 @@ void Parser::ProhibitCXX11Attributes(ParsedAttributes &Attrs, unsigned DiagID,
Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
<< AL << AL.getRange();
} else {
- Diag(AL.getLoc(), DiagID) << AL;
+ Diag(AL.getLoc(), AttrDiagID) << AL;
AL.setInvalid();
}
}
@@ -1718,8 +1776,10 @@ void Parser::ProhibitCXX11Attributes(ParsedAttributes &Attrs, unsigned DiagID,
void Parser::DiagnoseCXX11AttributeExtension(ParsedAttributes &Attrs) {
for (const ParsedAttr &PA : Attrs) {
- if (PA.isCXX11Attribute() || PA.isC2xAttribute())
- Diag(PA.getLoc(), diag::ext_cxx11_attr_placement) << PA << PA.getRange();
+ if (PA.isCXX11Attribute() || PA.isC2xAttribute() ||
+ PA.isRegularKeywordAttribute())
+ Diag(PA.getLoc(), diag::ext_cxx11_attr_placement)
+ << PA << PA.isRegularKeywordAttribute() << PA.getRange();
}
}
@@ -1951,11 +2011,11 @@ bool Parser::MightBeDeclarator(DeclaratorContext Context) {
return getLangOpts().CPlusPlus11 && isCXX11VirtSpecifier(NextToken());
default:
- return false;
+ return Tok.isRegularKeywordAttribute();
}
default:
- return false;
+ return Tok.isRegularKeywordAttribute();
}
}
@@ -2030,6 +2090,7 @@ void Parser::SkipMalformedDecl() {
case tok::annot_module_begin:
case tok::annot_module_end:
case tok::annot_module_include:
+ case tok::annot_repl_input_end:
return;
default:
@@ -2136,13 +2197,16 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- if (isDeclarationSpecifier(ImplicitTypenameContext::No)) {
- // If there is an invalid declaration specifier right after the
- // function prototype, then we must be in a missing semicolon case
- // where this isn't actually a body. Just fall through into the code
- // that handles it as a prototype, and let the top-level code handle
- // the erroneous declspec where it would otherwise expect a comma or
- // semicolon.
+ if (isDeclarationSpecifier(ImplicitTypenameContext::No) ||
+ Tok.is(tok::kw_namespace)) {
+ // If there is an invalid declaration specifier or a namespace
+ // definition right after the function prototype, then we must be in a
+ // missing semicolon case where this isn't actually a body. Just fall
+ // through into the code that handles it as a prototype, and let the
+ // top-level code handle the erroneous declspec where it would
+ // otherwise expect a comma or semicolon. Note that
+ // isDeclarationSpecifier already covers 'inline namespace', since
+ // 'inline' can be a declaration specifier.
} else {
Diag(Tok, diag::err_expected_fn_body);
SkipUntil(tok::semi);
@@ -2263,10 +2327,8 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// Okay, there was no semicolon and one was expected. If we see a
// declaration specifier, just assume it was missing and continue parsing.
// Otherwise things are very confused and we skip to recover.
- if (!isDeclarationSpecifier(ImplicitTypenameContext::No)) {
- SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
- TryConsumeToken(tok::semi);
- }
+ if (!isDeclarationSpecifier(ImplicitTypenameContext::No))
+ SkipMalformedDecl();
}
return Actions.FinalizeDeclaratorGroup(getCurScope(), DS, DeclsInGroup);
@@ -2919,6 +2981,8 @@ Parser::getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context) {
return DeclSpecContext::DSC_condition;
case DeclaratorContext::ConversionId:
return DeclSpecContext::DSC_conv_operator;
+ case DeclaratorContext::CXXNew:
+ return DeclSpecContext::DSC_new;
case DeclaratorContext::Prototype:
case DeclaratorContext::ObjCResult:
case DeclaratorContext::ObjCParameter:
@@ -2927,7 +2991,6 @@ Parser::getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context) {
case DeclaratorContext::Block:
case DeclaratorContext::ForInit:
case DeclaratorContext::SelectionInit:
- case DeclaratorContext::CXXNew:
case DeclaratorContext::CXXCatch:
case DeclaratorContext::ObjCCatch:
case DeclaratorContext::BlockLiteral:
@@ -2942,24 +3005,26 @@ Parser::getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context) {
/// ParseAlignArgument - Parse the argument to an alignment-specifier.
///
-/// FIXME: Simply returns an alignof() expression if the argument is a
-/// type. Ideally, the type should be propagated directly into Sema.
-///
/// [C11] type-id
/// [C11] constant-expression
/// [C++0x] type-id ...[opt]
/// [C++0x] assignment-expression ...[opt]
-ExprResult Parser::ParseAlignArgument(SourceLocation Start,
- SourceLocation &EllipsisLoc) {
+ExprResult Parser::ParseAlignArgument(StringRef KWName, SourceLocation Start,
+ SourceLocation &EllipsisLoc, bool &IsType,
+ ParsedType &TypeResult) {
ExprResult ER;
if (isTypeIdInParens()) {
SourceLocation TypeLoc = Tok.getLocation();
ParsedType Ty = ParseTypeName().get();
SourceRange TypeRange(Start, Tok.getLocation());
- ER = Actions.ActOnUnaryExprOrTypeTraitExpr(TypeLoc, UETT_AlignOf, true,
- Ty.getAsOpaquePtr(), TypeRange);
- } else
+ if (Actions.ActOnAlignasTypeArgument(KWName, Ty, TypeLoc, TypeRange))
+ return ExprError();
+ TypeResult = Ty;
+ IsType = true;
+ } else {
ER = ParseConstantExpression();
+ IsType = false;
+ }
if (getLangOpts().CPlusPlus11)
TryConsumeToken(tok::ellipsis, EllipsisLoc);
@@ -2979,16 +3044,21 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *EndLoc) {
assert(Tok.isOneOf(tok::kw_alignas, tok::kw__Alignas) &&
"Not an alignment-specifier!");
-
- IdentifierInfo *KWName = Tok.getIdentifierInfo();
+ Token KWTok = Tok;
+ IdentifierInfo *KWName = KWTok.getIdentifierInfo();
+ auto Kind = KWTok.getKind();
SourceLocation KWLoc = ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.expectAndConsume())
return;
+ bool IsType;
+ ParsedType TypeResult;
SourceLocation EllipsisLoc;
- ExprResult ArgExpr = ParseAlignArgument(T.getOpenLocation(), EllipsisLoc);
+ ExprResult ArgExpr =
+ ParseAlignArgument(PP.getSpelling(KWTok), T.getOpenLocation(),
+ EllipsisLoc, IsType, TypeResult);
if (ArgExpr.isInvalid()) {
T.skipToEnd();
return;
@@ -2998,10 +3068,15 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
if (EndLoc)
*EndLoc = T.getCloseLocation();
- ArgsVector ArgExprs;
- ArgExprs.push_back(ArgExpr.get());
- Attrs.addNew(KWName, KWLoc, nullptr, KWLoc, ArgExprs.data(), 1,
- ParsedAttr::AS_Keyword, EllipsisLoc);
+ if (IsType) {
+ Attrs.addNewTypeAttr(KWName, KWLoc, nullptr, KWLoc, TypeResult, Kind,
+ EllipsisLoc);
+ } else {
+ ArgsVector ArgExprs;
+ ArgExprs.push_back(ArgExpr.get());
+ Attrs.addNew(KWName, KWLoc, nullptr, KWLoc, ArgExprs.data(), 1, Kind,
+ EllipsisLoc);
+ }
}
ExprResult Parser::ParseExtIntegerArgument() {
@@ -3244,13 +3319,17 @@ void Parser::ParseDeclarationSpecifiers(
switch (Tok.getKind()) {
default:
+ if (Tok.isRegularKeywordAttribute())
+ goto Attribute;
+
DoneWithDeclSpec:
if (!AttrsLastTime)
ProhibitAttributes(attrs);
else {
// Reject C++11 / C2x attributes that aren't type attributes.
for (const ParsedAttr &PA : attrs) {
- if (!PA.isCXX11Attribute() && !PA.isC2xAttribute())
+ if (!PA.isCXX11Attribute() && !PA.isC2xAttribute() &&
+ !PA.isRegularKeywordAttribute())
continue;
if (PA.getKind() == ParsedAttr::UnknownAttribute)
// We will warn about the unknown attribute elsewhere (in
@@ -3269,7 +3348,8 @@ void Parser::ParseDeclarationSpecifiers(
if (PA.isTypeAttr() && PA.getKind() != ParsedAttr::AT_LifetimeBound &&
PA.getKind() != ParsedAttr::AT_AnyX86NoCfCheck)
continue;
- Diag(PA.getLoc(), diag::err_attribute_not_type_attr) << PA;
+ Diag(PA.getLoc(), diag::err_attribute_not_type_attr)
+ << PA << PA.isRegularKeywordAttribute();
PA.setInvalid();
}
@@ -3283,9 +3363,10 @@ void Parser::ParseDeclarationSpecifiers(
case tok::l_square:
case tok::kw_alignas:
- if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
+ if (!isAllowedCXX11AttributeSpecifier())
goto DoneWithDeclSpec;
+ Attribute:
ProhibitAttributes(attrs);
// FIXME: It would be good to recover by accepting the attributes,
// but attempting to do that now would cause serious
@@ -3349,6 +3430,8 @@ void Parser::ParseDeclarationSpecifiers(
goto DoneWithDeclSpec;
CXXScopeSpec SS;
+ if (TemplateInfo.TemplateParams)
+ SS.setTemplateParamLists(*TemplateInfo.TemplateParams);
Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
Tok.getAnnotationRange(),
SS);
@@ -3399,12 +3482,12 @@ void Parser::ParseDeclarationSpecifiers(
continue;
}
- if (TemplateId && TemplateId->Kind == TNK_Concept_template &&
- GetLookAheadToken(2).isOneOf(tok::kw_auto, tok::kw_decltype)) {
+ if (TemplateId && TemplateId->Kind == TNK_Concept_template) {
DS.getTypeSpecScope() = SS;
- // This is a qualified placeholder-specifier, e.g., ::C<int> auto ...
- // Consume the scope annotation and continue to consume the template-id
- // as a placeholder-specifier.
+ // This is probably a qualified placeholder-specifier, e.g., ::C<int>
+ // auto ... Consume the scope annotation and continue to consume the
+ // template-id as a placeholder-specifier. Let the next iteration
+ // diagnose a missing auto.
ConsumeAnnotationToken();
continue;
}
@@ -3444,7 +3527,8 @@ void Parser::ParseDeclarationSpecifiers(
&SS) &&
isConstructorDeclarator(/*Unqualified=*/false,
/*DeductionGuide=*/false,
- DS.isFriendSpecified()))
+ DS.isFriendSpecified(),
+ &TemplateInfo))
goto DoneWithDeclSpec;
// C++20 [temp.spec] 13.9/6.
@@ -3652,11 +3736,12 @@ void Parser::ParseDeclarationSpecifiers(
// Likewise, if this is a context where the identifier could be a template
// name, check whether this is a deduction guide declaration.
+ CXXScopeSpec SS;
if (getLangOpts().CPlusPlus17 &&
(DSContext == DeclSpecContext::DSC_class ||
DSContext == DeclSpecContext::DSC_top_level) &&
Actions.isDeductionGuideName(getCurScope(), *Tok.getIdentifierInfo(),
- Tok.getLocation()) &&
+ Tok.getLocation(), SS) &&
isConstructorDeclarator(/*Unqualified*/ true,
/*DeductionGuide*/ true))
goto DoneWithDeclSpec;
@@ -3720,6 +3805,10 @@ void Parser::ParseDeclarationSpecifiers(
}
if (!NextToken().isOneOf(tok::kw_auto, tok::kw_decltype))
goto DoneWithDeclSpec;
+
+ if (TemplateId && !isInvalid && Actions.CheckTypeConstraint(TemplateId))
+ TemplateId = nullptr;
+
ConsumeAnnotationToken();
SourceLocation AutoLoc = Tok.getLocation();
if (TryConsumeToken(tok::kw_decltype)) {
@@ -3788,7 +3877,7 @@ void Parser::ParseDeclarationSpecifiers(
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = Tok.getLocation();
DS.getAttributes().addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc,
- nullptr, 0, ParsedAttr::AS_Keyword);
+ nullptr, 0, tok::kw___forceinline);
break;
}
@@ -3811,6 +3900,10 @@ void Parser::ParseDeclarationSpecifiers(
ParseMicrosoftTypeAttributes(DS.getAttributes());
continue;
+ case tok::kw___funcref:
+ ParseWebAssemblyFuncrefTypeAttribute(DS.getAttributes());
+ continue;
+
// Borland single token adornments.
case tok::kw___pascal:
ParseBorlandTypeAttributes(DS.getAttributes());
@@ -3837,7 +3930,7 @@ void Parser::ParseDeclarationSpecifiers(
// Objective-C 'kindof' types.
case tok::kw___kindof:
DS.getAttributes().addNew(Tok.getIdentifierInfo(), Loc, nullptr, Loc,
- nullptr, 0, ParsedAttr::AS_Keyword);
+ nullptr, 0, tok::kw___kindof);
(void)ConsumeToken();
continue;
@@ -3903,6 +3996,8 @@ void Parser::ParseDeclarationSpecifiers(
isStorageClass = true;
break;
case tok::kw_thread_local:
+ if (getLangOpts().C2x)
+ Diag(Tok, diag::warn_c2x_compat_keyword) << Tok.getName();
isInvalid = DS.SetStorageClassSpecThread(DeclSpec::TSCS_thread_local, Loc,
PrevSpec, DiagID);
isStorageClass = true;
@@ -4138,6 +4233,9 @@ void Parser::ParseDeclarationSpecifiers(
DiagID, Policy);
break;
case tok::kw_bool:
+ if (getLangOpts().C2x)
+ Diag(Tok, diag::warn_c2x_compat_keyword) << Tok.getName();
+ [[fallthrough]];
case tok::kw__Bool:
if (Tok.is(tok::kw__Bool) && !getLangOpts().C99)
Diag(Tok, diag::ext_c99_feature) << Tok.getName();
@@ -4921,6 +5019,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
assert(TemplateInfo.TemplateParams && "no template parameters");
TParams = MultiTemplateParamsArg(TemplateInfo.TemplateParams->data(),
TemplateInfo.TemplateParams->size());
+ SS.setTemplateParamLists(TParams);
}
if (!Name && TUK != Sema::TUK_Definition) {
@@ -4943,6 +5042,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
if (IsElaboratedTypeSpecifier && !getLangOpts().MicrosoftExt &&
!getLangOpts().ObjC) {
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ diag::err_keyword_not_allowed,
/*DiagnoseEmptyAttrs=*/true);
if (BaseType.isUsable())
Diag(BaseRange.getBegin(), diag::ext_enum_base_in_type_specifier)
@@ -5088,7 +5188,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
// If attributes exist after the enumerator, parse them.
ParsedAttributes attrs(AttrFactory);
MaybeParseGNUAttributes(attrs);
- if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
+ if (isAllowedCXX11AttributeSpecifier()) {
if (getLangOpts().CPlusPlus)
Diag(Tok.getLocation(), getLangOpts().CPlusPlus17
? diag::warn_cxx14_compat_ns_enum_attribute
@@ -5369,7 +5469,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw___read_only:
case tok::kw___read_write:
case tok::kw___write_only:
-
+ case tok::kw___funcref:
case tok::kw_groupshared:
return true;
@@ -5388,12 +5488,21 @@ Parser::DeclGroupPtrTy Parser::ParseTopLevelStmtDecl() {
// Parse a top-level-stmt.
Parser::StmtVector Stmts;
ParsedStmtContext SubStmtCtx = ParsedStmtContext();
+ Actions.PushFunctionScope();
StmtResult R = ParseStatementOrDeclaration(Stmts, SubStmtCtx);
+ Actions.PopFunctionScopeInfo();
if (!R.isUsable())
return nullptr;
SmallVector<Decl *, 2> DeclsInGroup;
DeclsInGroup.push_back(Actions.ActOnTopLevelStmtDecl(R.get()));
+
+ if (Tok.is(tok::annot_repl_input_end) &&
+ Tok.getAnnotationValue() != nullptr) {
+ ConsumeAnnotationToken();
+ cast<TopLevelStmtDecl>(DeclsInGroup.back())->setSemiMissing();
+ }
+
// Currently happens for things like -fms-extensions and use `__if_exists`.
for (Stmt *S : Stmts)
DeclsInGroup.push_back(Actions.ActOnTopLevelStmtDecl(S));
@@ -5632,6 +5741,7 @@ bool Parser::isDeclarationSpecifier(
#define GENERIC_IMAGE_TYPE(ImgType, Id) case tok::kw_##ImgType##_t:
#include "clang/Basic/OpenCLImageTypes.def"
+ case tok::kw___funcref:
case tok::kw_groupshared:
return true;
@@ -5641,11 +5751,15 @@ bool Parser::isDeclarationSpecifier(
}
bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide,
- DeclSpec::FriendSpecified IsFriend) {
+ DeclSpec::FriendSpecified IsFriend,
+ const ParsedTemplateInfo *TemplateInfo) {
TentativeParsingAction TPA(*this);
// Parse the C++ scope specifier.
CXXScopeSpec SS;
+ if (TemplateInfo && TemplateInfo->TemplateParams)
+ SS.setTemplateParamLists(*TemplateInfo->TemplateParams);
+
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
/*ObjectHasErrors=*/false,
/*EnteringContext=*/true)) {
@@ -5707,9 +5821,12 @@ bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide,
// therefore, we know that this is a constructor.
// Due to an ambiguity with implicit typename, the above is not enough.
// Additionally, check to see if we are a friend.
+ // If we parsed a scope specifier as well as friend,
+ // we might be parsing a friend constructor.
bool IsConstructor = false;
- if (isDeclarationSpecifier(IsFriend ? ImplicitTypenameContext::No
- : ImplicitTypenameContext::Yes))
+ if (isDeclarationSpecifier(IsFriend && !SS.isSet()
+ ? ImplicitTypenameContext::No
+ : ImplicitTypenameContext::Yes))
IsConstructor = true;
else if (Tok.is(tok::identifier) ||
(Tok.is(tok::annot_cxxscope) && NextToken().is(tok::identifier))) {
@@ -5799,8 +5916,8 @@ void Parser::ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs, bool AtomicAllowed,
bool IdentifierRequired,
std::optional<llvm::function_ref<void()>> CodeCompletionHandler) {
- if (standardAttributesAllowed() && (AttrReqs & AR_CXX11AttributesParsed) &&
- isCXX11AttributeSpecifier()) {
+ if ((AttrReqs & AR_CXX11AttributesParsed) &&
+ isAllowedCXX11AttributeSpecifier()) {
ParsedAttributes Attrs(AttrFactory);
ParseCXX11Attributes(Attrs);
DS.takeAttributesFrom(Attrs);
@@ -5893,6 +6010,12 @@ void Parser::ParseTypeQualifierListOpt(
continue;
}
goto DoneWithTypeQuals;
+
+ case tok::kw___funcref:
+ ParseWebAssemblyFuncrefTypeAttribute(DS.getAttributes());
+ continue;
+ goto DoneWithTypeQuals;
+
case tok::kw___pascal:
if (AttrReqs & AR_VendorAttributesParsed) {
ParseBorlandTypeAttributes(DS.getAttributes());
@@ -5911,7 +6034,7 @@ void Parser::ParseTypeQualifierListOpt(
// Objective-C 'kindof' types.
case tok::kw___kindof:
DS.getAttributes().addNew(Tok.getIdentifierInfo(), Loc, nullptr, Loc,
- nullptr, 0, ParsedAttr::AS_Keyword);
+ nullptr, 0, tok::kw___kindof);
(void)ConsumeToken();
continue;
@@ -6037,6 +6160,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
bool EnteringContext = D.getContext() == DeclaratorContext::File ||
D.getContext() == DeclaratorContext::Member;
CXXScopeSpec SS;
+ SS.setTemplateParamLists(D.getTemplateParameterLists());
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
/*ObjectHasErrors=*/false, EnteringContext);
@@ -6420,8 +6544,9 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// that it's an initializer instead.
if (D.mayOmitIdentifier() && D.mayBeFollowedByCXXDirectInit()) {
RevertingTentativeParsingAction PA(*this);
- if (TryParseDeclarator(true, D.mayHaveIdentifier(), true) ==
- TPResult::False) {
+ if (TryParseDeclarator(true, D.mayHaveIdentifier(), true,
+ D.getDeclSpec().getTypeSpecType() == TST_auto) ==
+ TPResult::False) {
D.SetIdentifier(nullptr, Tok.getLocation());
goto PastIdentifier;
}
@@ -6566,6 +6691,10 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
PrototypeScope.Exit();
} else if (Tok.is(tok::l_square)) {
ParseBracketDeclarator(D);
+ } else if (Tok.isRegularKeywordAttribute()) {
+ // For consistency with attribute parsing.
+ Diag(Tok, diag::err_keyword_not_allowed) << Tok.getIdentifierInfo();
+ ConsumeToken();
} else if (Tok.is(tok::kw_requires) && D.hasGroupingParens()) {
// This declarator is declaring a function, but the requires clause is
// in the wrong place:
@@ -6970,7 +7099,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
TrailingReturnTypeLoc = Range.getBegin();
EndLoc = Range.getEnd();
}
- } else if (standardAttributesAllowed()) {
+ } else {
MaybeParseCXX11Attributes(FnAttrs);
}
}
@@ -6987,6 +7116,15 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
continue;
DeclsInPrototype.push_back(ND);
}
+ // Sort DeclsInPrototype based on raw encoding of the source location.
+ // Scope::decls() is iterating over a SmallPtrSet so sort the Decls before
+ // moving to DeclContext. This provides a stable ordering for traversing
+ // Decls in DeclContext, which is important for tasks like ASTWriter for
+ // deterministic output.
+ llvm::sort(DeclsInPrototype, [](Decl *D1, Decl *D2) {
+ return D1->getLocation().getRawEncoding() <
+ D2->getLocation().getRawEncoding();
+ });
}
// Remember that we parsed a function type, and remember the attributes.
@@ -7301,13 +7439,9 @@ void Parser::ParseParameterDeclarationClause(
DefArgToks.reset(new CachedTokens);
SourceLocation ArgStartLoc = NextToken().getLocation();
- if (!ConsumeAndStoreInitializer(*DefArgToks, CIK_DefaultArgument)) {
- DefArgToks.reset();
- Actions.ActOnParamDefaultArgumentError(Param, EqualLoc);
- } else {
- Actions.ActOnParamUnparsedDefaultArgument(Param, EqualLoc,
- ArgStartLoc);
- }
+ ConsumeAndStoreInitializer(*DefArgToks, CIK_DefaultArgument);
+ Actions.ActOnParamUnparsedDefaultArgument(Param, EqualLoc,
+ ArgStartLoc);
} else {
// Consume the '='.
ConsumeToken();
@@ -7615,8 +7749,7 @@ void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
bool IsUnqual = Tok.is(tok::kw_typeof_unqual);
const IdentifierInfo *II = Tok.getIdentifierInfo();
if (getLangOpts().C2x && !II->getName().startswith("__"))
- Diag(Tok.getLocation(), diag::warn_c2x_compat_typeof_type_specifier)
- << IsUnqual;
+ Diag(Tok.getLocation(), diag::warn_c2x_compat_keyword) << Tok.getName();
Token OpTok = Tok;
SourceLocation StartLoc = ConsumeToken();
@@ -7816,7 +7949,7 @@ void Parser::DiagnoseBitIntUse(const Token &Tok) {
// In C2x mode, diagnose that the use is not compatible with pre-C2x modes.
// Otherwise, diagnose that the use is a Clang extension.
if (getLangOpts().C2x)
- Diag(Loc, diag::warn_c17_compat_bit_int);
+ Diag(Loc, diag::warn_c2x_compat_keyword) << Tok.getName();
else
Diag(Loc, diag::ext_bit_int) << getLangOpts().CPlusPlus;
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
index 227c1df2bddd..c1e09db2b3ee 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
@@ -23,6 +23,7 @@
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/SmallString.h"
@@ -349,7 +350,7 @@ Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
///
Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
assert(isTokenStringLiteral() && "Not a string literal!");
- ExprResult Lang = ParseStringLiteralExpression(false);
+ ExprResult Lang = ParseUnevaluatedStringLiteralExpression();
ParseScope LinkageScope(this, Scope::DeclScope);
Decl *LinkageSpec =
@@ -428,7 +429,7 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
: nullptr;
}
-/// Parse a C++ Modules TS export-declaration.
+/// Parse a standard C++ Modules export-declaration.
///
/// export-declaration:
/// 'export' declaration
@@ -456,13 +457,6 @@ Decl *Parser::ParseExportDeclaration() {
BalancedDelimiterTracker T(*this, tok::l_brace);
T.consumeOpen();
- // The Modules TS draft says "An export-declaration shall declare at least one
- // entity", but the intent is that it shall contain at least one declaration.
- if (Tok.is(tok::r_brace) && getLangOpts().ModulesTS) {
- Diag(ExportLoc, diag::err_export_empty)
- << SourceRange(ExportLoc, Tok.getLocation());
- }
-
while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
Tok.isNot(tok::eof)) {
ParsedAttributes DeclAttrs(AttrFactory);
@@ -641,6 +635,7 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
Tok.is(tok::identifier) &&
(NextToken().is(tok::semi) || NextToken().is(tok::comma) ||
NextToken().is(tok::ellipsis) || NextToken().is(tok::l_square) ||
+ NextToken().isRegularKeywordAttribute() ||
NextToken().is(tok::kw___attribute)) &&
D.SS.isNotEmpty() && LastII == Tok.getIdentifierInfo() &&
!D.SS.getScopeRep()->getAsNamespace() &&
@@ -773,11 +768,15 @@ Parser::DeclGroupPtrTy Parser::ParseUsingDeclaration(
// If we had any misplaced attributes from earlier, this is where they
// should have been written.
if (MisplacedAttrs.Range.isValid()) {
- Diag(MisplacedAttrs.Range.getBegin(), diag::err_attributes_not_allowed)
+ auto *FirstAttr =
+ MisplacedAttrs.empty() ? nullptr : &MisplacedAttrs.front();
+ auto &Range = MisplacedAttrs.Range;
+ (FirstAttr && FirstAttr->isRegularKeywordAttribute()
+ ? Diag(Range.getBegin(), diag::err_keyword_not_allowed) << FirstAttr
+ : Diag(Range.getBegin(), diag::err_attributes_not_allowed))
<< FixItHint::CreateInsertionFromRange(
- Tok.getLocation(),
- CharSourceRange::getTokenRange(MisplacedAttrs.Range))
- << FixItHint::CreateRemoval(MisplacedAttrs.Range);
+ Tok.getLocation(), CharSourceRange::getTokenRange(Range))
+ << FixItHint::CreateRemoval(Range);
Attrs.takeAllFrom(MisplacedAttrs);
}
@@ -965,14 +964,16 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd) {
assert(Tok.isOneOf(tok::kw_static_assert, tok::kw__Static_assert) &&
"Not a static_assert declaration");
- // Save the token used for static assertion.
- Token SavedTok = Tok;
+ // Save the token name used for static assertion.
+ const char *TokName = Tok.getName();
if (Tok.is(tok::kw__Static_assert) && !getLangOpts().C11)
Diag(Tok, diag::ext_c11_feature) << Tok.getName();
if (Tok.is(tok::kw_static_assert)) {
if (!getLangOpts().CPlusPlus) {
- if (!getLangOpts().C2x)
+ if (getLangOpts().C2x)
+ Diag(Tok, diag::warn_c2x_compat_keyword) << Tok.getName();
+ else
Diag(Tok, diag::ext_ms_static_assert) << FixItHint::CreateReplacement(
Tok.getLocation(), "_Static_assert");
} else
@@ -1015,14 +1016,17 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd) {
return nullptr;
}
- if (!isTokenStringLiteral()) {
+ if (isTokenStringLiteral())
+ AssertMessage = ParseUnevaluatedStringLiteralExpression();
+ else if (getLangOpts().CPlusPlus26)
+ AssertMessage = ParseConstantExpressionInExprEvalContext();
+ else {
Diag(Tok, diag::err_expected_string_literal)
<< /*Source='static_assert'*/ 1;
SkipMalformedDecl();
return nullptr;
}
- AssertMessage = ParseStringLiteralExpression();
if (AssertMessage.isInvalid()) {
SkipMalformedDecl();
return nullptr;
@@ -1032,9 +1036,7 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd) {
T.consumeClose();
DeclEnd = Tok.getLocation();
- // Passing the token used to the error message.
- ExpectAndConsumeSemi(diag::err_expected_semi_after_static_assert,
- SavedTok.getName());
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_static_assert, TokName);
return Actions.ActOnStaticAssertDeclaration(StaticAssertLoc, AssertExpr.get(),
AssertMessage.get(),
@@ -1082,7 +1084,7 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
// Check for C++1y 'decltype(auto)'.
if (Tok.is(tok::kw_auto) && NextToken().is(tok::r_paren)) {
// the typename-specifier in a function-style cast expression may
- // be 'auto' since C++2b.
+ // be 'auto' since C++23.
Diag(Tok.getLocation(),
getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_decltype_auto_type_specifier
@@ -1377,9 +1379,9 @@ void Parser::ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs) {
tok::kw___multiple_inheritance,
tok::kw___virtual_inheritance)) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ auto Kind = Tok.getKind();
SourceLocation AttrNameLoc = ConsumeToken();
- attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0, Kind);
}
}
@@ -1390,6 +1392,8 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
// This switch enumerates the valid "follow" set for type-specifiers.
switch (Tok.getKind()) {
default:
+ if (Tok.isRegularKeywordAttribute())
+ return true;
break;
case tok::semi: // struct foo {...} ;
case tok::star: // struct foo {...} * P;
@@ -1629,6 +1633,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
tok::kw___is_signed,
tok::kw___is_standard_layout,
tok::kw___is_trivial,
+ tok::kw___is_trivially_equality_comparable,
tok::kw___is_trivially_assignable,
tok::kw___is_trivially_constructible,
tok::kw___is_trivially_copyable,
@@ -1681,6 +1686,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
ColonProtectionRAIIObject X(*this);
CXXScopeSpec Spec;
+ if (TemplateInfo.TemplateParams)
+ Spec.setTemplateParamLists(*TemplateInfo.TemplateParams);
+
bool HasValidSpec = true;
if (ParseOptionalCXXScopeSpecifier(Spec, /*ObjectType=*/nullptr,
/*ObjectHasErrors=*/false,
@@ -1843,6 +1851,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
} else if (isClassCompatibleKeyword() &&
(NextToken().is(tok::l_square) ||
NextToken().is(tok::kw_alignas) ||
+ NextToken().isRegularKeywordAttribute() ||
isCXX11VirtSpecifier(NextToken()) != VirtSpecifiers::VS_None)) {
// We can't tell if this is a definition or reference
// until we skipped the 'final' and C++11 attribute specifiers.
@@ -1864,6 +1873,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
ConsumeParen();
if (!SkipUntil(tok::r_paren, StopAtSemi))
break;
+ } else if (Tok.isRegularKeywordAttribute()) {
+ ConsumeToken();
} else {
break;
}
@@ -1900,7 +1911,11 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// them to the right place.
SourceRange AttrRange = Attributes.Range;
if (AttrRange.isValid()) {
- Diag(AttrRange.getBegin(), diag::err_attributes_not_allowed)
+ auto *FirstAttr = Attributes.empty() ? nullptr : &Attributes.front();
+ auto Loc = AttrRange.getBegin();
+ (FirstAttr && FirstAttr->isRegularKeywordAttribute()
+ ? Diag(Loc, diag::err_keyword_not_allowed) << FirstAttr
+ : Diag(Loc, diag::err_attributes_not_allowed))
<< AttrRange
<< FixItHint::CreateInsertionFromRange(
AttrFixitLoc, CharSourceRange(AttrRange, true))
@@ -1948,6 +1963,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TUK == Sema::TUK_Declaration) {
// This is an explicit instantiation of a class template.
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ diag::err_keyword_not_allowed,
/*DiagnoseEmptyAttrs=*/true);
TagOrTempResult = Actions.ActOnExplicitInstantiation(
@@ -1964,6 +1980,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
(TUK == Sema::TUK_Friend &&
TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate)) {
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ diag::err_keyword_not_allowed,
/*DiagnoseEmptyAttrs=*/true);
TypeResult = Actions.ActOnTagTemplateIdType(
TUK, TagType, StartLoc, SS, TemplateId->TemplateKWLoc,
@@ -2033,6 +2050,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
} else if (TUK == Sema::TUK_Friend &&
TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) {
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ diag::err_keyword_not_allowed,
/*DiagnoseEmptyAttrs=*/true);
TagOrTempResult = Actions.ActOnTemplatedFriendTag(
@@ -2043,6 +2061,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
} else {
if (TUK != Sema::TUK_Declaration && TUK != Sema::TUK_Definition)
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ diag::err_keyword_not_allowed,
/* DiagnoseEmptyAttrs=*/true);
if (TUK == Sema::TUK_Definition &&
@@ -3019,12 +3038,14 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
//
// Diagnose attributes that appear in a friend member function declarator:
// friend int foo [[]] ();
- SmallVector<SourceRange, 4> Ranges;
- DeclaratorInfo.getCXX11AttributeRanges(Ranges);
- for (SmallVectorImpl<SourceRange>::iterator I = Ranges.begin(),
- E = Ranges.end();
- I != E; ++I)
- Diag((*I).getBegin(), diag::err_attributes_not_allowed) << *I;
+ for (const ParsedAttr &AL : DeclaratorInfo.getAttributes())
+ if (AL.isCXX11Attribute() || AL.isRegularKeywordAttribute()) {
+ auto Loc = AL.getRange().getBegin();
+ (AL.isRegularKeywordAttribute()
+ ? Diag(Loc, diag::err_keyword_not_allowed) << AL
+ : Diag(Loc, diag::err_attributes_not_allowed))
+ << AL.getRange();
+ }
ThisDecl = Actions.ActOnFriendFunctionDecl(getCurScope(), DeclaratorInfo,
TemplateParams);
@@ -3197,6 +3218,7 @@ ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
? Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed
: Sema::ExpressionEvaluationContext::PotentiallyEvaluated,
D);
+ Actions.ExprEvalContexts.back().InImmediateEscalatingFunctionContext = true;
if (TryConsumeToken(tok::equal, EqualLoc)) {
if (Tok.is(tok::kw_delete)) {
// In principle, an initializer of '= delete p;' is legal, but it will
@@ -4358,19 +4380,19 @@ bool Parser::ParseCXX11AttributeArgs(
assert(Tok.is(tok::l_paren) && "Not a C++11 attribute argument list");
SourceLocation LParenLoc = Tok.getLocation();
const LangOptions &LO = getLangOpts();
- ParsedAttr::Syntax Syntax =
- LO.CPlusPlus ? ParsedAttr::AS_CXX11 : ParsedAttr::AS_C2x;
+ ParsedAttr::Form Form =
+ LO.CPlusPlus ? ParsedAttr::Form::CXX11() : ParsedAttr::Form::C2x();
// Try parsing microsoft attributes
if (getLangOpts().MicrosoftExt || getLangOpts().HLSL) {
if (hasAttribute(AttributeCommonInfo::Syntax::AS_Microsoft, ScopeName,
AttrName, getTargetInfo(), getLangOpts()))
- Syntax = ParsedAttr::AS_Microsoft;
+ Form = ParsedAttr::Form::Microsoft();
}
// If the attribute isn't known, we will not attempt to parse any
// arguments.
- if (Syntax != ParsedAttr::AS_Microsoft &&
+ if (Form.getSyntax() != ParsedAttr::AS_Microsoft &&
!hasAttribute(LO.CPlusPlus ? AttributeCommonInfo::Syntax::AS_CXX11
: AttributeCommonInfo::Syntax::AS_C2x,
ScopeName, AttrName, getTargetInfo(), getLangOpts())) {
@@ -4386,7 +4408,7 @@ bool Parser::ParseCXX11AttributeArgs(
// GNU-scoped attributes have some special cases to handle GNU-specific
// behaviors.
ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax, nullptr);
+ ScopeLoc, Form, nullptr);
return true;
}
@@ -4406,10 +4428,10 @@ bool Parser::ParseCXX11AttributeArgs(
// Some Clang-scoped attributes have some special parsing behavior.
if (ScopeName && (ScopeName->isStr("clang") || ScopeName->isStr("_Clang")))
NumArgs = ParseClangAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
else
NumArgs = ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
if (!Attrs.empty() &&
IsBuiltInOrStandardCXX11Attribute(AttrName, ScopeName)) {
@@ -4464,16 +4486,33 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
CachedTokens &OpenMPTokens,
SourceLocation *EndLoc) {
if (Tok.is(tok::kw_alignas)) {
- Diag(Tok.getLocation(), diag::warn_cxx98_compat_alignas);
+ if (getLangOpts().C2x)
+ Diag(Tok, diag::warn_c2x_compat_keyword) << Tok.getName();
+ else
+ Diag(Tok.getLocation(), diag::warn_cxx98_compat_alignas);
ParseAlignmentSpecifier(Attrs, EndLoc);
return;
}
+ if (Tok.isRegularKeywordAttribute()) {
+ SourceLocation Loc = Tok.getLocation();
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ Attrs.addNew(AttrName, Loc, nullptr, Loc, nullptr, 0, Tok.getKind());
+ ConsumeToken();
+ return;
+ }
+
assert(Tok.is(tok::l_square) && NextToken().is(tok::l_square) &&
"Not a double square bracket attribute list");
SourceLocation OpenLoc = Tok.getLocation();
- Diag(OpenLoc, diag::warn_cxx98_compat_attribute);
+ if (getLangOpts().CPlusPlus) {
+ Diag(OpenLoc, getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_attribute
+ : diag::warn_ext_cxx11_attributes);
+ } else {
+ Diag(OpenLoc, getLangOpts().C2x ? diag::warn_pre_c2x_compat_attributes
+ : diag::warn_ext_c2x_attributes);
+ }
ConsumeBracket();
checkCompoundToken(OpenLoc, tok::l_square, CompoundToken::AttrBegin);
@@ -4556,7 +4595,8 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
AttrName,
SourceRange(ScopeLoc.isValid() ? ScopeLoc : AttrLoc, AttrLoc),
ScopeName, ScopeLoc, nullptr, 0,
- getLangOpts().CPlusPlus ? ParsedAttr::AS_CXX11 : ParsedAttr::AS_C2x);
+ getLangOpts().CPlusPlus ? ParsedAttr::Form::CXX11()
+ : ParsedAttr::Form::C2x());
AttrParsed = true;
}
@@ -4587,26 +4627,28 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
/// attribute-specifier-seq:
/// attribute-specifier-seq[opt] attribute-specifier
void Parser::ParseCXX11Attributes(ParsedAttributes &Attrs) {
- assert(standardAttributesAllowed());
-
SourceLocation StartLoc = Tok.getLocation();
SourceLocation EndLoc = StartLoc;
do {
ParseCXX11AttributeSpecifier(Attrs, &EndLoc);
- } while (isCXX11AttributeSpecifier());
+ } while (isAllowedCXX11AttributeSpecifier());
Attrs.Range = SourceRange(StartLoc, EndLoc);
}
void Parser::DiagnoseAndSkipCXX11Attributes() {
+ auto Keyword =
+ Tok.isRegularKeywordAttribute() ? Tok.getIdentifierInfo() : nullptr;
// Start and end location of an attribute or an attribute list.
SourceLocation StartLoc = Tok.getLocation();
SourceLocation EndLoc = SkipCXX11Attributes();
if (EndLoc.isValid()) {
SourceRange Range(StartLoc, EndLoc);
- Diag(StartLoc, diag::err_attributes_not_allowed) << Range;
+ (Keyword ? Diag(StartLoc, diag::err_keyword_not_allowed) << Keyword
+ : Diag(StartLoc, diag::err_attributes_not_allowed))
+ << Range;
}
}
@@ -4622,6 +4664,9 @@ SourceLocation Parser::SkipCXX11Attributes() {
T.consumeOpen();
T.skipToEnd();
EndLoc = T.getCloseLocation();
+ } else if (Tok.isRegularKeywordAttribute()) {
+ EndLoc = Tok.getLocation();
+ ConsumeToken();
} else {
assert(Tok.is(tok::kw_alignas) && "not an attribute specifier");
ConsumeToken();
@@ -4716,7 +4761,7 @@ void Parser::ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs) {
if (!T.consumeClose()) {
Attrs.addNew(UuidIdent, SourceRange(UuidLoc, T.getCloseLocation()), nullptr,
SourceLocation(), ArgExprs.data(), ArgExprs.size(),
- ParsedAttr::AS_Microsoft);
+ ParsedAttr::Form::Microsoft());
}
}
@@ -4772,7 +4817,7 @@ void Parser::ParseMicrosoftAttributes(ParsedAttributes &Attrs) {
}
if (!AttrParsed) {
Attrs.addNew(II, NameLoc, nullptr, SourceLocation(), nullptr, 0,
- ParsedAttr::AS_Microsoft);
+ ParsedAttr::Form::Microsoft());
}
}
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
index 66d937ac5742..75d04824d8b9 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
@@ -20,12 +20,13 @@
///
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
@@ -789,7 +790,9 @@ class CastExpressionIdValidator final : public CorrectionCandidateCallback {
/// [GNU] '__builtin_choose_expr' '(' assign-expr ',' assign-expr ','
/// assign-expr ')'
/// [GNU] '__builtin_FILE' '(' ')'
+/// [CLANG] '__builtin_FILE_NAME' '(' ')'
/// [GNU] '__builtin_FUNCTION' '(' ')'
+/// [MS] '__builtin_FUNCSIG' '(' ')'
/// [GNU] '__builtin_LINE' '(' ')'
/// [CLANG] '__builtin_COLUMN' '(' ')'
/// [GNU] '__builtin_source_location' '(' ')'
@@ -1007,8 +1010,8 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
if (getLangOpts().CPlusPlus)
Diag(Tok, diag::warn_cxx98_compat_nullptr);
else
- Diag(Tok, getLangOpts().C2x ? diag::warn_c17_compat_nullptr
- : diag::ext_c_nullptr);
+ Diag(Tok, getLangOpts().C2x ? diag::warn_c2x_compat_keyword
+ : diag::ext_c_nullptr) << Tok.getName();
Res = Actions.ActOnCXXNullPtrLiteral(ConsumeToken());
break;
@@ -1317,7 +1320,9 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw___builtin_convertvector:
case tok::kw___builtin_COLUMN:
case tok::kw___builtin_FILE:
+ case tok::kw___builtin_FILE_NAME:
case tok::kw___builtin_FUNCTION:
+ case tok::kw___builtin_FUNCSIG:
case tok::kw___builtin_LINE:
case tok::kw___builtin_source_location:
if (NotPrimaryExpression)
@@ -1868,7 +1873,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
/// primary-expression
/// postfix-expression '[' expression ']'
/// postfix-expression '[' braced-init-list ']'
-/// postfix-expression '[' expression-list [opt] ']' [C++2b 12.4.5]
+/// postfix-expression '[' expression-list [opt] ']' [C++23 12.4.5]
/// postfix-expression '(' argument-expression-list[opt] ')'
/// postfix-expression '.' identifier
/// postfix-expression '->' identifier
@@ -1943,10 +1948,10 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// We try to parse a list of indexes in all language mode first
// and, in we find 0 or one index, we try to parse an OpenMP array
- // section. This allow us to support C++2b multi dimensional subscript and
+ // section. This allow us to support C++23 multi dimensional subscript and
// OpenMp sections in the same language mode.
if (!getLangOpts().OpenMP || Tok.isNot(tok::colon)) {
- if (!getLangOpts().CPlusPlus2b) {
+ if (!getLangOpts().CPlusPlus23) {
ExprResult Idx;
if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
@@ -2484,8 +2489,11 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
RParenLoc);
}
- if (OpTok.isOneOf(tok::kw_alignof, tok::kw__Alignof))
+ if (getLangOpts().CPlusPlus &&
+ OpTok.isOneOf(tok::kw_alignof, tok::kw__Alignof))
Diag(OpTok, diag::warn_cxx98_compat_alignof);
+ else if (getLangOpts().C2x && OpTok.is(tok::kw_alignof))
+ Diag(OpTok, diag::warn_c2x_compat_keyword) << OpTok.getName();
EnterExpressionEvaluationContext Unevaluated(
Actions, Sema::ExpressionEvaluationContext::Unevaluated,
@@ -2539,7 +2547,9 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
/// assign-expr ')'
/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
/// [GNU] '__builtin_FILE' '(' ')'
+/// [CLANG] '__builtin_FILE_NAME' '(' ')'
/// [GNU] '__builtin_FUNCTION' '(' ')'
+/// [MS] '__builtin_FUNCSIG' '(' ')'
/// [GNU] '__builtin_LINE' '(' ')'
/// [CLANG] '__builtin_COLUMN' '(' ')'
/// [GNU] '__builtin_source_location' '(' ')'
@@ -2774,7 +2784,9 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
}
case tok::kw___builtin_COLUMN:
case tok::kw___builtin_FILE:
+ case tok::kw___builtin_FILE_NAME:
case tok::kw___builtin_FUNCTION:
+ case tok::kw___builtin_FUNCSIG:
case tok::kw___builtin_LINE:
case tok::kw___builtin_source_location: {
// Attempt to consume the r-paren.
@@ -2787,8 +2799,12 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
switch (T) {
case tok::kw___builtin_FILE:
return SourceLocExpr::File;
+ case tok::kw___builtin_FILE_NAME:
+ return SourceLocExpr::FileName;
case tok::kw___builtin_FUNCTION:
return SourceLocExpr::Function;
+ case tok::kw___builtin_FUNCSIG:
+ return SourceLocExpr::FuncSig;
case tok::kw___builtin_LINE:
return SourceLocExpr::Line;
case tok::kw___builtin_COLUMN:
@@ -3240,6 +3256,17 @@ Parser::ParseCompoundLiteralExpression(ParsedType Ty,
/// string-literal
/// \verbatim
ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral) {
+ return ParseStringLiteralExpression(AllowUserDefinedLiteral,
+ /*Unevaluated=*/false);
+}
+
+ExprResult Parser::ParseUnevaluatedStringLiteralExpression() {
+ return ParseStringLiteralExpression(/*AllowUserDefinedLiteral=*/false,
+ /*Unevaluated=*/true);
+}
+
+ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral,
+ bool Unevaluated) {
assert(isTokenStringLiteral() && "Not a string literal!");
// String concat. Note that keywords like __func__ and __FUNCTION__ are not
@@ -3251,6 +3278,11 @@ ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral) {
ConsumeStringToken();
} while (isTokenStringLiteral());
+ if (Unevaluated) {
+ assert(!AllowUserDefinedLiteral && "UDL are always evaluated");
+ return Actions.ActOnUnevaluatedStringLiteral(StringToks);
+ }
+
// Pass the set of string tokens, ready for concatenation, to the actions.
return Actions.ActOnStringLiteral(StringToks,
AllowUserDefinedLiteral ? getCurScope()
@@ -3270,6 +3302,12 @@ ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral) {
/// type-name : assignment-expression
/// default : assignment-expression
/// \endverbatim
+///
+/// As an extension, Clang also accepts:
+/// \verbatim
+/// generic-selection:
+/// _Generic ( type-name, generic-assoc-list )
+/// \endverbatim
ExprResult Parser::ParseGenericSelectionExpression() {
assert(Tok.is(tok::kw__Generic) && "_Generic keyword expected");
if (!getLangOpts().C11)
@@ -3280,8 +3318,20 @@ ExprResult Parser::ParseGenericSelectionExpression() {
if (T.expectAndConsume())
return ExprError();
+ // We either have a controlling expression or we have a controlling type, and
+ // we need to figure out which it is.
+ TypeResult ControllingType;
ExprResult ControllingExpr;
- {
+ if (isTypeIdForGenericSelection()) {
+ ControllingType = ParseTypeName();
+ if (ControllingType.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+ const auto *LIT = cast<LocInfoType>(ControllingType.get().get());
+ SourceLocation Loc = LIT->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
+ Diag(Loc, diag::ext_generic_with_type_arg);
+ } else {
// C11 6.5.1.1p3 "The controlling expression of a generic selection is
// not evaluated."
EnterExpressionEvaluationContext Unevaluated(
@@ -3346,10 +3396,13 @@ ExprResult Parser::ParseGenericSelectionExpression() {
if (T.getCloseLocation().isInvalid())
return ExprError();
- return Actions.ActOnGenericSelectionExpr(KeyLoc, DefaultLoc,
- T.getCloseLocation(),
- ControllingExpr.get(),
- Types, Exprs);
+ void *ExprOrTy = ControllingExpr.isUsable()
+ ? ControllingExpr.get()
+ : ControllingType.get().getAsOpaquePtr();
+
+ return Actions.ActOnGenericSelectionExpr(
+ KeyLoc, DefaultLoc, T.getCloseLocation(), ControllingExpr.isUsable(),
+ ExprOrTy, Types, Exprs);
}
/// Parse A C++1z fold-expression after the opening paren and optional
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
index 7f09120574a7..b035bd9db9d5 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
@@ -20,6 +20,7 @@
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "llvm/Support/Compiler.h"
@@ -724,7 +725,7 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
/// '&' identifier initializer
///
/// lambda-declarator:
-/// lambda-specifiers [C++2b]
+/// lambda-specifiers [C++23]
/// '(' parameter-declaration-clause ')' lambda-specifiers
/// requires-clause[opt]
///
@@ -1204,7 +1205,7 @@ static void tryConsumeLambdaSpecifierToken(Parser &P,
static void addStaticToLambdaDeclSpecifier(Parser &P, SourceLocation StaticLoc,
DeclSpec &DS) {
if (StaticLoc.isValid()) {
- P.Diag(StaticLoc, !P.getLangOpts().CPlusPlus2b
+ P.Diag(StaticLoc, !P.getLangOpts().CPlusPlus23
? diag::err_static_lambda
: diag::warn_cxx20_compat_static_lambda);
const char *PrevSpec = nullptr;
@@ -1275,18 +1276,19 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
PrettyStackTraceLoc CrashInfo(PP.getSourceManager(), LambdaBeginLoc,
"lambda expression parsing");
-
-
- // FIXME: Call into Actions to add any init-capture declarations to the
- // scope while parsing the lambda-declarator and compound-statement.
-
// Parse lambda-declarator[opt].
DeclSpec DS(AttrFactory);
Declarator D(DS, ParsedAttributesView::none(), DeclaratorContext::LambdaExpr);
TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+
+ ParseScope LambdaScope(this, Scope::LambdaScope | Scope::DeclScope |
+ Scope::FunctionDeclarationScope |
+ Scope::FunctionPrototypeScope);
+
Actions.PushLambdaScope();
+ Actions.ActOnLambdaExpressionAfterIntroducer(Intro, getCurScope());
- ParsedAttributes Attr(AttrFactory);
+ ParsedAttributes Attributes(AttrFactory);
if (getLangOpts().CUDA) {
// In CUDA code, GNU attributes are allowed to appear immediately after the
// "[...]", even if there is no "(...)" before the lambda body.
@@ -1297,22 +1299,23 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
if (Tok.is(tok::kw___noinline__)) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
- Attr.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ Attributes.addNew(AttrName, AttrNameLoc, /*ScopeName=*/nullptr,
+ AttrNameLoc, /*ArgsUnion=*/nullptr,
+ /*numArgs=*/0, tok::kw___noinline__);
} else if (Tok.is(tok::kw___attribute))
- ParseGNUAttributes(Attr, nullptr, &D);
+ ParseGNUAttributes(Attributes, /*LatePArsedAttrList=*/nullptr, &D);
else
break;
}
- D.takeAttributes(Attr);
+ D.takeAttributes(Attributes);
}
// Helper to emit a warning if we see a CUDA host/device/global attribute
// after '(...)'. nvcc doesn't accept this.
auto WarnIfHasCUDATargetAttr = [&] {
if (getLangOpts().CUDA)
- for (const ParsedAttr &A : Attr)
+ for (const ParsedAttr &A : Attributes)
if (A.getKind() == ParsedAttr::AT_CUDADevice ||
A.getKind() == ParsedAttr::AT_CUDAHost ||
A.getKind() == ParsedAttr::AT_CUDAGlobal)
@@ -1349,7 +1352,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
}
Actions.ActOnLambdaExplicitTemplateParameterList(
- LAngleLoc, TemplateParams, RAngleLoc, RequiresClause);
+ Intro, LAngleLoc, TemplateParams, RAngleLoc, RequiresClause);
++CurTemplateDepthTracker;
}
}
@@ -1359,40 +1362,45 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
// or operator template declaration. We accept this as a conforming extension
// in all language modes that support lambdas.
if (isCXX11AttributeSpecifier()) {
- Diag(Tok, getLangOpts().CPlusPlus2b
+ Diag(Tok, getLangOpts().CPlusPlus23
? diag::warn_cxx20_compat_decl_attrs_on_lambda
- : diag::ext_decl_attrs_on_lambda);
+ : diag::ext_decl_attrs_on_lambda)
+ << Tok.getIdentifierInfo() << Tok.isRegularKeywordAttribute();
MaybeParseCXX11Attributes(D);
}
TypeResult TrailingReturnType;
SourceLocation TrailingReturnTypeLoc;
+ SourceLocation LParenLoc, RParenLoc;
+ SourceLocation DeclEndLoc;
+ bool HasParentheses = false;
+ bool HasSpecifiers = false;
+ SourceLocation MutableLoc;
+
+ auto ParseConstexprAndMutableSpecifiers = [&] {
+ // GNU-style attributes must be parsed before the mutable specifier to
+ // be compatible with GCC. MSVC-style attributes must be parsed before
+ // the mutable specifier to be compatible with MSVC.
+ MaybeParseAttributes(PAKM_GNU | PAKM_Declspec, Attributes);
+ // Parse mutable-opt and/or constexpr-opt or consteval-opt, and update
+ // the DeclEndLoc.
+ SourceLocation ConstexprLoc;
+ SourceLocation ConstevalLoc;
+ SourceLocation StaticLoc;
+
+ tryConsumeLambdaSpecifierToken(*this, MutableLoc, StaticLoc, ConstexprLoc,
+ ConstevalLoc, DeclEndLoc);
+
+ DiagnoseStaticSpecifierRestrictions(*this, StaticLoc, MutableLoc, Intro);
+
+ addStaticToLambdaDeclSpecifier(*this, StaticLoc, DS);
+ addConstexprToLambdaDeclSpecifier(*this, ConstexprLoc, DS);
+ addConstevalToLambdaDeclSpecifier(*this, ConstevalLoc, DS);
+ };
auto ParseLambdaSpecifiers =
- [&](SourceLocation LParenLoc, SourceLocation RParenLoc,
- MutableArrayRef<DeclaratorChunk::ParamInfo> ParamInfo,
+ [&](MutableArrayRef<DeclaratorChunk::ParamInfo> ParamInfo,
SourceLocation EllipsisLoc) {
- SourceLocation DeclEndLoc = RParenLoc;
-
- // GNU-style attributes must be parsed before the mutable specifier to
- // be compatible with GCC. MSVC-style attributes must be parsed before
- // the mutable specifier to be compatible with MSVC.
- MaybeParseAttributes(PAKM_GNU | PAKM_Declspec, Attr);
-
- // Parse lambda specifiers and update the DeclEndLoc.
- SourceLocation MutableLoc;
- SourceLocation StaticLoc;
- SourceLocation ConstexprLoc;
- SourceLocation ConstevalLoc;
- tryConsumeLambdaSpecifierToken(*this, MutableLoc, StaticLoc,
- ConstexprLoc, ConstevalLoc, DeclEndLoc);
-
- DiagnoseStaticSpecifierRestrictions(*this, StaticLoc, MutableLoc,
- Intro);
-
- addStaticToLambdaDeclSpecifier(*this, StaticLoc, DS);
- addConstexprToLambdaDeclSpecifier(*this, ConstexprLoc, DS);
- addConstevalToLambdaDeclSpecifier(*this, ConstevalLoc, DS);
// Parse exception-specification[opt].
ExceptionSpecificationType ESpecType = EST_None;
SourceRange ESpecRange;
@@ -1400,6 +1408,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
SmallVector<SourceRange, 2> DynamicExceptionRanges;
ExprResult NoexceptExpr;
CachedTokens *ExceptionSpecTokens;
+
ESpecType = tryParseExceptionSpecification(
/*Delayed=*/false, ESpecRange, DynamicExceptions,
DynamicExceptionRanges, NoexceptExpr, ExceptionSpecTokens);
@@ -1408,8 +1417,8 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
DeclEndLoc = ESpecRange.getEnd();
// Parse attribute-specifier[opt].
- if (MaybeParseCXX11Attributes(Attr))
- DeclEndLoc = Attr.Range.getEnd();
+ if (MaybeParseCXX11Attributes(Attributes))
+ DeclEndLoc = Attributes.Range.getEnd();
// Parse OpenCL addr space attribute.
if (Tok.isOneOf(tok::kw___private, tok::kw___global, tok::kw___local,
@@ -1445,27 +1454,32 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
/*ExceptionSpecTokens*/ nullptr,
/*DeclsInPrototype=*/std::nullopt, LParenLoc, FunLocalRangeEnd,
D, TrailingReturnType, TrailingReturnTypeLoc, &DS),
- std::move(Attr), DeclEndLoc);
+ std::move(Attributes), DeclEndLoc);
+
+ Actions.ActOnLambdaClosureQualifiers(Intro, MutableLoc);
+
+ if (HasParentheses && Tok.is(tok::kw_requires))
+ ParseTrailingRequiresClause(D);
};
- if (Tok.is(tok::l_paren)) {
- ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope |
- Scope::FunctionDeclarationScope |
- Scope::DeclScope);
+ ParseScope Prototype(this, Scope::FunctionPrototypeScope |
+ Scope::FunctionDeclarationScope |
+ Scope::DeclScope);
+ // Parse parameter-declaration-clause.
+ SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
+ SourceLocation EllipsisLoc;
+
+ if (Tok.is(tok::l_paren)) {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- SourceLocation LParenLoc = T.getOpenLocation();
-
- // Parse parameter-declaration-clause.
- SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
- SourceLocation EllipsisLoc;
+ LParenLoc = T.getOpenLocation();
if (Tok.isNot(tok::r_paren)) {
Actions.RecordParsingTemplateParameterDepth(
CurTemplateDepthTracker.getOriginalDepth());
- ParseParameterDeclarationClause(D, Attr, ParamInfo, EllipsisLoc);
+ ParseParameterDeclarationClause(D, Attributes, ParamInfo, EllipsisLoc);
// For a generic lambda, each 'auto' within the parameter declaration
// clause creates a template type parameter, so increment the depth.
// If we've parsed any explicit template parameters, then the depth will
@@ -1476,44 +1490,49 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
}
T.consumeClose();
+ DeclEndLoc = RParenLoc = T.getCloseLocation();
+ HasParentheses = true;
+ }
- // Parse lambda-specifiers.
- ParseLambdaSpecifiers(LParenLoc, /*DeclEndLoc=*/T.getCloseLocation(),
- ParamInfo, EllipsisLoc);
-
- // Parse requires-clause[opt].
- if (Tok.is(tok::kw_requires))
- ParseTrailingRequiresClause(D);
- } else if (Tok.isOneOf(tok::kw_mutable, tok::arrow, tok::kw___attribute,
- tok::kw_constexpr, tok::kw_consteval, tok::kw_static,
- tok::kw___private, tok::kw___global, tok::kw___local,
- tok::kw___constant, tok::kw___generic,
- tok::kw_groupshared, tok::kw_requires,
- tok::kw_noexcept) ||
- (Tok.is(tok::l_square) && NextToken().is(tok::l_square))) {
- if (!getLangOpts().CPlusPlus2b)
- // It's common to forget that one needs '()' before 'mutable', an
- // attribute specifier, the result type, or the requires clause. Deal with
- // this.
- Diag(Tok, diag::ext_lambda_missing_parens)
- << FixItHint::CreateInsertion(Tok.getLocation(), "() ");
-
- SourceLocation NoLoc;
- // Parse lambda-specifiers.
- std::vector<DeclaratorChunk::ParamInfo> EmptyParamInfo;
- ParseLambdaSpecifiers(/*LParenLoc=*/NoLoc, /*RParenLoc=*/NoLoc,
- EmptyParamInfo, /*EllipsisLoc=*/NoLoc);
+ HasSpecifiers =
+ Tok.isOneOf(tok::kw_mutable, tok::arrow, tok::kw___attribute,
+ tok::kw_constexpr, tok::kw_consteval, tok::kw_static,
+ tok::kw___private, tok::kw___global, tok::kw___local,
+ tok::kw___constant, tok::kw___generic, tok::kw_groupshared,
+ tok::kw_requires, tok::kw_noexcept) ||
+ Tok.isRegularKeywordAttribute() ||
+ (Tok.is(tok::l_square) && NextToken().is(tok::l_square));
+
+ if (HasSpecifiers && !HasParentheses && !getLangOpts().CPlusPlus23) {
+ // It's common to forget that one needs '()' before 'mutable', an
+ // attribute specifier, the result type, or the requires clause. Deal with
+ // this.
+ Diag(Tok, diag::ext_lambda_missing_parens)
+ << FixItHint::CreateInsertion(Tok.getLocation(), "() ");
}
+ if (HasParentheses || HasSpecifiers)
+ ParseConstexprAndMutableSpecifiers();
+
+ Actions.ActOnLambdaClosureParameters(getCurScope(), ParamInfo);
+
+ if (!HasParentheses)
+ Actions.ActOnLambdaClosureQualifiers(Intro, MutableLoc);
+
+ if (HasSpecifiers || HasParentheses)
+ ParseLambdaSpecifiers(ParamInfo, EllipsisLoc);
+
WarnIfHasCUDATargetAttr();
+ Prototype.Exit();
+
// FIXME: Rename BlockScope -> ClosureScope if we decide to continue using
// it.
unsigned ScopeFlags = Scope::BlockScope | Scope::FnScope | Scope::DeclScope |
Scope::CompoundStmtScope;
ParseScope BodyScope(this, ScopeFlags);
- Actions.ActOnStartOfLambdaDefinition(Intro, D, getCurScope());
+ Actions.ActOnStartOfLambdaDefinition(Intro, D, DS);
// Parse compound-statement.
if (!Tok.is(tok::l_brace)) {
@@ -1525,6 +1544,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
StmtResult Stmt(ParseCompoundStatementBody());
BodyScope.Exit();
TemplateParamScope.Exit();
+ LambdaScope.Exit();
if (!Stmt.isInvalid() && !TrailingReturnType.isInvalid())
return Actions.ActOnLambdaExpr(LambdaBeginLoc, Stmt.get(), getCurScope());
@@ -1978,7 +1998,7 @@ Parser::ParseAliasDeclarationInInitStatement(DeclaratorContext Context,
if (!DG)
return DG;
- Diag(DeclStart, !getLangOpts().CPlusPlus2b
+ Diag(DeclStart, !getLangOpts().CPlusPlus23
? diag::ext_alias_in_init_statement
: diag::warn_cxx20_alias_in_init_statement)
<< SourceRange(DeclStart, DeclEnd);
@@ -2120,8 +2140,6 @@ Parser::ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc,
DeclGroupPtrTy DG = ParseSimpleDeclaration(
DeclaratorContext::ForInit, DeclEnd, attrs, DeclSpecAttrs, false, FRI);
FRI->LoopVar = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
- assert((FRI->ColonLoc.isValid() || !DG) &&
- "cannot find for range declaration");
return Sema::ConditionResult();
}
@@ -2893,9 +2911,9 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
if (!Ty)
return true;
Result.setConstructorName(Ty, IdLoc, IdLoc);
- } else if (getLangOpts().CPlusPlus17 &&
- AllowDeductionGuide && SS.isEmpty() &&
- Actions.isDeductionGuideName(getCurScope(), *Id, IdLoc,
+ } else if (getLangOpts().CPlusPlus17 && AllowDeductionGuide &&
+ SS.isEmpty() &&
+ Actions.isDeductionGuideName(getCurScope(), *Id, IdLoc, SS,
&TemplateName)) {
// We have parsed a template-name naming a deduction guide.
Result.setDeductionGuideName(TemplateName, IdLoc);
@@ -3213,7 +3231,7 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
// A new-type-id is a simplified type-id, where essentially the
// direct-declarator is replaced by a direct-new-declarator.
MaybeParseGNUAttributes(DeclaratorInfo);
- if (ParseCXXTypeSpecifierSeq(DS))
+ if (ParseCXXTypeSpecifierSeq(DS, DeclaratorContext::CXXNew))
DeclaratorInfo.setInvalidType(true);
else {
DeclaratorInfo.SetSourceRange(DS.getSourceRange());
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp b/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp
index ebda84de6a97..4fc6a2203cec 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp
@@ -196,5 +196,5 @@ void Parser::ParseHLSLSemantics(ParsedAttributes &Attrs,
}
Attrs.addNew(II, Loc, nullptr, SourceLocation(), ArgExprs.data(),
- ArgExprs.size(), ParsedAttr::AS_HLSLSemantic);
+ ArgExprs.size(), ParsedAttr::Form::HLSLSemantic());
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp b/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
index af0c3b47958d..f52c04ba2c4d 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
@@ -15,6 +15,7 @@
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/Designator.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/STLExtras.h"
@@ -181,7 +182,8 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
NewSyntax);
Designation D;
- D.AddDesignator(Designator::getField(FieldName, SourceLocation(), NameLoc));
+ D.AddDesignator(Designator::CreateFieldDesignator(
+ FieldName, SourceLocation(), NameLoc));
PreferredType.enterDesignatedInitializer(
Tok.getLocation(), DesignatorCompletion.PreferredBaseType, D);
return Actions.ActOnDesignatedInitializer(D, ColonLoc, true,
@@ -210,8 +212,8 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
return ExprError();
}
- Desig.AddDesignator(Designator::getField(Tok.getIdentifierInfo(), DotLoc,
- Tok.getLocation()));
+ Desig.AddDesignator(Designator::CreateFieldDesignator(
+ Tok.getIdentifierInfo(), DotLoc, Tok.getLocation()));
ConsumeToken(); // Eat the identifier.
continue;
}
@@ -360,7 +362,8 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
// If this is a normal array designator, remember it.
if (Tok.isNot(tok::ellipsis)) {
- Desig.AddDesignator(Designator::getArray(Idx.get(), StartLoc));
+ Desig.AddDesignator(Designator::CreateArrayDesignator(Idx.get(),
+ StartLoc));
} else {
// Handle the gnu array range extension.
Diag(Tok, diag::ext_gnu_array_range);
@@ -371,9 +374,8 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
SkipUntil(tok::r_square, StopAtSemi);
return RHS;
}
- Desig.AddDesignator(Designator::getArrayRange(Idx.get(),
- RHS.get(),
- StartLoc, EllipsisLoc));
+ Desig.AddDesignator(Designator::CreateArrayRangeDesignator(
+ Idx.get(), RHS.get(), StartLoc, EllipsisLoc));
}
T.consumeClose();
@@ -429,7 +431,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
/// initializer: [C99 6.7.8]
/// '{' initializer-list '}'
/// '{' initializer-list ',' '}'
-/// [GNU] '{' '}'
+/// [C2x] '{' '}'
///
/// initializer-list:
/// designation[opt] initializer ...[opt]
@@ -447,9 +449,12 @@ ExprResult Parser::ParseBraceInitializer() {
ExprVector InitExprs;
if (Tok.is(tok::r_brace)) {
- // Empty initializers are a C++ feature and a GNU extension to C.
- if (!getLangOpts().CPlusPlus)
- Diag(LBraceLoc, diag::ext_gnu_empty_initializer);
+ // Empty initializers are a C++ feature and a GNU extension to C before C2x.
+ if (!getLangOpts().CPlusPlus) {
+ Diag(LBraceLoc, getLangOpts().C2x
+ ? diag::warn_c2x_compat_empty_initializer
+ : diag::ext_c_empty_initializer);
+ }
// Match the '}'.
return Actions.ActOnInitList(LBraceLoc, std::nullopt, ConsumeBrace());
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
index 079bf9a9c08c..b30f0380621a 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
@@ -153,6 +153,11 @@ Parser::ParseObjCAtClassDeclaration(SourceLocation atLoc) {
while (true) {
MaybeSkipAttributes(tok::objc_class);
+ if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
+ Actions.CodeCompleteObjCClassForwardDecl(getCurScope());
+ return Actions.ConvertDeclToDeclGroup(nullptr);
+ }
if (expectIdentifier()) {
SkipUntil(tok::semi);
return Actions.ConvertDeclToDeclGroup(nullptr);
@@ -408,7 +413,7 @@ static void addContextSensitiveTypeNullability(Parser &P,
auto getNullabilityAttr = [&](AttributePool &Pool) -> ParsedAttr * {
return Pool.create(P.getNullabilityKeyword(nullability),
SourceRange(nullabilityLoc), nullptr, SourceLocation(),
- nullptr, 0, ParsedAttr::AS_ContextSensitiveKeyword);
+ nullptr, 0, ParsedAttr::Form::ContextSensitiveKeyword());
};
if (D.getNumTypeObjects() > 0) {
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
index a31ceaeebd80..96d2e2cede62 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
@@ -19,6 +19,7 @@
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringSwitch.h"
@@ -2483,8 +2484,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
/// simd' | 'teams distribute parallel for simd' | 'teams distribute
/// parallel for' | 'target teams' | 'target teams distribute' | 'target
/// teams distribute parallel for' | 'target teams distribute parallel
-/// for simd' | 'target teams distribute simd' | 'masked' {clause}
-/// annot_pragma_openmp_end
+/// for simd' | 'target teams distribute simd' | 'masked' |
+/// 'parallel masked' {clause} annot_pragma_openmp_end
///
StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
ParsedStmtContext StmtCtx, bool ReadDirectiveWithinMetadirective) {
@@ -2923,17 +2924,20 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
// Consume final annot_pragma_openmp_end.
ConsumeAnnotationToken();
- // OpenMP [2.13.8, ordered Construct, Syntax]
- // If the depend clause is specified, the ordered construct is a stand-alone
- // directive.
- if (DKind == OMPD_ordered && FirstClauses[unsigned(OMPC_depend)].getInt()) {
- if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
- ParsedStmtContext()) {
- Diag(Loc, diag::err_omp_immediate_directive)
- << getOpenMPDirectiveName(DKind) << 1
- << getOpenMPClauseName(OMPC_depend);
+ if (DKind == OMPD_ordered) {
+ // If the depend or doacross clause is specified, the ordered construct
+ // is a stand-alone directive.
+ for (auto CK : {OMPC_depend, OMPC_doacross}) {
+ if (FirstClauses[unsigned(CK)].getInt()) {
+ if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
+ ParsedStmtContext()) {
+ Diag(Loc, diag::err_omp_immediate_directive)
+ << getOpenMPDirectiveName(DKind) << 1
+ << getOpenMPClauseName(CK);
+ }
+ HasAssociatedStatement = false;
+ }
}
- HasAssociatedStatement = false;
}
if (DKind == OMPD_tile && !FirstClauses[unsigned(OMPC_sizes)].getInt()) {
@@ -3102,8 +3106,13 @@ OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
return nullptr;
SmallVector<Sema::UsesAllocatorsData, 4> Data;
do {
+ CXXScopeSpec SS;
+ Token Replacement;
ExprResult Allocator =
- getLangOpts().CPlusPlus ? ParseCXXIdExpression() : ParseExpression();
+ getLangOpts().CPlusPlus
+ ? ParseCXXIdExpression()
+ : tryParseCXXIdExpression(SS, /*isAddressOfOperand=*/false,
+ Replacement);
if (Allocator.isInvalid()) {
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
@@ -3357,6 +3366,10 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_inclusive:
case OMPC_exclusive:
case OMPC_affinity:
+ case OMPC_doacross:
+ if (getLangOpts().OpenMP >= 52 && DKind == OMPD_ordered &&
+ CKind == OMPC_depend)
+ Diag(Tok, diag::warn_omp_depend_in_ordered_deprecated);
Clause = ParseOpenMPVarListClause(DKind, CKind, WrongDirective);
break;
case OMPC_sizes:
@@ -4360,7 +4373,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if (!InvalidReductionId)
Data.ReductionOrMapperId =
Actions.GetNameFromUnqualifiedId(UnqualifiedReductionId);
- } else if (Kind == OMPC_depend) {
+ } else if (Kind == OMPC_depend || Kind == OMPC_doacross) {
if (getLangOpts().OpenMP >= 50) {
if (Tok.is(tok::identifier) && PP.getSpelling(Tok) == "iterator") {
// Handle optional dependence modifier.
@@ -4383,13 +4396,16 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : "",
getLangOpts());
Data.ExtraModifierLoc = Tok.getLocation();
- if (Data.ExtraModifier == OMPC_DEPEND_unknown) {
+ if ((Kind == OMPC_depend && Data.ExtraModifier == OMPC_DEPEND_unknown) ||
+ (Kind == OMPC_doacross &&
+ Data.ExtraModifier == OMPC_DOACROSS_unknown)) {
SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
} else {
ConsumeToken();
// Special processing for depend(source) clause.
- if (DKind == OMPD_ordered && Data.ExtraModifier == OMPC_DEPEND_source) {
+ if (DKind == OMPD_ordered && Kind == OMPC_depend &&
+ Data.ExtraModifier == OMPC_DEPEND_source) {
// Parse ')'.
T.consumeClose();
return false;
@@ -4397,10 +4413,55 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
}
if (Tok.is(tok::colon)) {
Data.ColonLoc = ConsumeToken();
- } else {
+ } else if (Kind != OMPC_doacross || Tok.isNot(tok::r_paren)) {
Diag(Tok, DKind == OMPD_ordered ? diag::warn_pragma_expected_colon_r_paren
: diag::warn_pragma_expected_colon)
- << "dependency type";
+ << (Kind == OMPC_depend ? "dependency type" : "dependence-type");
+ }
+ if (Kind == OMPC_doacross) {
+ if (Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo()->isStr("omp_cur_iteration")) {
+ Data.ExtraModifier = Data.ExtraModifier == OMPC_DOACROSS_source
+ ? OMPC_DOACROSS_source_omp_cur_iteration
+ : OMPC_DOACROSS_sink_omp_cur_iteration;
+ ConsumeToken();
+ }
+ if (Data.ExtraModifier == OMPC_DOACROSS_sink_omp_cur_iteration) {
+ if (Tok.isNot(tok::minus)) {
+ Diag(Tok, diag::err_omp_sink_and_source_iteration_not_allowd)
+ << getOpenMPClauseName(Kind) << 0 << 0;
+ SkipUntil(tok::r_paren);
+ return false;
+ } else {
+ ConsumeToken();
+ SourceLocation Loc = Tok.getLocation();
+ uint64_t Value = 0;
+ if (Tok.isNot(tok::numeric_constant) ||
+ (PP.parseSimpleIntegerLiteral(Tok, Value) && Value != 1)) {
+ Diag(Loc, diag::err_omp_sink_and_source_iteration_not_allowd)
+ << getOpenMPClauseName(Kind) << 0 << 0;
+ SkipUntil(tok::r_paren);
+ return false;
+ }
+ }
+ }
+ if (Data.ExtraModifier == OMPC_DOACROSS_source_omp_cur_iteration) {
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_omp_sink_and_source_iteration_not_allowd)
+ << getOpenMPClauseName(Kind) << 1 << 1;
+ SkipUntil(tok::r_paren);
+ return false;
+ }
+ }
+ // Only the 'sink' case has the expression list.
+ if (Kind == OMPC_doacross &&
+ (Data.ExtraModifier == OMPC_DOACROSS_source ||
+ Data.ExtraModifier == OMPC_DOACROSS_source_omp_cur_iteration ||
+ Data.ExtraModifier == OMPC_DOACROSS_sink_omp_cur_iteration)) {
+ // Parse ')'.
+ T.consumeClose();
+ return false;
+ }
}
} else if (Kind == OMPC_linear) {
// Try to parse modifier if any.
@@ -4579,10 +4640,12 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
bool IsComma =
(Kind != OMPC_reduction && Kind != OMPC_task_reduction &&
- Kind != OMPC_in_reduction && Kind != OMPC_depend && Kind != OMPC_map) ||
+ Kind != OMPC_in_reduction && Kind != OMPC_depend &&
+ Kind != OMPC_doacross && Kind != OMPC_map) ||
(Kind == OMPC_reduction && !InvalidReductionId) ||
(Kind == OMPC_map && Data.ExtraModifier != OMPC_MAP_unknown) ||
(Kind == OMPC_depend && Data.ExtraModifier != OMPC_DEPEND_unknown) ||
+ (Kind == OMPC_doacross && Data.ExtraModifier != OMPC_DOACROSS_unknown) ||
(Kind == OMPC_adjust_args &&
Data.ExtraModifier != OMPC_ADJUST_ARGS_unknown);
const bool MayHaveTail = (Kind == OMPC_linear || Kind == OMPC_aligned);
@@ -4640,7 +4703,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
// Exit from scope when the iterator is used in depend clause.
if (HasIterator)
ExitScope();
- return (Kind != OMPC_depend && Kind != OMPC_map && Vars.empty()) ||
+ return (Kind != OMPC_depend && Kind != OMPC_doacross && Kind != OMPC_map &&
+ Vars.empty()) ||
(MustHaveTail && !Data.DepModOrTailExpr) || InvalidReductionId ||
IsInvalidMapperModifier || InvalidIterator;
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
index 658853d42b74..b3178aef64d7 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
@@ -19,6 +19,7 @@
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
@@ -665,18 +666,10 @@ void Parser::HandlePragmaVisibility() {
Actions.ActOnPragmaVisibility(VisType, VisLoc);
}
-namespace {
-struct PragmaPackInfo {
- Sema::PragmaMsStackAction Action;
- StringRef SlotLabel;
- Token Alignment;
-};
-} // end anonymous namespace
-
void Parser::HandlePragmaPack() {
assert(Tok.is(tok::annot_pragma_pack));
- PragmaPackInfo *Info =
- static_cast<PragmaPackInfo *>(Tok.getAnnotationValue());
+ Sema::PragmaPackInfo *Info =
+ static_cast<Sema::PragmaPackInfo *>(Tok.getAnnotationValue());
SourceLocation PragmaLoc = Tok.getLocation();
ExprResult Alignment;
if (Info->Alignment.is(tok::numeric_constant)) {
@@ -714,10 +707,36 @@ void Parser::HandlePragmaAlign() {
void Parser::HandlePragmaDump() {
assert(Tok.is(tok::annot_pragma_dump));
- IdentifierInfo *II =
- reinterpret_cast<IdentifierInfo *>(Tok.getAnnotationValue());
- Actions.ActOnPragmaDump(getCurScope(), Tok.getLocation(), II);
ConsumeAnnotationToken();
+ if (Tok.is(tok::eod)) {
+ PP.Diag(Tok, diag::warn_pragma_debug_missing_argument) << "dump";
+ } else if (NextToken().is(tok::eod)) {
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok, diag::warn_pragma_debug_unexpected_argument);
+ ConsumeAnyToken();
+ ExpectAndConsume(tok::eod);
+ return;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ Actions.ActOnPragmaDump(getCurScope(), Tok.getLocation(), II);
+ ConsumeToken();
+ } else {
+ SourceLocation StartLoc = Tok.getLocation();
+ EnterExpressionEvaluationContext Ctx(
+ Actions, Sema::ExpressionEvaluationContext::Unevaluated);
+ ExprResult E = ParseExpression();
+ if (!E.isUsable() || E.get()->containsErrors()) {
+ // Diagnostics were emitted during parsing. No action needed.
+ } else if (E.get()->getDependence() != ExprDependence::None) {
+ PP.Diag(StartLoc, diag::warn_pragma_debug_dependent_argument)
+ << E.get()->isTypeDependent()
+ << SourceRange(StartLoc, Tok.getLocation());
+ } else {
+ Actions.ActOnPragmaDump(E.get());
+ }
+ SkipUntil(tok::eod, StopBeforeMatch);
+ }
+ ExpectAndConsume(tok::eod);
}
void Parser::HandlePragmaWeak() {
@@ -1800,7 +1819,8 @@ void Parser::HandlePragmaAttribute() {
ConsumeToken();
};
- if (Tok.is(tok::l_square) && NextToken().is(tok::l_square)) {
+ if ((Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
+ Tok.isRegularKeywordAttribute()) {
// Parse the CXX11 style attribute.
ParseCXX11AttributeSpecifier(Attrs);
} else if (Tok.is(tok::kw___attribute)) {
@@ -1832,11 +1852,12 @@ void Parser::HandlePragmaAttribute() {
if (Tok.isNot(tok::l_paren))
Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_GNU);
+ ParsedAttr::Form::GNU());
else
ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, /*EndLoc=*/nullptr,
/*ScopeName=*/nullptr,
- /*ScopeLoc=*/SourceLocation(), ParsedAttr::AS_GNU,
+ /*ScopeLoc=*/SourceLocation(),
+ ParsedAttr::Form::GNU(),
/*Declarator=*/nullptr);
} while (TryConsumeToken(tok::comma));
@@ -2110,8 +2131,8 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP,
return;
}
- PragmaPackInfo *Info =
- PP.getPreprocessorAllocator().Allocate<PragmaPackInfo>(1);
+ Sema::PragmaPackInfo *Info =
+ PP.getPreprocessorAllocator().Allocate<Sema::PragmaPackInfo>(1);
Info->Action = Action;
Info->SlotLabel = SlotLabel;
Info->Alignment = Alignment;
@@ -4005,6 +4026,7 @@ void PragmaMaxTokensTotalHandler::HandlePragma(Preprocessor &PP,
}
// Handle '#pragma clang riscv intrinsic vector'.
+// '#pragma clang riscv intrinsic sifive_vector'.
void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,
PragmaIntroducer Introducer,
Token &FirstToken) {
@@ -4020,9 +4042,10 @@ void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,
PP.Lex(Tok);
II = Tok.getIdentifierInfo();
- if (!II || !II->isStr("vector")) {
+ if (!II || !(II->isStr("vector") || II->isStr("sifive_vector"))) {
PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_argument)
- << PP.getSpelling(Tok) << "riscv" << /*Expected=*/true << "'vector'";
+ << PP.getSpelling(Tok) << "riscv" << /*Expected=*/true
+ << "'vector' or 'sifive_vector'";
return;
}
@@ -4033,5 +4056,8 @@ void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,
return;
}
- Actions.DeclareRISCVVBuiltins = true;
+ if (II->isStr("vector"))
+ Actions.DeclareRISCVVBuiltins = true;
+ else if (II->isStr("sifive_vector"))
+ Actions.DeclareRISCVSiFiveVectorBuiltins = true;
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
index 1c8441fafc48..2346470dbdb7 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
@@ -19,6 +19,7 @@
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/STLExtras.h"
@@ -334,7 +335,12 @@ Retry:
case tok::kw_asm: {
for (const ParsedAttr &AL : CXX11Attrs)
- Diag(AL.getRange().getBegin(), diag::warn_attribute_ignored) << AL;
+ // Could be relaxed if asm-related regular keyword attributes are
+ // added later.
+ (AL.isRegularKeywordAttribute()
+ ? Diag(AL.getRange().getBegin(), diag::err_keyword_not_allowed)
+ : Diag(AL.getRange().getBegin(), diag::warn_attribute_ignored))
+ << AL;
// Prevent these from being interpreted as statement attributes later on.
CXX11Attrs.clear();
ProhibitAttributes(GNUAttrs);
@@ -543,9 +549,22 @@ StmtResult Parser::ParseExprStatement(ParsedStmtContext StmtCtx) {
return ParseCaseStatement(StmtCtx, /*MissingCase=*/true, Expr);
}
- // Otherwise, eat the semicolon.
- ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
- return handleExprStmt(Expr, StmtCtx);
+ Token *CurTok = nullptr;
+ // If the semicolon is missing at the end of REPL input, consider if
+ // we want to do value printing. Note this is only enabled in C++ mode
+ // since part of the implementation requires C++ language features.
+ // Note we shouldn't eat the token since the callback needs it.
+ if (Tok.is(tok::annot_repl_input_end) && Actions.getLangOpts().CPlusPlus)
+ CurTok = &Tok;
+ else
+ // Otherwise, eat the semicolon.
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
+
+ StmtResult R = handleExprStmt(Expr, StmtCtx);
+ if (CurTok && !R.isInvalid())
+ CurTok->setAnnotationValue(R.get());
+
+ return R;
}
/// ParseSEHTryBlockCommon
@@ -1052,7 +1071,7 @@ void Parser::ParseCompoundStatementLeadingPragmas() {
void Parser::DiagnoseLabelAtEndOfCompoundStatement() {
if (getLangOpts().CPlusPlus) {
- Diag(Tok, getLangOpts().CPlusPlus2b
+ Diag(Tok, getLangOpts().CPlusPlus23
? diag::warn_cxx20_compat_label_end_of_compound_statement
: diag::ext_cxx_label_end_of_compound_statement);
} else {
@@ -1100,7 +1119,7 @@ StmtResult Parser::handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx) {
++LookAhead;
}
// Then look to see if the next two tokens close the statement expression;
- // if so, this expression statement is the last statement in a statment
+ // if so, this expression statement is the last statement in a statement
// expression.
IsStmtExprResult = GetLookAheadToken(LookAhead).is(tok::r_brace) &&
GetLookAheadToken(LookAhead + 1).is(tok::r_paren);
@@ -1456,7 +1475,7 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
}
if (Tok.is(tok::kw_consteval)) {
- Diag(Tok, getLangOpts().CPlusPlus2b ? diag::warn_cxx20_compat_consteval_if
+ Diag(Tok, getLangOpts().CPlusPlus23 ? diag::warn_cxx20_compat_consteval_if
: diag::ext_consteval_if);
IsConsteval = true;
ConstevalLoc = ConsumeToken();
@@ -1606,7 +1625,7 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
IfScope.Exit();
// If the then or else stmt is invalid and the other is valid (and present),
- // make turn the invalid one into a null stmt to avoid dropping the other
+ // turn the invalid one into a null stmt to avoid dropping the other
// part. If both are invalid, return error.
if ((ThenStmt.isInvalid() && ElseStmt.isInvalid()) ||
(ThenStmt.isInvalid() && ElseStmt.get() == nullptr) ||
@@ -1617,7 +1636,7 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
if (IsConsteval) {
auto IsCompoundStatement = [](const Stmt *S) {
- if (const auto *Outer = dyn_cast_or_null<AttributedStmt>(S))
+ if (const auto *Outer = dyn_cast_if_present<AttributedStmt>(S))
S = Outer->getSubStmt();
return isa_and_nonnull<clang::CompoundStmt>(S);
};
@@ -1928,7 +1947,7 @@ bool Parser::isForRangeIdentifier() {
/// [C++] for-init-statement:
/// [C++] expression-statement
/// [C++] simple-declaration
-/// [C++2b] alias-declaration
+/// [C++23] alias-declaration
///
/// [C++0x] for-range-declaration:
/// [C++0x] attribute-specifier-seq[opt] type-specifier-seq declarator
@@ -2035,15 +2054,15 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
Diag(Tok, diag::warn_gcc_variable_decl_in_for_loop);
}
DeclGroupPtrTy DG;
+ SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
if (Tok.is(tok::kw_using)) {
DG = ParseAliasDeclarationInInitStatement(DeclaratorContext::ForInit,
attrs);
+ FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
} else {
// In C++0x, "for (T NS:a" might not be a typo for ::
bool MightBeForRangeStmt = getLangOpts().CPlusPlus;
ColonProtectionRAIIObject ColonProtection(*this, MightBeForRangeStmt);
-
- SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
ParsedAttributes DeclSpecAttrs(AttrFactory);
DG = ParseSimpleDeclaration(
DeclaratorContext::ForInit, DeclEnd, attrs, DeclSpecAttrs, false,
@@ -2183,9 +2202,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
if (Tok.isNot(tok::semi)) {
if (!SecondPart.isInvalid())
Diag(Tok, diag::err_expected_semi_for);
- else
- // Skip until semicolon or rparen, don't consume it.
- SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
+ SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
}
if (Tok.is(tok::semi)) {
@@ -2411,7 +2428,7 @@ StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts,
ArgsUnion(Hint.ValueExpr)};
TempAttrs.addNew(Hint.PragmaNameLoc->Ident, Hint.Range, nullptr,
Hint.PragmaNameLoc->Loc, ArgHints, 4,
- ParsedAttr::AS_Pragma);
+ ParsedAttr::Form::Pragma());
}
// Get the next statement.
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
index 6fc67b6965dd..fc661d21b63a 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
@@ -17,6 +17,7 @@
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaDiagnostic.h"
@@ -209,7 +210,15 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
}
ParsedAttributes prefixAttrs(AttrFactory);
- MaybeParseCXX11Attributes(prefixAttrs);
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
+
+ // GNU attributes are applied to the declaration specification while the
+ // standard attributes are applied to the declaration. We parse the two
+ // attribute sets into different containters so we can apply them during
+ // the regular parsing process.
+ while (MaybeParseCXX11Attributes(prefixAttrs) ||
+ MaybeParseGNUAttributes(DeclSpecAttrs))
+ ;
if (Tok.is(tok::kw_using)) {
auto usingDeclPtr = ParseUsingDirectiveOrDeclaration(Context, TemplateInfo, DeclEnd,
@@ -222,6 +231,9 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
// Parse the declaration specifiers, stealing any diagnostics from
// the template parameters.
ParsingDeclSpec DS(*this, &DiagsFromTParams);
+ DS.SetRangeStart(DeclSpecAttrs.Range.getBegin());
+ DS.SetRangeEnd(DeclSpecAttrs.Range.getEnd());
+ DS.takeAttributesFrom(DeclSpecAttrs);
ParseDeclarationSpecifiers(DS, TemplateInfo, AS,
getDeclSpecContextFromDeclaratorContext(Context));
@@ -274,19 +286,10 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
// Error parsing the declarator?
if (!DeclaratorInfo.hasName()) {
- // If so, skip until the semi-colon or a }.
- SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
- if (Tok.is(tok::semi))
- ConsumeToken();
+ SkipMalformedDecl();
return nullptr;
}
- llvm::TimeTraceScope TimeScope("ParseTemplate", [&]() {
- return std::string(DeclaratorInfo.getIdentifier() != nullptr
- ? DeclaratorInfo.getIdentifier()->getName()
- : "<unknown>");
- });
-
LateParsedAttrList LateParsedAttrs(true);
if (DeclaratorInfo.isFunctionDeclarator()) {
if (Tok.is(tok::kw_requires)) {
@@ -849,10 +852,17 @@ NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
// we introduce the type parameter into the local scope.
SourceLocation EqualLoc;
ParsedType DefaultArg;
- if (TryConsumeToken(tok::equal, EqualLoc))
+ if (TryConsumeToken(tok::equal, EqualLoc)) {
+ // The default argument may declare template parameters, notably
+ // if it contains a generic lambda, so we need to increase
+ // the template depth as these parameters would not be instantiated
+ // at the current level.
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+ ++CurTemplateDepthTracker;
DefaultArg =
ParseTypeName(/*Range=*/nullptr, DeclaratorContext::TemplateTypeArg)
.get();
+ }
NamedDecl *NewDecl = Actions.ActOnTypeParameter(getCurScope(),
TypenameKeyword, EllipsisLoc,
@@ -1038,6 +1048,14 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
// end of the template-parameter-list rather than a greater-than
// operator.
GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
+
+ // The default argument may declare template parameters, notably
+ // if it contains a generic lambda, so we need to increase
+ // the template depth as these parameters would not be instantiated
+ // at the current level.
+ TemplateParameterDepthRAII CurTemplateDepthTracker(
+ TemplateParameterDepth);
+ ++CurTemplateDepthTracker;
EnterExpressionEvaluationContext ConstantEvaluated(
Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
DefaultArg =
@@ -1724,6 +1742,11 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
Actions.PushDeclContext(Actions.getCurScope(), DC);
}
+ // Parsing should occur with empty FP pragma stack and FP options used in the
+ // point of the template definition.
+ Sema::FpPragmaStackSaveRAII SavedStack(Actions);
+ Actions.resetFPOptions(LPT.FPO);
+
assert(!LPT.Toks.empty() && "Empty body!");
// Append the current token at the end of the new token stream so that it
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp b/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
index 785749bff65a..b7c83bbeb82e 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
@@ -74,9 +74,8 @@ bool Parser::isCXXDeclarationStatement(
switch (Tok.getKind()) {
case tok::identifier: {
IdentifierInfo *II = Tok.getIdentifierInfo();
- bool isDeductionGuide =
- Actions.isDeductionGuideName(getCurScope(), *II, Tok.getLocation(),
- /*Template=*/nullptr);
+ bool isDeductionGuide = Actions.isDeductionGuideName(
+ getCurScope(), *II, Tok.getLocation(), SS, /*Template=*/nullptr);
if (Actions.isCurrentClassName(*II, getCurScope(), &SS) ||
isDeductionGuide) {
if (isConstructorDeclarator(/*Unqualified=*/SS.isEmpty(),
@@ -88,10 +87,8 @@ bool Parser::isCXXDeclarationStatement(
}
case tok::kw_operator:
return true;
- case tok::annot_cxxscope: // Check if this is a dtor.
- if (NextToken().is(tok::tilde))
- return true;
- break;
+ case tok::tilde:
+ return true;
default:
break;
}
@@ -265,6 +262,7 @@ Parser::TPResult Parser::TryConsumeDeclarationSpecifier() {
/// attribute-specifier-seqopt type-specifier-seq declarator
///
Parser::TPResult Parser::TryParseSimpleDeclaration(bool AllowForRangeDecl) {
+ bool DeclSpecifierIsAuto = Tok.is(tok::kw_auto);
if (TryConsumeDeclarationSpecifier() == TPResult::Error)
return TPResult::Error;
@@ -280,7 +278,8 @@ Parser::TPResult Parser::TryParseSimpleDeclaration(bool AllowForRangeDecl) {
assert(TPR == TPResult::False);
}
- TPResult TPR = TryParseInitDeclaratorList();
+ TPResult TPR = TryParseInitDeclaratorList(
+ /*mayHaveTrailingReturnType=*/DeclSpecifierIsAuto);
if (TPR != TPResult::Ambiguous)
return TPR;
@@ -317,10 +316,15 @@ Parser::TPResult Parser::TryParseSimpleDeclaration(bool AllowForRangeDecl) {
/// '{' initializer-list ','[opt] '}'
/// '{' '}'
///
-Parser::TPResult Parser::TryParseInitDeclaratorList() {
+Parser::TPResult
+Parser::TryParseInitDeclaratorList(bool MayHaveTrailingReturnType) {
while (true) {
// declarator
- TPResult TPR = TryParseDeclarator(false/*mayBeAbstract*/);
+ TPResult TPR = TryParseDeclarator(
+ /*mayBeAbstract=*/false,
+ /*mayHaveIdentifier=*/true,
+ /*mayHaveDirectInit=*/false,
+ /*mayHaveTrailingReturnType=*/MayHaveTrailingReturnType);
if (TPR != TPResult::Ambiguous)
return TPR;
@@ -535,13 +539,18 @@ Parser::isCXXConditionDeclarationOrInitStatement(bool CanBeInitStatement,
RevertingTentativeParsingAction PA(*this);
// FIXME: A tag definition unambiguously tells us this is an init-statement.
+ bool MayHaveTrailingReturnType = Tok.is(tok::kw_auto);
if (State.update(TryConsumeDeclarationSpecifier()))
return State.result();
assert(Tok.is(tok::l_paren) && "Expected '('");
while (true) {
// Consume a declarator.
- if (State.update(TryParseDeclarator(false/*mayBeAbstract*/)))
+ if (State.update(TryParseDeclarator(
+ /*mayBeAbstract=*/false,
+ /*mayHaveIdentifier=*/true,
+ /*mayHaveDirectInit=*/false,
+ /*mayHaveTrailingReturnType=*/MayHaveTrailingReturnType)))
return State.result();
// Attributes, asm label, or an initializer imply this is not an expression.
@@ -626,13 +635,16 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
// We need tentative parsing...
RevertingTentativeParsingAction PA(*this);
+ bool MayHaveTrailingReturnType = Tok.is(tok::kw_auto);
// type-specifier-seq
TryConsumeDeclarationSpecifier();
assert(Tok.is(tok::l_paren) && "Expected '('");
// declarator
- TPR = TryParseDeclarator(true/*mayBeAbstract*/, false/*mayHaveIdentifier*/);
+ TPR = TryParseDeclarator(true /*mayBeAbstract*/, false /*mayHaveIdentifier*/,
+ /*mayHaveDirectInit=*/false,
+ MayHaveTrailingReturnType);
// In case of an error, let the declaration parsing code handle it.
if (TPR == TPResult::Error)
@@ -644,7 +656,12 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
if (Context == TypeIdInParens && Tok.is(tok::r_paren)) {
TPR = TPResult::True;
isAmbiguous = true;
-
+ // We are supposed to be inside the first operand to a _Generic selection
+ // expression, so if we find a comma after the declarator, we've found a
+ // type and not an expression.
+ } else if (Context == TypeIdAsGenericSelectionArgument && Tok.is(tok::comma)) {
+ TPR = TPResult::True;
+ isAmbiguous = true;
// We are supposed to be inside a template argument, so if after
// the abstract declarator we encounter a '>', '>>' (in C++0x), or
// ','; or, in C++0x, an ellipsis immediately preceding such, this
@@ -661,6 +678,9 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
TPR = TPResult::True;
isAmbiguous = true;
+ } else if (Context == TypeIdInTrailingReturnType) {
+ TPR = TPResult::True;
+ isAmbiguous = true;
} else
TPR = TPResult::False;
}
@@ -708,6 +728,9 @@ Parser::isCXX11AttributeSpecifier(bool Disambiguate,
if (Tok.is(tok::kw_alignas))
return CAK_AttributeSpecifier;
+ if (Tok.isRegularKeywordAttribute())
+ return CAK_AttributeSpecifier;
+
if (Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square))
return CAK_NotAttributeSpecifier;
@@ -847,7 +870,8 @@ Parser::isCXX11AttributeSpecifier(bool Disambiguate,
bool Parser::TrySkipAttributes() {
while (Tok.isOneOf(tok::l_square, tok::kw___attribute, tok::kw___declspec,
- tok::kw_alignas)) {
+ tok::kw_alignas) ||
+ Tok.isRegularKeywordAttribute()) {
if (Tok.is(tok::l_square)) {
ConsumeBracket();
if (Tok.isNot(tok::l_square))
@@ -858,6 +882,8 @@ bool Parser::TrySkipAttributes() {
// Note that explicitly checking for `[[` and `]]` allows to fail as
// expected in the case of the Objective-C message send syntax.
ConsumeBracket();
+ } else if (Tok.isRegularKeywordAttribute()) {
+ ConsumeToken();
} else {
ConsumeToken();
if (Tok.isNot(tok::l_paren))
@@ -1045,7 +1071,8 @@ Parser::TPResult Parser::TryParseOperatorId() {
///
Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
bool mayHaveIdentifier,
- bool mayHaveDirectInit) {
+ bool mayHaveDirectInit,
+ bool mayHaveTrailingReturnType) {
// declarator:
// direct-declarator
// ptr-operator declarator
@@ -1087,7 +1114,7 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
ImplicitTypenameContext::No))) { // 'int(int)' is a function.
// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
// exception-specification[opt]
- TPResult TPR = TryParseFunctionDeclarator();
+ TPResult TPR = TryParseFunctionDeclarator(mayHaveTrailingReturnType);
if (TPR != TPResult::Ambiguous)
return TPR;
} else {
@@ -1126,7 +1153,7 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
// direct-declarator '(' parameter-declaration-clause ')'
// cv-qualifier-seq[opt] exception-specification[opt]
ConsumeParen();
- TPR = TryParseFunctionDeclarator();
+ TPR = TryParseFunctionDeclarator(mayHaveTrailingReturnType);
} else if (Tok.is(tok::l_square)) {
// direct-declarator '[' constant-expression[opt] ']'
// direct-abstract-declarator[opt] '[' constant-expression[opt] ']'
@@ -1393,6 +1420,16 @@ Parser::isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
return isCXXDeclarationSpecifier(ImplicitTypenameContext::Yes,
BracedCastResult, InvalidAsDeclSpec);
+ case tok::kw_auto: {
+ if (!getLangOpts().CPlusPlus23)
+ return TPResult::True;
+ if (NextToken().is(tok::l_brace))
+ return TPResult::False;
+ if (NextToken().is(tok::l_paren))
+ return TPResult::Ambiguous;
+ return TPResult::True;
+ }
+
case tok::coloncolon: { // ::foo::bar
const Token &Next = NextToken();
if (Next.isOneOf(tok::kw_new, // ::new
@@ -1426,7 +1463,6 @@ Parser::isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
case tok::kw_static:
case tok::kw_extern:
case tok::kw_mutable:
- case tok::kw_auto:
case tok::kw___thread:
case tok::kw_thread_local:
case tok::kw__Thread_local:
@@ -1511,6 +1547,10 @@ Parser::isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
case tok::kw___kindof:
return TPResult::True;
+ // WebAssemblyFuncref
+ case tok::kw___funcref:
+ return TPResult::True;
+
// Borland
case tok::kw___pascal:
return TPResult::True;
@@ -1616,7 +1656,10 @@ Parser::isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
if (getLangOpts().CPlusPlus17) {
if (TryAnnotateTypeOrScopeToken())
return TPResult::Error;
- if (Tok.isNot(tok::identifier))
+ // If we annotated then the current token should not still be ::
+ // FIXME we may want to also check for tok::annot_typename but
+ // currently don't have a test case.
+ if (Tok.isNot(tok::annot_cxxscope))
break;
}
@@ -2001,6 +2044,7 @@ Parser::TPResult Parser::TryParseParameterDeclarationClause(
return TPR;
bool SeenType = false;
+ bool DeclarationSpecifierIsAuto = Tok.is(tok::kw_auto);
do {
SeenType |= isCXXDeclarationSpecifierAType();
if (TryConsumeDeclarationSpecifier() == TPResult::Error)
@@ -2022,7 +2066,11 @@ Parser::TPResult Parser::TryParseParameterDeclarationClause(
// declarator
// abstract-declarator[opt]
- TPR = TryParseDeclarator(true/*mayBeAbstract*/);
+ TPR = TryParseDeclarator(
+ /*mayBeAbstract=*/true,
+ /*mayHaveIdentifier=*/true,
+ /*mayHaveDirectInit=*/false,
+ /*mayHaveTrailingReturnType=*/DeclarationSpecifierIsAuto);
if (TPR != TPResult::Ambiguous)
return TPR;
@@ -2076,7 +2124,8 @@ Parser::TPResult Parser::TryParseParameterDeclarationClause(
/// exception-specification:
/// 'throw' '(' type-id-list[opt] ')'
///
-Parser::TPResult Parser::TryParseFunctionDeclarator() {
+Parser::TPResult
+Parser::TryParseFunctionDeclarator(bool MayHaveTrailingReturnType) {
// The '(' is already parsed.
TPResult TPR = TryParseParameterDeclarationClause();
@@ -2121,9 +2170,52 @@ Parser::TPResult Parser::TryParseFunctionDeclarator() {
}
}
+ // attribute-specifier-seq
+ if (!TrySkipAttributes())
+ return TPResult::Ambiguous;
+
+ // trailing-return-type
+ if (Tok.is(tok::arrow) && MayHaveTrailingReturnType) {
+ if (TPR == TPResult::True)
+ return TPR;
+ ConsumeToken();
+ if (Tok.is(tok::identifier) && NameAfterArrowIsNonType()) {
+ return TPResult::False;
+ }
+ if (isCXXTypeId(TentativeCXXTypeIdContext::TypeIdInTrailingReturnType))
+ return TPResult::True;
+ }
+
return TPResult::Ambiguous;
}
+// When parsing an identifier after an arrow it may be a member expression,
+// in which case we should not annotate it as an independent expression,
+// so we just look up that name; if it's not a type, the construct is not
+// a function declaration.
+bool Parser::NameAfterArrowIsNonType() {
+ assert(Tok.is(tok::identifier));
+ Token Next = NextToken();
+ if (Next.is(tok::coloncolon))
+ return false;
+ IdentifierInfo *Name = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = Tok.getLocation();
+ CXXScopeSpec SS;
+ TentativeParseCCC CCC(Next);
+ Sema::NameClassification Classification =
+ Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next, &CCC);
+ switch (Classification.getKind()) {
+ case Sema::NC_OverloadSet:
+ case Sema::NC_NonType:
+ case Sema::NC_VarTemplate:
+ case Sema::NC_FunctionTemplate:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
/// '[' constant-expression[opt] ']'
///
Parser::TPResult Parser::TryParseBracketDeclarator() {
diff --git a/contrib/llvm-project/clang/lib/Parse/Parser.cpp b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
index 6db3dc3156fd..b1ccbeb99e58 100644
--- a/contrib/llvm-project/clang/lib/Parse/Parser.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ASTLambda.h"
#include "clang/Basic/FileManager.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/RAIIObjectsForParser.h"
@@ -319,6 +320,7 @@ bool Parser::SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags) {
case tok::annot_module_begin:
case tok::annot_module_end:
case tok::annot_module_include:
+ case tok::annot_repl_input_end:
// Stop before we change submodules. They generally indicate a "good"
// place to pick up parsing again (except in the special case where
// we're trying to skip to EOF).
@@ -522,7 +524,8 @@ void Parser::Initialize() {
Ident_strict = nullptr;
Ident_replacement = nullptr;
- Ident_language = Ident_defined_in = Ident_generated_declaration = nullptr;
+ Ident_language = Ident_defined_in = Ident_generated_declaration = Ident_USR =
+ nullptr;
Ident__except = nullptr;
@@ -612,11 +615,6 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result,
Sema::ModuleImportState &ImportState) {
DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(*this);
- // Skip over the EOF token, flagging end of previous input for incremental
- // processing
- if (PP.isIncrementalProcessingEnabled() && Tok.is(tok::eof))
- ConsumeToken();
-
Result = nullptr;
switch (Tok.getKind()) {
case tok::annot_pragma_unused:
@@ -629,8 +627,8 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result,
goto module_decl;
// Note: no need to handle kw_import here. We only form kw_import under
- // the Modules TS, and in that case 'export import' is parsed as an
- // export-declaration containing an import-declaration.
+ // the Standard C++ Modules, and in that case 'export import' is parsed as
+ // an export-declaration containing an import-declaration.
// Recognize context-sensitive C++20 'export module' and 'export import'
// declarations.
@@ -695,6 +693,7 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result,
return false;
case tok::eof:
+ case tok::annot_repl_input_end:
// Check whether -fmax-tokens= was reached.
if (PP.getMaxTokens() != 0 && PP.getTokenCount() > PP.getMaxTokens()) {
PP.Diag(Tok.getLocation(), diag::warn_max_tokens_total)
@@ -785,7 +784,7 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result,
///
/// [C++0x/GNU] 'extern' 'template' declaration
///
-/// [Modules-TS] module-import-declaration
+/// [C++20] module-import-declaration
///
Parser::DeclGroupPtrTy
Parser::ParseExternalDeclaration(ParsedAttributes &Attrs,
@@ -937,7 +936,7 @@ Parser::ParseExternalDeclaration(ParsedAttributes &Attrs,
SingleDecl = ParseModuleImport(SourceLocation(), IS);
} break;
case tok::kw_export:
- if (getLangOpts().CPlusPlusModules || getLangOpts().ModulesTS) {
+ if (getLangOpts().CPlusPlusModules) {
ProhibitAttributes(Attrs);
SingleDecl = ParseExportDeclaration();
break;
@@ -1404,6 +1403,17 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
if (BodyKind == Sema::FnBodyKind::Other)
SkipFunctionBody();
+ // ExpressionEvaluationContext is pushed in ActOnStartOfFunctionDef
+ // and it would be popped in ActOnFinishFunctionBody.
+ // We pop it explicitly here since ActOnFinishFunctionBody won't get called.
+ //
+ // Do not call PopExpressionEvaluationContext() if it is a lambda because
+ // one is already popped when finishing the lambda in BuildLambdaExpr().
+ //
+ // FIXME: It does not seem easy to balance PushExpressionEvaluationContext()
+ // and PopExpressionEvaluationContext().
+ if (!isLambdaCallOperator(dyn_cast_if_present<FunctionDecl>(Res)))
+ Actions.PopExpressionEvaluationContext();
return Res;
}
@@ -1870,31 +1880,25 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC,
return ANK_TemplateName;
}
[[fallthrough]];
+ case Sema::NC_Concept:
case Sema::NC_VarTemplate:
case Sema::NC_FunctionTemplate:
case Sema::NC_UndeclaredTemplate: {
- // We have a type, variable or function template followed by '<'.
- ConsumeToken();
- UnqualifiedId Id;
- Id.setIdentifier(Name, NameLoc);
- if (AnnotateTemplateIdToken(
- TemplateTy::make(Classification.getTemplateName()),
- Classification.getTemplateNameKind(), SS, SourceLocation(), Id))
- return ANK_Error;
- return ANK_Success;
- }
- case Sema::NC_Concept: {
- UnqualifiedId Id;
- Id.setIdentifier(Name, NameLoc);
+ bool IsConceptName = Classification.getKind() == Sema::NC_Concept;
+ // We have a template name followed by '<'. Consume the identifier token so
+ // we reach the '<' and annotate it.
if (Next.is(tok::less))
- // We have a concept name followed by '<'. Consume the identifier token so
- // we reach the '<' and annotate it.
ConsumeToken();
+ UnqualifiedId Id;
+ Id.setIdentifier(Name, NameLoc);
if (AnnotateTemplateIdToken(
TemplateTy::make(Classification.getTemplateName()),
Classification.getTemplateNameKind(), SS, SourceLocation(), Id,
- /*AllowTypeAnnotation=*/false, /*TypeConstraint=*/true))
+ /*AllowTypeAnnotation=*/!IsConceptName,
+ /*TypeConstraint=*/IsConceptName))
return ANK_Error;
+ if (SS.isNotEmpty())
+ AnnotateScopeToken(SS, !WasScopeAnnotation);
return ANK_Success;
}
}
@@ -1943,7 +1947,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(
assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope) ||
Tok.is(tok::kw_decltype) || Tok.is(tok::annot_template_id) ||
- Tok.is(tok::kw___super)) &&
+ Tok.is(tok::kw___super) || Tok.is(tok::kw_auto)) &&
"Cannot be a type or scope token!");
if (Tok.is(tok::kw_typename)) {
@@ -2376,7 +2380,7 @@ void Parser::ParseMicrosoftIfExistsExternalDeclaration() {
/// Parse a declaration beginning with the 'module' keyword or C++20
/// context-sensitive keyword (optionally preceded by 'export').
///
-/// module-declaration: [Modules TS + P0629R0]
+/// module-declaration: [C++20]
/// 'export'[opt] 'module' module-name attribute-specifier-seq[opt] ';'
///
/// global-module-fragment: [C++2a]
@@ -2457,6 +2461,7 @@ Parser::ParseModuleDecl(Sema::ModuleImportState &ImportState) {
ParsedAttributes Attrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs);
ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_module_attr,
+ diag::err_keyword_not_module_attr,
/*DiagnoseEmptyAttrs=*/false,
/*WarnOnUnknownAttrs=*/true);
@@ -2526,6 +2531,7 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc,
MaybeParseCXX11Attributes(Attrs);
// We don't support any module import attributes yet.
ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_import_attr,
+ diag::err_keyword_not_import_attr,
/*DiagnoseEmptyAttrs=*/false,
/*WarnOnUnknownAttrs=*/true);
@@ -2602,7 +2608,7 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc,
return Import.get();
}
-/// Parse a C++ Modules TS / Objective-C module name (both forms use the same
+/// Parse a C++ / Objective-C module name (both forms use the same
/// grammar).
///
/// module-name:
diff --git a/contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp b/contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp
index 8950bfb7c4dc..ef2858990dd9 100644
--- a/contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp
+++ b/contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp
@@ -14,22 +14,18 @@
#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticIDs.h"
-#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "clang/Rewrite/Core/RewriteBuffer.h"
#include "clang/Rewrite/Core/RewriteRope.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <map>
-#include <memory>
-#include <system_error>
#include <utility>
using namespace clang;
@@ -410,68 +406,21 @@ bool Rewriter::IncreaseIndentation(CharSourceRange range,
return false;
}
-namespace {
-
-// A wrapper for a file stream that atomically overwrites the target.
-//
-// Creates a file output stream for a temporary file in the constructor,
-// which is later accessible via getStream() if ok() return true.
-// Flushes the stream and moves the temporary file to the target location
-// in the destructor.
-class AtomicallyMovedFile {
-public:
- AtomicallyMovedFile(DiagnosticsEngine &Diagnostics, StringRef Filename,
- bool &AllWritten)
- : Diagnostics(Diagnostics), Filename(Filename), AllWritten(AllWritten) {
- TempFilename = Filename;
- TempFilename += "-%%%%%%%%";
- int FD;
- if (llvm::sys::fs::createUniqueFile(TempFilename, FD, TempFilename)) {
- AllWritten = false;
- Diagnostics.Report(clang::diag::err_unable_to_make_temp)
- << TempFilename;
- } else {
- FileStream.reset(new llvm::raw_fd_ostream(FD, /*shouldClose=*/true));
- }
- }
-
- ~AtomicallyMovedFile() {
- if (!ok()) return;
-
- // Close (will also flush) theFileStream.
- FileStream->close();
- if (std::error_code ec = llvm::sys::fs::rename(TempFilename, Filename)) {
- AllWritten = false;
- Diagnostics.Report(clang::diag::err_unable_to_rename_temp)
- << TempFilename << Filename << ec.message();
- // If the remove fails, there's not a lot we can do - this is already an
- // error.
- llvm::sys::fs::remove(TempFilename);
- }
- }
-
- bool ok() { return (bool)FileStream; }
- raw_ostream &getStream() { return *FileStream; }
-
-private:
- DiagnosticsEngine &Diagnostics;
- StringRef Filename;
- SmallString<128> TempFilename;
- std::unique_ptr<llvm::raw_fd_ostream> FileStream;
- bool &AllWritten;
-};
-
-} // namespace
-
bool Rewriter::overwriteChangedFiles() {
bool AllWritten = true;
+ auto& Diag = getSourceMgr().getDiagnostics();
+ unsigned OverwriteFailure = Diag.getCustomDiagID(
+ DiagnosticsEngine::Error, "unable to overwrite file %0: %1");
for (buffer_iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) {
- const FileEntry *Entry =
- getSourceMgr().getFileEntryForID(I->first);
- AtomicallyMovedFile File(getSourceMgr().getDiagnostics(), Entry->getName(),
- AllWritten);
- if (File.ok()) {
- I->second.write(File.getStream());
+ const FileEntry *Entry = getSourceMgr().getFileEntryForID(I->first);
+ if (auto Error =
+ llvm::writeToOutput(Entry->getName(), [&](llvm::raw_ostream &OS) {
+ I->second.write(OS);
+ return llvm::Error::success();
+ })) {
+ Diag.Report(OverwriteFailure)
+ << Entry->getName() << llvm::toString(std::move(Error));
+ AllWritten = false;
}
}
return !AllWritten;
diff --git a/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp b/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
index 4530154ac944..43b13e0ec4d2 100644
--- a/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Sema/AnalysisBasedWarnings.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/EvaluatedExprVisitor.h"
@@ -25,6 +26,8 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/Analyses/CalledOnceCheck.h"
#include "clang/Analysis/Analyses/Consumed.h"
@@ -35,6 +38,7 @@
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
@@ -43,6 +47,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -66,11 +71,17 @@ namespace {
public:
UnreachableCodeHandler(Sema &s) : S(s) {}
- void HandleUnreachable(reachable_code::UnreachableKind UK,
- SourceLocation L,
- SourceRange SilenceableCondVal,
- SourceRange R1,
- SourceRange R2) override {
+ void HandleUnreachable(reachable_code::UnreachableKind UK, SourceLocation L,
+ SourceRange SilenceableCondVal, SourceRange R1,
+ SourceRange R2, bool HasFallThroughAttr) override {
+ // If the diagnosed code is `[[fallthrough]];` and
+ // `-Wunreachable-code-fallthrough` is enabled, suppress `code will never
+ // be executed` warning to avoid generating diagnostic twice
+ if (HasFallThroughAttr &&
+ !S.getDiagnostics().isIgnored(diag::warn_unreachable_fallthrough_attr,
+ SourceLocation()))
+ return;
+
// Avoid reporting multiple unreachable code diagnostics that are
// triggered by the same conditional value.
if (PreviousSilenceableCondVal.isValid() &&
@@ -574,6 +585,7 @@ struct CheckFallThroughDiagnostics {
D.diag_AlwaysFallThrough_HasNoReturn = 0;
D.diag_AlwaysFallThrough_ReturnsNonVoid =
diag::warn_falloff_nonvoid_coroutine;
+ D.diag_NeverFallThroughOrReturn = 0;
D.funMode = Coroutine;
return D;
}
@@ -2149,9 +2161,11 @@ public:
namespace {
class UnsafeBufferUsageReporter : public UnsafeBufferUsageHandler {
Sema &S;
+ bool SuggestSuggestions; // Recommend -fsafe-buffer-usage-suggestions?
public:
- UnsafeBufferUsageReporter(Sema &S) : S(S) {}
+ UnsafeBufferUsageReporter(Sema &S, bool SuggestSuggestions)
+ : S(S), SuggestSuggestions(SuggestSuggestions) {}
void handleUnsafeOperation(const Stmt *Operation,
bool IsRelatedToDecl) override {
@@ -2184,25 +2198,113 @@ public:
MsgParam = 1;
}
} else {
+ if (isa<CallExpr>(Operation)) {
+ // note_unsafe_buffer_operation doesn't have this mode yet.
+ assert(!IsRelatedToDecl && "Not implemented yet!");
+ MsgParam = 3;
+ }
Loc = Operation->getBeginLoc();
Range = Operation->getSourceRange();
}
- if (IsRelatedToDecl)
+ if (IsRelatedToDecl) {
+ assert(!SuggestSuggestions &&
+ "Variables blamed for unsafe buffer usage without suggestions!");
S.Diag(Loc, diag::note_unsafe_buffer_operation) << MsgParam << Range;
- else
+ } else {
S.Diag(Loc, diag::warn_unsafe_buffer_operation) << MsgParam << Range;
+ if (SuggestSuggestions) {
+ S.Diag(Loc, diag::note_safe_buffer_usage_suggestions_disabled);
+ }
+ }
}
- // FIXME: rename to handleUnsafeVariable
- void handleFixableVariable(const VarDecl *Variable,
+ void handleUnsafeVariableGroup(const VarDecl *Variable,
+ const DefMapTy &VarGrpMap,
FixItList &&Fixes) override {
- const auto &D =
- S.Diag(Variable->getLocation(), diag::warn_unsafe_buffer_variable);
- D << Variable;
- D << (Variable->getType()->isPointerType() ? 0 : 1);
- D << Variable->getSourceRange();
- for (const auto &F : Fixes)
- D << F;
+ assert(!SuggestSuggestions &&
+ "Unsafe buffer usage fixits displayed without suggestions!");
+ S.Diag(Variable->getLocation(), diag::warn_unsafe_buffer_variable)
+ << Variable << (Variable->getType()->isPointerType() ? 0 : 1)
+ << Variable->getSourceRange();
+ if (!Fixes.empty()) {
+ const auto VarGroupForVD = VarGrpMap.find(Variable)->second;
+ unsigned FixItStrategy = 0; // For now we only have 'std::span' strategy
+ const auto &FD = S.Diag(Variable->getLocation(),
+ diag::note_unsafe_buffer_variable_fixit_group);
+
+ FD << Variable << FixItStrategy;
+ std::string AllVars = "";
+ if (VarGroupForVD.size() > 1) {
+ if (VarGroupForVD.size() == 2) {
+ if (VarGroupForVD[0] == Variable) {
+ AllVars.append("'" + VarGroupForVD[1]->getName().str() + "'");
+ } else {
+ AllVars.append("'" + VarGroupForVD[0]->getName().str() + "'");
+ }
+ } else {
+ bool first = false;
+ if (VarGroupForVD.size() == 3) {
+ for (const VarDecl * V : VarGroupForVD) {
+ if (V == Variable) {
+ continue;
+ }
+ if (!first) {
+ first = true;
+ AllVars.append("'" + V->getName().str() + "'" + " and ");
+ } else {
+ AllVars.append("'" + V->getName().str() + "'");
+ }
+ }
+ } else {
+ for (const VarDecl * V : VarGroupForVD) {
+ if (V == Variable) {
+ continue;
+ }
+ if (VarGroupForVD.back() != V) {
+ AllVars.append("'" + V->getName().str() + "'" + ", ");
+ } else {
+ AllVars.append("and '" + V->getName().str() + "'");
+ }
+ }
+ }
+ }
+ FD << AllVars << 1;
+ } else {
+ FD << "" << 0;
+ }
+
+ for (const auto &F : Fixes)
+ FD << F;
+ }
+ }
+
+ bool isSafeBufferOptOut(const SourceLocation &Loc) const override {
+ return S.PP.isSafeBufferOptOut(S.getSourceManager(), Loc);
+ }
+
+ // Returns the text representation of clang::unsafe_buffer_usage attribute.
+ // `WSSuffix` holds customized "white-space"s, e.g., newline or whitespace
+ // characters.
+ std::string
+ getUnsafeBufferUsageAttributeTextAt(SourceLocation Loc,
+ StringRef WSSuffix = "") const override {
+ Preprocessor &PP = S.getPreprocessor();
+ TokenValue ClangUnsafeBufferUsageTokens[] = {
+ tok::l_square,
+ tok::l_square,
+ PP.getIdentifierInfo("clang"),
+ tok::coloncolon,
+ PP.getIdentifierInfo("unsafe_buffer_usage"),
+ tok::r_square,
+ tok::r_square};
+
+ StringRef MacroName;
+
+ // The returned macro name (if any) is guaranteed not to be function-like:
+ MacroName = PP.getLastMacroWithSpelling(Loc, ClangUnsafeBufferUsageTokens);
+ if (MacroName.empty())
+ MacroName = "[[clang::unsafe_buffer_usage]]";
+ return MacroName.str() + WSSuffix.str();
}
};
} // namespace
@@ -2271,6 +2373,94 @@ static void flushDiagnostics(Sema &S, const sema::FunctionScopeInfo *fscope) {
S.Diag(D.Loc, D.PD);
}
+// An AST Visitor that calls a callback function on each callable DEFINITION
+// that is NOT in a dependent context:
+class CallableVisitor : public RecursiveASTVisitor<CallableVisitor> {
+private:
+ llvm::function_ref<void(const Decl *)> Callback;
+
+public:
+ CallableVisitor(llvm::function_ref<void(const Decl *)> Callback)
+ : Callback(Callback) {}
+
+ bool VisitFunctionDecl(FunctionDecl *Node) {
+ if (cast<DeclContext>(Node)->isDependentContext())
+ return true; // Do not analyze dependent decls
+ // `FunctionDecl->hasBody()` returns true if the function has a body
+ // somewhere defined. But we want to know if this `Node` has a body
+ // child. So we use `doesThisDeclarationHaveABody`:
+ if (Node->doesThisDeclarationHaveABody())
+ Callback(Node);
+ return true;
+ }
+
+ bool VisitBlockDecl(BlockDecl *Node) {
+ if (cast<DeclContext>(Node)->isDependentContext())
+ return true; // Do not analyze dependent decls
+ Callback(Node);
+ return true;
+ }
+
+ bool VisitObjCMethodDecl(ObjCMethodDecl *Node) {
+ if (cast<DeclContext>(Node)->isDependentContext())
+ return true; // Do not analyze dependent decls
+ if (Node->hasBody())
+ Callback(Node);
+ return true;
+ }
+
+ bool VisitLambdaExpr(LambdaExpr *Node) {
+ return VisitFunctionDecl(Node->getCallOperator());
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return false; }
+};
+
+void clang::sema::AnalysisBasedWarnings::IssueWarnings(
+ TranslationUnitDecl *TU) {
+ if (!TU)
+ return; // This is unexpected, give up quietly.
+
+ DiagnosticsEngine &Diags = S.getDiagnostics();
+
+ if (S.hasUncompilableErrorOccurred() || Diags.getIgnoreAllWarnings())
+ // exit if having uncompilable errors or ignoring all warnings:
+ return;
+
+ DiagnosticOptions &DiagOpts = Diags.getDiagnosticOptions();
+
+ // UnsafeBufferUsage analysis settings.
+ bool UnsafeBufferUsageCanEmitSuggestions = S.getLangOpts().CPlusPlus20;
+ bool UnsafeBufferUsageShouldEmitSuggestions = // Should != Can.
+ UnsafeBufferUsageCanEmitSuggestions &&
+ DiagOpts.ShowSafeBufferUsageSuggestions;
+ bool UnsafeBufferUsageShouldSuggestSuggestions =
+ UnsafeBufferUsageCanEmitSuggestions &&
+ !DiagOpts.ShowSafeBufferUsageSuggestions;
+ UnsafeBufferUsageReporter R(S, UnsafeBufferUsageShouldSuggestSuggestions);
+
+ // The Callback function that performs analyses:
+ auto CallAnalyzers = [&](const Decl *Node) -> void {
+ // Perform unsafe buffer usage analysis:
+ if (!Diags.isIgnored(diag::warn_unsafe_buffer_operation,
+ Node->getBeginLoc()) ||
+ !Diags.isIgnored(diag::warn_unsafe_buffer_variable,
+ Node->getBeginLoc())) {
+ clang::checkUnsafeBufferUsage(Node, R,
+ UnsafeBufferUsageShouldEmitSuggestions);
+ }
+
+ // More analysis ...
+ };
+ // Emit per-function analysis-based warnings that require the whole-TU
+ // reasoning. Check if any of them is enabled at all before scanning the AST:
+ if (!Diags.isIgnored(diag::warn_unsafe_buffer_operation, SourceLocation()) ||
+ !Diags.isIgnored(diag::warn_unsafe_buffer_variable, SourceLocation())) {
+ CallableVisitor(CallAnalyzers).TraverseTranslationUnitDecl(TU);
+ }
+}
+
void clang::sema::AnalysisBasedWarnings::IssueWarnings(
sema::AnalysisBasedWarnings::Policy P, sema::FunctionScopeInfo *fscope,
const Decl *D, QualType BlockType) {
@@ -2496,16 +2686,9 @@ void clang::sema::AnalysisBasedWarnings::IssueWarnings(
// Check for throw out of non-throwing function.
if (!Diags.isIgnored(diag::warn_throw_in_noexcept_func, D->getBeginLoc()))
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- if (S.getLangOpts().CPlusPlus && isNoexcept(FD))
+ if (S.getLangOpts().CPlusPlus && !fscope->isCoroutine() && isNoexcept(FD))
checkThrowInNonThrowingFunc(S, FD, AC);
- // Emit unsafe buffer usage warnings and fixits.
- if (!Diags.isIgnored(diag::warn_unsafe_buffer_operation, D->getBeginLoc()) ||
- !Diags.isIgnored(diag::warn_unsafe_buffer_variable, D->getBeginLoc())) {
- UnsafeBufferUsageReporter R(S);
- checkUnsafeBufferUsage(D, R);
- }
-
// If none of the previous checks caused a CFG build, trigger one here
// for the logical error handler.
if (LogicalErrorHandler::hasActiveDiagnostics(Diags, D->getBeginLoc())) {
diff --git a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
index b91291cfea0b..202417798712 100644
--- a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -83,6 +83,7 @@ bool CodeCompletionContext::wantConstructorResults() const {
case CCC_ObjCCategoryName:
case CCC_IncludedFile:
case CCC_Attribute:
+ case CCC_ObjCClassForwardDecl:
return false;
}
@@ -166,6 +167,8 @@ StringRef clang::getCompletionKindString(CodeCompletionContext::Kind Kind) {
return "Attribute";
case CCKind::CCC_Recovery:
return "Recovery";
+ case CCKind::CCC_ObjCClassForwardDecl:
+ return "ObjCClassForwardDecl";
}
llvm_unreachable("Invalid CodeCompletionContext::Kind!");
}
diff --git a/contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp b/contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp
index 3ff4e75b5694..f29f92aceb50 100644
--- a/contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp
@@ -71,7 +71,6 @@ struct BuiltinTypeDeclBuilder {
// Don't let anyone derive from built-in types.
Record->addAttr(FinalAttr::CreateImplicit(AST, SourceRange(),
- AttributeCommonInfo::AS_Keyword,
FinalAttr::Keyword_final));
}
@@ -286,8 +285,7 @@ struct BuiltinTypeDeclBuilder {
MethodDecl->setLexicalDeclContext(Record);
MethodDecl->setAccess(AccessSpecifier::AS_public);
MethodDecl->addAttr(AlwaysInlineAttr::CreateImplicit(
- AST, SourceRange(), AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::CXX11_clang_always_inline));
+ AST, SourceRange(), AlwaysInlineAttr::CXX11_clang_always_inline));
Record->addDecl(MethodDecl);
return *this;
diff --git a/contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp b/contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp
index 607dc3111e9d..773cef65dcbd 100644
--- a/contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp
@@ -231,9 +231,12 @@ void IdentifierResolver::RemoveDecl(NamedDecl *D) {
return toIdDeclInfo(Ptr)->RemoveDecl(D);
}
-/// begin - Returns an iterator for decls with name 'Name'.
-IdentifierResolver::iterator
-IdentifierResolver::begin(DeclarationName Name) {
+llvm::iterator_range<IdentifierResolver::iterator>
+IdentifierResolver::decls(DeclarationName Name) {
+ return {begin(Name), end()};
+}
+
+IdentifierResolver::iterator IdentifierResolver::begin(DeclarationName Name) {
if (IdentifierInfo *II = Name.getAsIdentifierInfo())
readingIdentifier(*II);
diff --git a/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp b/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp
index bd2ce9a93e7e..45ff36d5fe23 100644
--- a/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp
@@ -72,10 +72,9 @@ class JumpScopeChecker {
SmallVector<Stmt*, 16> Jumps;
SmallVector<Stmt*, 4> IndirectJumps;
- SmallVector<Stmt*, 4> AsmJumps;
+ SmallVector<LabelDecl *, 4> IndirectJumpTargets;
SmallVector<AttributedStmt *, 4> MustTailStmts;
- SmallVector<LabelDecl*, 4> IndirectJumpTargets;
- SmallVector<LabelDecl*, 4> AsmJumpTargets;
+
public:
JumpScopeChecker(Stmt *Body, Sema &S);
private:
@@ -86,7 +85,7 @@ private:
void BuildScopeInformation(Stmt *S, unsigned &origParentScope);
void VerifyJumps();
- void VerifyIndirectOrAsmJumps(bool IsAsmGoto);
+ void VerifyIndirectJumps();
void VerifyMustTailStmts();
void NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes);
void DiagnoseIndirectOrAsmJump(Stmt *IG, unsigned IGScope, LabelDecl *Target,
@@ -115,8 +114,7 @@ JumpScopeChecker::JumpScopeChecker(Stmt *Body, Sema &s)
// Check that all jumps we saw are kosher.
VerifyJumps();
- VerifyIndirectOrAsmJumps(false);
- VerifyIndirectOrAsmJumps(true);
+ VerifyIndirectJumps();
VerifyMustTailStmts();
}
@@ -333,11 +331,8 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
// operand (to avoid recording the address-of-label use), which
// works only because of the restricted set of expressions which
// we detect as constant targets.
- if (cast<IndirectGotoStmt>(S)->getConstantTarget()) {
- LabelAndGotoScopes[S] = ParentScope;
- Jumps.push_back(S);
- return;
- }
+ if (cast<IndirectGotoStmt>(S)->getConstantTarget())
+ goto RecordJumpScope;
LabelAndGotoScopes[S] = ParentScope;
IndirectJumps.push_back(S);
@@ -354,27 +349,21 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
BuildScopeInformation(Var, ParentScope);
++StmtsToSkip;
}
+ goto RecordJumpScope;
+
+ case Stmt::GCCAsmStmtClass:
+ if (!cast<GCCAsmStmt>(S)->isAsmGoto())
+ break;
[[fallthrough]];
case Stmt::GotoStmtClass:
+ RecordJumpScope:
// Remember both what scope a goto is in as well as the fact that we have
// it. This makes the second scan not have to walk the AST again.
LabelAndGotoScopes[S] = ParentScope;
Jumps.push_back(S);
break;
- case Stmt::GCCAsmStmtClass:
- if (auto *GS = dyn_cast<GCCAsmStmt>(S))
- if (GS->isAsmGoto()) {
- // Remember both what scope a goto is in as well as the fact that we
- // have it. This makes the second scan not have to walk the AST again.
- LabelAndGotoScopes[S] = ParentScope;
- AsmJumps.push_back(GS);
- for (auto *E : GS->labels())
- AsmJumpTargets.push_back(E->getLabel());
- }
- break;
-
case Stmt::IfStmtClass: {
IfStmt *IS = cast<IfStmt>(S);
if (!(IS->isConstexpr() || IS->isConsteval() ||
@@ -477,6 +466,21 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
return;
}
+ case Stmt::StmtExprClass: {
+ // [GNU]
+ // Jumping into a statement expression with goto or using
+ // a switch statement outside the statement expression with
+ // a case or default label inside the statement expression is not permitted.
+ // Jumping out of a statement expression is permitted.
+ StmtExpr *SE = cast<StmtExpr>(S);
+ unsigned NewParentScope = Scopes.size();
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_enters_statement_expression,
+ /*OutDiag=*/0, SE->getBeginLoc()));
+ BuildScopeInformation(SE->getSubStmt(), NewParentScope);
+ return;
+ }
+
case Stmt::ObjCAtTryStmtClass: {
// Disallow jumps into any part of an @try statement by pushing a scope and
// walking all sub-stmts in that scope.
@@ -666,6 +670,22 @@ void JumpScopeChecker::VerifyJumps() {
continue;
}
+ // If an asm goto jumps to a different scope, things like destructors or
+    // initializers might not be run which may be surprising to users. Perhaps
+ // this behavior can be changed in the future, but today Clang will not
+ // generate such code. Produce a diagnostic instead. See also the
+ // discussion here: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110728.
+ if (auto *G = dyn_cast<GCCAsmStmt>(Jump)) {
+ for (AddrLabelExpr *L : G->labels()) {
+ LabelDecl *LD = L->getLabel();
+ unsigned JumpScope = LabelAndGotoScopes[G];
+ unsigned TargetScope = LabelAndGotoScopes[LD->getStmt()];
+ if (JumpScope != TargetScope)
+ DiagnoseIndirectOrAsmJump(G, JumpScope, LD, TargetScope);
+ }
+ continue;
+ }
+
// We only get indirect gotos here when they have a constant target.
if (IndirectGotoStmt *IGS = dyn_cast<IndirectGotoStmt>(Jump)) {
LabelDecl *Target = IGS->getConstantTarget();
@@ -694,17 +714,16 @@ void JumpScopeChecker::VerifyJumps() {
}
}
-/// VerifyIndirectOrAsmJumps - Verify whether any possible indirect goto or
-/// asm goto jump might cross a protection boundary. Unlike direct jumps,
-/// indirect or asm goto jumps count cleanups as protection boundaries:
-/// since there's no way to know where the jump is going, we can't implicitly
-/// run the right cleanups the way we can with direct jumps.
-/// Thus, an indirect/asm jump is "trivial" if it bypasses no
-/// initializations and no teardowns. More formally, an indirect/asm jump
-/// from A to B is trivial if the path out from A to DCA(A,B) is
-/// trivial and the path in from DCA(A,B) to B is trivial, where
-/// DCA(A,B) is the deepest common ancestor of A and B.
-/// Jump-triviality is transitive but asymmetric.
+/// VerifyIndirectJumps - Verify whether any possible indirect goto jump might
+/// cross a protection boundary. Unlike direct jumps, indirect goto jumps
+/// count cleanups as protection boundaries: since there's no way to know where
+/// the jump is going, we can't implicitly run the right cleanups the way we
+/// can with direct jumps. Thus, an indirect/asm jump is "trivial" if it
+/// bypasses no initializations and no teardowns. More formally, an
+/// indirect/asm jump from A to B is trivial if the path out from A to DCA(A,B)
+/// is trivial and the path in from DCA(A,B) to B is trivial, where DCA(A,B) is
+/// the deepest common ancestor of A and B. Jump-triviality is transitive but
+/// asymmetric.
///
/// A path in is trivial if none of the entered scopes have an InDiag.
/// A path out is trivial is none of the exited scopes have an OutDiag.
@@ -712,57 +731,45 @@ void JumpScopeChecker::VerifyJumps() {
/// Under these definitions, this function checks that the indirect
/// jump between A and B is trivial for every indirect goto statement A
/// and every label B whose address was taken in the function.
-void JumpScopeChecker::VerifyIndirectOrAsmJumps(bool IsAsmGoto) {
- SmallVector<Stmt*, 4> GotoJumps = IsAsmGoto ? AsmJumps : IndirectJumps;
- if (GotoJumps.empty())
+void JumpScopeChecker::VerifyIndirectJumps() {
+ if (IndirectJumps.empty())
return;
- SmallVector<LabelDecl *, 4> JumpTargets =
- IsAsmGoto ? AsmJumpTargets : IndirectJumpTargets;
// If there aren't any address-of-label expressions in this function,
// complain about the first indirect goto.
- if (JumpTargets.empty()) {
- assert(!IsAsmGoto &&"only indirect goto can get here");
- S.Diag(GotoJumps[0]->getBeginLoc(),
+ if (IndirectJumpTargets.empty()) {
+ S.Diag(IndirectJumps[0]->getBeginLoc(),
diag::err_indirect_goto_without_addrlabel);
return;
}
- // Collect a single representative of every scope containing an
- // indirect or asm goto. For most code bases, this substantially cuts
- // down on the number of jump sites we'll have to consider later.
- typedef std::pair<unsigned, Stmt*> JumpScope;
+ // Collect a single representative of every scope containing an indirect
+ // goto. For most code bases, this substantially cuts down on the number of
+ // jump sites we'll have to consider later.
+ using JumpScope = std::pair<unsigned, Stmt *>;
SmallVector<JumpScope, 32> JumpScopes;
{
llvm::DenseMap<unsigned, Stmt*> JumpScopesMap;
- for (SmallVectorImpl<Stmt *>::iterator I = GotoJumps.begin(),
- E = GotoJumps.end();
- I != E; ++I) {
- Stmt *IG = *I;
+ for (Stmt *IG : IndirectJumps) {
if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(IG)))
continue;
unsigned IGScope = LabelAndGotoScopes[IG];
- Stmt *&Entry = JumpScopesMap[IGScope];
- if (!Entry) Entry = IG;
+ if (!JumpScopesMap.contains(IGScope))
+ JumpScopesMap[IGScope] = IG;
}
JumpScopes.reserve(JumpScopesMap.size());
- for (llvm::DenseMap<unsigned, Stmt *>::iterator I = JumpScopesMap.begin(),
- E = JumpScopesMap.end();
- I != E; ++I)
- JumpScopes.push_back(*I);
+ for (auto &Pair : JumpScopesMap)
+ JumpScopes.emplace_back(Pair);
}
// Collect a single representative of every scope containing a
// label whose address was taken somewhere in the function.
// For most code bases, there will be only one such scope.
llvm::DenseMap<unsigned, LabelDecl*> TargetScopes;
- for (SmallVectorImpl<LabelDecl *>::iterator I = JumpTargets.begin(),
- E = JumpTargets.end();
- I != E; ++I) {
- LabelDecl *TheLabel = *I;
+ for (LabelDecl *TheLabel : IndirectJumpTargets) {
if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(TheLabel->getStmt())))
continue;
unsigned LabelScope = LabelAndGotoScopes[TheLabel->getStmt()];
- LabelDecl *&Target = TargetScopes[LabelScope];
- if (!Target) Target = TheLabel;
+ if (!TargetScopes.contains(LabelScope))
+ TargetScopes[LabelScope] = TheLabel;
}
// For each target scope, make sure it's trivially reachable from
@@ -774,11 +781,7 @@ void JumpScopeChecker::VerifyIndirectOrAsmJumps(bool IsAsmGoto) {
// entered, then verify that every jump scope can be trivially
// exitted to reach a scope in S.
llvm::BitVector Reachable(Scopes.size(), false);
- for (llvm::DenseMap<unsigned,LabelDecl*>::iterator
- TI = TargetScopes.begin(), TE = TargetScopes.end(); TI != TE; ++TI) {
- unsigned TargetScope = TI->first;
- LabelDecl *TargetLabel = TI->second;
-
+ for (auto [TargetScope, TargetLabel] : TargetScopes) {
Reachable.reset();
// Mark all the enclosing scopes from which you can safely jump
@@ -799,10 +802,8 @@ void JumpScopeChecker::VerifyIndirectOrAsmJumps(bool IsAsmGoto) {
// Walk through all the jump sites, checking that they can trivially
// reach this label scope.
- for (SmallVectorImpl<JumpScope>::iterator
- I = JumpScopes.begin(), E = JumpScopes.end(); I != E; ++I) {
- unsigned Scope = I->first;
-
+ for (auto [JumpScope, JumpStmt] : JumpScopes) {
+ unsigned Scope = JumpScope;
// Walk out the "scope chain" for this scope, looking for a scope
// we've marked reachable. For well-formed code this amortizes
// to O(JumpScopes.size() / Scopes.size()): we only iterate
@@ -813,7 +814,7 @@ void JumpScopeChecker::VerifyIndirectOrAsmJumps(bool IsAsmGoto) {
if (Reachable.test(Scope)) {
// If we find something reachable, mark all the scopes we just
// walked through as reachable.
- for (unsigned S = I->first; S != Scope; S = Scopes[S].ParentScope)
+ for (unsigned S = JumpScope; S != Scope; S = Scopes[S].ParentScope)
Reachable.set(S);
IsReachable = true;
break;
@@ -832,7 +833,7 @@ void JumpScopeChecker::VerifyIndirectOrAsmJumps(bool IsAsmGoto) {
// Only diagnose if we didn't find something.
if (IsReachable) continue;
- DiagnoseIndirectOrAsmJump(I->second, I->first, TargetLabel, TargetScope);
+ DiagnoseIndirectOrAsmJump(JumpStmt, JumpScope, TargetLabel, TargetScope);
}
}
}
diff --git a/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp b/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp
index 55e015487f3b..058e22cb2b81 100644
--- a/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -341,3 +341,9 @@ bool MultiplexExternalSemaSource::MaybeDiagnoseMissingCompleteType(
}
return false;
}
+
+void MultiplexExternalSemaSource::AssignedLambdaNumbering(
+ const CXXRecordDecl *Lambda) {
+ for (auto *Source : Sources)
+ Source->AssignedLambdaNumbering(Lambda);
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp b/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
index c1e39acb14ec..d7acb589172b 100644
--- a/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
@@ -19,15 +19,12 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/ManagedStatic.h"
#include <cassert>
#include <cstddef>
#include <utility>
using namespace clang;
-LLVM_INSTANTIATE_REGISTRY(ParsedAttrInfoRegistry)
-
IdentifierLoc *IdentifierLoc::create(ASTContext &Ctx, SourceLocation Loc,
IdentifierInfo *Ident) {
IdentifierLoc *Result = new (Ctx) IdentifierLoc;
@@ -120,13 +117,7 @@ const ParsedAttrInfo &ParsedAttrInfo::get(const AttributeCommonInfo &A) {
if (A.getParsedKind() == AttributeCommonInfo::IgnoredAttribute)
return IgnoredParsedAttrInfo;
- // Otherwise this may be an attribute defined by a plugin. First instantiate
- // all plugin attributes if we haven't already done so.
- static llvm::ManagedStatic<std::list<std::unique_ptr<ParsedAttrInfo>>>
- PluginAttrInstances;
- if (PluginAttrInstances->empty())
- for (auto It : ParsedAttrInfoRegistry::entries())
- PluginAttrInstances->emplace_back(It.instantiate());
+ // Otherwise this may be an attribute defined by a plugin.
// Search for a ParsedAttrInfo whose name and syntax match.
std::string FullName = A.getNormalizedFullName();
@@ -134,10 +125,9 @@ const ParsedAttrInfo &ParsedAttrInfo::get(const AttributeCommonInfo &A) {
if (SyntaxUsed == AttributeCommonInfo::AS_ContextSensitiveKeyword)
SyntaxUsed = AttributeCommonInfo::AS_Keyword;
- for (auto &Ptr : *PluginAttrInstances)
- for (auto &S : Ptr->Spellings)
- if (S.Syntax == SyntaxUsed && S.NormalizedFullName == FullName)
- return *Ptr;
+ for (auto &Ptr : getAttributePluginInstances())
+ if (Ptr->hasSpelling(SyntaxUsed, FullName))
+ return *Ptr;
// If we failed to find a match then return a default ParsedAttrInfo.
static const ParsedAttrInfo DefaultParsedAttrInfo(
@@ -213,6 +203,11 @@ bool ParsedAttr::isSupportedByPragmaAttribute() const {
}
bool ParsedAttr::slidesFromDeclToDeclSpecLegacyBehavior() const {
+ if (isRegularKeywordAttribute())
+ // The appurtenance rules are applied strictly for all regular keyword
+    // attributes.
+ return false;
+
assert(isStandardAttributeSyntax());
// We have historically allowed some type attributes with standard attribute
diff --git a/contrib/llvm-project/clang/lib/Sema/Scope.cpp b/contrib/llvm-project/clang/lib/Sema/Scope.cpp
index c995c7e65f4b..4570d8c615fe 100644
--- a/contrib/llvm-project/clang/lib/Sema/Scope.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/Scope.cpp
@@ -70,8 +70,10 @@ void Scope::setFlags(Scope *parent, unsigned flags) {
if (flags & BlockScope) BlockParent = this;
if (flags & TemplateParamScope) TemplateParamParent = this;
- // If this is a prototype scope, record that.
- if (flags & FunctionPrototypeScope) PrototypeDepth++;
+ // If this is a prototype scope, record that. Lambdas have an extra prototype
+ // scope that doesn't add any depth.
+ if (flags & FunctionPrototypeScope && !(flags & LambdaScope))
+ PrototypeDepth++;
if (flags & DeclScope) {
if (flags & FunctionPrototypeScope)
diff --git a/contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp b/contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp
index e313052b3ab3..92ce5137f4f3 100644
--- a/contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp
@@ -39,6 +39,7 @@ void FunctionScopeInfo::Clear() {
FirstReturnLoc = SourceLocation();
FirstCXXOrObjCTryLoc = SourceLocation();
FirstSEHTryLoc = SourceLocation();
+ FoundImmediateEscalatingExpression = false;
// Coroutine state
FirstCoroutineStmtLoc = SourceLocation();
diff --git a/contrib/llvm-project/clang/lib/Sema/Sema.cpp b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
index 0f0305422454..46ae6fba8344 100644
--- a/contrib/llvm-project/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
@@ -33,6 +33,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/MultiplexExternalSemaSource.h"
@@ -202,9 +203,8 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
IsBuildingRecoveryCallExpr(false), LateTemplateParser(nullptr),
LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
- StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
- StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr),
- MSVCGuidDecl(nullptr), StdSourceLocationImplDecl(nullptr),
+ StdInitializerList(nullptr), StdCoroutineTraitsCache(nullptr),
+ CXXTypeInfoDecl(nullptr), StdSourceLocationImplDecl(nullptr),
NSNumberDecl(nullptr), NSValueDecl(nullptr), NSStringDecl(nullptr),
StringWithUTF8StringMethod(nullptr),
ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
@@ -218,7 +218,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
- CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) {
+ CurScope(nullptr), Ident_super(nullptr) {
assert(pp.TUKind == TUKind);
TUScope = nullptr;
isConstantEvaluatedOverride = false;
@@ -444,6 +444,13 @@ void Sema::Initialize() {
#include "clang/Basic/RISCVVTypes.def"
}
+ if (Context.getTargetInfo().getTriple().isWasm() &&
+ Context.getTargetInfo().hasFeature("reference-types")) {
+#define WASM_TYPE(Name, Id, SingletonId) \
+ addImplicitTypedef(Name, Context.SingletonId);
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+ }
+
if (Context.getTargetInfo().hasBuiltinMSVaList()) {
DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
if (IdResolver.begin(MSVaList) == IdResolver.end())
@@ -778,15 +785,15 @@ static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
return false;
}
-static bool isFunctionOrVarDeclExternC(NamedDecl *ND) {
- if (auto *FD = dyn_cast<FunctionDecl>(ND))
+static bool isFunctionOrVarDeclExternC(const NamedDecl *ND) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(ND))
return FD->isExternC();
return cast<VarDecl>(ND)->isExternC();
}
/// Determine whether ND is an external-linkage function or variable whose
/// type has no linkage.
-bool Sema::isExternalWithNoLinkageType(ValueDecl *VD) {
+bool Sema::isExternalWithNoLinkageType(const ValueDecl *VD) const {
// Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
// because we also want to catch the case where its type has VisibleNoLinkage,
// which does not affect the linkage of VD.
@@ -858,7 +865,7 @@ static void checkUndefinedButUsed(Sema &S) {
S.getUndefinedButUsed(Undefined);
if (Undefined.empty()) return;
- for (auto Undef : Undefined) {
+ for (const auto &Undef : Undefined) {
ValueDecl *VD = cast<ValueDecl>(Undef.first);
SourceLocation UseLoc = Undef.second;
@@ -1024,16 +1031,6 @@ void Sema::ActOnStartOfTranslationUnit() {
if (getLangOpts().CPlusPlusModules &&
getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
HandleStartOfHeaderUnit();
- else if (getLangOpts().ModulesTS &&
- (getLangOpts().getCompilingModule() ==
- LangOptions::CMK_ModuleInterface ||
- getLangOpts().getCompilingModule() == LangOptions::CMK_None)) {
- // We start in an implied global module fragment.
- SourceLocation StartOfTU =
- SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
- ActOnGlobalModuleFragmentDecl(StartOfTU);
- ModuleScopes.back().ImplicitGlobalModuleFragment = true;
- }
}
void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
@@ -1186,7 +1183,7 @@ void Sema::ActOnEndOfTranslationUnit() {
!(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
for (const auto &WI : WeakIDs.second)
Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'weak'" << ExpectedVariableOrFunction;
+ << "'weak'" << /*isRegularKeyword=*/0 << ExpectedVariableOrFunction;
else
for (const auto &WI : WeakIDs.second)
Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared)
@@ -1205,9 +1202,8 @@ void Sema::ActOnEndOfTranslationUnit() {
// A global-module-fragment is only permitted within a module unit.
bool DiagnosedMissingModuleDeclaration = false;
- if (!ModuleScopes.empty() &&
- ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment &&
- !ModuleScopes.back().ImplicitGlobalModuleFragment) {
+ if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
+ Module::ExplicitGlobalModuleFragment) {
Diag(ModuleScopes.back().BeginLoc,
diag::err_module_declaration_missing_after_global_module_introducer);
DiagnosedMissingModuleDeclaration = true;
@@ -1244,7 +1240,8 @@ void Sema::ActOnEndOfTranslationUnit() {
ModMap.resolveConflicts(Mod, /*Complain=*/false);
// Queue the submodules, so their exports will also be resolved.
- Stack.append(Mod->submodule_begin(), Mod->submodule_end());
+ auto SubmodulesRange = Mod->submodules();
+ Stack.append(SubmodulesRange.begin(), SubmodulesRange.end());
}
}
@@ -1350,10 +1347,14 @@ void Sema::ActOnEndOfTranslationUnit() {
DiagD = FD;
if (DiagD->isDeleted())
continue; // Deleted functions are supposed to be unused.
+ SourceRange DiagRange = DiagD->getLocation();
+ if (const ASTTemplateArgumentListInfo *ASTTAL =
+ DiagD->getTemplateSpecializationArgsAsWritten())
+ DiagRange.setEnd(ASTTAL->RAngleLoc);
if (DiagD->isReferenced()) {
if (isa<CXXMethodDecl>(DiagD))
Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
- << DiagD;
+ << DiagD << DiagRange;
else {
if (FD->getStorageClass() == SC_Static &&
!FD->isInlineSpecified() &&
@@ -1361,40 +1362,46 @@ void Sema::ActOnEndOfTranslationUnit() {
SourceMgr.getExpansionLoc(FD->getLocation())))
Diag(DiagD->getLocation(),
diag::warn_unneeded_static_internal_decl)
- << DiagD;
+ << DiagD << DiagRange;
else
Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
- << /*function*/ 0 << DiagD;
+ << /*function=*/0 << DiagD << DiagRange;
}
} else {
if (FD->getDescribedFunctionTemplate())
Diag(DiagD->getLocation(), diag::warn_unused_template)
- << /*function*/ 0 << DiagD;
+ << /*function=*/0 << DiagD << DiagRange;
else
Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
? diag::warn_unused_member_function
: diag::warn_unused_function)
- << DiagD;
+ << DiagD << DiagRange;
}
} else {
const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
if (!DiagD)
DiagD = cast<VarDecl>(*I);
+ SourceRange DiagRange = DiagD->getLocation();
+ if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(DiagD)) {
+ if (const ASTTemplateArgumentListInfo *ASTTAL =
+ VTSD->getTemplateArgsInfo())
+ DiagRange.setEnd(ASTTAL->RAngleLoc);
+ }
if (DiagD->isReferenced()) {
Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
- << /*variable*/ 1 << DiagD;
+ << /*variable=*/1 << DiagD << DiagRange;
+ } else if (DiagD->getDescribedVarTemplate()) {
+ Diag(DiagD->getLocation(), diag::warn_unused_template)
+ << /*variable=*/1 << DiagD << DiagRange;
} else if (DiagD->getType().isConstQualified()) {
const SourceManager &SM = SourceMgr;
if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
!PP.getLangOpts().IsHeaderFile)
Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
- << DiagD;
+ << DiagD << DiagRange;
} else {
- if (DiagD->getDescribedVarTemplate())
- Diag(DiagD->getLocation(), diag::warn_unused_template)
- << /*variable*/ 1 << DiagD;
- else
- Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD;
+ Diag(DiagD->getLocation(), diag::warn_unused_variable)
+ << DiagD << DiagRange;
}
}
}
@@ -1407,9 +1414,7 @@ void Sema::ActOnEndOfTranslationUnit() {
// source.
RecordCompleteMap RecordsComplete;
RecordCompleteMap MNCComplete;
- for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(),
- E = UnusedPrivateFields.end(); I != E; ++I) {
- const NamedDecl *D = *I;
+ for (const NamedDecl *D : UnusedPrivateFields) {
const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
if (RD && !RD->isUnion() &&
IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
@@ -1430,6 +1435,8 @@ void Sema::ActOnEndOfTranslationUnit() {
}
}
+ AnalysisWarnings.IssueWarnings(Context.getTranslationUnitDecl());
+
// Check we've noticed that we're no longer parsing the initializer for every
// variable. If we miss cases, then at best we have a performance issue and
// at worst a rejects-valid bug.
@@ -1445,7 +1452,7 @@ void Sema::ActOnEndOfTranslationUnit() {
// Helper functions.
//===----------------------------------------------------------------------===//
-DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) {
+DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) const {
DeclContext *DC = CurContext;
while (true) {
@@ -1465,7 +1472,7 @@ DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) {
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
-FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) {
+FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) const {
DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
return dyn_cast<FunctionDecl>(DC);
}
@@ -1477,7 +1484,7 @@ ObjCMethodDecl *Sema::getCurMethodDecl() {
return dyn_cast<ObjCMethodDecl>(DC);
}
-NamedDecl *Sema::getCurFunctionOrMethodDecl() {
+NamedDecl *Sema::getCurFunctionOrMethodDecl() const {
DeclContext *DC = getFunctionLevelDeclContext();
if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
return cast<NamedDecl>(DC);
@@ -1611,7 +1618,7 @@ bool Sema::hasUncompilableErrorOccurred() const {
// Print notes showing how we can reach FD starting from an a priori
// known-callable function.
-static void emitCallStackNotes(Sema &S, FunctionDecl *FD) {
+static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) {
auto FnIt = S.DeviceKnownEmittedFns.find(FD);
while (FnIt != S.DeviceKnownEmittedFns.end()) {
// Respect error limit.
@@ -1823,7 +1830,8 @@ void Sema::emitDeferredDiags() {
Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
unsigned DiagID,
- FunctionDecl *Fn, Sema &S)
+ const FunctionDecl *Fn,
+ Sema &S)
: S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
switch (K) {
@@ -1869,11 +1877,12 @@ Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
}
Sema::SemaDiagnosticBuilder
-Sema::targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD) {
+Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) {
FD = FD ? FD : getCurFunctionDecl();
if (LangOpts.OpenMP)
- return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
- : diagIfOpenMPHostCode(Loc, DiagID, FD);
+ return LangOpts.OpenMPIsTargetDevice
+ ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
+ : diagIfOpenMPHostCode(Loc, DiagID, FD);
if (getLangOpts().CUDA)
return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
: CUDADiagIfHostCode(Loc, DiagID);
@@ -1937,8 +1946,9 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
// Try to associate errors with the lexical context, if that is a function, or
// the value declaration otherwise.
- FunctionDecl *FD = isa<FunctionDecl>(C) ? cast<FunctionDecl>(C)
- : dyn_cast_or_null<FunctionDecl>(D);
+ const FunctionDecl *FD = isa<FunctionDecl>(C)
+ ? cast<FunctionDecl>(C)
+ : dyn_cast_or_null<FunctionDecl>(D);
auto CheckDeviceType = [&](QualType Ty) {
if (Ty->isDependentType())
@@ -1975,6 +1985,8 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
(Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
(Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
!Context.getTargetInfo().hasInt128Type()) ||
+ (Ty->isBFloat16Type() && !Context.getTargetInfo().hasBFloat16Type() &&
+ !LangOpts.CUDAIsDevice) ||
LongDoubleMismatched) {
PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
if (D)
@@ -1995,7 +2007,8 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
};
auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
- if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice) ||
+ if (LangOpts.SYCLIsDevice ||
+ (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice) ||
LangOpts.CUDAIsDevice)
CheckDeviceType(Ty);
@@ -2010,7 +2023,7 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
if (Diag(Loc, PD, FD)
<< false /*show bit size*/ << 0 << Ty << false /*return*/
- << Context.getTargetInfo().getTriple().str()) {
+ << TI.getTriple().str()) {
if (D)
D->setInvalidDecl();
}
@@ -2029,7 +2042,7 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
if (Diag(Loc, PD, FD)
<< false /*show bit size*/ << 0 << Ty << true /*return*/
- << Context.getTargetInfo().getTriple().str()) {
+ << TI.getTriple().str()) {
if (D)
D->setInvalidDecl();
}
@@ -2037,6 +2050,9 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
}
+ if (Ty->isRVVType())
+ checkRVVTypeSupport(Ty, Loc, D);
+
// Don't allow SVE types in functions without a SVE target.
if (Ty->isSVESizelessBuiltinType() && FD && FD->hasBody()) {
llvm::StringMap<bool> CallerFeatureMap;
@@ -2123,11 +2139,13 @@ void Sema::PushFunctionScope() {
void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
BlockScope, Block));
+ CapturingFunctionScopes++;
}
LambdaScopeInfo *Sema::PushLambdaScope() {
LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
FunctionScopes.push_back(LSI);
+ CapturingFunctionScopes++;
return LSI;
}
@@ -2154,7 +2172,7 @@ static void checkEscapingByref(VarDecl *VD, Sema &S) {
new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
ExprResult Result;
auto IE = InitializedEntity::InitializeBlock(Loc, T);
- if (S.getLangOpts().CPlusPlus2b) {
+ if (S.getLangOpts().CPlusPlus23) {
auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr,
VK_XValue, FPOptionsOverride());
Result = S.PerformCopyInitialization(IE, SourceLocation(), E);
@@ -2249,6 +2267,8 @@ Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
void Sema::PoppedFunctionScopeDeleter::
operator()(sema::FunctionScopeInfo *Scope) const {
+ if (!Scope->isPlainFunction())
+ Self->CapturingFunctionScopes--;
// Stash the function scope for later reuse if it's for a normal function.
if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
Self->CachedFunctionScope.reset(Scope);
@@ -2324,7 +2344,8 @@ FunctionScopeInfo *Sema::getEnclosingFunction() const {
LambdaScopeInfo *Sema::getEnclosingLambda() const {
for (auto *Scope : llvm::reverse(FunctionScopes)) {
if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) {
- if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext)) {
+ if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext) &&
+ LSI->AfterParameterList) {
// We have switched contexts due to template instantiation.
// FIXME: We should swap out the FunctionScopes during code synthesis
// so that we don't need to check for this.
@@ -2350,8 +2371,8 @@ LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
return nullptr;
}
auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I);
- if (CurLSI && CurLSI->Lambda &&
- !CurLSI->Lambda->Encloses(CurContext)) {
+ if (CurLSI && CurLSI->Lambda && CurLSI->CallOperator &&
+ !CurLSI->Lambda->Encloses(CurContext) && CurLSI->AfterParameterList) {
// We have switched contexts due to template instantiation.
assert(!CodeSynthesisContexts.empty());
return nullptr;
@@ -2376,7 +2397,7 @@ void Sema::ActOnComment(SourceRange Comment) {
SourceMgr.isInSystemHeader(Comment.getBegin()))
return;
RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
- if (RC.isAlmostTrailingComment()) {
+ if (RC.isAlmostTrailingComment() || RC.hasUnsupportedSplice(SourceMgr)) {
SourceRange MagicMarkerRange(Comment.getBegin(),
Comment.getBegin().getLocWithOffset(3));
StringRef MagicMarkerText;
@@ -2387,6 +2408,11 @@ void Sema::ActOnComment(SourceRange Comment) {
case RawComment::RCK_OrdinaryC:
MagicMarkerText = "/**<";
break;
+ case RawComment::RCK_Invalid:
+ // FIXME: are there other scenarios that could produce an invalid
+ // raw comment here?
+ Diag(Comment.getBegin(), diag::warn_splice_in_doxygen_comment);
+ return;
default:
llvm_unreachable("if this is an almost Doxygen comment, "
"it should be ordinary");
@@ -2493,8 +2519,8 @@ bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
return false;
}
- if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
- if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
+ if (const auto *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
+ if (const auto *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
if (Fun->getMinRequiredArguments() == 0)
ZeroArgCallReturnTy = Fun->getReturnType();
return true;
@@ -2511,8 +2537,7 @@ bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
if (!FunTy)
FunTy = ExprTy->getAs<FunctionType>();
- if (const FunctionProtoType *FPT =
- dyn_cast_or_null<FunctionProtoType>(FunTy)) {
+ if (const auto *FPT = dyn_cast_if_present<FunctionProtoType>(FunTy)) {
if (FPT->getNumParams() == 0)
ZeroArgCallReturnTy = FunTy->getReturnType();
return true;
@@ -2543,7 +2568,7 @@ static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
continue;
}
- NamedDecl *Fn = (*It)->getUnderlyingDecl();
+ const NamedDecl *Fn = (*It)->getUnderlyingDecl();
// Don't print overloads for non-default multiversioned functions.
if (const auto *FD = Fn->getAsFunction()) {
if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
@@ -2573,7 +2598,7 @@ static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
UnresolvedSet<2> PlausibleOverloads;
for (OverloadExpr::decls_iterator It = Overloads.begin(),
DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
- const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It);
+ const auto *OverloadDecl = cast<FunctionDecl>(*It);
QualType OverloadResultTy = OverloadDecl->getReturnType();
if (IsPlausibleResult(OverloadResultTy))
PlausibleOverloads.addDecl(It.getDecl());
@@ -2585,7 +2610,7 @@ static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
/// putting parentheses after it. Notably, expressions with unary
/// operators can't be because the unary operator will start parsing
/// outside the call.
-static bool IsCallableWithAppend(Expr *E) {
+static bool IsCallableWithAppend(const Expr *E) {
E = E->IgnoreImplicit();
return (!isa<CStyleCastExpr>(E) &&
!isa<UnaryOperator>(E) &&
@@ -2659,12 +2684,6 @@ IdentifierInfo *Sema::getSuperIdentifier() const {
return Ident_super;
}
-IdentifierInfo *Sema::getFloat128Identifier() const {
- if (!Ident___float128)
- Ident___float128 = &Context.Idents.get("__float128");
- return Ident___float128;
-}
-
void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
CapturedRegionKind K,
unsigned OpenMPCaptureLevel) {
@@ -2674,6 +2693,7 @@ void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
OpenMPCaptureLevel);
CSI->ReturnType = Context.VoidTy;
FunctionScopes.push_back(CSI);
+ CapturingFunctionScopes++;
}
CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp
index 4a39c2d065e6..4af3c0f30a8e 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp
@@ -199,6 +199,16 @@ struct AccessTarget : public AccessedEntity {
: Target(S.Target), Has(S.Has) {
S.Target = nullptr;
}
+
+ // The move assignment operator is defined as deleted pending further
+ // motivation.
+ SavedInstanceContext &operator=(SavedInstanceContext &&) = delete;
+
+ // The copy constrcutor and copy assignment operator is defined as deleted
+ // pending further motivation.
+ SavedInstanceContext(const SavedInstanceContext &) = delete;
+ SavedInstanceContext &operator=(const SavedInstanceContext &) = delete;
+
~SavedInstanceContext() {
if (Target)
Target->HasInstanceContext = Has;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
index 3ea97f6aa8f2..6dadf01ead44 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
@@ -223,8 +223,6 @@ void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
switch (Kind) {
// For most of the platforms we support, native and natural are the same.
// With XL, native is the same as power, natural means something else.
- //
- // FIXME: This is not true on Darwin/PPC.
case POAK_Native:
case POAK_Power:
Action = Sema::PSK_Push_Set;
@@ -847,7 +845,6 @@ void Sema::ActOnPragmaUnused(const Token &IdTok, Scope *curScope,
Diag(PragmaLoc, diag::warn_used_but_marked_unused) << Name;
VD->addAttr(UnusedAttr::CreateImplicit(Context, IdTok.getLocation(),
- AttributeCommonInfo::AS_Pragma,
UnusedAttr::GNU_unused));
}
@@ -863,7 +860,7 @@ void Sema::AddCFAuditedAttribute(Decl *D) {
return;
AttributeCommonInfo Info(Ident, SourceRange(Loc),
- AttributeCommonInfo::AS_Pragma);
+ AttributeCommonInfo::Form::Pragma());
D->addAttr(CFAuditedTransferAttr::CreateImplicit(Context, Info));
}
@@ -1338,6 +1335,7 @@ void Sema::ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled) {
Diag(Loc, diag::err_pragma_fenv_requires_precise);
}
NewFPFeatures.setAllowFEnvAccessOverride(IsEnabled);
+ NewFPFeatures.setRoundingMathOverride(IsEnabled);
FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
index daa61ba45e8e..f37ba5cf4c10 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -99,34 +99,53 @@ DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
if (ClassTemplateDecl *ClassTemplate
= dyn_cast_or_null<ClassTemplateDecl>(
SpecType->getTemplateName().getAsTemplateDecl())) {
- QualType ContextType
- = Context.getCanonicalType(QualType(SpecType, 0));
-
- // If the type of the nested name specifier is the same as the
- // injected class name of the named class template, we're entering
- // into that class template definition.
- QualType Injected
- = ClassTemplate->getInjectedClassNameSpecialization();
- if (Context.hasSameType(Injected, ContextType))
- return ClassTemplate->getTemplatedDecl();
+ QualType ContextType =
+ Context.getCanonicalType(QualType(SpecType, 0));
+
+ // FIXME: The fallback on the search of partial
+ // specialization using ContextType should be eventually removed since
+ // it doesn't handle the case of constrained template parameters
+ // correctly. Currently removing this fallback would change the
+ // diagnostic output for invalid code in a number of tests.
+ ClassTemplatePartialSpecializationDecl *PartialSpec = nullptr;
+ ArrayRef<TemplateParameterList *> TemplateParamLists =
+ SS.getTemplateParamLists();
+ if (!TemplateParamLists.empty()) {
+ unsigned Depth = ClassTemplate->getTemplateParameters()->getDepth();
+ auto L = find_if(TemplateParamLists,
+ [Depth](TemplateParameterList *TPL) {
+ return TPL->getDepth() == Depth;
+ });
+ if (L != TemplateParamLists.end()) {
+ void *Pos = nullptr;
+ PartialSpec = ClassTemplate->findPartialSpecialization(
+ SpecType->template_arguments(), *L, Pos);
+ }
+ } else {
+ PartialSpec = ClassTemplate->findPartialSpecialization(ContextType);
+ }
- // If the type of the nested name specifier is the same as the
- // type of one of the class template's class template partial
- // specializations, we're entering into the definition of that
- // class template partial specialization.
- if (ClassTemplatePartialSpecializationDecl *PartialSpec
- = ClassTemplate->findPartialSpecialization(ContextType)) {
+ if (PartialSpec) {
// A declaration of the partial specialization must be visible.
// We can always recover here, because this only happens when we're
// entering the context, and that can't happen in a SFINAE context.
- assert(!isSFINAEContext() &&
- "partial specialization scope specifier in SFINAE context?");
- if (!hasReachableDefinition(PartialSpec))
+ assert(!isSFINAEContext() && "partial specialization scope "
+ "specifier in SFINAE context?");
+ if (PartialSpec->hasDefinition() &&
+ !hasReachableDefinition(PartialSpec))
diagnoseMissingImport(SS.getLastQualifierNameLoc(), PartialSpec,
MissingImportKind::PartialSpecialization,
- /*Recover*/true);
+ true);
return PartialSpec;
}
+
+ // If the type of the nested name specifier is the same as the
+ // injected class name of the named class template, we're entering
+ // into that class template definition.
+ QualType Injected =
+ ClassTemplate->getInjectedClassNameSpecialization();
+ if (Context.hasSameType(Injected, ContextType))
+ return ClassTemplate->getTemplatedDecl();
}
} else if (const RecordType *RecordT = NNSType->getAs<RecordType>()) {
// The nested name specifier refers to a member of a class template.
@@ -292,6 +311,11 @@ bool Sema::ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc,
bool Sema::ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc,
CXXScopeSpec &SS) {
+ if (getCurLambda()) {
+ Diag(SuperLoc, diag::err_super_in_lambda_unsupported);
+ return true;
+ }
+
CXXRecordDecl *RD = nullptr;
for (Scope *S = getCurScope(); S; S = S->getParent()) {
if (S->isFunctionScope()) {
@@ -308,9 +332,6 @@ bool Sema::ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
if (!RD) {
Diag(SuperLoc, diag::err_invalid_super_scope);
return true;
- } else if (RD->isLambda()) {
- Diag(SuperLoc, diag::err_super_in_lambda_unsupported);
- return true;
} else if (RD->getNumBases() == 0) {
Diag(SuperLoc, diag::err_no_base_classes) << RD->getName();
return true;
@@ -394,51 +415,6 @@ NamedDecl *Sema::FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS) {
return nullptr;
}
-bool Sema::isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
- NestedNameSpecInfo &IdInfo) {
- QualType ObjectType = GetTypeFromParser(IdInfo.ObjectType);
- LookupResult Found(*this, IdInfo.Identifier, IdInfo.IdentifierLoc,
- LookupNestedNameSpecifierName);
-
- // Determine where to perform name lookup
- DeclContext *LookupCtx = nullptr;
- bool isDependent = false;
- if (!ObjectType.isNull()) {
- // This nested-name-specifier occurs in a member access expression, e.g.,
- // x->B::f, and we are looking into the type of the object.
- assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
- LookupCtx = computeDeclContext(ObjectType);
- isDependent = ObjectType->isDependentType();
- } else if (SS.isSet()) {
- // This nested-name-specifier occurs after another nested-name-specifier,
- // so long into the context associated with the prior nested-name-specifier.
- LookupCtx = computeDeclContext(SS, false);
- isDependent = isDependentScopeSpecifier(SS);
- Found.setContextRange(SS.getRange());
- }
-
- if (LookupCtx) {
- // Perform "qualified" name lookup into the declaration context we
- // computed, which is either the type of the base of a member access
- // expression or the declaration context associated with a prior
- // nested-name-specifier.
-
- // The declaration context must be complete.
- if (!LookupCtx->isDependentContext() &&
- RequireCompleteDeclContext(SS, LookupCtx))
- return false;
-
- LookupQualifiedName(Found, LookupCtx);
- } else if (isDependent) {
- return false;
- } else {
- LookupName(Found, S);
- }
- Found.suppressDiagnostics();
-
- return Found.getAsSingle<NamespaceDecl>();
-}
-
namespace {
// Callback to only accept typo corrections that can be a valid C++ member
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
index 9fd9369c9641..d65ecf52c523 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
@@ -25,6 +25,7 @@
#include "clang/Sema/Initialization.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include <set>
using namespace clang;
@@ -65,9 +66,13 @@ namespace {
// If a pr-value initially has the type cv-T, where T is a
// cv-unqualified non-class, non-array type, the type of the
// expression is adjusted to T prior to any further analysis.
+ // C2x 6.5.4p6:
+ // Preceding an expression by a parenthesized type name converts the
+ // value of the expression to the unqualified, non-atomic version of
+ // the named type.
if (!S.Context.getLangOpts().ObjC && !DestType->isRecordType() &&
!DestType->isArrayType()) {
- DestType = DestType.getUnqualifiedType();
+ DestType = DestType.getAtomicUnqualifiedType();
}
if (const BuiltinType *placeholder =
@@ -449,9 +454,27 @@ static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
switch (sequence.getFailureKind()) {
default: return false;
+ case InitializationSequence::FK_ParenthesizedListInitFailed:
+ // In C++20, if the underlying destination type is a RecordType, Clang
+ // attempts to perform parentesized aggregate initialization if constructor
+ // overload fails:
+ //
+ // C++20 [expr.static.cast]p4:
+ // An expression E can be explicitly converted to a type T...if overload
+ // resolution for a direct-initialization...would find at least one viable
+ // function ([over.match.viable]), or if T is an aggregate type having a
+ // first element X and there is an implicit conversion sequence from E to
+ // the type of X.
+ //
+ // If that fails, then we'll generate the diagnostics from the failed
+ // previous constructor overload attempt. Array initialization, however, is
+ // not done after attempting constructor overloading, so we exit as there
+ // won't be a failed overload result.
+ if (destType->isArrayType())
+ return false;
+ break;
case InitializationSequence::FK_ConstructorOverloadFailed:
case InitializationSequence::FK_UserConversionOverloadFailed:
- case InitializationSequence::FK_ParenthesizedListInitFailed:
break;
}
@@ -2350,6 +2373,12 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return TC_Success;
}
+ // Allow bitcasting between SVE VLATs and VLSTs, and vice-versa.
+ if (Self.isValidRVVBitcast(SrcType, DestType)) {
+ Kind = CK_BitCast;
+ return TC_Success;
+ }
+
// The non-vector type, if any, must have integral type. This is
// the same rule that C vector casts use; note, however, that enum
// types are not integral in C++.
@@ -2736,6 +2765,15 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
}
}
+ // WebAssembly tables cannot be cast.
+ QualType SrcType = SrcExpr.get()->getType();
+ if (SrcType->isWebAssemblyTableType()) {
+ Self.Diag(OpRange.getBegin(), diag::err_wasm_cast_table)
+ << 1 << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+
// C++ [expr.cast]p5: The conversions performed by
// - a const_cast,
// - a static_cast,
@@ -2911,6 +2949,13 @@ void CastOperation::CheckCStyleCast() {
return;
QualType SrcType = SrcExpr.get()->getType();
+ if (SrcType->isWebAssemblyTableType()) {
+ Self.Diag(OpRange.getBegin(), diag::err_wasm_cast_table)
+ << 1 << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+
assert(!SrcType->isPlaceholderType());
checkAddressSpaceCast(SrcType, DestType);
@@ -2937,6 +2982,13 @@ void CastOperation::CheckCStyleCast() {
return;
}
+ // Allow bitcasting between compatible RVV vector types.
+ if ((SrcType->isVectorType() || DestType->isVectorType()) &&
+ Self.isValidRVVBitcast(SrcType, DestType)) {
+ Kind = CK_BitCast;
+ return;
+ }
+
if (!DestType->isScalarType() && !DestType->isVectorType() &&
!DestType->isMatrixType()) {
const RecordType *DestRecordTy = DestType->getAs<RecordType>();
@@ -3075,20 +3127,6 @@ void CastOperation::CheckCStyleCast() {
return;
}
- // Can't cast to or from bfloat
- if (DestType->isBFloat16Type() && !SrcType->isBFloat16Type()) {
- Self.Diag(SrcExpr.get()->getExprLoc(), diag::err_cast_to_bfloat16)
- << SrcExpr.get()->getSourceRange();
- SrcExpr = ExprError();
- return;
- }
- if (SrcType->isBFloat16Type() && !DestType->isBFloat16Type()) {
- Self.Diag(SrcExpr.get()->getExprLoc(), diag::err_cast_from_bfloat16)
- << SrcExpr.get()->getSourceRange();
- SrcExpr = ExprError();
- return;
- }
-
// If either type is a pointer, the other type has to be either an
// integer or a pointer.
if (!DestType->isArithmeticType()) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
index ea21171aaac6..f8e48728da66 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
@@ -72,10 +72,10 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
@@ -86,6 +86,7 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <bitset>
#include <cassert>
@@ -2025,6 +2026,12 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case llvm::Triple::loongarch32:
case llvm::Triple::loongarch64:
return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ return CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ return CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall);
}
}
@@ -2143,6 +2150,14 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
+ case Builtin::BI__builtin_set_flt_rounds:
+ if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall,
+ {llvm::Triple::x86, llvm::Triple::x86_64,
+ llvm::Triple::arm, llvm::Triple::thumb,
+ llvm::Triple::aarch64}))
+ return ExprError();
+ break;
+
case Builtin::BI__builtin_isgreater:
case Builtin::BI__builtin_isgreaterequal:
case Builtin::BI__builtin_isless:
@@ -2156,6 +2171,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (SemaBuiltinFPClassification(TheCall, 6))
return ExprError();
break;
+ case Builtin::BI__builtin_isfpclass:
+ if (SemaBuiltinFPClassification(TheCall, 2))
+ return ExprError();
+ break;
case Builtin::BI__builtin_isfinite:
case Builtin::BI__builtin_isinf:
case Builtin::BI__builtin_isinf_sign:
@@ -2471,6 +2490,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BIaddressof:
case Builtin::BI__addressof:
case Builtin::BIforward:
+ case Builtin::BIforward_like:
case Builtin::BImove:
case Builtin::BImove_if_noexcept:
case Builtin::BIas_const: {
@@ -2580,6 +2600,12 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
break;
}
+ case Builtin::BI__builtin_nondeterministic_value: {
+ if (SemaBuiltinNonDeterministicValue(TheCall))
+ return ExprError();
+ break;
+ }
+
// __builtin_elementwise_abs restricts the element type to signed integers or
// floating point types only.
case Builtin::BI__builtin_elementwise_abs: {
@@ -2604,8 +2630,16 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// types only.
case Builtin::BI__builtin_elementwise_ceil:
case Builtin::BI__builtin_elementwise_cos:
+ case Builtin::BI__builtin_elementwise_exp:
+ case Builtin::BI__builtin_elementwise_exp2:
case Builtin::BI__builtin_elementwise_floor:
+ case Builtin::BI__builtin_elementwise_log:
+ case Builtin::BI__builtin_elementwise_log2:
+ case Builtin::BI__builtin_elementwise_log10:
case Builtin::BI__builtin_elementwise_roundeven:
+ case Builtin::BI__builtin_elementwise_round:
+ case Builtin::BI__builtin_elementwise_rint:
+ case Builtin::BI__builtin_elementwise_nearbyint:
case Builtin::BI__builtin_elementwise_sin:
case Builtin::BI__builtin_elementwise_trunc:
case Builtin::BI__builtin_elementwise_canonicalize: {
@@ -2613,17 +2647,29 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
QualType ArgTy = TheCall->getArg(0)->getType();
- QualType EltTy = ArgTy;
+ if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
+ ArgTy, 1))
+ return ExprError();
+ break;
+ }
+ case Builtin::BI__builtin_elementwise_fma: {
+ if (SemaBuiltinElementwiseTernaryMath(TheCall))
+ return ExprError();
+ break;
+ }
- if (auto *VecTy = EltTy->getAs<VectorType>())
- EltTy = VecTy->getElementType();
- if (!EltTy->isFloatingType()) {
- Diag(TheCall->getArg(0)->getBeginLoc(),
- diag::err_builtin_invalid_arg_type)
- << 1 << /* float ty*/ 5 << ArgTy;
+ // These builtins restrict the element type to floating point
+ // types only, and take in two arguments.
+ case Builtin::BI__builtin_elementwise_pow: {
+ if (SemaBuiltinElementwiseMath(TheCall))
+ return ExprError();
+ QualType ArgTy = TheCall->getArg(0)->getType();
+ if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
+ ArgTy, 1) ||
+ checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
+ ArgTy, 2))
return ExprError();
- }
break;
}
@@ -2857,6 +2903,9 @@ bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
+#define GET_SME_IMMEDIATE_CHECK
+#include "clang/Basic/arm_sme_sema_rangechecks.inc"
+#undef GET_SME_IMMEDIATE_CHECK
}
// Perform all the immediate checks for this builtin call.
@@ -2962,6 +3011,18 @@ bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
HasError = true;
break;
+ case SVETypeFlags::ImmCheck0_0:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 0))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_15:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 15))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_255:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 255))
+ HasError = true;
+ break;
}
}
@@ -3770,7 +3831,7 @@ bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
return Diag(TheCall->getBeginLoc(),
diag::err_loongarch_builtin_requires_la64)
<< TheCall->getSourceRange();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LoongArch::BI__builtin_loongarch_cacop_w: {
if (BuiltinID == LoongArch::BI__builtin_loongarch_cacop_w &&
!TI.hasFeature("32bit"))
@@ -4153,21 +4214,6 @@ static bool isPPC_64Builtin(unsigned BuiltinID) {
return false;
}
-static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall,
- StringRef FeatureToCheck, unsigned DiagID,
- StringRef DiagArg = "") {
- if (S.Context.getTargetInfo().hasFeature(FeatureToCheck))
- return false;
-
- if (DiagArg.empty())
- S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange();
- else
- S.Diag(TheCall->getBeginLoc(), DiagID)
- << DiagArg << TheCall->getSourceRange();
-
- return true;
-}
-
/// Returns true if the argument consists of one contiguous run of 1s with any
/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
@@ -4212,42 +4258,16 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
case PPC::BI__builtin_tbegin:
case PPC::BI__builtin_tend:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) ||
- SemaFeatureCheck(*this, TheCall, "htm",
- diag::err_ppc_builtin_requires_htm);
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
case PPC::BI__builtin_tsr:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
- SemaFeatureCheck(*this, TheCall, "htm",
- diag::err_ppc_builtin_requires_htm);
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7);
case PPC::BI__builtin_tabortwc:
case PPC::BI__builtin_tabortdc:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
- SemaFeatureCheck(*this, TheCall, "htm",
- diag::err_ppc_builtin_requires_htm);
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
case PPC::BI__builtin_tabortwci:
case PPC::BI__builtin_tabortdci:
- return SemaFeatureCheck(*this, TheCall, "htm",
- diag::err_ppc_builtin_requires_htm) ||
- (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
- SemaBuiltinConstantArgRange(TheCall, 2, 0, 31));
- case PPC::BI__builtin_tabort:
- case PPC::BI__builtin_tcheck:
- case PPC::BI__builtin_treclaim:
- case PPC::BI__builtin_trechkpt:
- case PPC::BI__builtin_tendall:
- case PPC::BI__builtin_tresume:
- case PPC::BI__builtin_tsuspend:
- case PPC::BI__builtin_get_texasr:
- case PPC::BI__builtin_get_texasru:
- case PPC::BI__builtin_get_tfhar:
- case PPC::BI__builtin_get_tfiar:
- case PPC::BI__builtin_set_texasr:
- case PPC::BI__builtin_set_texasru:
- case PPC::BI__builtin_set_tfhar:
- case PPC::BI__builtin_set_tfiar:
- case PPC::BI__builtin_ttest:
- return SemaFeatureCheck(*this, TheCall, "htm",
- diag::err_ppc_builtin_requires_htm);
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
// According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
// __builtin_(un)pack_longdouble are available only if long double uses IBM
// extended double representation.
@@ -4268,26 +4288,8 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case PPC::BI__builtin_vsx_xxpermdi:
case PPC::BI__builtin_vsx_xxsldwi:
return SemaBuiltinVSX(TheCall);
- case PPC::BI__builtin_divwe:
- case PPC::BI__builtin_divweu:
- case PPC::BI__builtin_divde:
- case PPC::BI__builtin_divdeu:
- return SemaFeatureCheck(*this, TheCall, "extdiv",
- diag::err_ppc_builtin_only_on_arch, "7");
- case PPC::BI__builtin_bpermd:
- return SemaFeatureCheck(*this, TheCall, "bpermd",
- diag::err_ppc_builtin_only_on_arch, "7");
case PPC::BI__builtin_unpack_vector_int128:
- return SemaFeatureCheck(*this, TheCall, "vsx",
- diag::err_ppc_builtin_only_on_arch, "7") ||
- SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
- case PPC::BI__builtin_pack_vector_int128:
- return SemaFeatureCheck(*this, TheCall, "vsx",
- diag::err_ppc_builtin_only_on_arch, "7");
- case PPC::BI__builtin_pdepd:
- case PPC::BI__builtin_pextd:
- return SemaFeatureCheck(*this, TheCall, "isa-v31-instructions",
- diag::err_ppc_builtin_only_on_arch, "10");
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
case PPC::BI__builtin_altivec_vgnb:
return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
case PPC::BI__builtin_vsx_xxeval:
@@ -4301,17 +4303,8 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case PPC::BI__builtin_ppc_tw:
case PPC::BI__builtin_ppc_tdw:
return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
- case PPC::BI__builtin_ppc_cmpeqb:
- case PPC::BI__builtin_ppc_setb:
- case PPC::BI__builtin_ppc_maddhd:
- case PPC::BI__builtin_ppc_maddhdu:
- case PPC::BI__builtin_ppc_maddld:
- return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
- diag::err_ppc_builtin_only_on_arch, "9");
case PPC::BI__builtin_ppc_cmprb:
- return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
- diag::err_ppc_builtin_only_on_arch, "9") ||
- SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
// For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
// be a constant that represents a contiguous bit field.
case PPC::BI__builtin_ppc_rlwnm:
@@ -4320,15 +4313,8 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case PPC::BI__builtin_ppc_rldimi:
return SemaBuiltinConstantArg(TheCall, 2, Result) ||
SemaValueIsRunOfOnes(TheCall, 3);
- case PPC::BI__builtin_ppc_extract_exp:
- case PPC::BI__builtin_ppc_extract_sig:
- case PPC::BI__builtin_ppc_insert_exp:
- return SemaFeatureCheck(*this, TheCall, "power9-vector",
- diag::err_ppc_builtin_only_on_arch, "9");
case PPC::BI__builtin_ppc_addex: {
- if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
- diag::err_ppc_builtin_only_on_arch, "9") ||
- SemaBuiltinConstantArgRange(TheCall, 2, 0, 3))
+ if (SemaBuiltinConstantArgRange(TheCall, 2, 0, 3))
return true;
// Output warning for reserved values 1 to 3.
int ArgValue =
@@ -4350,41 +4336,19 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
return SemaBuiltinConstantArgPower2(TheCall, 0);
case PPC::BI__builtin_ppc_rdlam:
return SemaValueIsRunOfOnes(TheCall, 2);
- case PPC::BI__builtin_ppc_icbt:
- case PPC::BI__builtin_ppc_sthcx:
- case PPC::BI__builtin_ppc_stbcx:
- case PPC::BI__builtin_ppc_lharx:
- case PPC::BI__builtin_ppc_lbarx:
- return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
- diag::err_ppc_builtin_only_on_arch, "8");
case PPC::BI__builtin_vsx_ldrmb:
case PPC::BI__builtin_vsx_strmb:
- return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
- diag::err_ppc_builtin_only_on_arch, "8") ||
- SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
+ return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
case PPC::BI__builtin_altivec_vcntmbb:
case PPC::BI__builtin_altivec_vcntmbh:
case PPC::BI__builtin_altivec_vcntmbw:
case PPC::BI__builtin_altivec_vcntmbd:
return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
- case PPC::BI__builtin_darn:
- case PPC::BI__builtin_darn_raw:
- case PPC::BI__builtin_darn_32:
- return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
- diag::err_ppc_builtin_only_on_arch, "9");
case PPC::BI__builtin_vsx_xxgenpcvbm:
case PPC::BI__builtin_vsx_xxgenpcvhm:
case PPC::BI__builtin_vsx_xxgenpcvwm:
case PPC::BI__builtin_vsx_xxgenpcvdm:
return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
- case PPC::BI__builtin_ppc_compare_exp_uo:
- case PPC::BI__builtin_ppc_compare_exp_lt:
- case PPC::BI__builtin_ppc_compare_exp_gt:
- case PPC::BI__builtin_ppc_compare_exp_eq:
- return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
- diag::err_ppc_builtin_only_on_arch, "9") ||
- SemaFeatureCheck(*this, TheCall, "vsx",
- diag::err_ppc_builtin_requires_vsx);
case PPC::BI__builtin_ppc_test_data_class: {
// Check if the first argument of the __builtin_ppc_test_data_class call is
// valid. The argument must be 'float' or 'double' or '__float128'.
@@ -4394,11 +4358,7 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
ArgType != QualType(Context.Float128Ty))
return Diag(TheCall->getBeginLoc(),
diag::err_ppc_invalid_test_data_class_type);
- return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
- diag::err_ppc_builtin_only_on_arch, "9") ||
- SemaFeatureCheck(*this, TheCall, "vsx",
- diag::err_ppc_builtin_requires_vsx) ||
- SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
}
case PPC::BI__builtin_ppc_maxfe:
case PPC::BI__builtin_ppc_minfe:
@@ -4427,11 +4387,7 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
<< TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
return false;
}
- case PPC::BI__builtin_ppc_load8r:
- case PPC::BI__builtin_ppc_store8r:
- return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions",
- diag::err_ppc_builtin_only_on_arch, "7");
-#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
+#define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
case PPC::BI__builtin_##Name: \
return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types);
#include "clang/Basic/BuiltinsPPC.def"
@@ -4545,7 +4501,7 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
bool FeatureMissing = false;
SmallVector<StringRef> ReqFeatures;
StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
- Features.split(ReqFeatures, ',');
+ Features.split(ReqFeatures, ',', -1, false);
// Check if each required feature is included
for (StringRef F : ReqFeatures) {
@@ -4575,7 +4531,7 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
std::string FeatureStr = OF.str();
FeatureStr[0] = std::toupper(FeatureStr[0]);
// Combine strings.
- FeatureStrs += FeatureStrs == "" ? "" : ", ";
+ FeatureStrs += FeatureStrs.empty() ? "" : ", ";
FeatureStrs += "'";
FeatureStrs += FeatureStr;
FeatureStrs += "'";
@@ -4591,6 +4547,73 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
if (FeatureMissing)
return true;
+ // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
+ // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
+ switch (BuiltinID) {
+ default:
+ break;
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
+ bool RequireV = false;
+ for (unsigned ArgNum = 0; ArgNum < TheCall->getNumArgs(); ++ArgNum)
+ RequireV |= TheCall->getArg(ArgNum)->getType()->isRVVType(
+ /* Bitwidth */ 64, /* IsFloat */ false);
+
+ if (RequireV && !TI.hasFeature("v"))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_riscv_builtin_requires_extension)
+ << /* IsExtension */ false << TheCall->getSourceRange() << "v";
+
+ break;
+ }
+ }
+
switch (BuiltinID) {
case RISCVVector::BI__builtin_rvv_vsetvli:
return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
@@ -4605,9 +4628,12 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
ASTContext::BuiltinVectorTypeInfo VecInfo =
Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
- unsigned MaxIndex =
- (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
- (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
+ unsigned MaxIndex;
+ if (VecInfo.NumVectors != 1) // vget for tuple type
+ MaxIndex = VecInfo.NumVectors;
+ else // vget for non-tuple type
+ MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
+ (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
}
case RISCVVector::BI__builtin_rvv_vset_v: {
@@ -4617,22 +4643,679 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
ASTContext::BuiltinVectorTypeInfo VecInfo =
Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
- unsigned MaxIndex =
- (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
- (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
+ unsigned MaxIndex;
+ if (ResVecInfo.NumVectors != 1) // vset for tuple type
+ MaxIndex = ResVecInfo.NumVectors;
+ else // vset fo non-tuple type
+ MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
+ (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
}
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32mf2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m8:
+ // bit_27_26, bit_24_20, bit_11_7, simm5
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 31) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
+ // bit_27_26, bit_11_7, vs2, simm5
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_i:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se:
+ // bit_27_26, bit_24_20, simm5
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_iv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se:
+ // bit_27_26, vs2, simm5
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se:
+ // bit_27_26, vd, vs2, simm5
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32mf2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m8:
+ // bit_27_26, bit_24_20, bit_11_7, xs1
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
+ case RISCVVector::BI__builtin_rvv_sf_vc_xv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_vv_se:
+ // bit_27_26, bit_11_7, vs2, xs1/vs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_x:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se:
+ // bit_27_26, bit_24-20, xs1
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se:
+ // bit_27_26, vd, vs2, xs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se:
+ // bit_27_26, vs2, xs1/vs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se:
+ // bit_27_26, vd, vs2, xs1/vs1
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
+ case RISCVVector::BI__builtin_rvv_sf_vc_fv_se:
+ // bit_26, bit_11_7, vs2, fs1
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se:
+ // bit_26, vd, vs2, fs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se:
+ // bit_26, vs2, fs1
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
// Check if byteselect is in [0, 3]
- case RISCV::BI__builtin_riscv_aes32dsi_32:
- case RISCV::BI__builtin_riscv_aes32dsmi_32:
- case RISCV::BI__builtin_riscv_aes32esi_32:
- case RISCV::BI__builtin_riscv_aes32esmi_32:
+ case RISCV::BI__builtin_riscv_aes32dsi:
+ case RISCV::BI__builtin_riscv_aes32dsmi:
+ case RISCV::BI__builtin_riscv_aes32esi:
+ case RISCV::BI__builtin_riscv_aes32esmi:
case RISCV::BI__builtin_riscv_sm4ks:
case RISCV::BI__builtin_riscv_sm4ed:
return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
// Check if rnum is in [0, 10]
- case RISCV::BI__builtin_riscv_aes64ks1i_64:
+ case RISCV::BI__builtin_riscv_aes64ks1i:
return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10);
+ // Check if value range for vxrm is in [0, 3]
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx:
+ case RISCVVector::BI__builtin_rvv_vasub_vv:
+ case RISCVVector::BI__builtin_rvv_vasub_vx:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx:
+ case RISCVVector::BI__builtin_rvv_vssra_vv:
+ case RISCVVector::BI__builtin_rvv_vssra_vx:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_m:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_m:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_m:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_m:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_m:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_m:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_m:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_m:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_m:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_m:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_m:
+ return SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
+ return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu:
+ return SemaBuiltinConstantArgRange(TheCall, 3, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu:
+ return SemaBuiltinConstantArgRange(TheCall, 4, 0, 4);
+ case RISCV::BI__builtin_riscv_ntl_load:
+ case RISCV::BI__builtin_riscv_ntl_store:
+ DeclRefExpr *DRE =
+ cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store ||
+ BuiltinID == RISCV::BI__builtin_riscv_ntl_load) &&
+ "Unexpected RISC-V nontemporal load/store builtin!");
+ bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store;
+ unsigned NumArgs = IsStore ? 3 : 2;
+
+ if (checkArgCount(*this, TheCall, NumArgs))
+ return true;
+
+ // Domain value should be compile-time constant.
+ // 2 <= domain <= 5
+ if (SemaBuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5))
+ return true;
+
+ Expr *PointerArg = TheCall->getArg(0);
+ ExprResult PointerArgResult =
+ DefaultFunctionArrayLvalueConversion(PointerArg);
+
+ if (PointerArgResult.isInvalid())
+ return true;
+ PointerArg = PointerArgResult.get();
+
+ const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>();
+ if (!PtrType) {
+ Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ QualType ValType = PtrType->getPointeeType();
+ ValType = ValType.getUnqualifiedType();
+ if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
+ !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
+ !ValType->isVectorType() && !ValType->isRVVType()) {
+ Diag(DRE->getBeginLoc(),
+ diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ if (!IsStore) {
+ TheCall->setType(ValType);
+ return false;
+ }
+
+ ExprResult ValArg = TheCall->getArg(1);
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, ValType, /*consume*/ false);
+ ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
+ if (ValArg.isInvalid())
+ return true;
+
+ TheCall->setArg(1, ValArg.get());
+ TheCall->setType(Context.VoidTy);
+ return false;
}
return false;
@@ -4708,6 +5391,68 @@ bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
+bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_ref_null_extern:
+ return BuiltinWasmRefNullExtern(TheCall);
+ case WebAssembly::BI__builtin_wasm_ref_null_func:
+ return BuiltinWasmRefNullFunc(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_get:
+ return BuiltinWasmTableGet(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_set:
+ return BuiltinWasmTableSet(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_size:
+ return BuiltinWasmTableSize(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_grow:
+ return BuiltinWasmTableGrow(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_fill:
+ return BuiltinWasmTableFill(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_copy:
+ return BuiltinWasmTableCopy(TheCall);
+ }
+
+ return false;
+}
+
+void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
+ const TargetInfo &TI = Context.getTargetInfo();
+ // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) requires at
+ // least zve64x
+ if ((Ty->isRVVType(/* Bitwidth */ 64, /* IsFloat */ false) ||
+ Ty->isRVVType(/* ElementCount */ 1)) &&
+ !TI.hasFeature("zve64x"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
+ if (Ty->isRVVType(/* Bitwidth */ 16, /* IsFloat */ true) &&
+ !TI.hasFeature("zvfh"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfh";
+ if (Ty->isRVVType(/* Bitwidth */ 32, /* IsFloat */ true) &&
+ !TI.hasFeature("zve32f"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
+ if (Ty->isRVVType(/* Bitwidth */ 64, /* IsFloat */ true) &&
+ !TI.hasFeature("zve64d"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
+ // Given that caller already checked isRVVType() before calling this function,
+ // if we don't have at least zve32x supported, then we need to emit error.
+ if (!TI.hasFeature("zve32x"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
+}
+
+bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (BuiltinID) {
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
+ case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
+ return checkArgCountAtMost(*this, TheCall, 3);
+ }
+
+ return false;
+}
+
/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
/// This checks that the target supports __builtin_cpu_supports and
/// that the string argument is constant and valid.
@@ -5205,6 +5950,8 @@ bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_tdpbuud:
case X86::BI__builtin_ia32_tdpbf16ps:
case X86::BI__builtin_ia32_tdpfp16ps:
+ case X86::BI__builtin_ia32_tcmmimfp16ps:
+ case X86::BI__builtin_ia32_tcmmrlfp16ps:
return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
}
}
@@ -5569,6 +6316,7 @@ bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case X86::BI__builtin_ia32_pternlogq128_maskz:
case X86::BI__builtin_ia32_pternlogq256_mask:
case X86::BI__builtin_ia32_pternlogq256_maskz:
+ case X86::BI__builtin_ia32_vsm3rnds2:
i = 3; l = 0; u = 255;
break;
case X86::BI__builtin_ia32_gatherpfdpd:
@@ -6268,7 +7016,15 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
Op == AtomicExpr::AO__atomic_store_n ||
Op == AtomicExpr::AO__atomic_exchange_n ||
Op == AtomicExpr::AO__atomic_compare_exchange_n;
- bool IsAddSub = false;
+ // Bit mask for extra allowed value types other than integers for atomic
+ // arithmetic operations. Add/sub allow pointer and floating point. Min/max
+ // allow floating point.
+ enum ArithOpExtraValueType {
+ AOEVT_None = 0,
+ AOEVT_Pointer = 1,
+ AOEVT_FP = 2,
+ };
+ unsigned ArithAllows = AOEVT_None;
switch (Op) {
case AtomicExpr::AO__c11_atomic_init:
@@ -6294,18 +7050,30 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__atomic_store_n:
Form = Copy;
break;
- case AtomicExpr::AO__hip_atomic_fetch_add:
- case AtomicExpr::AO__hip_atomic_fetch_min:
- case AtomicExpr::AO__hip_atomic_fetch_max:
- case AtomicExpr::AO__c11_atomic_fetch_add:
- case AtomicExpr::AO__c11_atomic_fetch_sub:
- case AtomicExpr::AO__opencl_atomic_fetch_add:
- case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__atomic_sub_fetch:
- IsAddSub = true;
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__opencl_atomic_fetch_add:
+ case AtomicExpr::AO__opencl_atomic_fetch_sub:
+ case AtomicExpr::AO__hip_atomic_fetch_add:
+ case AtomicExpr::AO__hip_atomic_fetch_sub:
+ ArithAllows = AOEVT_Pointer | AOEVT_FP;
+ Form = Arithmetic;
+ break;
+ case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__atomic_min_fetch:
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__opencl_atomic_fetch_max:
+ case AtomicExpr::AO__opencl_atomic_fetch_min:
+ case AtomicExpr::AO__hip_atomic_fetch_max:
+ case AtomicExpr::AO__hip_atomic_fetch_min:
+ ArithAllows = AOEVT_FP;
Form = Arithmetic;
break;
case AtomicExpr::AO__c11_atomic_fetch_and:
@@ -6328,16 +7096,6 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__atomic_nand_fetch:
Form = Arithmetic;
break;
- case AtomicExpr::AO__c11_atomic_fetch_min:
- case AtomicExpr::AO__c11_atomic_fetch_max:
- case AtomicExpr::AO__opencl_atomic_fetch_min:
- case AtomicExpr::AO__opencl_atomic_fetch_max:
- case AtomicExpr::AO__atomic_min_fetch:
- case AtomicExpr::AO__atomic_max_fetch:
- case AtomicExpr::AO__atomic_fetch_min:
- case AtomicExpr::AO__atomic_fetch_max:
- Form = Arithmetic;
- break;
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__hip_atomic_exchange:
@@ -6425,12 +7183,13 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
if (Form == Arithmetic) {
// GCC does not enforce these rules for GNU atomics, but we do to help catch
// trivial type errors.
- auto IsAllowedValueType = [&](QualType ValType) {
+ auto IsAllowedValueType = [&](QualType ValType,
+ unsigned AllowedType) -> bool {
if (ValType->isIntegerType())
return true;
if (ValType->isPointerType())
- return true;
- if (!ValType->isFloatingType())
+ return AllowedType & AOEVT_Pointer;
+ if (!(ValType->isFloatingType() && (AllowedType & AOEVT_FP)))
return false;
// LLVM Parser does not allow atomicrmw with x86_fp80 type.
if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
@@ -6439,13 +7198,13 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
return false;
return true;
};
- if (IsAddSub && !IsAllowedValueType(ValType)) {
- Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp)
- << IsC11 << Ptr->getType() << Ptr->getSourceRange();
- return ExprError();
- }
- if (!IsAddSub && !ValType->isIntegerType()) {
- Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int)
+ if (!IsAllowedValueType(ValType, ArithAllows)) {
+ auto DID = ArithAllows & AOEVT_FP
+ ? (ArithAllows & AOEVT_Pointer
+ ? diag::err_atomic_op_needs_atomic_int_ptr_or_fp
+ : diag::err_atomic_op_needs_atomic_int_or_fp)
+ : diag::err_atomic_op_needs_atomic_int;
+ Diag(ExprRange.getBegin(), DID)
<< IsC11 << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
@@ -6728,6 +7487,34 @@ static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
return false;
}
+bool Sema::BuiltinWasmRefNullExtern(CallExpr *TheCall) {
+ if (TheCall->getNumArgs() != 0)
+ return true;
+
+ TheCall->setType(Context.getWebAssemblyExternrefType());
+
+ return false;
+}
+
+bool Sema::BuiltinWasmRefNullFunc(CallExpr *TheCall) {
+ if (TheCall->getNumArgs() != 0) {
+ Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << 0 << TheCall->getNumArgs();
+ return true;
+ }
+
+ // This custom type checking code ensures that the nodes are as expected
+ // in order to later on generate the necessary builtin.
+ QualType Pointee = Context.getFunctionType(Context.VoidTy, {}, {});
+ QualType Type = Context.getPointerType(Pointee);
+ Pointee = Context.getAddrSpaceQualType(Pointee, LangAS::wasm_funcref);
+ Type = Context.getAttributedType(attr::WebAssemblyFuncref, Type,
+ Context.getPointerType(Pointee));
+ TheCall->setType(Type);
+
+ return false;
+}
+
/// We have a call to a function like __sync_fetch_and_add, which is an
/// overloaded function based on the pointer type of its first argument.
/// The main BuildCallExpr routines have already promoted the types of
@@ -7494,9 +8281,12 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
if (checkArgCount(*this, TheCall, NumArgs))
return true;
- // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
- // on all preceding parameters just being int. Try all of those.
- for (unsigned i = 0; i < NumArgs - 1; ++i) {
+ // Find out position of floating-point argument.
+ unsigned FPArgNo = (NumArgs == 2) ? 0 : NumArgs - 1;
+
+ // We can count on all parameters preceding the floating-point just being int.
+ // Try all of those.
+ for (unsigned i = 0; i < FPArgNo; ++i) {
Expr *Arg = TheCall->getArg(i);
if (Arg->isTypeDependent())
@@ -7509,7 +8299,7 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
TheCall->setArg(i, Res.get());
}
- Expr *OrigArg = TheCall->getArg(NumArgs-1);
+ Expr *OrigArg = TheCall->getArg(FPArgNo);
if (OrigArg->isTypeDependent())
return false;
@@ -7521,7 +8311,7 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
OrigArg = UsualUnaryConversions(OrigArg).get();
else
OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
- TheCall->setArg(NumArgs - 1, OrigArg);
+ TheCall->setArg(FPArgNo, OrigArg);
// This operation requires a non-_Complex floating-point number.
if (!OrigArg->getType()->isRealFloatingType())
@@ -7529,6 +8319,12 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
diag::err_typecheck_call_invalid_unary_fp)
<< OrigArg->getType() << OrigArg->getSourceRange();
+ // __builtin_isfpclass has integer parameter that specify test mask. It is
+ // passed in (...), so it should be analyzed completely here.
+ if (NumArgs == 2)
+ if (SemaBuiltinConstantArgRange(TheCall, 1, 0, llvm::fcAllFlags))
+ return true;
+
return false;
}
@@ -7858,8 +8654,9 @@ bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
{
ExprResult FirstArgResult =
DefaultFunctionArrayLvalueConversion(FirstArg);
- if (FirstArgResult.isInvalid())
+ if (checkBuiltinArgument(*this, TheCall, 0))
return true;
+ /// In-place updation of FirstArg by checkBuiltinArgument is ignored.
TheCall->setArg(0, FirstArgResult.get());
}
@@ -8352,18 +9149,18 @@ bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
bool ValidString = true;
if (IsARMBuiltin) {
- ValidString &= Fields[0].startswith_insensitive("cp") ||
- Fields[0].startswith_insensitive("p");
+ ValidString &= Fields[0].starts_with_insensitive("cp") ||
+ Fields[0].starts_with_insensitive("p");
if (ValidString)
Fields[0] = Fields[0].drop_front(
- Fields[0].startswith_insensitive("cp") ? 2 : 1);
+ Fields[0].starts_with_insensitive("cp") ? 2 : 1);
- ValidString &= Fields[2].startswith_insensitive("c");
+ ValidString &= Fields[2].starts_with_insensitive("c");
if (ValidString)
Fields[2] = Fields[2].drop_front(1);
if (FiveFields) {
- ValidString &= Fields[3].startswith_insensitive("c");
+ ValidString &= Fields[3].starts_with_insensitive("c");
if (ValidString)
Fields[3] = Fields[3].drop_front(1);
}
@@ -8445,29 +9242,6 @@ bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
assert((TypeStr[0] != '\0') &&
"Invalid types in PPC MMA builtin declaration");
- switch (BuiltinID) {
- default:
- // This function is called in CheckPPCBuiltinFunctionCall where the
- // BuiltinID is guaranteed to be an MMA or pair vector memop builtin, here
- // we are isolating the pair vector memop builtins that can be used with mma
- // off so the default case is every builtin that requires mma and paired
- // vector memops.
- if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops",
- diag::err_ppc_builtin_only_on_arch, "10") ||
- SemaFeatureCheck(*this, TheCall, "mma",
- diag::err_ppc_builtin_only_on_arch, "10"))
- return true;
- break;
- case PPC::BI__builtin_vsx_lxvp:
- case PPC::BI__builtin_vsx_stxvp:
- case PPC::BI__builtin_vsx_assemble_pair:
- case PPC::BI__builtin_vsx_disassemble_pair:
- if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops",
- diag::err_ppc_builtin_only_on_arch, "10"))
- return true;
- break;
- }
-
unsigned Mask = 0;
unsigned ArgNum = 0;
@@ -9444,13 +10218,13 @@ void CheckFormatHandler::HandlePosition(const char *startPos,
getSpecifierRange(startPos, posLen));
}
-void
-CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
- analyze_format_string::PositionContext p) {
- EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier)
- << (unsigned) p,
- getLocationOfByte(startPos), /*IsStringLocation*/true,
- getSpecifierRange(startPos, posLen));
+void CheckFormatHandler::HandleInvalidPosition(
+ const char *startSpecifier, unsigned specifierLen,
+ analyze_format_string::PositionContext p) {
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_format_invalid_positional_specifier) << (unsigned)p,
+ getLocationOfByte(startSpecifier), /*IsStringLocation*/ true,
+ getSpecifierRange(startSpecifier, specifierLen));
}
void CheckFormatHandler::HandleZeroPosition(const char *startPos,
@@ -9495,7 +10269,7 @@ void CheckFormatHandler::DoneProcessing() {
void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
const Expr *ArgExpr) {
- assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
+ assert(hasUncoveredArg() && !DiagnosticExprs.empty() &&
"Invalid state");
if (!ArgExpr)
@@ -10392,11 +11166,18 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
ImplicitMatch == ArgType::NoMatchTypeConfusion)
Match = ImplicitMatch;
assert(Match != ArgType::MatchPromotion);
- // Look through enums to their underlying type.
+ // Look through unscoped enums to their underlying type.
bool IsEnum = false;
+ bool IsScopedEnum = false;
if (auto EnumTy = ExprTy->getAs<EnumType>()) {
- ExprTy = EnumTy->getDecl()->getIntegerType();
- IsEnum = true;
+ if (EnumTy->isUnscopedEnumerationType()) {
+ ExprTy = EnumTy->getDecl()->getIntegerType();
+ // This controls whether we're talking about the underlying type or not,
+ // which we only want to do when it's an unscoped enum.
+ IsEnum = true;
+ } else {
+ IsScopedEnum = true;
+ }
}
// %C in an Objective-C context prints a unichar, not a wchar_t.
@@ -10461,7 +11242,7 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);
- if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) {
+ if (IntendedTy == ExprTy && !ShouldNotPrintDirectly && !IsScopedEnum) {
unsigned Diag;
switch (Match) {
case ArgType::Match:
@@ -10497,12 +11278,18 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
// if necessary).
SmallString<16> CastBuf;
llvm::raw_svector_ostream CastFix(CastBuf);
- CastFix << "(";
- IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
- CastFix << ")";
+ CastFix << (S.LangOpts.CPlusPlus ? "static_cast<" : "(");
+ if (IsScopedEnum) {
+ CastFix << AT.getRepresentativeType(S.Context).getAsString(
+ S.Context.getPrintingPolicy());
+ } else {
+ IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
+ }
+ CastFix << (S.LangOpts.CPlusPlus ? ">" : ")");
SmallVector<FixItHint,4> Hints;
- if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly)
+ if ((!AT.matchesType(S.Context, IntendedTy) && !IsScopedEnum) ||
+ ShouldNotPrintDirectly)
Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));
if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
@@ -10510,7 +11297,7 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));
- } else if (!requiresParensToAddCast(E)) {
+ } else if (!requiresParensToAddCast(E) && !S.LangOpts.CPlusPlus) {
// If the expression has high enough precedence,
// just write the C-style cast.
Hints.push_back(
@@ -12170,6 +12957,10 @@ Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
}
}
+ if (RetValExp && RetValExp->getType()->isWebAssemblyTableType()) {
+ Diag(ReturnLoc, diag::err_wasm_table_art) << 1;
+ }
+
// PPC MMA non-pointer types are not allowed as return type. Checking the type
// here prevent the user from using a PPC MMA type as trailing return type.
if (Context.getTargetInfo().getTriple().isPPC64())
@@ -12423,7 +13214,7 @@ struct IntRange {
static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
unsigned MaxWidth) {
if (value.isSigned() && value.isNegative())
- return IntRange(value.getMinSignedBits(), false);
+ return IntRange(value.getSignificantBits(), false);
if (value.getBitWidth() > MaxWidth)
value = value.trunc(MaxWidth);
@@ -13340,7 +14131,7 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
if (!Value.isSigned() || Value.isNegative())
if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
- OriginalWidth = Value.getMinSignedBits();
+ OriginalWidth = Value.getSignificantBits();
if (OriginalWidth <= FieldWidth)
return false;
@@ -13932,6 +14723,13 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
QualType(Source, 0))))
return;
+ if (Target->isRVVVLSBuiltinType() &&
+ (S.Context.areCompatibleRVVTypes(QualType(Target, 0),
+ QualType(Source, 0)) ||
+ S.Context.areLaxCompatibleRVVTypes(QualType(Target, 0),
+ QualType(Source, 0))))
+ return;
+
if (!isa<VectorType>(Target)) {
if (S.SourceMgr.isInSystemMacro(CC))
return;
@@ -14283,6 +15081,12 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (S.SourceMgr.isInSystemMacro(CC))
return;
+ if (SourceBT && SourceBT->isInteger() && TargetBT &&
+ TargetBT->isInteger() &&
+ Source->isSignedIntegerType() == Target->isSignedIntegerType()) {
+ return;
+ }
+
unsigned DiagID = diag::warn_impcast_integer_sign;
// Traditionally, gcc has warned about this under -Wsign-compare.
@@ -14330,6 +15134,9 @@ static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
SourceLocation CC, bool &ICContext) {
E = E->IgnoreParenImpCasts();
+ // Diagnose incomplete type for second or third operand in C.
+ if (!S.getLangOpts().CPlusPlus && E->getType()->isRecordType())
+ S.RequireCompleteExprType(E, diag::err_incomplete_type);
if (auto *CO = dyn_cast<AbstractConditionalOperator>(E))
return CheckConditionalOperator(S, CO, CC, T);
@@ -14652,7 +15459,7 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
bool IsAddressOf = false;
- if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (auto *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
if (UO->getOpcode() != UO_AddrOf)
return;
IsAddressOf = true;
@@ -14844,37 +15651,39 @@ void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
/// Diagnose when expression is an integer constant expression and its evaluation
/// results in integer overflow
-void Sema::CheckForIntOverflow (Expr *E) {
+void Sema::CheckForIntOverflow (const Expr *E) {
// Use a work list to deal with nested struct initializers.
- SmallVector<Expr *, 2> Exprs(1, E);
+ SmallVector<const Expr *, 2> Exprs(1, E);
do {
- Expr *OriginalE = Exprs.pop_back_val();
- Expr *E = OriginalE->IgnoreParenCasts();
+ const Expr *OriginalE = Exprs.pop_back_val();
+ const Expr *E = OriginalE->IgnoreParenCasts();
- if (isa<BinaryOperator>(E)) {
+ if (isa<BinaryOperator, UnaryOperator>(E)) {
E->EvaluateForOverflow(Context);
continue;
}
- if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
+ if (const auto *InitList = dyn_cast<InitListExpr>(OriginalE))
Exprs.append(InitList->inits().begin(), InitList->inits().end());
else if (isa<ObjCBoxedExpr>(OriginalE))
E->EvaluateForOverflow(Context);
- else if (auto Call = dyn_cast<CallExpr>(E))
+ else if (const auto *Call = dyn_cast<CallExpr>(E))
Exprs.append(Call->arg_begin(), Call->arg_end());
- else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
+ else if (const auto *Message = dyn_cast<ObjCMessageExpr>(E))
Exprs.append(Message->arg_begin(), Message->arg_end());
- else if (auto Construct = dyn_cast<CXXConstructExpr>(E))
+ else if (const auto *Construct = dyn_cast<CXXConstructExpr>(E))
Exprs.append(Construct->arg_begin(), Construct->arg_end());
- else if (auto Array = dyn_cast<ArraySubscriptExpr>(E))
+ else if (const auto *Temporary = dyn_cast<CXXBindTemporaryExpr>(E))
+ Exprs.push_back(Temporary->getSubExpr());
+ else if (const auto *Array = dyn_cast<ArraySubscriptExpr>(E))
Exprs.push_back(Array->getIdx());
- else if (auto Compound = dyn_cast<CompoundLiteralExpr>(E))
+ else if (const auto *Compound = dyn_cast<CompoundLiteralExpr>(E))
Exprs.push_back(Compound->getInitializer());
- else if (auto New = dyn_cast<CXXNewExpr>(E)) {
- if (New->isArray())
- if (auto ArraySize = New->getArraySize())
- Exprs.push_back(*ArraySize);
+ else if (const auto *New = dyn_cast<CXXNewExpr>(E);
+ New && New->isArray()) {
+ if (auto ArraySize = New->getArraySize())
+ Exprs.push_back(*ArraySize);
}
} while (!Exprs.empty());
}
@@ -15210,6 +16019,23 @@ public:
Base::VisitStmt(E);
}
+ void VisitCoroutineSuspendExpr(const CoroutineSuspendExpr *CSE) {
+ for (auto *Sub : CSE->children()) {
+ const Expr *ChildExpr = dyn_cast_or_null<Expr>(Sub);
+ if (!ChildExpr)
+ continue;
+
+ if (ChildExpr == CSE->getOperand())
+ // Do not recurse over a CoroutineSuspendExpr's operand.
+ // The operand is also a subexpression of getCommonExpr(), and
+ // recursing into it directly could confuse object management
+ // for the sake of sequence tracking.
+ continue;
+
+ Visit(Sub);
+ }
+ }
+
void VisitCastExpr(const CastExpr *E) {
Object O = Object();
if (E->getCastKind() == CK_LValueToRValue)
@@ -15796,14 +16622,21 @@ bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames) {
bool HasInvalidParm = false;
for (ParmVarDecl *Param : Parameters) {
+ assert(Param && "null in a parameter list");
// C99 6.7.5.3p4: the parameters in a parameter type list in a
// function declarator that is part of a function definition of
// that function shall not have incomplete type.
//
- // This is also C++ [dcl.fct]p6.
+ // C++23 [dcl.fct.def.general]/p2
+ // The type of a parameter [...] for a function definition
+ // shall not be a (possibly cv-qualified) class type that is incomplete
+ // or abstract within the function body unless the function is deleted.
if (!Param->isInvalidDecl() &&
- RequireCompleteType(Param->getLocation(), Param->getType(),
- diag::err_typecheck_decl_incomplete_type)) {
+ (RequireCompleteType(Param->getLocation(), Param->getType(),
+ diag::err_typecheck_decl_incomplete_type) ||
+ RequireNonAbstractType(Param->getBeginLoc(), Param->getOriginalType(),
+ diag::err_abstract_type_in_decl,
+ AbstractParamType))) {
Param->setInvalidDecl();
HasInvalidParm = true;
}
@@ -15863,6 +16696,13 @@ bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
RD, /*DeclIsField*/ false);
}
}
+
+ if (!Param->isInvalidDecl() &&
+ Param->getOriginalType()->isWebAssemblyTableType()) {
+ Param->setInvalidDecl();
+ HasInvalidParm = true;
+ Diag(Param->getLocation(), diag::err_wasm_table_as_function_parameter);
+ }
}
return HasInvalidParm;
@@ -15975,8 +16815,12 @@ std::optional<std::pair<
if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) {
// FIXME: If VD is captured by copy or is an escaping __block variable,
// use the alignment of VD's type.
- if (!VD->getType()->isReferenceType())
+ if (!VD->getType()->isReferenceType()) {
+ // Dependent alignment cannot be resolved -> bail out.
+ if (VD->hasDependentAlignment())
+ break;
return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero());
+ }
if (VD->hasInit())
return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx);
}
@@ -16528,14 +17372,13 @@ static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
namespace {
struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
- ASTContext &Context;
VarDecl *Variable;
Expr *Capturer = nullptr;
bool VarWillBeReased = false;
FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
: EvaluatedExprVisitor<FindCaptureVisitor>(Context),
- Context(Context), Variable(variable) {}
+ Variable(variable) {}
void VisitDeclRefExpr(DeclRefExpr *ref) {
if (ref->getDecl() == Variable && !Capturer)
@@ -17821,6 +18664,40 @@ bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) {
return false;
}
+bool Sema::SemaBuiltinElementwiseTernaryMath(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 3))
+ return true;
+
+ Expr *Args[3];
+ for (int I = 0; I < 3; ++I) {
+ ExprResult Converted = UsualUnaryConversions(TheCall->getArg(I));
+ if (Converted.isInvalid())
+ return true;
+ Args[I] = Converted.get();
+ }
+
+ int ArgOrdinal = 1;
+ for (Expr *Arg : Args) {
+ if (checkFPMathBuiltinElementType(*this, Arg->getBeginLoc(), Arg->getType(),
+ ArgOrdinal++))
+ return true;
+ }
+
+ for (int I = 1; I < 3; ++I) {
+ if (Args[0]->getType().getCanonicalType() !=
+ Args[I]->getType().getCanonicalType()) {
+ return Diag(Args[0]->getBeginLoc(),
+ diag::err_typecheck_call_different_arg_types)
+ << Args[0]->getType() << Args[I]->getType();
+ }
+
+ TheCall->setArg(I, Args[I]);
+ }
+
+ TheCall->setType(Args[0]->getType());
+ return false;
+}
+
bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
if (checkArgCount(*this, TheCall, 1))
return true;
@@ -17833,6 +18710,21 @@ bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
return false;
}
+bool Sema::SemaBuiltinNonDeterministicValue(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 1))
+ return true;
+
+ ExprResult Arg = TheCall->getArg(0);
+ QualType TyArg = Arg.get()->getType();
+
+ if (!TyArg->isBuiltinType() && !TyArg->isVectorType())
+ return Diag(TheCall->getArg(0)->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /*vector, integer or floating point ty*/ 0 << TyArg;
+
+ TheCall->setType(TyArg);
+ return false;
+}
+
ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult) {
if (checkArgCount(*this, TheCall, 1))
@@ -18097,12 +18989,178 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
return CallResult;
}
+/// Checks the argument at the given index is a WebAssembly table and if it
+/// is, sets ElTy to the element type.
+static bool CheckWasmBuiltinArgIsTable(Sema &S, CallExpr *E, unsigned ArgIndex,
+ QualType &ElTy) {
+ Expr *ArgExpr = E->getArg(ArgIndex);
+ const auto *ATy = dyn_cast<ArrayType>(ArgExpr->getType());
+ if (!ATy || !ATy->getElementType().isWebAssemblyReferenceType()) {
+ return S.Diag(ArgExpr->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_be_table_type)
+ << ArgIndex + 1 << ArgExpr->getSourceRange();
+ }
+ ElTy = ATy->getElementType();
+ return false;
+}
+
+/// Checks the argument at the given index is an integer.
+static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E,
+ unsigned ArgIndex) {
+ Expr *ArgExpr = E->getArg(ArgIndex);
+ if (!ArgExpr->getType()->isIntegerType()) {
+ return S.Diag(ArgExpr->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_be_integer_type)
+ << ArgIndex + 1 << ArgExpr->getSourceRange();
+ }
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, and the second
+/// is an index to use as index into the table.
+bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 2))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
+ return true;
+
+ if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
+ return true;
+
+ // If all is well, we set the type of TheCall to be the type of the
+ // element of the table.
+ // i.e. a table.get on an externref table has type externref,
+ // or whatever the type of the table element is.
+ TheCall->setType(ElTy);
+
+ return false;
+}
+
+/// Check that the first argumnet is a WebAssembly table, the second is
+/// an index to use as index into the table and the third is the reference
+/// type to set into the table.
+bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 3))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
+ return true;
+
+ if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
+ return true;
+
+ if (!Context.hasSameType(ElTy, TheCall->getArg(2)->getType()))
+ return true;
+
+ return false;
+}
+
+/// Check that the argument is a WebAssembly table.
+bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 1))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
+ return true;
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, the second is the
+/// value to use for new elements (of a type matching the table type), the
+/// third value is an integer.
+bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 3))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
+ return true;
+
+ Expr *NewElemArg = TheCall->getArg(1);
+ if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
+ return Diag(NewElemArg->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_match_table_element_type)
+ << 2 << 1 << NewElemArg->getSourceRange();
+ }
+
+ if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 2))
+ return true;
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, the second is an
+/// integer, the third is the value to use to fill the table (of a type
+/// matching the table type), and the fourth is an integer.
+bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 4))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
+ return true;
+
+ if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
+ return true;
+
+ Expr *NewElemArg = TheCall->getArg(2);
+ if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
+ return Diag(NewElemArg->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_match_table_element_type)
+ << 3 << 1 << NewElemArg->getSourceRange();
+ }
+
+ if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 3))
+ return true;
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, the second is also a
+/// WebAssembly table (of the same element type), and the third to fifth
+/// arguments are integers.
+bool Sema::BuiltinWasmTableCopy(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 5))
+ return true;
+
+ QualType XElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, XElTy))
+ return true;
+
+ QualType YElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 1, YElTy))
+ return true;
+
+ Expr *TableYArg = TheCall->getArg(1);
+ if (!Context.hasSameType(XElTy, YElTy)) {
+ return Diag(TableYArg->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_match_table_element_type)
+ << 2 << 1 << TableYArg->getSourceRange();
+ }
+
+ for (int I = 2; I <= 4; I++) {
+ if (CheckWasmBuiltinArgIsInteger(*this, TheCall, I))
+ return true;
+ }
+
+ return false;
+}
+
/// \brief Enforce the bounds of a TCB
/// CheckTCBEnforcement - Enforces that every function in a named TCB only
/// directly calls other functions in the same TCB as marked by the enforce_tcb
/// and enforce_tcb_leaf attributes.
void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc,
const NamedDecl *Callee) {
+ // This warning does not make sense in code that has no runtime behavior.
+ if (isUnevaluatedContext())
+ return;
+
const NamedDecl *Caller = getCurFunctionOrMethodDecl();
if (!Caller || !Caller->hasAttr<EnforceTCBAttr>())
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
index 144bbe150abb..b5d29b2e956c 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
@@ -4148,7 +4148,7 @@ static void AddPrettyFunctionResults(const LangOptions &LangOpts,
static void HandleCodeCompleteResults(Sema *S,
CodeCompleteConsumer *CodeCompleter,
- CodeCompletionContext Context,
+ const CodeCompletionContext &Context,
CodeCompletionResult *Results,
unsigned NumResults) {
if (CodeCompleter)
@@ -4309,16 +4309,13 @@ void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
/*IsInclusionDirective=*/false);
// Enumerate submodules.
if (Mod) {
- for (Module::submodule_iterator Sub = Mod->submodule_begin(),
- SubEnd = Mod->submodule_end();
- Sub != SubEnd; ++Sub) {
-
+ for (auto *Submodule : Mod->submodules()) {
Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString((*Sub)->Name));
+ Builder.getAllocator().CopyString(Submodule->Name));
Results.AddResult(Result(
Builder.TakeString(), CCP_Declaration, CXCursor_ModuleImportDecl,
- (*Sub)->isAvailable() ? CXAvailability_Available
- : CXAvailability_NotAvailable));
+ Submodule->isAvailable() ? CXAvailability_Available
+ : CXAvailability_NotAvailable));
}
}
}
@@ -6217,7 +6214,7 @@ getNextAggregateIndexAfterDesignatedInit(const ResultCandidate &Aggregate,
// Look for designated initializers.
// They're in their syntactic form, not yet resolved to fields.
- IdentifierInfo *DesignatedFieldName = nullptr;
+ const IdentifierInfo *DesignatedFieldName = nullptr;
unsigned ArgsAfterDesignator = 0;
for (const Expr *Arg : Args) {
if (const auto *DIE = dyn_cast<DesignatedInitExpr>(Arg)) {
@@ -6423,7 +6420,7 @@ static QualType getDesignatedType(QualType BaseType, const Designation &Desig) {
assert(D.isFieldDesignator());
auto *RD = getAsRecordDecl(BaseType);
if (RD && RD->isCompleteDefinition()) {
- for (const auto *Member : RD->lookup(D.getField()))
+ for (const auto *Member : RD->lookup(D.getFieldDecl()))
if (const FieldDecl *FD = llvm::dyn_cast<FieldDecl>(Member)) {
NextType = FD->getType();
break;
@@ -8463,6 +8460,24 @@ void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
Results.data(), Results.size());
}
+void Sema::CodeCompleteObjCClassForwardDecl(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCClassForwardDecl);
+ Results.EnterNewScope();
+
+ if (CodeCompleter->includeGlobals()) {
+ // Add all classes.
+ AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
+ false, Results);
+ }
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
SourceLocation ClassNameLoc) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
@@ -10033,11 +10048,11 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
break;
case llvm::sys::fs::file_type::regular_file: {
// Only files that really look like headers. (Except in special dirs).
- const bool IsHeader = Filename.endswith_insensitive(".h") ||
- Filename.endswith_insensitive(".hh") ||
- Filename.endswith_insensitive(".hpp") ||
- Filename.endswith_insensitive(".hxx") ||
- Filename.endswith_insensitive(".inc") ||
+ const bool IsHeader = Filename.ends_with_insensitive(".h") ||
+ Filename.ends_with_insensitive(".hh") ||
+ Filename.ends_with_insensitive(".hpp") ||
+ Filename.ends_with_insensitive(".hxx") ||
+ Filename.ends_with_insensitive(".inc") ||
(ExtensionlessHeaders && !Filename.contains('.'));
if (!IsHeader)
break;
@@ -10058,12 +10073,12 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
// header maps are not (currently) enumerable.
break;
case DirectoryLookup::LT_NormalDir:
- AddFilesFromIncludeDir(IncludeDir.getDir()->getName(), IsSystem,
+ AddFilesFromIncludeDir(IncludeDir.getDirRef()->getName(), IsSystem,
DirectoryLookup::LT_NormalDir);
break;
case DirectoryLookup::LT_Framework:
- AddFilesFromIncludeDir(IncludeDir.getFrameworkDir()->getName(), IsSystem,
- DirectoryLookup::LT_Framework);
+ AddFilesFromIncludeDir(IncludeDir.getFrameworkDirRef()->getName(),
+ IsSystem, DirectoryLookup::LT_Framework);
break;
}
};
@@ -10075,9 +10090,8 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
using llvm::make_range;
if (!Angled) {
// The current directory is on the include path for "quoted" includes.
- const FileEntry *CurFile = PP.getCurrentFileLexer()->getFileEntry();
- if (CurFile && CurFile->getDir())
- AddFilesFromIncludeDir(CurFile->getDir()->getName(), false,
+ if (auto CurFile = PP.getCurrentFileLexer()->getFileEntry())
+ AddFilesFromIncludeDir(CurFile->getDir().getName(), false,
DirectoryLookup::LT_NormalDir);
for (const auto &D : make_range(S.quoted_dir_begin(), S.quoted_dir_end()))
AddFilesFromDirLookup(D, false);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
index a92bbde113fc..f24b549dd2ef 100755
--- a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
@@ -10,19 +10,20 @@
//
//===----------------------------------------------------------------------===//
-#include "TreeTransform.h"
#include "clang/Sema/SemaConcept.h"
-#include "clang/Sema/Sema.h"
-#include "clang/Sema/SemaInternal.h"
-#include "clang/Sema/SemaDiagnostic.h"
-#include "clang/Sema/TemplateDeduction.h"
-#include "clang/Sema/Template.h"
-#include "clang/Sema/Overload.h"
-#include "clang/Sema/Initialization.h"
+#include "TreeTransform.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/OperatorPrecedence.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Overload.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/StringExtras.h"
@@ -105,27 +106,35 @@ bool Sema::CheckConstraintExpression(const Expr *ConstraintExpression,
QualType Type = ConstraintExpression->getType();
auto CheckForNonPrimary = [&] {
- if (PossibleNonPrimary)
- *PossibleNonPrimary =
- // We have the following case:
- // template<typename> requires func(0) struct S { };
- // The user probably isn't aware of the parentheses required around
- // the function call, and we're only going to parse 'func' as the
- // primary-expression, and complain that it is of non-bool type.
- (NextToken.is(tok::l_paren) &&
- (IsTrailingRequiresClause ||
- (Type->isDependentType() &&
- isa<UnresolvedLookupExpr>(ConstraintExpression)) ||
- Type->isFunctionType() ||
- Type->isSpecificBuiltinType(BuiltinType::Overload))) ||
- // We have the following case:
- // template<typename T> requires size_<T> == 0 struct S { };
- // The user probably isn't aware of the parentheses required around
- // the binary operator, and we're only going to parse 'func' as the
- // first operand, and complain that it is of non-bool type.
- getBinOpPrecedence(NextToken.getKind(),
- /*GreaterThanIsOperator=*/true,
- getLangOpts().CPlusPlus11) > prec::LogicalAnd;
+ if (!PossibleNonPrimary)
+ return;
+
+ *PossibleNonPrimary =
+ // We have the following case:
+ // template<typename> requires func(0) struct S { };
+ // The user probably isn't aware of the parentheses required around
+ // the function call, and we're only going to parse 'func' as the
+ // primary-expression, and complain that it is of non-bool type.
+ //
+ // However, if we're in a lambda, this might also be:
+ // []<typename> requires var () {};
+ // Which also looks like a function call due to the lambda parentheses,
+ // but unlike the first case, isn't an error, so this check is skipped.
+ (NextToken.is(tok::l_paren) &&
+ (IsTrailingRequiresClause ||
+ (Type->isDependentType() &&
+ isa<UnresolvedLookupExpr>(ConstraintExpression) &&
+ !dyn_cast_if_present<LambdaScopeInfo>(getCurFunction())) ||
+ Type->isFunctionType() ||
+ Type->isSpecificBuiltinType(BuiltinType::Overload))) ||
+ // We have the following case:
+ // template<typename T> requires size_<T> == 0 struct S { };
+ // The user probably isn't aware of the parentheses required around
+ // the binary operator, and we're only going to parse 'func' as the
+ // first operand, and complain that it is of non-bool type.
+ getBinOpPrecedence(NextToken.getKind(),
+ /*GreaterThanIsOperator=*/true,
+ getLangOpts().CPlusPlus11) > prec::LogicalAnd;
};
// An atomic constraint!
@@ -152,7 +161,7 @@ struct SatisfactionStackRAII {
Sema &SemaRef;
bool Inserted = false;
SatisfactionStackRAII(Sema &SemaRef, const NamedDecl *ND,
- llvm::FoldingSetNodeID FSNID)
+ const llvm::FoldingSetNodeID &FSNID)
: SemaRef(SemaRef) {
if (ND) {
SemaRef.PushSatisfactionStackEntry(ND, FSNID);
@@ -519,6 +528,48 @@ bool Sema::CheckConstraintSatisfaction(const Expr *ConstraintExpr,
.isInvalid();
}
+bool Sema::addInstantiatedCapturesToScope(
+ FunctionDecl *Function, const FunctionDecl *PatternDecl,
+ LocalInstantiationScope &Scope,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ const auto *LambdaClass = cast<CXXMethodDecl>(Function)->getParent();
+ const auto *LambdaPattern = cast<CXXMethodDecl>(PatternDecl)->getParent();
+
+ unsigned Instantiated = 0;
+
+ auto AddSingleCapture = [&](const ValueDecl *CapturedPattern,
+ unsigned Index) {
+ ValueDecl *CapturedVar = LambdaClass->getCapture(Index)->getCapturedVar();
+ if (cast<CXXMethodDecl>(Function)->isConst()) {
+ QualType T = CapturedVar->getType();
+ T.addConst();
+ CapturedVar->setType(T);
+ }
+ if (CapturedVar->isInitCapture())
+ Scope.InstantiatedLocal(CapturedPattern, CapturedVar);
+ };
+
+ for (const LambdaCapture &CapturePattern : LambdaPattern->captures()) {
+ if (!CapturePattern.capturesVariable()) {
+ Instantiated++;
+ continue;
+ }
+ const ValueDecl *CapturedPattern = CapturePattern.getCapturedVar();
+ if (!CapturedPattern->isParameterPack()) {
+ AddSingleCapture(CapturedPattern, Instantiated++);
+ } else {
+ Scope.MakeInstantiatedLocalArgPack(CapturedPattern);
+ std::optional<unsigned> NumArgumentsInExpansion =
+ getNumArgumentsInExpansion(CapturedPattern->getType(), TemplateArgs);
+ if (!NumArgumentsInExpansion)
+ continue;
+ for (unsigned Arg = 0; Arg < *NumArgumentsInExpansion; ++Arg)
+ AddSingleCapture(CapturedPattern, Instantiated++);
+ }
+ }
+ return false;
+}
+
bool Sema::SetupConstraintScope(
FunctionDecl *FD, std::optional<ArrayRef<TemplateArgument>> TemplateArgs,
MultiLevelTemplateArgumentList MLTAL, LocalInstantiationScope &Scope) {
@@ -552,6 +603,11 @@ bool Sema::SetupConstraintScope(
if (addInstantiatedParametersToScope(FD, FromMemTempl->getTemplatedDecl(),
Scope, MLTAL))
return true;
+ // Make sure the captures are also added to the instantiation scope.
+ if (isLambdaCallOperator(FD) &&
+ addInstantiatedCapturesToScope(FD, FromMemTempl->getTemplatedDecl(),
+ Scope, MLTAL))
+ return true;
}
return false;
@@ -576,6 +632,11 @@ bool Sema::SetupConstraintScope(
// child-function.
if (addInstantiatedParametersToScope(FD, InstantiatedFrom, Scope, MLTAL))
return true;
+
+ // Make sure the captures are also added to the instantiation scope.
+ if (isLambdaCallOperator(FD) &&
+ addInstantiatedCapturesToScope(FD, InstantiatedFrom, Scope, MLTAL))
+ return true;
}
return false;
@@ -618,6 +679,15 @@ bool Sema::CheckFunctionConstraints(const FunctionDecl *FD,
return false;
}
+ // A lambda conversion operator has the same constraints as the call operator
+ // and constraints checking relies on whether we are in a lambda call operator
+ // (and may refer to its parameters), so check the call operator instead.
+ if (const auto *MD = dyn_cast<CXXConversionDecl>(FD);
+ MD && isLambdaConversionOperator(const_cast<CXXConversionDecl *>(MD)))
+ return CheckFunctionConstraints(MD->getParent()->getLambdaCallOperator(),
+ Satisfaction, UsageLoc,
+ ForOverloadResolution);
+
DeclContext *CtxToSave = const_cast<FunctionDecl *>(FD);
while (isLambdaCallOperator(CtxToSave) || FD->isTransparentContext()) {
@@ -644,26 +714,10 @@ bool Sema::CheckFunctionConstraints(const FunctionDecl *FD,
Record = const_cast<CXXRecordDecl *>(Method->getParent());
}
CXXThisScopeRAII ThisScope(*this, Record, ThisQuals, Record != nullptr);
- // We substitute with empty arguments in order to rebuild the atomic
- // constraint in a constant-evaluated context.
- // FIXME: Should this be a dedicated TreeTransform?
- const Expr *RC = FD->getTrailingRequiresClause();
- llvm::SmallVector<Expr *, 1> Converted;
-
- if (CheckConstraintSatisfaction(
- FD, {RC}, Converted, *MLTAL,
- SourceRange(UsageLoc.isValid() ? UsageLoc : FD->getLocation()),
- Satisfaction))
- return true;
-
- // FIXME: we need to do this for the function constraints for
- // comparison of constraints to work, but do we also need to do it for
- // CheckInstantiatedFunctionConstraints? That one is more difficult, but we
- // seem to always just pick up the constraints from the primary template.
- assert(Converted.size() <= 1 && "Got more expressions converted?");
- if (!Converted.empty() && Converted[0] != nullptr)
- const_cast<FunctionDecl *>(FD)->setTrailingRequiresClause(Converted[0]);
- return false;
+ return CheckConstraintSatisfaction(
+ FD, {FD->getTrailingRequiresClause()}, *MLTAL,
+ SourceRange(UsageLoc.isValid() ? UsageLoc : FD->getLocation()),
+ Satisfaction);
}
@@ -677,7 +731,7 @@ CalculateTemplateDepthForConstraints(Sema &S, const NamedDecl *ND,
ND, /*Final=*/false, /*Innermost=*/nullptr, /*RelativeToPrimary=*/true,
/*Pattern=*/nullptr,
/*ForConstraintInstantiation=*/true, SkipForSpecialization);
- return MLTAL.getNumSubstitutedLevels();
+ return MLTAL.getNumLevels();
}
namespace {
@@ -708,27 +762,55 @@ namespace {
};
} // namespace
+static const Expr *SubstituteConstraintExpression(Sema &S, const NamedDecl *ND,
+ const Expr *ConstrExpr) {
+ MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
+ ND, /*Final=*/false, /*Innermost=*/nullptr,
+ /*RelativeToPrimary=*/true,
+ /*Pattern=*/nullptr, /*ForConstraintInstantiation=*/true,
+ /*SkipForSpecialization*/ false);
+ if (MLTAL.getNumSubstitutedLevels() == 0)
+ return ConstrExpr;
+
+ Sema::SFINAETrap SFINAE(S, /*AccessCheckingSFINAE=*/false);
+
+ Sema::InstantiatingTemplate Inst(
+ S, ND->getLocation(),
+ Sema::InstantiatingTemplate::ConstraintNormalization{},
+ const_cast<NamedDecl *>(ND), SourceRange{});
+
+ if (Inst.isInvalid())
+ return nullptr;
+
+ std::optional<Sema::CXXThisScopeRAII> ThisScope;
+ if (auto *RD = dyn_cast<CXXRecordDecl>(ND->getDeclContext()))
+ ThisScope.emplace(S, const_cast<CXXRecordDecl *>(RD), Qualifiers());
+ ExprResult SubstConstr =
+ S.SubstConstraintExpr(const_cast<clang::Expr *>(ConstrExpr), MLTAL);
+ if (SFINAE.hasErrorOccurred() || !SubstConstr.isUsable())
+ return nullptr;
+ return SubstConstr.get();
+}
+
bool Sema::AreConstraintExpressionsEqual(const NamedDecl *Old,
const Expr *OldConstr,
const NamedDecl *New,
const Expr *NewConstr) {
- if (Old && New && Old != New) {
- unsigned Depth1 = CalculateTemplateDepthForConstraints(
- *this, Old);
- unsigned Depth2 = CalculateTemplateDepthForConstraints(
- *this, New);
-
- // Adjust the 'shallowest' verison of this to increase the depth to match
- // the 'other'.
- if (Depth2 > Depth1) {
- OldConstr = AdjustConstraintDepth(*this, Depth2 - Depth1)
- .TransformExpr(const_cast<Expr *>(OldConstr))
- .get();
- } else if (Depth1 > Depth2) {
- NewConstr = AdjustConstraintDepth(*this, Depth1 - Depth2)
- .TransformExpr(const_cast<Expr *>(NewConstr))
- .get();
- }
+ if (OldConstr == NewConstr)
+ return true;
+ // C++ [temp.constr.decl]p4
+ if (Old && New && Old != New &&
+ Old->getLexicalDeclContext() != New->getLexicalDeclContext()) {
+ if (const Expr *SubstConstr =
+ SubstituteConstraintExpression(*this, Old, OldConstr))
+ OldConstr = SubstConstr;
+ else
+ return false;
+ if (const Expr *SubstConstr =
+ SubstituteConstraintExpression(*this, New, NewConstr))
+ NewConstr = SubstConstr;
+ else
+ return false;
}
llvm::FoldingSetNodeID ID1, ID2;
@@ -1082,6 +1164,11 @@ void Sema::DiagnoseUnsatisfiedConstraint(
const NormalizedConstraint *
Sema::getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints) {
+ // In case the ConstrainedDecl comes from modules, it is necessary to use
+ // the canonical decl to avoid different atomic constraints with the 'same'
+ // declarations.
+ ConstrainedDecl = cast<NamedDecl>(ConstrainedDecl->getCanonicalDecl());
+
auto CacheEntry = NormalizationCache.find(ConstrainedDecl);
if (CacheEntry == NormalizationCache.end()) {
auto Normalized =
@@ -1307,7 +1394,7 @@ static NormalForm makeDNF(const NormalizedConstraint &Normalized) {
}
template<typename AtomicSubsumptionEvaluator>
-static bool subsumes(NormalForm PDNF, NormalForm QCNF,
+static bool subsumes(const NormalForm &PDNF, const NormalForm &QCNF,
AtomicSubsumptionEvaluator E) {
// C++ [temp.constr.order] p2
// Then, P subsumes Q if and only if, for every disjunctive clause Pi in the
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
index 9678e30699c8..deb67337a2ae 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
@@ -54,12 +54,10 @@ static QualType lookupPromiseType(Sema &S, const FunctionDecl *FD,
const FunctionProtoType *FnType = FD->getType()->castAs<FunctionProtoType>();
const SourceLocation FuncLoc = FD->getLocation();
- NamespaceDecl *CoroNamespace = nullptr;
ClassTemplateDecl *CoroTraits =
- S.lookupCoroutineTraits(KwLoc, FuncLoc, CoroNamespace);
- if (!CoroTraits) {
+ S.lookupCoroutineTraits(KwLoc, FuncLoc);
+ if (!CoroTraits)
return QualType();
- }
// Form template argument list for coroutine_traits<R, P1, P2, ...> according
// to [dcl.fct.def.coroutine]3
@@ -117,7 +115,7 @@ static QualType lookupPromiseType(Sema &S, const FunctionDecl *FD,
QualType PromiseType = S.Context.getTypeDeclType(Promise);
auto buildElaboratedType = [&]() {
- auto *NNS = NestedNameSpecifier::Create(S.Context, nullptr, CoroNamespace);
+ auto *NNS = NestedNameSpecifier::Create(S.Context, nullptr, S.getStdNamespace());
NNS = NestedNameSpecifier::Create(S.Context, NNS, false,
CoroTrait.getTypePtr());
return S.Context.getElaboratedType(ETK_None, NNS, PromiseType);
@@ -142,7 +140,7 @@ static QualType lookupCoroutineHandleType(Sema &S, QualType PromiseType,
if (PromiseType.isNull())
return QualType();
- NamespaceDecl *CoroNamespace = S.getCachedCoroNamespace();
+ NamespaceDecl *CoroNamespace = S.getStdNamespace();
assert(CoroNamespace && "Should already be diagnosed");
LookupResult Result(S, &S.PP.getIdentifierTable().get("coroutine_handle"),
@@ -324,7 +322,7 @@ static ExprResult buildMemberCall(Sema &S, Expr *Base, SourceLocation Loc,
}
// See if return type is coroutine-handle and if so, invoke builtin coro-resume
-// on its address. This is to enable experimental support for coroutine-handle
+// on its address. This is to enable the support for coroutine-handle
// returning await_suspend that results in a guaranteed tail call to the target
// coroutine.
static Expr *maybeTailCall(Sema &S, QualType RetType, Expr *E,
@@ -432,7 +430,7 @@ static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, VarDecl *CoroPromise,
// type Z.
QualType RetType = AwaitSuspend->getCallReturnType(S.Context);
- // Experimental support for coroutine_handle returning await_suspend.
+ // Support for coroutine_handle returning await_suspend.
if (Expr *TailCallSuspend =
maybeTailCall(S, RetType, AwaitSuspend, Loc))
// Note that we don't wrap the expression with ExprWithCleanups here
@@ -1139,6 +1137,18 @@ void Sema::CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body) {
Body = CoroutineBodyStmt::Create(Context, Builder);
}
+static CompoundStmt *buildCoroutineBody(Stmt *Body, ASTContext &Context) {
+ if (auto *CS = dyn_cast<CompoundStmt>(Body))
+ return CS;
+
+ // The body of the coroutine may be a try statement if it is in
+ // 'function-try-block' syntax. Here we wrap it into a compound
+ // statement for consistency.
+ assert(isa<CXXTryStmt>(Body) && "Unimaged coroutine body type");
+ return CompoundStmt::Create(Context, {Body}, FPOptionsOverride(),
+ SourceLocation(), SourceLocation());
+}
+
CoroutineStmtBuilder::CoroutineStmtBuilder(Sema &S, FunctionDecl &FD,
sema::FunctionScopeInfo &Fn,
Stmt *Body)
@@ -1146,7 +1156,7 @@ CoroutineStmtBuilder::CoroutineStmtBuilder(Sema &S, FunctionDecl &FD,
IsPromiseDependentType(
!Fn.CoroutinePromise ||
Fn.CoroutinePromise->getType()->isDependentType()) {
- this->Body = Body;
+ this->Body = buildCoroutineBody(Body, S.getASTContext());
for (auto KV : Fn.CoroutineParameterMoves)
this->ParamMovesVector.push_back(KV.second);
@@ -1732,12 +1742,22 @@ bool CoroutineStmtBuilder::makeGroDeclAndReturnStmt() {
assert(!FnRetType->isDependentType() &&
"get_return_object type must no longer be dependent");
+  // The call to get_return_object is sequenced before the call to
+  // initial_suspend and is invoked at most once, but there are caveats
+  // regarding whether the prvalue result object may be initialized
+ // directly/eager or delayed, depending on the types involved.
+ //
+ // More info at https://github.com/cplusplus/papers/issues/1414
+ bool GroMatchesRetType = S.getASTContext().hasSameType(GroType, FnRetType);
+
if (FnRetType->isVoidType()) {
ExprResult Res =
S.ActOnFinishFullExpr(this->ReturnValue, Loc, /*DiscardedValue*/ false);
if (Res.isInvalid())
return false;
+ if (!GroMatchesRetType)
+ this->ResultDecl = Res.get();
return true;
}
@@ -1750,12 +1770,61 @@ bool CoroutineStmtBuilder::makeGroDeclAndReturnStmt() {
return false;
}
- StmtResult ReturnStmt = S.BuildReturnStmt(Loc, ReturnValue);
+ StmtResult ReturnStmt;
+ clang::VarDecl *GroDecl = nullptr;
+ if (GroMatchesRetType) {
+ ReturnStmt = S.BuildReturnStmt(Loc, ReturnValue);
+ } else {
+ GroDecl = VarDecl::Create(
+ S.Context, &FD, FD.getLocation(), FD.getLocation(),
+ &S.PP.getIdentifierTable().get("__coro_gro"), GroType,
+ S.Context.getTrivialTypeSourceInfo(GroType, Loc), SC_None);
+ GroDecl->setImplicit();
+
+ S.CheckVariableDeclarationType(GroDecl);
+ if (GroDecl->isInvalidDecl())
+ return false;
+
+ InitializedEntity Entity = InitializedEntity::InitializeVariable(GroDecl);
+ ExprResult Res =
+ S.PerformCopyInitialization(Entity, SourceLocation(), ReturnValue);
+ if (Res.isInvalid())
+ return false;
+
+ Res = S.ActOnFinishFullExpr(Res.get(), /*DiscardedValue*/ false);
+ if (Res.isInvalid())
+ return false;
+
+ S.AddInitializerToDecl(GroDecl, Res.get(),
+ /*DirectInit=*/false);
+
+ S.FinalizeDeclaration(GroDecl);
+
+ // Form a declaration statement for the return declaration, so that AST
+ // visitors can more easily find it.
+ StmtResult GroDeclStmt =
+ S.ActOnDeclStmt(S.ConvertDeclToDeclGroup(GroDecl), Loc, Loc);
+ if (GroDeclStmt.isInvalid())
+ return false;
+
+ this->ResultDecl = GroDeclStmt.get();
+
+ ExprResult declRef = S.BuildDeclRefExpr(GroDecl, GroType, VK_LValue, Loc);
+ if (declRef.isInvalid())
+ return false;
+
+ ReturnStmt = S.BuildReturnStmt(Loc, declRef.get());
+ }
+
if (ReturnStmt.isInvalid()) {
noteMemberDeclaredHere(S, ReturnValue, Fn);
return false;
}
+ if (!GroMatchesRetType &&
+ cast<clang::ReturnStmt>(ReturnStmt.get())->getNRVOCandidate() == GroDecl)
+ GroDecl->setNRVOVariable(true);
+
this->ReturnStmt = ReturnStmt.get();
return true;
}
@@ -1842,67 +1911,32 @@ StmtResult Sema::BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs Args) {
}
ClassTemplateDecl *Sema::lookupCoroutineTraits(SourceLocation KwLoc,
- SourceLocation FuncLoc,
- NamespaceDecl *&Namespace) {
- if (!StdCoroutineTraitsCache) {
- // Because coroutines moved from std::experimental in the TS to std in
- // C++20, we look in both places to give users time to transition their
- // TS-specific code to C++20. Diagnostics are given when the TS usage is
- // discovered.
- // TODO: Become stricter when <experimental/coroutine> is removed.
-
- IdentifierInfo const &TraitIdent =
- PP.getIdentifierTable().get("coroutine_traits");
-
- NamespaceDecl *StdSpace = getStdNamespace();
- LookupResult ResStd(*this, &TraitIdent, FuncLoc, LookupOrdinaryName);
- bool InStd = StdSpace && LookupQualifiedName(ResStd, StdSpace);
-
- NamespaceDecl *ExpSpace = lookupStdExperimentalNamespace();
- LookupResult ResExp(*this, &TraitIdent, FuncLoc, LookupOrdinaryName);
- bool InExp = ExpSpace && LookupQualifiedName(ResExp, ExpSpace);
-
- if (!InStd && !InExp) {
- // The goggles, they found nothing!
- Diag(KwLoc, diag::err_implied_coroutine_type_not_found)
- << "std::coroutine_traits";
- return nullptr;
- }
+ SourceLocation FuncLoc) {
+ if (StdCoroutineTraitsCache)
+ return StdCoroutineTraitsCache;
- // Prefer ::std to std::experimental.
- LookupResult &Result = InStd ? ResStd : ResExp;
- CoroTraitsNamespaceCache = InStd ? StdSpace : ExpSpace;
+ IdentifierInfo const &TraitIdent =
+ PP.getIdentifierTable().get("coroutine_traits");
- // coroutine_traits is required to be a class template.
- StdCoroutineTraitsCache = Result.getAsSingle<ClassTemplateDecl>();
- if (!StdCoroutineTraitsCache) {
- Result.suppressDiagnostics();
- NamedDecl *Found = *Result.begin();
- Diag(Found->getLocation(), diag::err_malformed_std_coroutine_traits);
- return nullptr;
- }
+ NamespaceDecl *StdSpace = getStdNamespace();
+ LookupResult Result(*this, &TraitIdent, FuncLoc, LookupOrdinaryName);
+ bool Found = StdSpace && LookupQualifiedName(Result, StdSpace);
- if (InExp) {
- // Found in std::experimental
- Diag(KwLoc, diag::warn_deprecated_coroutine_namespace)
- << "coroutine_traits";
- ResExp.suppressDiagnostics();
- NamedDecl *Found = *ResExp.begin();
- Diag(Found->getLocation(), diag::note_entity_declared_at) << Found;
-
- if (InStd &&
- StdCoroutineTraitsCache != ResExp.getAsSingle<ClassTemplateDecl>()) {
- // Also found something different in std
- Diag(KwLoc,
- diag::err_mixed_use_std_and_experimental_namespace_for_coroutine);
- Diag(StdCoroutineTraitsCache->getLocation(),
- diag::note_entity_declared_at)
- << StdCoroutineTraitsCache;
+ if (!Found) {
+ // The goggles, we found nothing!
+ Diag(KwLoc, diag::err_implied_coroutine_type_not_found)
+ << "std::coroutine_traits";
+ return nullptr;
+ }
- return nullptr;
- }
- }
+ // coroutine_traits is required to be a class template.
+ StdCoroutineTraitsCache = Result.getAsSingle<ClassTemplateDecl>();
+ if (!StdCoroutineTraitsCache) {
+ Result.suppressDiagnostics();
+ NamedDecl *Found = *Result.begin();
+ Diag(Found->getLocation(), diag::err_malformed_std_coroutine_traits);
+ return nullptr;
}
- Namespace = CoroTraitsNamespaceCache;
+
return StdCoroutineTraitsCache;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
index 0fbef1cc8b52..a4bf57928470 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
@@ -46,7 +46,8 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cstring>
#include <functional>
@@ -1593,7 +1594,7 @@ void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
}
bool Sema::isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S,
- bool AllowInlineNamespace) {
+ bool AllowInlineNamespace) const {
return IdResolver.isDeclInScope(D, Ctx, S, AllowInlineNamespace);
}
@@ -1661,13 +1662,19 @@ bool Sema::CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old) {
if (NewM == OldM)
return false;
- // Partitions are part of the module, but a partition could import another
- // module, so verify that the PMIs agree.
- if (NewM && OldM &&
- (NewM->isModulePartition() || OldM->isModulePartition()) &&
- NewM->getPrimaryModuleInterfaceName() ==
- OldM->getPrimaryModuleInterfaceName())
- return false;
+ if (NewM && OldM) {
+ // A module implementation unit has visibility of the decls in its
+ // implicitly imported interface.
+ if (NewM->isModuleImplementation() && OldM == ThePrimaryInterface)
+ return false;
+
+ // Partitions are part of the module, but a partition could import another
+ // module, so verify that the PMIs agree.
+ if ((NewM->isModulePartition() || OldM->isModulePartition()) &&
+ NewM->getPrimaryModuleInterfaceName() ==
+ OldM->getPrimaryModuleInterfaceName())
+ return false;
+ }
bool NewIsModuleInterface = NewM && NewM->isModulePurview();
bool OldIsModuleInterface = OldM && OldM->isModulePurview();
@@ -1815,17 +1822,21 @@ bool Sema::IsRedefinitionInModule(const NamedDecl *New,
return OldM == NewM;
}
-static bool isUsingDecl(NamedDecl *D) {
+static bool isUsingDeclNotAtClassScope(NamedDecl *D) {
+ if (D->getDeclContext()->isFileContext())
+ return false;
+
return isa<UsingShadowDecl>(D) ||
isa<UnresolvedUsingTypenameDecl>(D) ||
isa<UnresolvedUsingValueDecl>(D);
}
-/// Removes using shadow declarations from the lookup results.
+/// Removes using shadow declarations not at class scope from the lookup
+/// results.
static void RemoveUsingDecls(LookupResult &R) {
LookupResult::Filter F = R.makeFilter();
while (F.hasNext())
- if (isUsingDecl(F.next()))
+ if (isUsingDeclNotAtClassScope(F.next()))
F.erase();
F.done();
@@ -1982,7 +1993,8 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
return false;
}
- if (D->hasAttr<UnusedAttr>() || D->hasAttr<ObjCPreciseLifetimeAttr>())
+ if (D->hasAttr<UnusedAttr>() || D->hasAttr<ObjCPreciseLifetimeAttr>() ||
+ D->hasAttr<CleanupAttr>())
return false;
if (isa<LabelDecl>(D))
@@ -2085,7 +2097,7 @@ static void GenerateFixForUnusedDecl(const NamedDecl *D, ASTContext &Ctx,
if (isa<LabelDecl>(D)) {
SourceLocation AfterColon = Lexer::findLocationAfterToken(
D->getEndLoc(), tok::colon, Ctx.getSourceManager(), Ctx.getLangOpts(),
- true);
+ /*SkipTrailingWhitespaceAndNewline=*/false);
if (AfterColon.isInvalid())
return;
Hint = FixItHint::CreateRemoval(
@@ -2140,7 +2152,8 @@ void Sema::DiagnoseUnusedDecl(const NamedDecl *D, DiagReceiverTy DiagReceiver) {
else
DiagID = diag::warn_unused_variable;
- DiagReceiver(D->getLocation(), PDiag(DiagID) << D << Hint);
+ SourceLocation DiagLoc = D->getLocation();
+ DiagReceiver(DiagLoc, PDiag(DiagID) << D << Hint << SourceRange(DiagLoc));
}
void Sema::DiagnoseUnusedButSetDecl(const VarDecl *VD,
@@ -3683,10 +3696,10 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, Scope *S,
!canRedefineFunction(Old, getLangOpts())) {
if (getLangOpts().MicrosoftExt) {
Diag(New->getLocation(), diag::ext_static_non_static) << New;
- Diag(OldLocation, PrevDiag);
+ Diag(OldLocation, PrevDiag) << Old << Old->getType();
} else {
Diag(New->getLocation(), diag::err_static_non_static) << New;
- Diag(OldLocation, PrevDiag);
+ Diag(OldLocation, PrevDiag) << Old << Old->getType();
return true;
}
}
@@ -4373,7 +4386,7 @@ static void diagnoseVarDeclTypeMismatch(Sema &S, VarDecl *New, VarDecl* Old) {
SourceLocation OldLocation;
std::tie(PrevDiag, OldLocation)
= getNoteDiagForInvalidRedeclaration(Old, New);
- S.Diag(OldLocation, PrevDiag);
+ S.Diag(OldLocation, PrevDiag) << Old << Old->getType();
New->setInvalidDecl();
}
@@ -4386,7 +4399,7 @@ static void diagnoseVarDeclTypeMismatch(Sema &S, VarDecl *New, VarDecl* Old) {
/// is attached.
void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old,
bool MergeTypeWithOld) {
- if (New->isInvalidDecl() || Old->isInvalidDecl())
+ if (New->isInvalidDecl() || Old->isInvalidDecl() || New->getType()->containsErrors() || Old->getType()->containsErrors())
return;
QualType MergedT;
@@ -5027,7 +5040,8 @@ void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TagFromDeclSpec->setTypedefNameForAnonDecl(NewTD);
}
-static unsigned GetDiagnosticTypeSpecifierID(DeclSpec::TST T) {
+static unsigned GetDiagnosticTypeSpecifierID(const DeclSpec &DS) {
+ DeclSpec::TST T = DS.getTypeSpecType();
switch (T) {
case DeclSpec::TST_class:
return 0;
@@ -5038,12 +5052,17 @@ static unsigned GetDiagnosticTypeSpecifierID(DeclSpec::TST T) {
case DeclSpec::TST_union:
return 3;
case DeclSpec::TST_enum:
+ if (const auto *ED = dyn_cast<EnumDecl>(DS.getRepAsDecl())) {
+ if (ED->isScopedUsingClassTag())
+ return 5;
+ if (ED->isScoped())
+ return 6;
+ }
return 4;
default:
llvm_unreachable("unexpected type specifier");
}
}
-
/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
/// no declarator (e.g. "struct foo;") is parsed. It also accepts template
/// parameters to cope with template friend declarations.
@@ -5101,7 +5120,7 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
// the declaration of a function or function template
if (Tag)
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_tag)
- << GetDiagnosticTypeSpecifierID(DS.getTypeSpecType())
+ << GetDiagnosticTypeSpecifierID(DS)
<< static_cast<int>(DS.getConstexprSpecifier());
else
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_wrong_decl_kind)
@@ -5135,7 +5154,7 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
//
// Per C++ [dcl.enum]p1, an opaque-enum-declaration can't either.
Diag(SS.getBeginLoc(), diag::err_standalone_class_nested_name_specifier)
- << GetDiagnosticTypeSpecifierID(DS.getTypeSpecType()) << SS.getRange();
+ << GetDiagnosticTypeSpecifierID(DS) << SS.getRange();
return nullptr;
}
@@ -5299,11 +5318,15 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
TypeSpecType == DeclSpec::TST_union ||
TypeSpecType == DeclSpec::TST_enum) {
for (const ParsedAttr &AL : DS.getAttributes())
- Diag(AL.getLoc(), diag::warn_declspec_attribute_ignored)
- << AL << GetDiagnosticTypeSpecifierID(TypeSpecType);
+ Diag(AL.getLoc(), AL.isRegularKeywordAttribute()
+ ? diag::err_declspec_keyword_has_no_effect
+ : diag::warn_declspec_attribute_ignored)
+ << AL << GetDiagnosticTypeSpecifierID(DS);
for (const ParsedAttr &AL : DeclAttrs)
- Diag(AL.getLoc(), diag::warn_declspec_attribute_ignored)
- << AL << GetDiagnosticTypeSpecifierID(TypeSpecType);
+ Diag(AL.getLoc(), AL.isRegularKeywordAttribute()
+ ? diag::err_declspec_keyword_has_no_effect
+ : diag::warn_declspec_attribute_ignored)
+ << AL << GetDiagnosticTypeSpecifierID(DS);
}
}
@@ -5694,10 +5717,10 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
SC = SC_None;
}
- assert(DS.getAttributes().empty() && "No attribute expected");
Anon = VarDecl::Create(Context, Owner, DS.getBeginLoc(),
Record->getLocation(), /*IdentifierInfo=*/nullptr,
Context.getTypeDeclType(Record), TInfo, SC);
+ ProcessDeclAttributes(S, Anon, Dc);
// Default-initialize the implicit variable. This initialization will be
// trivial in almost all cases, except if a union member has an in-class
@@ -6366,10 +6389,6 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
// containing the two f's declared in X, but neither of them
// matches.
- // C++ [dcl.meaning]p1:
- // [...] the member shall not merely have been introduced by a
- // using-declaration in the scope of the class or namespace nominated by
- // the nested-name-specifier of the declarator-id.
RemoveUsingDecls(Previous);
}
@@ -6746,14 +6765,26 @@ Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD,
if (IdentifierInfo *II = NewTD->getIdentifier())
if (!NewTD->isInvalidDecl() &&
NewTD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
- if (II->isStr("FILE"))
+ switch (II->getInterestingIdentifierID()) {
+ case tok::InterestingIdentifierKind::FILE:
Context.setFILEDecl(NewTD);
- else if (II->isStr("jmp_buf"))
+ break;
+ case tok::InterestingIdentifierKind::jmp_buf:
Context.setjmp_bufDecl(NewTD);
- else if (II->isStr("sigjmp_buf"))
+ break;
+ case tok::InterestingIdentifierKind::sigjmp_buf:
Context.setsigjmp_bufDecl(NewTD);
- else if (II->isStr("ucontext_t"))
+ break;
+ case tok::InterestingIdentifierKind::ucontext_t:
Context.setucontext_tDecl(NewTD);
+ break;
+ case tok::InterestingIdentifierKind::float_t:
+ case tok::InterestingIdentifierKind::double_t:
+ NewTD->addAttr(AvailableOnlyInDefaultEvalMethodAttr::Create(Context));
+ break;
+ default:
+ break;
+ }
}
return NewTD;
@@ -7674,7 +7705,12 @@ NamedDecl *Sema::ActOnVariableDeclarator(
// If we have any template parameter lists that don't directly belong to
// the variable (matching the scope specifier), store them.
- unsigned VDTemplateParamLists = TemplateParams ? 1 : 0;
+ // An explicit variable template specialization does not own any template
+ // parameter lists.
+ bool IsExplicitSpecialization =
+ IsVariableTemplateSpecialization && !IsPartialSpecialization;
+ unsigned VDTemplateParamLists =
+ (TemplateParams && !IsExplicitSpecialization) ? 1 : 0;
if (TemplateParamLists.size() > VDTemplateParamLists)
NewVD->setTemplateParameterListsInfo(
Context, TemplateParamLists.drop_back(VDTemplateParamLists));
@@ -7727,7 +7763,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
diag::err_thread_non_global)
<< DeclSpec::getSpecifierName(TSCS);
else if (!Context.getTargetInfo().isTLSSupported()) {
- if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
+ if (getLangOpts().CUDA || getLangOpts().OpenMPIsTargetDevice ||
getLangOpts().SYCLIsDevice) {
// Postpone error emission until we've collected attributes required to
// figure out whether it's a host or device variable and whether the
@@ -7770,9 +7806,9 @@ NamedDecl *Sema::ActOnVariableDeclarator(
Diag(D.getDeclSpec().getConstexprSpecLoc(),
diag::err_constinit_local_variable);
else
- NewVD->addAttr(ConstInitAttr::Create(
- Context, D.getDeclSpec().getConstexprSpecLoc(),
- AttributeCommonInfo::AS_Keyword, ConstInitAttr::Keyword_constinit));
+ NewVD->addAttr(
+ ConstInitAttr::Create(Context, D.getDeclSpec().getConstexprSpecLoc(),
+ ConstInitAttr::Keyword_constinit));
break;
}
@@ -7833,6 +7869,18 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
}
+ // WebAssembly tables are always in address space 1 (wasm_var). Don't apply
+ // address space if the table has local storage (semantic checks elsewhere
+ // will produce an error anyway).
+ if (const auto *ATy = dyn_cast<ArrayType>(NewVD->getType())) {
+ if (ATy && ATy->getElementType().isWebAssemblyReferenceType() &&
+ !NewVD->hasLocalStorage()) {
+ QualType Type = Context.getAddrSpaceQualType(
+ NewVD->getType(), Context.getLangASForBuiltinAddressSpace(1));
+ NewVD->setType(Type);
+ }
+ }
+
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewVD, D);
@@ -7844,17 +7892,18 @@ NamedDecl *Sema::ActOnVariableDeclarator(
if (const auto *TT = R->getAs<TypedefType>())
copyAttrFromTypedefToDecl<AllocSizeAttr>(*this, NewVD, TT);
- if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
+ if (getLangOpts().CUDA || getLangOpts().OpenMPIsTargetDevice ||
getLangOpts().SYCLIsDevice) {
if (EmitTLSUnsupportedError &&
((getLangOpts().CUDA && DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) ||
- (getLangOpts().OpenMPIsDevice &&
+ (getLangOpts().OpenMPIsTargetDevice &&
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(NewVD))))
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_thread_unsupported);
if (EmitTLSUnsupportedError &&
- (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice)))
+ (LangOpts.SYCLIsDevice ||
+ (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice)))
targetDiag(D.getIdentifierLoc(), diag::err_thread_unsupported);
// CUDA B.2.5: "__shared__ and __constant__ variables have implied static
// storage [duration]."
@@ -7989,6 +8038,13 @@ NamedDecl *Sema::ActOnVariableDeclarator(
if (!IsVariableTemplateSpecialization)
D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
+ // CheckVariableDeclaration will set NewVD as invalid if something is in
+ // error like WebAssembly tables being declared as arrays with a non-zero
+ // size, but then parsing continues and emits further errors on that line.
+ // To avoid that we check here if it happened and return nullptr.
+ if (NewVD->getType()->isWebAssemblyTableType() && NewVD->isInvalidDecl())
+ return nullptr;
+
if (NewTemplate) {
VarTemplateDecl *PrevVarTemplate =
NewVD->getPreviousDecl()
@@ -8133,7 +8189,7 @@ NamedDecl *Sema::getShadowedDeclaration(const VarDecl *D,
return nullptr;
// Don't diagnose declarations at file scope.
- if (D->hasGlobalStorage())
+ if (D->hasGlobalStorage() && !D->isStaticLocal())
return nullptr;
NamedDecl *ShadowedDecl = R.getFoundDecl();
@@ -8595,6 +8651,28 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
}
}
+ // WebAssembly tables must be static with a zero length and can't be
+ // declared within functions.
+ if (T->isWebAssemblyTableType()) {
+ if (getCurScope()->getParent()) { // Parent is null at top-level
+ Diag(NewVD->getLocation(), diag::err_wasm_table_in_function);
+ NewVD->setInvalidDecl();
+ return;
+ }
+ if (NewVD->getStorageClass() != SC_Static) {
+ Diag(NewVD->getLocation(), diag::err_wasm_table_must_be_static);
+ NewVD->setInvalidDecl();
+ return;
+ }
+ const auto *ATy = dyn_cast<ConstantArrayType>(T.getTypePtr());
+ if (!ATy || ATy->getSize().getSExtValue() != 0) {
+ Diag(NewVD->getLocation(),
+ diag::err_typecheck_wasm_table_must_have_zero_length);
+ NewVD->setInvalidDecl();
+ return;
+ }
+ }
+
bool isVM = T->isVariablyModifiedType();
if (isVM || NewVD->hasAttr<CleanupAttr>() ||
NewVD->hasAttr<BlocksAttr>())
@@ -8665,7 +8743,8 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
return;
}
- if (!NewVD->hasLocalStorage() && T->isSizelessType()) {
+ if (!NewVD->hasLocalStorage() && T->isSizelessType() &&
+ !T.isWebAssemblyReferenceType()) {
Diag(NewVD->getLocation(), diag::err_sizeless_nonlocal) << T;
NewVD->setInvalidDecl();
return;
@@ -8704,6 +8783,9 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
return;
}
}
+
+ if (T->isRVVType())
+ checkRVVTypeSupport(T, NewVD->getLocation(), cast<ValueDecl>(CurContext));
}
/// Perform semantic checking on a newly-created variable
@@ -9101,15 +9183,6 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
}
Expr *TrailingRequiresClause = D.getTrailingRequiresClause();
- // Check that the return type is not an abstract class type.
- // For record types, this is done by the AbstractClassUsageDiagnoser once
- // the class has been completely parsed.
- if (!DC->isRecord() &&
- SemaRef.RequireNonAbstractType(
- D.getIdentifierLoc(), R->castAs<FunctionType>()->getReturnType(),
- diag::err_abstract_type_in_decl, SemaRef.AbstractReturnType))
- D.setInvalidType();
-
if (Name.getNameKind() == DeclarationName::CXXConstructorName) {
// This is a C++ constructor declaration.
assert(DC->isRecord() &&
@@ -9181,8 +9254,8 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
SemaRef.Diag(TrailingRequiresClause->getBeginLoc(),
diag::err_trailing_requires_clause_on_deduction_guide)
<< TrailingRequiresClause->getSourceRange();
- SemaRef.CheckDeductionGuideDeclarator(D, R, SC);
-
+ if (SemaRef.CheckDeductionGuideDeclarator(D, R, SC))
+ return nullptr;
return CXXDeductionGuideDecl::Create(SemaRef.Context, DC, D.getBeginLoc(),
ExplicitSpecifier, NameInfo, R, TInfo,
D.getEndLoc());
@@ -9272,6 +9345,12 @@ static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
ParamKind == InvalidKernelParam)
return ParamKind;
+ // OpenCL v3.0 s6.11.a:
+ // A restriction to pass pointers to pointers only applies to OpenCL C
+ // v1.2 or below.
+ if (S.getLangOpts().getOpenCLCompatibleVersion() > 120)
+ return ValidKernelParam;
+
return PtrPtrKernelParam;
}
@@ -9299,6 +9378,11 @@ static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
return InvalidKernelParam;
}
+ // OpenCL v1.2 s6.9.p:
+ // A restriction to pass pointers only applies to OpenCL C v1.2 or below.
+ if (S.getLangOpts().getOpenCLCompatibleVersion() > 120)
+ return ValidKernelParam;
+
return PtrKernelParam;
}
@@ -9365,13 +9449,8 @@ static void checkIsValidOpenCLKernelParameter(
// OpenCL v3.0 s6.11.a:
// A kernel function argument cannot be declared as a pointer to a pointer
// type. [...] This restriction only applies to OpenCL C 1.2 or below.
- if (S.getLangOpts().getOpenCLCompatibleVersion() <= 120) {
- S.Diag(Param->getLocation(), diag::err_opencl_ptrptr_kernel_param);
- D.setInvalidType();
- return;
- }
-
- ValidTypes.insert(PT.getTypePtr());
+ S.Diag(Param->getLocation(), diag::err_opencl_ptrptr_kernel_param);
+ D.setInvalidType();
return;
case InvalidAddrSpacePtrKernelParam:
@@ -9489,7 +9568,8 @@ static void checkIsValidOpenCLKernelParameter(
// OpenCL v1.2 s6.9.p:
// Arguments to kernel functions that are declared to be a struct or union
// do not allow OpenCL objects to be passed as elements of the struct or
- // union.
+ // union. This restriction was lifted in OpenCL v2.0 with the introduction
+ // of SVM.
if (ParamType == PtrKernelParam || ParamType == PtrPtrKernelParam ||
ParamType == InvalidAddrSpacePtrKernelParam) {
S.Diag(Param->getLocation(),
@@ -9556,6 +9636,7 @@ static bool isStdBuiltin(ASTContext &Ctx, FunctionDecl *FD,
case Builtin::BIaddressof:
case Builtin::BI__addressof:
case Builtin::BIforward:
+ case Builtin::BIforward_like:
case Builtin::BImove:
case Builtin::BImove_if_noexcept:
case Builtin::BIas_const: {
@@ -10105,9 +10186,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewFD->setParams(Params);
if (D.getDeclSpec().isNoreturnSpecified())
- NewFD->addAttr(C11NoReturnAttr::Create(Context,
- D.getDeclSpec().getNoreturnSpecLoc(),
- AttributeCommonInfo::AS_Keyword));
+ NewFD->addAttr(
+ C11NoReturnAttr::Create(Context, D.getDeclSpec().getNoreturnSpecLoc()));
// Functions returning a variably modified type violate C99 6.7.5.2p2
// because all functions have linkage.
@@ -10122,15 +10202,14 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
!NewFD->hasAttr<SectionAttr>())
NewFD->addAttr(PragmaClangTextSectionAttr::CreateImplicit(
Context, PragmaClangTextSection.SectionName,
- PragmaClangTextSection.PragmaLocation, AttributeCommonInfo::AS_Pragma));
+ PragmaClangTextSection.PragmaLocation));
// Apply an implicit SectionAttr if #pragma code_seg is active.
if (CodeSegStack.CurrentValue && D.isFunctionDefinition() &&
!NewFD->hasAttr<SectionAttr>()) {
NewFD->addAttr(SectionAttr::CreateImplicit(
Context, CodeSegStack.CurrentValue->getString(),
- CodeSegStack.CurrentPragmaLocation, AttributeCommonInfo::AS_Pragma,
- SectionAttr::Declspec_allocate));
+ CodeSegStack.CurrentPragmaLocation, SectionAttr::Declspec_allocate));
if (UnifySection(CodeSegStack.CurrentValue->getString(),
ASTContext::PSF_Implicit | ASTContext::PSF_Execute |
ASTContext::PSF_Read,
@@ -10143,8 +10222,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (StrictGuardStackCheckStack.CurrentValue && D.isFunctionDefinition() &&
!NewFD->hasAttr<StrictGuardStackCheckAttr>())
NewFD->addAttr(StrictGuardStackCheckAttr::CreateImplicit(
- Context, PragmaClangTextSection.PragmaLocation,
- AttributeCommonInfo::AS_Pragma));
+ Context, PragmaClangTextSection.PragmaLocation));
// Apply an implicit CodeSegAttr from class declspec or
// apply an implicit SectionAttr from #pragma code_seg if active.
@@ -10185,14 +10263,19 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
CheckHLSLEntryPoint(NewFD);
if (!NewFD->isInvalidDecl()) {
auto Env = TargetInfo.getTriple().getEnvironment();
- AttributeCommonInfo AL(NewFD->getBeginLoc());
HLSLShaderAttr::ShaderType ShaderType =
static_cast<HLSLShaderAttr::ShaderType>(
hlsl::getStageFromEnvironment(Env));
// To share code with HLSLShaderAttr, add HLSLShaderAttr to entry
// function.
- if (HLSLShaderAttr *Attr = mergeHLSLShaderAttr(NewFD, AL, ShaderType))
- NewFD->addAttr(Attr);
+ if (HLSLShaderAttr *NT = NewFD->getAttr<HLSLShaderAttr>()) {
+ if (NT->getType() != ShaderType)
+ Diag(NT->getLocation(), diag::err_hlsl_entry_shader_attr_mismatch)
+ << NT;
+ } else {
+ NewFD->addAttr(HLSLShaderAttr::Create(Context, ShaderType,
+ NewFD->getBeginLoc()));
+ }
}
}
// HLSL does not support specifying an address space on a function return
@@ -10663,6 +10746,14 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
}
}
+ // WebAssembly tables can't be used as function parameters.
+ if (Context.getTargetInfo().getTriple().isWasm()) {
+ if (PT->getUnqualifiedDesugaredType()->isWebAssemblyTableType()) {
+ Diag(Param->getTypeSpecStartLoc(),
+ diag::err_wasm_table_as_function_parameter);
+ D.setInvalidType();
+ }
+ }
}
// Here we have an function template explicit specialization at class scope.
@@ -10772,8 +10863,7 @@ Attr *Sema::getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
CodeSegStack.CurrentValue)
return SectionAttr::CreateImplicit(
getASTContext(), CodeSegStack.CurrentValue->getString(),
- CodeSegStack.CurrentPragmaLocation, AttributeCommonInfo::AS_Pragma,
- SectionAttr::Declspec_allocate);
+ CodeSegStack.CurrentPragmaLocation, SectionAttr::Declspec_allocate);
return nullptr;
}
@@ -11314,8 +11404,7 @@ static bool CheckMultiVersionAdditionalDecl(
if (NewMVKind == MultiVersionKind::None &&
OldMVKind == MultiVersionKind::TargetVersion) {
NewFD->addAttr(TargetVersionAttr::CreateImplicit(
- S.Context, "default", NewFD->getSourceRange(),
- AttributeCommonInfo::AS_GNU));
+ S.Context, "default", NewFD->getSourceRange()));
NewFD->setIsMultiVersion();
NewMVKind = MultiVersionKind::TargetVersion;
if (!NewTVA) {
@@ -11514,6 +11603,10 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
return false;
}
+ // Target attribute on AArch64 is not used for multiversioning
+ if (NewTA && S.getASTContext().getTargetInfo().getTriple().isAArch64())
+ return false;
+
if (!OldDecl || !OldDecl->getAsFunction() ||
OldDecl->getDeclContext()->getRedeclContext() !=
NewFD->getDeclContext()->getRedeclContext()) {
@@ -11532,8 +11625,7 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
const auto *OldTVA = OldFD->getAttr<TargetVersionAttr>();
if (OldTVA) {
NewFD->addAttr(TargetVersionAttr::CreateImplicit(
- S.Context, "default", NewFD->getSourceRange(),
- AttributeCommonInfo::AS_GNU));
+ S.Context, "default", NewFD->getSourceRange()));
NewFD->setIsMultiVersion();
OldFD->setIsMultiVersion();
OldDecl = OldFD;
@@ -11862,8 +11954,33 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// member-declarator shall be present only if the declarator declares a
// templated function ([dcl.fct]).
if (Expr *TRC = NewFD->getTrailingRequiresClause()) {
- if (!NewFD->isTemplated() && !NewFD->isTemplateInstantiation())
+ // [temp.pre]/8:
+ // An entity is templated if it is
+ // - a template,
+ // - an entity defined ([basic.def]) or created ([class.temporary]) in a
+ // templated entity,
+ // - a member of a templated entity,
+ // - an enumerator for an enumeration that is a templated entity, or
+ // - the closure type of a lambda-expression ([expr.prim.lambda.closure])
+ // appearing in the declaration of a templated entity. [Note 6: A local
+ // class, a local or block variable, or a friend function defined in a
+ // templated entity is a templated entity. — end note]
+ //
+ // A templated function is a function template or a function that is
+ // templated. A templated class is a class template or a class that is
+ // templated. A templated variable is a variable template or a variable
+ // that is templated.
+
+ if (!NewFD->getDescribedFunctionTemplate() && // -a template
+ // defined... in a templated entity
+ !(DeclIsDefn && NewFD->isTemplated()) &&
+ // a member of a templated entity
+ !(isa<CXXMethodDecl>(NewFD) && NewFD->isTemplated()) &&
+ // Don't complain about instantiations, they've already had these
+ // rules + others enforced.
+ !NewFD->isTemplateInstantiation()) {
Diag(TRC->getBeginLoc(), diag::err_constrained_non_templated_function);
+ }
}
if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(NewFD))
@@ -12584,10 +12701,9 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
if (Init)
DeduceInits = Init;
- if (DirectInit) {
- if (auto *PL = dyn_cast_or_null<ParenListExpr>(Init))
- DeduceInits = PL->exprs();
- }
+ auto *PL = dyn_cast_if_present<ParenListExpr>(Init);
+ if (DirectInit && PL)
+ DeduceInits = PL->exprs();
if (isa<DeducedTemplateSpecializationType>(Deduced)) {
assert(VDecl && "non-auto type for init capture deduction?");
@@ -12597,7 +12713,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
// FIXME: Initialization should not be taking a mutable list of inits.
SmallVector<Expr*, 8> InitsCopy(DeduceInits.begin(), DeduceInits.end());
return DeduceTemplateSpecializationFromInitializer(TSI, Entity, Kind,
- InitsCopy);
+ InitsCopy, PL);
}
if (DirectInit) {
@@ -13024,6 +13140,14 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
return;
}
+ // WebAssembly tables can't be used to initialise a variable.
+ if (Init && !Init->getType().isNull() &&
+ Init->getType()->isWebAssemblyTableType()) {
+ Diag(Init->getExprLoc(), diag::err_wasm_table_art) << 0;
+ VDecl->setInvalidDecl();
+ return;
+ }
+
// C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
if (VDecl->getType()->isUndeducedType()) {
// Attempt typo correction early so that the type of the init expression can
@@ -13472,7 +13596,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
}
if (LangOpts.OpenMP &&
- (LangOpts.OpenMPIsDevice || !LangOpts.OMPTargetTriples.empty()) &&
+ (LangOpts.OpenMPIsTargetDevice || !LangOpts.OMPTargetTriples.empty()) &&
VDecl->isFileVarDecl())
DeclsToCheckForDeferredDiags.insert(VDecl);
CheckCompleteVariableDeclaration(VDecl);
@@ -13657,7 +13781,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
}
if (Context.getTargetInfo().allowDebugInfoForExternalRef() &&
- !Var->isInvalidDecl() && !getLangOpts().CPlusPlus)
+ !Var->isInvalidDecl())
ExternalDeclarations.push_back(Var);
return;
@@ -14148,9 +14272,9 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
} else if (Stack->CurrentValue) {
SectionFlags |= ASTContext::PSF_Implicit;
auto SectionName = Stack->CurrentValue->getString();
- var->addAttr(SectionAttr::CreateImplicit(
- Context, SectionName, Stack->CurrentPragmaLocation,
- AttributeCommonInfo::AS_Pragma, SectionAttr::Declspec_allocate));
+ var->addAttr(SectionAttr::CreateImplicit(Context, SectionName,
+ Stack->CurrentPragmaLocation,
+ SectionAttr::Declspec_allocate));
if (UnifySection(SectionName, SectionFlags, var))
var->dropAttr<SectionAttr>();
}
@@ -14160,8 +14284,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
// attribute.
if (CurInitSeg && var->getInit())
var->addAttr(InitSegAttr::CreateImplicit(Context, CurInitSeg->getString(),
- CurInitSegLoc,
- AttributeCommonInfo::AS_Pragma));
+ CurInitSegLoc));
}
// All the following checks are C++ only.
@@ -14263,23 +14386,19 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) {
if (PragmaClangBSSSection.Valid)
VD->addAttr(PragmaClangBSSSectionAttr::CreateImplicit(
Context, PragmaClangBSSSection.SectionName,
- PragmaClangBSSSection.PragmaLocation,
- AttributeCommonInfo::AS_Pragma));
+ PragmaClangBSSSection.PragmaLocation));
if (PragmaClangDataSection.Valid)
VD->addAttr(PragmaClangDataSectionAttr::CreateImplicit(
Context, PragmaClangDataSection.SectionName,
- PragmaClangDataSection.PragmaLocation,
- AttributeCommonInfo::AS_Pragma));
+ PragmaClangDataSection.PragmaLocation));
if (PragmaClangRodataSection.Valid)
VD->addAttr(PragmaClangRodataSectionAttr::CreateImplicit(
Context, PragmaClangRodataSection.SectionName,
- PragmaClangRodataSection.PragmaLocation,
- AttributeCommonInfo::AS_Pragma));
+ PragmaClangRodataSection.PragmaLocation));
if (PragmaClangRelroSection.Valid)
VD->addAttr(PragmaClangRelroSectionAttr::CreateImplicit(
Context, PragmaClangRelroSection.SectionName,
- PragmaClangRelroSection.PragmaLocation,
- AttributeCommonInfo::AS_Pragma));
+ PragmaClangRelroSection.PragmaLocation));
}
if (auto *DD = dyn_cast<DecompositionDecl>(ThisDecl)) {
@@ -14423,6 +14542,12 @@ Sema::DeclGroupPtrTy Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
for (unsigned i = 0, e = Group.size(); i != e; ++i) {
if (Decl *D = Group[i]) {
+ // Check if the Decl has been declared in '#pragma omp declare target'
+ // directive and has static storage duration.
+ if (auto *VD = dyn_cast<VarDecl>(D);
+ LangOpts.OpenMP && VD && VD->hasAttr<OMPDeclareTargetDeclAttr>() &&
+ VD->hasGlobalStorage())
+ ActOnOpenMPDeclareTargetInitializer(D);
// For declarators, there are some additional syntactic-ish checks we need
// to perform.
if (auto *DD = dyn_cast<DeclaratorDecl>(D)) {
@@ -14803,14 +14928,6 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
checkNonTrivialCUnion(New->getType(), New->getLocation(),
NTCUC_FunctionParam, NTCUK_Destruct|NTCUK_Copy);
- // Parameters can not be abstract class types.
- // For record types, this is done by the AbstractClassUsageDiagnoser once
- // the class has been completely parsed.
- if (!CurContext->isRecord() &&
- RequireNonAbstractType(NameLoc, T, diag::err_abstract_type_in_decl,
- AbstractParamType))
- New->setInvalidDecl();
-
// Parameter declarators cannot be interface types. All ObjC objects are
// passed by reference.
if (T->isObjCObjectType()) {
@@ -14831,7 +14948,11 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
// OpenCL allows function arguments declared to be an array of a type
// to be qualified with an address space.
!(getLangOpts().OpenCL &&
- (T->isArrayType() || T.getAddressSpace() == LangAS::opencl_private))) {
+ (T->isArrayType() || T.getAddressSpace() == LangAS::opencl_private)) &&
+ // WebAssembly allows reference types as parameters. Funcref in particular
+ // lives in a different address space.
+ !(T->isFunctionPointerType() &&
+ T.getAddressSpace() == LangAS::wasm_funcref)) {
Diag(NameLoc, diag::err_arg_with_address_space);
New->setInvalidDecl();
}
@@ -15126,6 +15247,17 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
FD->isConsteval() ? ExpressionEvaluationContext::ImmediateFunctionContext
: ExprEvalContexts.back().Context);
+ // Each ExpressionEvaluationContextRecord also keeps track of whether the
+ // context is nested in an immediate function context, so smaller contexts
+ // that appear inside immediate functions (like variable initializers) are
+ // considered to be inside an immediate function context even though by
+ // themselves they are not immediate function contexts. But when a new
+ // function is entered, we need to reset this tracking, since the entered
+ // function might be not an immediate function.
+ ExprEvalContexts.back().InImmediateFunctionContext = FD->isConsteval();
+ ExprEvalContexts.back().InImmediateEscalatingFunctionContext =
+ getLangOpts().CPlusPlus20 && FD->isImmediateEscalating();
+
// Check for defining attributes before the check for redefinition.
if (const auto *Attr = FD->getAttr<AliasAttr>()) {
Diag(Attr->getLocation(), diag::err_alias_is_definition) << FD << 0;
@@ -15202,13 +15334,19 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
}
}
- // The return type of a function definition must be complete (C99 6.9.1p3),
- // unless the function is deleted (C++ specifc, C++ [dcl.fct.def.general]p2)
+ // The return type of a function definition must be complete (C99 6.9.1p3).
+ // C++23 [dcl.fct.def.general]/p2
+ // The type of [...] the return for a function definition
+ // shall not be a (possibly cv-qualified) class type that is incomplete
+ // or abstract within the function body unless the function is deleted.
QualType ResultType = FD->getReturnType();
if (!ResultType->isDependentType() && !ResultType->isVoidType() &&
!FD->isInvalidDecl() && BodyKind != FnBodyKind::Delete &&
- RequireCompleteType(FD->getLocation(), ResultType,
- diag::err_func_def_incomplete_result))
+ (RequireCompleteType(FD->getLocation(), ResultType,
+ diag::err_func_def_incomplete_result) ||
+ RequireNonAbstractType(FD->getLocation(), FD->getReturnType(),
+ diag::err_abstract_type_in_decl,
+ AbstractReturnType)))
FD->setInvalidDecl();
if (FnBodyScope)
@@ -15434,10 +15572,10 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// one is already popped when finishing the lambda in BuildLambdaExpr().
// This is meant to pop the context added in ActOnStartOfFunctionDef().
ExitFunctionBodyRAII ExitRAII(*this, isLambdaCallOperator(FD));
-
if (FD) {
FD->setBody(Body);
FD->setWillHaveBody(false);
+ CheckImmediateEscalatingFunctionDefinition(FD, FSI);
if (getLangOpts().CPlusPlus14) {
if (!FD->isInvalidDecl() && Body && !FD->isDependentContext() &&
@@ -15823,7 +15961,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
DiscardCleanupsInEvaluationContext();
}
- if (FD && ((LangOpts.OpenMP && (LangOpts.OpenMPIsDevice ||
+ if (FD && ((LangOpts.OpenMP && (LangOpts.OpenMPIsTargetDevice ||
!LangOpts.OMPTargetTriples.empty())) ||
LangOpts.CUDA || LangOpts.SYCLIsDevice)) {
auto ES = getEmissionStatus(FD);
@@ -15869,8 +16007,14 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
while (!BlockScope->isCompoundStmtScope() && BlockScope->getParent())
BlockScope = BlockScope->getParent();
+ // Loop until we find a DeclContext that is either a function/method or the
+ // translation unit, which are the only two valid places to implicitly define
+ // a function. This avoids accidentally defining the function within a tag
+ // declaration, for example.
Scope *ContextScope = BlockScope;
- while (!ContextScope->getEntity())
+ while (!ContextScope->getEntity() ||
+ (!ContextScope->getEntity()->isFunctionOrMethod() &&
+ !ContextScope->getEntity()->isTranslationUnit()))
ContextScope = ContextScope->getParent();
ContextRAII SavedContext(*this, ContextScope->getEntity());
@@ -16005,7 +16149,11 @@ void Sema::AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
// indicates failure by returning a null pointer value. Any other allocation
// function never returns a null pointer value and indicates failure only by
// throwing an exception [...]
- if (!IsNothrow && !FD->hasAttr<ReturnsNonNullAttr>())
+ //
+ // However, -fcheck-new invalidates this possible assumption, so don't add
+ // NonNull when that is enabled.
+ if (!IsNothrow && !FD->hasAttr<ReturnsNonNullAttr>() &&
+ !getLangOpts().CheckNew)
FD->addAttr(ReturnsNonNullAttr::CreateImplicit(Context, FD->getLocation()));
// C++2a [basic.stc.dynamic.allocation]p2:
@@ -16198,6 +16346,7 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
case Builtin::BI__builtin_addressof:
case Builtin::BIas_const:
case Builtin::BIforward:
+ case Builtin::BIforward_like:
case Builtin::BImove:
case Builtin::BImove_if_noexcept:
if (ParmVarDecl *P = FD->getParamDecl(0u);
@@ -16992,11 +17141,14 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
S = getTagInjectionScope(S, getLangOpts());
} else {
assert(TUK == TUK_Friend);
+ CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(SearchDC);
+
// C++ [namespace.memdef]p3:
// If a friend declaration in a non-local class first declares a
// class or function, the friend class or function is a member of
// the innermost enclosing namespace.
- SearchDC = SearchDC->getEnclosingNamespaceContext();
+ SearchDC = RD->isLocalClass() ? RD->isLocalClass()
+ : SearchDC->getEnclosingNamespaceContext();
}
// In C++, we need to do a redeclaration lookup to properly
@@ -17608,9 +17760,10 @@ void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
Record->markAbstract();
if (FinalLoc.isValid()) {
- Record->addAttr(FinalAttr::Create(
- Context, FinalLoc, AttributeCommonInfo::AS_Keyword,
- static_cast<FinalAttr::Spelling>(IsFinalSpelledSealed)));
+ Record->addAttr(FinalAttr::Create(Context, FinalLoc,
+ IsFinalSpelledSealed
+ ? FinalAttr::Keyword_sealed
+ : FinalAttr::Keyword_final));
}
// C++ [class]p2:
// [...] The class-name is also inserted into the scope of the
@@ -19057,7 +19210,7 @@ static bool isRepresentableIntegerValue(ASTContext &Context,
--BitWidth;
return Value.getActiveBits() <= BitWidth;
}
- return Value.getMinSignedBits() <= BitWidth;
+ return Value.getSignificantBits() <= BitWidth;
}
// Given an integral type, return the next larger integral type
@@ -19247,6 +19400,7 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
if (!getLangOpts().CPlusPlus && !T.isNull())
Diag(IdLoc, diag::warn_enum_value_overflow);
} else if (!getLangOpts().CPlusPlus &&
+ !EltTy->isDependentType() &&
!isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
// Enforce C99 6.7.2.2p2 even when we compute the next value.
Diag(IdLoc, diag::ext_enum_value_not_int)
@@ -19447,7 +19601,7 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
return;
}
- // Constants with initalizers are handled in the next loop.
+ // Constants with initializers are handled in the next loop.
if (ECD->getInitExpr())
continue;
@@ -19592,8 +19746,8 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
unsigned ActiveBits = InitVal.getActiveBits();
NumPositiveBits = std::max({NumPositiveBits, ActiveBits, 1u});
} else {
- NumNegativeBits = std::max(NumNegativeBits,
- (unsigned)InitVal.getMinSignedBits());
+ NumNegativeBits =
+ std::max(NumNegativeBits, (unsigned)InitVal.getSignificantBits());
}
}
@@ -19808,7 +19962,7 @@ void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
NamedDecl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc,
LookupOrdinaryName);
AttributeCommonInfo Info(AliasName, SourceRange(AliasNameLoc),
- AttributeCommonInfo::AS_Pragma);
+ AttributeCommonInfo::Form::Pragma());
AsmLabelAttr *Attr = AsmLabelAttr::CreateImplicit(
Context, AliasName->getName(), /*IsLiteralLabel=*/true, Info);
@@ -19833,7 +19987,7 @@ void Sema::ActOnPragmaWeakID(IdentifierInfo* Name,
Decl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc, LookupOrdinaryName);
if (PrevDecl) {
- PrevDecl->addAttr(WeakAttr::CreateImplicit(Context, PragmaLoc, AttributeCommonInfo::AS_Pragma));
+ PrevDecl->addAttr(WeakAttr::CreateImplicit(Context, PragmaLoc));
} else {
(void)WeakUndeclaredIdentifiers[Name].insert(WeakInfo(nullptr, NameLoc));
}
@@ -19861,7 +20015,7 @@ ObjCContainerDecl *Sema::getObjCDeclContext() const {
return (dyn_cast_or_null<ObjCContainerDecl>(CurContext));
}
-Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD,
+Sema::FunctionEmissionStatus Sema::getEmissionStatus(const FunctionDecl *FD,
bool Final) {
assert(FD && "Expected non-null FunctionDecl");
@@ -19879,13 +20033,13 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD,
// We have to check the GVA linkage of the function's *definition* -- if we
// only have a declaration, we don't know whether or not the function will
// be emitted, because (say) the definition could include "inline".
- FunctionDecl *Def = FD->getDefinition();
+ const FunctionDecl *Def = FD->getDefinition();
return Def && !isDiscardableGVALinkage(
getASTContext().GetGVALinkageForFunction(Def));
};
- if (LangOpts.OpenMPIsDevice) {
+ if (LangOpts.OpenMPIsTargetDevice) {
// In OpenMP device mode we will not emit host only functions, or functions
// we don't need due to their linkage.
std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
index a303c7f57280..ed69e802c95d 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
@@ -273,7 +273,9 @@ static bool checkPositiveIntArgument(Sema &S, const AttrInfo &AI, const Expr *Ex
template <typename AttrTy>
static bool checkAttrMutualExclusion(Sema &S, Decl *D, const ParsedAttr &AL) {
if (const auto *A = D->getAttr<AttrTy>()) {
- S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible) << AL << A;
+ S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
+ << AL << A
+ << (AL.isRegularKeywordAttribute() || A->isRegularKeywordAttribute());
S.Diag(A->getLocation(), diag::note_conflicting_attribute);
return true;
}
@@ -283,8 +285,9 @@ static bool checkAttrMutualExclusion(Sema &S, Decl *D, const ParsedAttr &AL) {
template <typename AttrTy>
static bool checkAttrMutualExclusion(Sema &S, Decl *D, const Attr &AL) {
if (const auto *A = D->getAttr<AttrTy>()) {
- S.Diag(AL.getLocation(), diag::err_attributes_are_not_compatible) << &AL
- << A;
+ S.Diag(AL.getLocation(), diag::err_attributes_are_not_compatible)
+ << &AL << A
+ << (AL.isRegularKeywordAttribute() || A->isRegularKeywordAttribute());
S.Diag(A->getLocation(), diag::note_conflicting_attribute);
return true;
}
@@ -447,7 +450,7 @@ static bool threadSafetyCheckIsSmartPointer(Sema &S, const RecordType* RT) {
if (!CXXRecord)
return false;
- for (auto BaseSpecifier : CXXRecord->bases()) {
+ for (const auto &BaseSpecifier : CXXRecord->bases()) {
if (!foundStarOperator)
foundStarOperator = IsOverloadedOperatorPresent(
BaseSpecifier.getType()->getAsRecordDecl(), OO_Star);
@@ -1878,8 +1881,11 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Cannot have two ownership attributes of different kinds for the same
// index.
if (I->getOwnKind() != K && llvm::is_contained(I->args(), Idx)) {
- S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible) << AL << I;
- return;
+ S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
+ << AL << I
+ << (AL.isRegularKeywordAttribute() ||
+ I->isRegularKeywordAttribute());
+ return;
} else if (K == OwnershipAttr::Returns &&
I->getOwnKind() == OwnershipAttr::Returns) {
// A returns attribute conflicts with any other returns attribute using
@@ -2033,7 +2039,7 @@ static void handleTLSModelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
if (S.Context.getTargetInfo().getTriple().isOSAIX() &&
- Model != "global-dynamic") {
+ Model != "global-dynamic" && Model != "local-exec") {
S.Diag(LiteralLoc, diag::err_aix_attr_unsupported_tls_model) << Model;
return;
}
@@ -2164,7 +2170,7 @@ static void handleNakedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// nonstatic) when in Microsoft compatibility mode.
if (S.getLangOpts().MSVCCompat && isa<CXXMethodDecl>(D)) {
S.Diag(AL.getLoc(), diag::err_attribute_wrong_decl_type_str)
- << AL << "non-member functions";
+ << AL << AL.isRegularKeywordAttribute() << "non-member functions";
return;
}
}
@@ -2177,7 +2183,8 @@ static void handleNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &Attrs) {
if (!isa<ObjCMethodDecl>(D)) {
S.Diag(Attrs.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attrs << ExpectedFunctionOrMethod;
+ << Attrs << Attrs.isRegularKeywordAttribute()
+ << ExpectedFunctionOrMethod;
return;
}
@@ -2218,7 +2225,9 @@ bool Sema::CheckAttrNoArgs(const ParsedAttr &Attrs) {
bool Sema::CheckAttrTarget(const ParsedAttr &AL) {
// Check whether the attribute is valid on the current target.
if (!AL.existsInTarget(Context.getTargetInfo())) {
- Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
+ Diag(AL.getLoc(), AL.isRegularKeywordAttribute()
+ ? diag::err_keyword_not_supported_on_target
+ : diag::warn_unknown_attribute_ignored)
<< AL << AL.getRange();
AL.setInvalid();
return true;
@@ -2238,7 +2247,8 @@ static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.Diag(AL.getLoc(), AL.isStandardAttributeSyntax()
? diag::err_attribute_wrong_decl_type
: diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedFunctionMethodOrBlock;
+ << AL << AL.isRegularKeywordAttribute()
+ << ExpectedFunctionMethodOrBlock;
return;
}
}
@@ -2775,7 +2785,7 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return V;
};
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
- ND, AL.getRange(), NewII, true /*Implicit*/,
+ ND, AL, NewII, true /*Implicit*/,
MinMacCatalystVersion(Introduced.Version),
MinMacCatalystVersion(Deprecated.Version),
MinMacCatalystVersion(Obsoleted.Version), IsUnavailable, Str,
@@ -2817,7 +2827,7 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return V ? *V : VersionTuple();
};
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
- ND, AL.getRange(), NewII, true /*Implicit*/,
+ ND, AL, NewII, true /*Implicit*/,
VersionOrEmptyVersion(NewIntroduced),
VersionOrEmptyVersion(NewDeprecated),
VersionOrEmptyVersion(NewObsoleted), /*IsUnavailable=*/false, Str,
@@ -2834,7 +2844,7 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleExternalSourceSymbolAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
- if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 3))
+ if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 4))
return;
StringRef Language;
@@ -2844,9 +2854,12 @@ static void handleExternalSourceSymbolAttr(Sema &S, Decl *D,
if (const auto *SE = dyn_cast_or_null<StringLiteral>(AL.getArgAsExpr(1)))
DefinedIn = SE->getString();
bool IsGeneratedDeclaration = AL.getArgAsIdent(2) != nullptr;
+ StringRef USR;
+ if (const auto *SE = dyn_cast_or_null<StringLiteral>(AL.getArgAsExpr(3)))
+ USR = SE->getString();
D->addAttr(::new (S.Context) ExternalSourceSymbolAttr(
- S.Context, AL, Language, DefinedIn, IsGeneratedDeclaration));
+ S.Context, AL, Language, DefinedIn, IsGeneratedDeclaration, USR));
}
template <class T>
@@ -2885,12 +2898,10 @@ static void handleVisibilityAttr(Sema &S, Decl *D, const ParsedAttr &AL,
}
// 'type_visibility' can only go on a type or namespace.
- if (isTypeVisibility &&
- !(isa<TagDecl>(D) ||
- isa<ObjCInterfaceDecl>(D) ||
- isa<NamespaceDecl>(D))) {
+ if (isTypeVisibility && !(isa<TagDecl>(D) || isa<ObjCInterfaceDecl>(D) ||
+ isa<NamespaceDecl>(D))) {
S.Diag(AL.getRange().getBegin(), diag::err_attribute_wrong_decl_type)
- << AL << ExpectedTypeOrNamespace;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedTypeOrNamespace;
return;
}
@@ -3109,12 +3120,14 @@ static void handleSentinelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
} else {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedFunctionMethodOrBlock;
+ << AL << AL.isRegularKeywordAttribute()
+ << ExpectedFunctionMethodOrBlock;
return;
}
} else {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedFunctionMethodOrBlock;
+ << AL << AL.isRegularKeywordAttribute()
+ << ExpectedFunctionMethodOrBlock;
return;
}
D->addAttr(::new (S.Context) SentinelAttr(S.Context, AL, sentinel, nullPos));
@@ -3139,7 +3152,8 @@ static void handleWarnUnusedResult(Sema &S, Decl *D, const ParsedAttr &AL) {
// as a function pointer.
if (isa<VarDecl>(D))
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type_str)
- << AL << "functions, classes, or enumerations";
+ << AL << AL.isRegularKeywordAttribute()
+ << "functions, classes, or enumerations";
// If this is spelled as the standard C++17 attribute, but not in C++17,
// warn about using it as an extension. If there are attribute arguments,
@@ -3185,7 +3199,7 @@ static void handleWeakImportAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Nothing to warn about here.
} else
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedVariableOrFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedVariableOrFunction;
return;
}
@@ -3505,6 +3519,7 @@ bool Sema::checkTargetClonesAttrString(
enum SecondParam { None, CPU, Tune };
enum ThirdParam { Target, TargetClones };
HasCommas = HasCommas || Str.contains(',');
+ const TargetInfo &TInfo = Context.getTargetInfo();
// Warn on empty at the beginning of a string.
if (Str.size() == 0)
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
@@ -3514,9 +3529,9 @@ bool Sema::checkTargetClonesAttrString(
while (!Parts.second.empty()) {
Parts = Parts.second.split(',');
StringRef Cur = Parts.first.trim();
- SourceLocation CurLoc = Literal->getLocationOfByte(
- Cur.data() - Literal->getString().data(), getSourceManager(),
- getLangOpts(), Context.getTargetInfo());
+ SourceLocation CurLoc =
+ Literal->getLocationOfByte(Cur.data() - Literal->getString().data(),
+ getSourceManager(), getLangOpts(), TInfo);
bool DefaultIsDupe = false;
bool HasCodeGenImpact = false;
@@ -3524,7 +3539,7 @@ bool Sema::checkTargetClonesAttrString(
return Diag(CurLoc, diag::warn_unsupported_target_attribute)
<< Unsupported << None << "" << TargetClones;
- if (Context.getTargetInfo().getTriple().isAArch64()) {
+ if (TInfo.getTriple().isAArch64()) {
// AArch64 target clones specific
if (Cur == "default") {
DefaultIsDupe = HasDefault;
@@ -3539,13 +3554,12 @@ bool Sema::checkTargetClonesAttrString(
while (!CurParts.second.empty()) {
CurParts = CurParts.second.split('+');
StringRef CurFeature = CurParts.first.trim();
- if (!Context.getTargetInfo().validateCpuSupports(CurFeature)) {
+ if (!TInfo.validateCpuSupports(CurFeature)) {
Diag(CurLoc, diag::warn_unsupported_target_attribute)
<< Unsupported << None << CurFeature << TargetClones;
continue;
}
- std::string Options;
- if (Context.getTargetInfo().getFeatureDepOptions(CurFeature, Options))
+ if (TInfo.doesFeatureAffectCodeGen(CurFeature))
HasCodeGenImpact = true;
CurFeatures.push_back(CurFeature);
}
@@ -3756,7 +3770,7 @@ static void handleEnumExtensibilityAttr(Sema &S, Decl *D,
/// Handle __attribute__((format_arg((idx)))) attribute based on
/// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
static void handleFormatArgAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- Expr *IdxExpr = AL.getArgAsExpr(0);
+ const Expr *IdxExpr = AL.getArgAsExpr(0);
ParamIdx Idx;
if (!checkFunctionOrMethodParameterIndex(S, D, AL, 1, IdxExpr, Idx))
return;
@@ -3831,7 +3845,7 @@ static void handleInitPriorityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.Diag(AL.getLoc(), diag::warn_attribute_ignored) << AL;
return;
}
-
+
if (S.getLangOpts().HLSL) {
S.Diag(AL.getLoc(), diag::err_hlsl_init_priority_unsupported);
return;
@@ -3881,7 +3895,9 @@ ErrorAttr *Sema::mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI,
(EA->isWarning() && NewAttr == "warning");
if (!Match) {
Diag(EA->getLocation(), diag::err_attributes_are_not_compatible)
- << CI << EA;
+ << CI << EA
+ << (CI.isRegularKeywordAttribute() ||
+ EA->isRegularKeywordAttribute());
Diag(CI.getLoc(), diag::note_conflicting_attribute);
return nullptr;
}
@@ -4198,8 +4214,8 @@ static void handleTransparentUnionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
RD = dyn_cast<RecordDecl>(D);
if (!RD || !RD->isUnion()) {
- S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type) << AL
- << ExpectedUnion;
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL << AL.isRegularKeywordAttribute() << ExpectedUnion;
return;
}
@@ -4330,6 +4346,27 @@ void Sema::AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E) {
}
static void handleAlignedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (AL.hasParsedType()) {
+ const ParsedType &TypeArg = AL.getTypeArg();
+ TypeSourceInfo *TInfo;
+ (void)S.GetTypeFromParser(
+ ParsedType::getFromOpaquePtr(TypeArg.getAsOpaquePtr()), &TInfo);
+ if (AL.isPackExpansion() &&
+ !TInfo->getType()->containsUnexpandedParameterPack()) {
+ S.Diag(AL.getEllipsisLoc(),
+ diag::err_pack_expansion_without_parameter_packs);
+ return;
+ }
+
+ if (!AL.isPackExpansion() &&
+ S.DiagnoseUnexpandedParameterPack(TInfo->getTypeLoc().getBeginLoc(),
+ TInfo, Sema::UPPC_Expression))
+ return;
+
+ S.AddAlignedAttr(D, AL, TInfo, AL.isPackExpansion());
+ return;
+ }
+
// check the attribute arguments.
if (AL.getNumArgs() > 1) {
S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << AL << 1;
@@ -4354,53 +4391,61 @@ static void handleAlignedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.AddAlignedAttr(D, AL, E, AL.isPackExpansion());
}
+/// Perform checking of type validity
+///
+/// C++11 [dcl.align]p1:
+/// An alignment-specifier may be applied to a variable or to a class
+/// data member, but it shall not be applied to a bit-field, a function
+/// parameter, the formal parameter of a catch clause, or a variable
+/// declared with the register storage class specifier. An
+/// alignment-specifier may also be applied to the declaration of a class
+/// or enumeration type.
+/// CWG 2354:
+/// CWG agreed to remove permission for alignas to be applied to
+/// enumerations.
+/// C11 6.7.5/2:
+/// An alignment attribute shall not be specified in a declaration of
+/// a typedef, or a bit-field, or a function, or a parameter, or an
+/// object declared with the register storage-class specifier.
+static bool validateAlignasAppliedType(Sema &S, Decl *D,
+ const AlignedAttr &Attr,
+ SourceLocation AttrLoc) {
+ int DiagKind = -1;
+ if (isa<ParmVarDecl>(D)) {
+ DiagKind = 0;
+ } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->getStorageClass() == SC_Register)
+ DiagKind = 1;
+ if (VD->isExceptionVariable())
+ DiagKind = 2;
+ } else if (const auto *FD = dyn_cast<FieldDecl>(D)) {
+ if (FD->isBitField())
+ DiagKind = 3;
+ } else if (const auto *ED = dyn_cast<EnumDecl>(D)) {
+ if (ED->getLangOpts().CPlusPlus)
+ DiagKind = 4;
+ } else if (!isa<TagDecl>(D)) {
+ return S.Diag(AttrLoc, diag::err_attribute_wrong_decl_type)
+ << &Attr << Attr.isRegularKeywordAttribute()
+ << (Attr.isC11() ? ExpectedVariableOrField
+ : ExpectedVariableFieldOrTag);
+ }
+ if (DiagKind != -1) {
+ return S.Diag(AttrLoc, diag::err_alignas_attribute_wrong_decl_type)
+ << &Attr << DiagKind;
+ }
+ return false;
+}
+
void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion) {
AlignedAttr TmpAttr(Context, CI, true, E);
SourceLocation AttrLoc = CI.getLoc();
// C++11 alignas(...) and C11 _Alignas(...) have additional requirements.
- if (TmpAttr.isAlignas()) {
- // C++11 [dcl.align]p1:
- // An alignment-specifier may be applied to a variable or to a class
- // data member, but it shall not be applied to a bit-field, a function
- // parameter, the formal parameter of a catch clause, or a variable
- // declared with the register storage class specifier. An
- // alignment-specifier may also be applied to the declaration of a class
- // or enumeration type.
- // CWG 2354:
- // CWG agreed to remove permission for alignas to be applied to
- // enumerations.
- // C11 6.7.5/2:
- // An alignment attribute shall not be specified in a declaration of
- // a typedef, or a bit-field, or a function, or a parameter, or an
- // object declared with the register storage-class specifier.
- int DiagKind = -1;
- if (isa<ParmVarDecl>(D)) {
- DiagKind = 0;
- } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
- if (VD->getStorageClass() == SC_Register)
- DiagKind = 1;
- if (VD->isExceptionVariable())
- DiagKind = 2;
- } else if (const auto *FD = dyn_cast<FieldDecl>(D)) {
- if (FD->isBitField())
- DiagKind = 3;
- } else if (const auto *ED = dyn_cast<EnumDecl>(D)) {
- if (ED->getLangOpts().CPlusPlus)
- DiagKind = 4;
- } else if (!isa<TagDecl>(D)) {
- Diag(AttrLoc, diag::err_attribute_wrong_decl_type) << &TmpAttr
- << (TmpAttr.isC11() ? ExpectedVariableOrField
- : ExpectedVariableFieldOrTag);
- return;
- }
- if (DiagKind != -1) {
- Diag(AttrLoc, diag::err_alignas_attribute_wrong_decl_type)
- << &TmpAttr << DiagKind;
- return;
- }
- }
+ if (TmpAttr.isAlignas() &&
+ validateAlignasAppliedType(*this, D, TmpAttr, AttrLoc))
+ return;
if (E->isValueDependent()) {
// We can't support a dependent alignment on a non-dependent type,
@@ -4428,6 +4473,15 @@ void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
if (ICE.isInvalid())
return;
+ uint64_t MaximumAlignment = Sema::MaximumAlignment;
+ if (Context.getTargetInfo().getTriple().isOSBinFormatCOFF())
+ MaximumAlignment = std::min(MaximumAlignment, uint64_t(8192));
+ if (Alignment > MaximumAlignment) {
+ Diag(AttrLoc, diag::err_attribute_aligned_too_great)
+ << MaximumAlignment << E->getSourceRange();
+ return;
+ }
+
uint64_t AlignVal = Alignment.getZExtValue();
// C++11 [dcl.align]p2:
// -- if the constant expression evaluates to zero, the alignment
@@ -4442,15 +4496,6 @@ void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
}
}
- uint64_t MaximumAlignment = Sema::MaximumAlignment;
- if (Context.getTargetInfo().getTriple().isOSBinFormatCOFF())
- MaximumAlignment = std::min(MaximumAlignment, uint64_t(8192));
- if (AlignVal > MaximumAlignment) {
- Diag(AttrLoc, diag::err_attribute_aligned_too_great)
- << MaximumAlignment << E->getSourceRange();
- return;
- }
-
const auto *VD = dyn_cast<VarDecl>(D);
if (VD) {
unsigned MaxTLSAlign =
@@ -4477,15 +4522,56 @@ void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
AlignedAttr *AA = ::new (Context) AlignedAttr(Context, CI, true, ICE.get());
AA->setPackExpansion(IsPackExpansion);
+ AA->setCachedAlignmentValue(
+ static_cast<unsigned>(AlignVal * Context.getCharWidth()));
D->addAttr(AA);
}
void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI,
TypeSourceInfo *TS, bool IsPackExpansion) {
- // FIXME: Cache the number on the AL object if non-dependent?
- // FIXME: Perform checking of type validity
+ AlignedAttr TmpAttr(Context, CI, false, TS);
+ SourceLocation AttrLoc = CI.getLoc();
+
+ // C++11 alignas(...) and C11 _Alignas(...) have additional requirements.
+ if (TmpAttr.isAlignas() &&
+ validateAlignasAppliedType(*this, D, TmpAttr, AttrLoc))
+ return;
+
+ if (TS->getType()->isDependentType()) {
+ // We can't support a dependent alignment on a non-dependent type,
+ // because we have no way to model that a type is "type-dependent"
+ // but not dependent in any other way.
+ if (const auto *TND = dyn_cast<TypedefNameDecl>(D)) {
+ if (!TND->getUnderlyingType()->isDependentType()) {
+ Diag(AttrLoc, diag::err_alignment_dependent_typedef_name)
+ << TS->getTypeLoc().getSourceRange();
+ return;
+ }
+ }
+
+ AlignedAttr *AA = ::new (Context) AlignedAttr(Context, CI, false, TS);
+ AA->setPackExpansion(IsPackExpansion);
+ D->addAttr(AA);
+ return;
+ }
+
+ const auto *VD = dyn_cast<VarDecl>(D);
+ unsigned AlignVal = TmpAttr.getAlignment(Context);
+ // On AIX, an aligned attribute can not decrease the alignment when applied
+ // to a variable declaration with vector type.
+ if (VD && Context.getTargetInfo().getTriple().isOSAIX()) {
+ const Type *Ty = VD->getType().getTypePtr();
+ if (Ty->isVectorType() &&
+ Context.toCharUnitsFromBits(AlignVal).getQuantity() < 16) {
+ Diag(VD->getLocation(), diag::warn_aligned_attr_underaligned)
+ << VD->getType() << 16;
+ return;
+ }
+ }
+
AlignedAttr *AA = ::new (Context) AlignedAttr(Context, CI, false, TS);
AA->setPackExpansion(IsPackExpansion);
+ AA->setCachedAlignmentValue(AlignVal);
D->addAttr(AA);
}
@@ -4815,8 +4901,9 @@ InternalLinkageAttr *Sema::mergeInternalLinkageAttr(Decl *D,
// ImplicitParm or VarTemplateSpecialization).
if (VD->getKind() != Decl::Var) {
Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << (getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass
- : ExpectedVariableOrFunction);
+ << AL << AL.isRegularKeywordAttribute()
+ << (getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass
+ : ExpectedVariableOrFunction);
return nullptr;
}
// Attribute does not apply to non-static local variables.
@@ -4835,8 +4922,9 @@ Sema::mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL) {
// ImplicitParm or VarTemplateSpecialization).
if (VD->getKind() != Decl::Var) {
Diag(AL.getLocation(), diag::warn_attribute_wrong_decl_type)
- << &AL << (getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass
- : ExpectedVariableOrFunction);
+ << &AL << AL.isRegularKeywordAttribute()
+ << (getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass
+ : ExpectedVariableOrFunction);
return nullptr;
}
// Attribute does not apply to non-static local variables.
@@ -4867,7 +4955,9 @@ SwiftNameAttr *Sema::mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
if (const auto *PrevSNA = D->getAttr<SwiftNameAttr>()) {
if (PrevSNA->getName() != Name && !PrevSNA->isImplicit()) {
Diag(PrevSNA->getLocation(), diag::err_attributes_are_not_compatible)
- << PrevSNA << &SNA;
+ << PrevSNA << &SNA
+ << (PrevSNA->isRegularKeywordAttribute() ||
+ SNA.isRegularKeywordAttribute());
Diag(SNA.getLoc(), diag::note_conflicting_attribute);
}
@@ -4967,7 +5057,10 @@ static void handleGlobalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (FD->isInlineSpecified() && !S.getLangOpts().CUDAIsDevice)
S.Diag(FD->getBeginLoc(), diag::warn_kern_is_inline) << FD;
- D->addAttr(::new (S.Context) CUDAGlobalAttr(S.Context, AL));
+ if (AL.getKind() == ParsedAttr::AT_NVPTXKernel)
+ D->addAttr(::new (S.Context) NVPTXKernelAttr(S.Context, AL));
+ else
+ D->addAttr(::new (S.Context) CUDAGlobalAttr(S.Context, AL));
// In host compilation the kernel is emitted as a stub function, which is
// a helper function for launching the kernel. The instructions in the helper
// function has nothing to do with the source code of the kernel. Do not emit
@@ -5029,7 +5122,7 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!isa<ObjCMethodDecl>(D)) {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedFunctionOrMethod;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunctionOrMethod;
return;
}
@@ -5160,7 +5253,9 @@ static void handleLifetimeCategoryAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
: nullptr;
if (ExistingDerefType != ParmType.getTypePtrOrNull()) {
S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
- << AL << OAttr;
+ << AL << OAttr
+ << (AL.isRegularKeywordAttribute() ||
+ OAttr->isRegularKeywordAttribute());
S.Diag(OAttr->getLocation(), diag::note_conflicting_attribute);
}
return;
@@ -5177,7 +5272,9 @@ static void handleLifetimeCategoryAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
: nullptr;
if (ExistingDerefType != ParmType.getTypePtrOrNull()) {
S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
- << AL << PAttr;
+ << AL << PAttr
+ << (AL.isRegularKeywordAttribute() ||
+ PAttr->isRegularKeywordAttribute());
S.Diag(PAttr->getLocation(), diag::note_conflicting_attribute);
}
return;
@@ -5252,6 +5349,9 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
case ParsedAttr::AT_AArch64SVEPcs:
CC = CC_AArch64SVEPCS;
break;
+ case ParsedAttr::AT_ArmStreaming:
+ CC = CC_C; // FIXME: placeholder until real SME support is added.
+ break;
case ParsedAttr::AT_AMDGPUKernelCall:
CC = CC_AMDGPUKernelCall;
break;
@@ -5409,7 +5509,9 @@ void Sema::AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
if (auto existingAttr = D->getAttr<ParameterABIAttr>()) {
if (existingAttr->getABI() != abi) {
Diag(CI.getLoc(), diag::err_attributes_are_not_compatible)
- << getParameterABISpelling(abi) << existingAttr;
+ << getParameterABISpelling(abi) << existingAttr
+ << (CI.isRegularKeywordAttribute() ||
+ existingAttr->isRegularKeywordAttribute());
Diag(existingAttr->getLocation(), diag::note_conflicting_attribute);
return;
}
@@ -5601,7 +5703,7 @@ static void handleTypeTagForDatatypeAttr(Sema &S, Decl *D,
if (!isa<VarDecl>(D)) {
S.Diag(AL.getLoc(), diag::err_attribute_wrong_decl_type)
- << AL << ExpectedVariable;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedVariable;
return;
}
@@ -5696,6 +5798,14 @@ static bool ArmSveAliasValid(ASTContext &Context, unsigned BuiltinID,
BuiltinID <= AArch64::LastSVEBuiltin;
}
+static bool ArmSmeAliasValid(ASTContext &Context, unsigned BuiltinID,
+ StringRef AliasName) {
+ if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID))
+ BuiltinID = Context.BuiltinInfo.getAuxBuiltinID(BuiltinID);
+ return BuiltinID >= AArch64::FirstSMEBuiltin &&
+ BuiltinID <= AArch64::LastSMEBuiltin;
+}
+
static void handleArmBuiltinAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.isArgIdent(0)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
@@ -5708,7 +5818,8 @@ static void handleArmBuiltinAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef AliasName = cast<FunctionDecl>(D)->getIdentifier()->getName();
bool IsAArch64 = S.Context.getTargetInfo().getTriple().isAArch64();
- if ((IsAArch64 && !ArmSveAliasValid(S.Context, BuiltinID, AliasName)) ||
+ if ((IsAArch64 && !ArmSveAliasValid(S.Context, BuiltinID, AliasName) &&
+ !ArmSmeAliasValid(S.Context, BuiltinID, AliasName)) ||
(!IsAArch64 && !ArmMveAliasValid(BuiltinID, AliasName) &&
!ArmCdeAliasValid(BuiltinID, AliasName))) {
S.Diag(AL.getLoc(), diag::err_attribute_arm_builtin_alias);
@@ -5898,7 +6009,8 @@ static void handleXReturnsXRetainedAttr(Sema &S, Decl *D,
break;
}
S.Diag(D->getBeginLoc(), diag::warn_attribute_wrong_decl_type)
- << AL.getRange() << AL << ExpectedDeclKind;
+ << AL.getRange() << AL << AL.isRegularKeywordAttribute()
+ << ExpectedDeclKind;
return;
}
@@ -6170,10 +6282,12 @@ static void handleObjCBoxable(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleObjCOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (hasDeclarator(D)) return;
+ if (hasDeclarator(D))
+ return;
S.Diag(D->getBeginLoc(), diag::err_attribute_wrong_decl_type)
- << AL.getRange() << AL << ExpectedVariable;
+ << AL.getRange() << AL << AL.isRegularKeywordAttribute()
+ << ExpectedVariable;
}
static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
@@ -6666,7 +6780,8 @@ bool Sema::DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
Params = F->parameters();
if (!F->hasWrittenPrototype()) {
- Diag(Loc, diag::warn_attribute_wrong_decl_type) << AL
+ Diag(Loc, diag::warn_attribute_wrong_decl_type)
+ << AL << AL.isRegularKeywordAttribute()
<< ExpectedFunctionWithProtoType;
return false;
}
@@ -6787,7 +6902,7 @@ static void handleSwiftNewType(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!isa<TypedefNameDecl>(D)) {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type_str)
- << AL << "typedefs";
+ << AL << AL.isRegularKeywordAttribute() << "typedefs";
return;
}
@@ -7262,7 +7377,7 @@ static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// a function with no parameters and void return type.
if (!isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'interrupt'" << ExpectedFunctionOrMethod;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunctionOrMethod;
return;
}
@@ -7335,7 +7450,7 @@ static void handleMipsInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'interrupt'" << ExpectedFunctionOrMethod;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunctionOrMethod;
return;
}
@@ -7410,7 +7525,8 @@ static void handleAnyX86InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
CXXMethodDecl::isStaticOverloadedOperator(
cast<NamedDecl>(D)->getDeclName().getCXXOverloadedOperator())) {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedFunctionWithProtoType;
+ << AL << AL.isRegularKeywordAttribute()
+ << ExpectedFunctionWithProtoType;
return;
}
// Interrupt handler must have void return type.
@@ -7466,7 +7582,7 @@ static void handleAnyX86InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleAVRInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'interrupt'" << ExpectedFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
return;
}
@@ -7479,7 +7595,7 @@ static void handleAVRInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleAVRSignalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'signal'" << ExpectedFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
return;
}
@@ -7532,10 +7648,11 @@ BTFDeclTagAttr *Sema::mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL) {
return ::new (Context) BTFDeclTagAttr(Context, AL, AL.getBTFDeclTag());
}
-static void handleWebAssemblyExportNameAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+static void handleWebAssemblyExportNameAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
if (!isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'export_name'" << ExpectedFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
return;
}
@@ -7659,7 +7776,7 @@ static void handleRISCVInterruptAttr(Sema &S, Decl *D,
if (D->getFunctionType() == nullptr) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'interrupt'" << ExpectedFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
return;
}
@@ -7854,7 +7971,7 @@ static void handleX86ForceAlignArgPointerAttr(Sema &S, Decl *D,
// Attribute can only be applied to function types.
if (!isa<FunctionDecl>(D)) {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
return;
}
@@ -8133,7 +8250,7 @@ static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D,
.Case("no_sanitize_memory", "memory");
if (isGlobalVar(D) && SanitizerName != "address")
S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << AL << ExpectedFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
// FIXME: Rather than create a NoSanitizeSpecificAttr, this creates a
// NoSanitizeAttr object; but we need to calculate the correct spelling list
@@ -8252,6 +8369,22 @@ static void handleFunctionReturnThunksAttr(Sema &S, Decl *D,
D->addAttr(FunctionReturnThunksAttr::Create(S.Context, Kind, AL));
}
+static void handleAvailableOnlyInDefaultEvalMethod(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ assert(isa<TypedefNameDecl>(D) && "This attribute only applies to a typedef");
+ handleSimpleAttribute<AvailableOnlyInDefaultEvalMethodAttr>(S, D, AL);
+}
+
+static void handleNoMergeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ auto *VDecl = dyn_cast<VarDecl>(D);
+ if (VDecl && !VDecl->isFunctionPointerType()) {
+ S.Diag(AL.getLoc(), diag::warn_attribute_ignored_non_function_pointer)
+ << AL << VDecl;
+ return;
+ }
+ D->addAttr(NoMergeAttr::Create(S.Context, AL));
+}
+
static void handleSYCLKernelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// The 'sycl_kernel' attribute applies only to function templates.
const auto *FD = cast<FunctionDecl>(D);
@@ -8446,6 +8579,11 @@ static void handleHandleAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(Attr::Create(S.Context, Argument, AL));
}
+template<typename Attr>
+static void handleUnsafeBufferUsage(Sema &S, Decl *D, const ParsedAttr &AL) {
+ D->addAttr(Attr::Create(S.Context, AL));
+}
+
static void handleCFGuardAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// The guard attribute takes a single identifier argument.
@@ -8580,13 +8718,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
// Ignore C++11 attributes on declarator chunks: they appertain to the type
// instead.
- // FIXME: We currently check the attribute syntax directly instead of using
- // isCXX11Attribute(), which currently erroneously classifies the C11
- // `_Alignas` attribute as a C++11 attribute. `_Alignas` can appear on the
- // `DeclSpec`, so we need to let it through here to make sure it is processed
- // appropriately. Once the behavior of isCXX11Attribute() is fixed, we can
- // go back to using that here.
- if (AL.getSyntax() == ParsedAttr::AS_CXX11 && !Options.IncludeCXX11Attributes)
+ if (AL.isCXX11Attribute() && !Options.IncludeCXX11Attributes)
return;
// Unknown attributes are automatically warned on. Target-specific attributes
@@ -8595,7 +8727,9 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
if (AL.getKind() == ParsedAttr::UnknownAttribute ||
!AL.existsInTarget(S.Context.getTargetInfo())) {
S.Diag(AL.getLoc(),
- AL.isDeclspecAttribute()
+ AL.isRegularKeywordAttribute()
+ ? (unsigned)diag::err_keyword_not_supported_on_target
+ : AL.isDeclspecAttribute()
? (unsigned)diag::warn_unhandled_ms_attribute_ignored
: (unsigned)diag::warn_unknown_attribute_ignored)
<< AL << AL.getRange();
@@ -8624,7 +8758,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
if (AL.isTypeAttr()) {
if (Options.IgnoreTypeAttributes)
break;
- if (!AL.isStandardAttributeSyntax()) {
+ if (!AL.isStandardAttributeSyntax() && !AL.isRegularKeywordAttribute()) {
// Non-[[]] type attributes are handled in processTypeAttrs(); silently
// move on.
break;
@@ -8689,7 +8823,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
// needed for type attributes as well as statement attributes in Attr.td
// that do not list any subjects.
S.Diag(AL.getLoc(), diag::err_attribute_invalid_on_decl)
- << AL << D->getLocation();
+ << AL << AL.isRegularKeywordAttribute() << D->getLocation();
break;
case ParsedAttr::AT_Interrupt:
handleInterruptAttr(S, D, AL);
@@ -8843,6 +8977,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
case ParsedAttr::AT_CalledOnce:
handleCalledOnceAttr(S, D, AL);
break;
+ case ParsedAttr::AT_NVPTXKernel:
case ParsedAttr::AT_CUDAGlobal:
handleGlobalAttr(S, D, AL);
break;
@@ -9130,6 +9265,13 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
case ParsedAttr::AT_FunctionReturnThunks:
handleFunctionReturnThunksAttr(S, D, AL);
break;
+ case ParsedAttr::AT_NoMerge:
+ handleNoMergeAttr(S, D, AL);
+ break;
+
+ case ParsedAttr::AT_AvailableOnlyInDefaultEvalMethod:
+ handleAvailableOnlyInDefaultEvalMethod(S, D, AL);
+ break;
// Microsoft attributes:
case ParsedAttr::AT_LayoutVersion:
@@ -9328,6 +9470,10 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
handleHandleAttr<ReleaseHandleAttr>(S, D, AL);
break;
+ case ParsedAttr::AT_UnsafeBufferUsage:
+ handleUnsafeBufferUsage<UnsafeBufferUsageAttr>(S, D, AL);
+ break;
+
case ParsedAttr::AT_UseHandle:
handleHandleAttr<UseHandleAttr>(S, D, AL);
break;
@@ -9395,19 +9541,19 @@ void Sema::ProcessDeclAttributeList(
} else if (!D->hasAttr<CUDAGlobalAttr>()) {
if (const auto *A = D->getAttr<AMDGPUFlatWorkGroupSizeAttr>()) {
Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << A << ExpectedKernelFunction;
+ << A << A->isRegularKeywordAttribute() << ExpectedKernelFunction;
D->setInvalidDecl();
} else if (const auto *A = D->getAttr<AMDGPUWavesPerEUAttr>()) {
Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << A << ExpectedKernelFunction;
+ << A << A->isRegularKeywordAttribute() << ExpectedKernelFunction;
D->setInvalidDecl();
} else if (const auto *A = D->getAttr<AMDGPUNumSGPRAttr>()) {
Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << A << ExpectedKernelFunction;
+ << A << A->isRegularKeywordAttribute() << ExpectedKernelFunction;
D->setInvalidDecl();
} else if (const auto *A = D->getAttr<AMDGPUNumVGPRAttr>()) {
Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << A << ExpectedKernelFunction;
+ << A << A->isRegularKeywordAttribute() << ExpectedKernelFunction;
D->setInvalidDecl();
}
}
@@ -9542,8 +9688,7 @@ void Sema::DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, const WeakInfo &W) {
NamedDecl *NewD = DeclClonePragmaWeak(ND, W.getAlias(), W.getLocation());
NewD->addAttr(
AliasAttr::CreateImplicit(Context, NDId->getName(), W.getLocation()));
- NewD->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation(),
- AttributeCommonInfo::AS_Pragma));
+ NewD->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation()));
WeakTopLevelDecl.push_back(NewD);
// FIXME: "hideous" code from Sema::LazilyCreateBuiltin
// to insert Decl at TU scope, sorry.
@@ -9554,8 +9699,7 @@ void Sema::DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, const WeakInfo &W) {
PushOnScopeChains(NewD, S);
CurContext = SavedContext;
} else { // just add weak to existing
- ND->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation(),
- AttributeCommonInfo::AS_Pragma));
+ ND->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation()));
}
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
index df83442a8cd1..b62f3c475c45 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
@@ -34,6 +34,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
@@ -41,10 +42,11 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/SaveAndRestore.h"
#include <map>
#include <optional>
#include <set>
@@ -761,7 +763,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
// C++20 [dcl.dcl]/8:
// If decl-specifier-seq contains any decl-specifier other than static,
// thread_local, auto, or cv-qualifiers, the program is ill-formed.
- // C++2b [dcl.pre]/6:
+ // C++23 [dcl.pre]/6:
// Each decl-specifier in the decl-specifier-seq shall be static,
// thread_local, auto (9.2.9.6 [dcl.spec.auto]), or a cv-qualifier.
auto &DS = D.getDeclSpec();
@@ -1721,6 +1723,7 @@ static bool CheckConstexprParameterTypes(Sema &SemaRef,
e = FT->param_type_end();
i != e; ++i, ++ArgIndex) {
const ParmVarDecl *PD = FD->getParamDecl(ArgIndex);
+ assert(PD && "null in a parameter list");
SourceLocation ParamLoc = PD->getLocation();
if (CheckLiteralType(SemaRef, Kind, ParamLoc, *i,
diag::err_constexpr_non_literal_param, ArgIndex + 1,
@@ -1926,16 +1929,16 @@ static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
if (VD->isStaticLocal()) {
if (Kind == Sema::CheckConstexprKind::Diagnose) {
SemaRef.Diag(VD->getLocation(),
- SemaRef.getLangOpts().CPlusPlus2b
+ SemaRef.getLangOpts().CPlusPlus23
? diag::warn_cxx20_compat_constexpr_var
: diag::ext_constexpr_static_var)
<< isa<CXXConstructorDecl>(Dcl)
<< (VD->getTLSKind() == VarDecl::TLS_Dynamic);
- } else if (!SemaRef.getLangOpts().CPlusPlus2b) {
+ } else if (!SemaRef.getLangOpts().CPlusPlus23) {
return false;
}
}
- if (SemaRef.LangOpts.CPlusPlus2b) {
+ if (SemaRef.LangOpts.CPlusPlus23) {
CheckLiteralType(SemaRef, Kind, VD->getLocation(), VD->getType(),
diag::warn_cxx20_compat_constexpr_var,
isa<CXXConstructorDecl>(Dcl),
@@ -2274,15 +2277,15 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
if (Kind == Sema::CheckConstexprKind::CheckValid) {
// If this is only valid as an extension, report that we don't satisfy the
// constraints of the current language.
- if ((Cxx2bLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus2b) ||
+ if ((Cxx2bLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus23) ||
(Cxx2aLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus20) ||
(Cxx1yLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus17))
return false;
} else if (Cxx2bLoc.isValid()) {
SemaRef.Diag(Cxx2bLoc,
- SemaRef.getLangOpts().CPlusPlus2b
+ SemaRef.getLangOpts().CPlusPlus23
? diag::warn_cxx20_compat_constexpr_body_invalid_stmt
- : diag::ext_constexpr_body_invalid_stmt_cxx2b)
+ : diag::ext_constexpr_body_invalid_stmt_cxx23)
<< isa<CXXConstructorDecl>(Dcl);
} else if (Cxx2aLoc.isValid()) {
SemaRef.Diag(Cxx2aLoc,
@@ -2435,6 +2438,114 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
return true;
}
+bool Sema::CheckImmediateEscalatingFunctionDefinition(
+ FunctionDecl *FD, const sema::FunctionScopeInfo *FSI) {
+ if (!getLangOpts().CPlusPlus20 || !FD->isImmediateEscalating())
+ return true;
+ FD->setBodyContainsImmediateEscalatingExpressions(
+ FSI->FoundImmediateEscalatingExpression);
+ if (FSI->FoundImmediateEscalatingExpression) {
+ auto it = UndefinedButUsed.find(FD->getCanonicalDecl());
+ if (it != UndefinedButUsed.end()) {
+ Diag(it->second, diag::err_immediate_function_used_before_definition)
+ << it->first;
+ Diag(FD->getLocation(), diag::note_defined_here) << FD;
+ if (FD->isImmediateFunction() && !FD->isConsteval())
+ DiagnoseImmediateEscalatingReason(FD);
+ return false;
+ }
+ }
+ return true;
+}
+
+void Sema::DiagnoseImmediateEscalatingReason(FunctionDecl *FD) {
+ assert(FD->isImmediateEscalating() && !FD->isConsteval() &&
+ "expected an immediate function");
+ assert(FD->hasBody() && "expected the function to have a body");
+ struct ImmediateEscalatingExpressionsVisitor
+ : public RecursiveASTVisitor<ImmediateEscalatingExpressionsVisitor> {
+
+ using Base = RecursiveASTVisitor<ImmediateEscalatingExpressionsVisitor>;
+ Sema &SemaRef;
+
+ const FunctionDecl *ImmediateFn;
+ bool ImmediateFnIsConstructor;
+ CXXConstructorDecl *CurrentConstructor = nullptr;
+ CXXCtorInitializer *CurrentInit = nullptr;
+
+ ImmediateEscalatingExpressionsVisitor(Sema &SemaRef, FunctionDecl *FD)
+ : SemaRef(SemaRef), ImmediateFn(FD),
+ ImmediateFnIsConstructor(isa<CXXConstructorDecl>(FD)) {}
+
+ bool shouldVisitImplicitCode() const { return true; }
+ bool shouldVisitLambdaBody() const { return false; }
+
+ void Diag(const Expr *E, const FunctionDecl *Fn, bool IsCall) {
+ SourceLocation Loc = E->getBeginLoc();
+ SourceRange Range = E->getSourceRange();
+ if (CurrentConstructor && CurrentInit) {
+ Loc = CurrentConstructor->getLocation();
+ Range = CurrentInit->isWritten() ? CurrentInit->getSourceRange()
+ : SourceRange();
+ }
+ SemaRef.Diag(Loc, diag::note_immediate_function_reason)
+ << ImmediateFn << Fn << Fn->isConsteval() << IsCall
+ << isa<CXXConstructorDecl>(Fn) << ImmediateFnIsConstructor
+ << (CurrentInit != nullptr)
+ << (CurrentInit && !CurrentInit->isWritten())
+ << (CurrentInit ? CurrentInit->getAnyMember() : nullptr) << Range;
+ }
+ bool TraverseCallExpr(CallExpr *E) {
+ if (const auto *DR =
+ dyn_cast<DeclRefExpr>(E->getCallee()->IgnoreImplicit());
+ DR && DR->isImmediateEscalating()) {
+ Diag(E, E->getDirectCallee(), /*IsCall=*/true);
+ return false;
+ }
+
+ for (Expr *A : E->arguments())
+ if (!getDerived().TraverseStmt(A))
+ return false;
+
+ return true;
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (const auto *ReferencedFn = dyn_cast<FunctionDecl>(E->getDecl());
+ ReferencedFn && E->isImmediateEscalating()) {
+ Diag(E, ReferencedFn, /*IsCall=*/false);
+ return false;
+ }
+
+ return true;
+ }
+
+ bool VisitCXXConstructExpr(CXXConstructExpr *E) {
+ CXXConstructorDecl *D = E->getConstructor();
+ if (E->isImmediateEscalating()) {
+ Diag(E, D, /*IsCall=*/true);
+ return false;
+ }
+ return true;
+ }
+
+ bool TraverseConstructorInitializer(CXXCtorInitializer *Init) {
+ llvm::SaveAndRestore RAII(CurrentInit, Init);
+ return Base::TraverseConstructorInitializer(Init);
+ }
+
+ bool TraverseCXXConstructorDecl(CXXConstructorDecl *Ctr) {
+ llvm::SaveAndRestore RAII(CurrentConstructor, Ctr);
+ return Base::TraverseCXXConstructorDecl(Ctr);
+ }
+
+ bool TraverseType(QualType T) { return true; }
+ bool VisitBlockExpr(BlockExpr *T) { return true; }
+
+ } Visitor(*this, FD);
+ Visitor.TraverseDecl(FD);
+}
+
/// Get the class that is directly named by the current context. This is the
/// class for which an unqualified-id in this scope could name a constructor
/// or destructor.
@@ -2608,7 +2719,8 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
}
// For the MS ABI, propagate DLL attributes to base class templates.
- if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+ Context.getTargetInfo().getTriple().isPS()) {
if (Attr *ClassAttr = getDLLAttr(Class)) {
if (auto *BaseTemplate = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
BaseType->getAsCXXRecordDecl())) {
@@ -2707,10 +2819,12 @@ BaseResult Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
for (const ParsedAttr &AL : Attributes) {
if (AL.isInvalid() || AL.getKind() == ParsedAttr::IgnoredAttribute)
continue;
- Diag(AL.getLoc(), AL.getKind() == ParsedAttr::UnknownAttribute
- ? (unsigned)diag::warn_unknown_attribute_ignored
- : (unsigned)diag::err_base_specifier_attribute)
- << AL << AL.getRange();
+ if (AL.getKind() == ParsedAttr::UnknownAttribute)
+ Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
+ << AL << AL.getRange();
+ else
+ Diag(AL.getLoc(), diag::err_base_specifier_attribute)
+ << AL << AL.isRegularKeywordAttribute() << AL.getRange();
}
TypeSourceInfo *TInfo = nullptr;
@@ -3230,16 +3344,6 @@ static bool InitializationHasSideEffects(const FieldDecl &FD) {
return false;
}
-static const ParsedAttr *getMSPropertyAttr(const ParsedAttributesView &list) {
- ParsedAttributesView::const_iterator Itr =
- llvm::find_if(list, [](const ParsedAttr &AL) {
- return AL.isDeclspecPropertyAttribute();
- });
- if (Itr != list.end())
- return &*Itr;
- return nullptr;
-}
-
// Check if there is a field shadowing.
void Sema::CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
@@ -3317,7 +3421,7 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
bool isFunc = D.isDeclarationOfFunction();
const ParsedAttr *MSPropertyAttr =
- getMSPropertyAttr(D.getDeclSpec().getAttributes());
+ D.getDeclSpec().getAttributes().getMSPropertyAttr();
if (cast<CXXRecordDecl>(CurContext)->isInterface()) {
// The Microsoft extension __interface only permits public member functions
@@ -3575,12 +3679,12 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
}
if (VS.isOverrideSpecified())
- Member->addAttr(OverrideAttr::Create(Context, VS.getOverrideLoc(),
- AttributeCommonInfo::AS_Keyword));
+ Member->addAttr(OverrideAttr::Create(Context, VS.getOverrideLoc()));
if (VS.isFinalSpecified())
- Member->addAttr(FinalAttr::Create(
- Context, VS.getFinalLoc(), AttributeCommonInfo::AS_Keyword,
- static_cast<FinalAttr::Spelling>(VS.isFinalSpelledSealed())));
+ Member->addAttr(FinalAttr::Create(Context, VS.getFinalLoc(),
+ VS.isFinalSpelledSealed()
+ ? FinalAttr::Keyword_sealed
+ : FinalAttr::Keyword_final));
if (VS.getLastLocation().isValid()) {
// Update the end location of a method that has a virt-specifiers.
@@ -3979,7 +4083,7 @@ namespace {
}
llvm::SmallPtrSet<QualType, 4> UninitializedBaseClasses;
- for (auto I : RD->bases())
+ for (const auto &I : RD->bases())
UninitializedBaseClasses.insert(I.getType().getCanonicalType());
if (UninitializedFields.empty() && UninitializedBaseClasses.empty())
@@ -6007,9 +6111,9 @@ void AbstractUsageInfo::CheckType(const NamedDecl *D, TypeLoc TL,
/// Check for invalid uses of an abstract type in a function declaration.
static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
FunctionDecl *FD) {
- // No need to do the check on definitions, which require that
- // the return/param types be complete.
- if (FD->doesThisDeclarationHaveABody())
+ // Only definitions are required to refer to complete and
+ // non-abstract types.
+ if (!FD->doesThisDeclarationHaveABody())
return;
// For safety's sake, just ignore it if we don't have type source
@@ -6346,6 +6450,18 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
if (!ClassAttr)
return;
+ // MSVC allows imported or exported template classes that have UniqueExternal
+ // linkage. This occurs when the template class has been instantiated with
+ // a template parameter which itself has internal linkage.
+ // We drop the attribute to avoid exporting or importing any members.
+ if ((Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+ Context.getTargetInfo().getTriple().isPS()) &&
+ (!Class->isExternallyVisible() && Class->hasExternalFormalLinkage())) {
+ Class->dropAttr<DLLExportAttr>();
+ Class->dropAttr<DLLImportAttr>();
+ return;
+ }
+
if (!Class->isExternallyVisible()) {
Diag(Class->getLocation(), diag::err_attribute_dll_not_extern)
<< Class << ClassAttr;
@@ -7512,7 +7628,7 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
}
}
- const FunctionProtoType *Type = MD->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType *Type = MD->getType()->castAs<FunctionProtoType>();
bool CanHaveConstParam = false;
if (CSM == CXXCopyConstructor)
@@ -7754,6 +7870,10 @@ protected:
// followed by the non-static data members of C
for (FieldDecl *Field : Record->fields()) {
+ // C++23 [class.bit]p2:
+ // Unnamed bit-fields are not members ...
+ if (Field->isUnnamedBitfield())
+ continue;
// Recursively expand anonymous structs.
if (Field->isAnonymousStructOrUnion()) {
if (visitSubobjects(Results, Field->getType()->getAsCXXRecordDecl(),
@@ -8116,7 +8236,8 @@ private:
if (Diagnose == ExplainDeleted) {
S.Diag(Subobj.Loc, diag::note_defaulted_comparison_no_viable_function)
- << FD << (OO == OO_ExclaimEqual) << Subobj.Kind << Subobj.Decl;
+ << FD << (OO == OO_EqualEqual || OO == OO_ExclaimEqual)
+ << Subobj.Kind << Subobj.Decl;
// For a three-way comparison, list both the candidates for the
// original operator and the candidates for the synthesized operator.
@@ -8571,8 +8692,8 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
// C++2a [class.compare.default]p1:
// A defaulted comparison operator function for some class C shall be a
// non-template function declared in the member-specification of C that is
- // -- a non-static const member of C having one parameter of type
- // const C&, or
+ // -- a non-static const non-volatile member of C having one parameter of
+ // type const C& and either no ref-qualifier or the ref-qualifier &, or
// -- a friend of C having two parameters of type const C& or two
// parameters of type C.
@@ -8582,6 +8703,17 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
auto *MD = cast<CXXMethodDecl>(FD);
assert(!MD->isStatic() && "comparison function cannot be a static member");
+ if (MD->getRefQualifier() == RQ_RValue) {
+ Diag(MD->getLocation(), diag::err_ref_qualifier_comparison_operator);
+
+ // Remove the ref qualifier to recover.
+ const auto *FPT = MD->getType()->castAs<FunctionProtoType>();
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.RefQualifier = RQ_None;
+ MD->setType(Context.getFunctionType(FPT->getReturnType(),
+ FPT->getParamTypes(), EPI));
+ }
+
// If we're out-of-class, this is the class we're comparing.
if (!RD)
RD = MD->getParent();
@@ -8604,6 +8736,17 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
MD->setType(Context.getFunctionType(FPT->getReturnType(),
FPT->getParamTypes(), EPI));
}
+
+ if (MD->isVolatile()) {
+ Diag(MD->getLocation(), diag::err_volatile_comparison_operator);
+
+ // Remove the 'volatile' from the type to recover.
+ const auto *FPT = MD->getType()->castAs<FunctionProtoType>();
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.TypeQuals.removeVolatile();
+ MD->setType(Context.getFunctionType(FPT->getReturnType(),
+ FPT->getParamTypes(), EPI));
+ }
}
if (FD->getNumParams() != (IsMethod ? 1 : 2)) {
@@ -8617,8 +8760,7 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
const ParmVarDecl *KnownParm = nullptr;
for (const ParmVarDecl *Param : FD->parameters()) {
QualType ParmTy = Param->getType();
- if (ParmTy->isDependentType())
- continue;
+
if (!KnownParm) {
auto CTy = ParmTy;
// Is it `T const &`?
@@ -8804,12 +8946,25 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
// the requirements for a constexpr function [...]
// The only relevant requirements are that the parameter and return types are
// literal types. The remaining conditions are checked by the analyzer.
+ //
+ // We support P2448R2 in language modes earlier than C++23 as an extension.
+ // The concept of constexpr-compatible was removed.
+ // C++23 [dcl.fct.def.default]p3 [P2448R2]
+ // A function explicitly defaulted on its first declaration is implicitly
+ // inline, and is implicitly constexpr if it is constexpr-suitable.
+ // C++23 [dcl.constexpr]p3
+ // A function is constexpr-suitable if
+ // - it is not a coroutine, and
+ // - if the function is a constructor or destructor, its class does not
+ // have any virtual base classes.
if (FD->isConstexpr()) {
if (CheckConstexprReturnType(*this, FD, CheckConstexprKind::Diagnose) &&
CheckConstexprParameterTypes(*this, FD, CheckConstexprKind::Diagnose) &&
!Info.Constexpr) {
Diag(FD->getBeginLoc(),
- diag::err_incorrect_defaulted_comparison_constexpr)
+ getLangOpts().CPlusPlus23
+ ? diag::warn_cxx23_compat_defaulted_comparison_constexpr_mismatch
+ : diag::ext_defaulted_comparison_constexpr_mismatch)
<< FD->isImplicit() << (int)DCK << FD->isConsteval();
DefaultedComparisonAnalyzer(*this, RD, FD, DCK,
DefaultedComparisonAnalyzer::ExplainConstexpr)
@@ -9157,7 +9312,18 @@ bool SpecialMemberDeletionInfo::shouldDeleteForSubobjectCall(
// must be accessible and non-deleted, but need not be trivial. Such a
// destructor is never actually called, but is semantically checked as
// if it were.
- DiagKind = 4;
+ if (CSM == Sema::CXXDefaultConstructor) {
+ // [class.default.ctor]p2:
+ // A defaulted default constructor for class X is defined as deleted if
+ // - X is a union that has a variant member with a non-trivial default
+ // constructor and no variant member of X has a default member
+ // initializer
+ const auto *RD = cast<CXXRecordDecl>(Field->getParent());
+ if (!RD->hasInClassInitializer())
+ DiagKind = 4;
+ } else {
+ DiagKind = 4;
+ }
}
if (DiagKind == -1)
@@ -11054,8 +11220,8 @@ struct BadSpecifierDiagnoser {
/// Check the validity of a declarator that we parsed for a deduction-guide.
/// These aren't actually declarators in the grammar, so we need to check that
/// the user didn't specify any pieces that are not part of the deduction-guide
-/// grammar.
-void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
+/// grammar. Return true on invalid deduction-guide.
+bool Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC) {
TemplateName GuidedTemplate = D.getName().TemplateName.get().get();
TemplateDecl *GuidedTemplateDecl = GuidedTemplate.getAsTemplateDecl();
@@ -11105,7 +11271,7 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
}
if (D.isInvalidType())
- return;
+ return true;
// Check the declarator is simple enough.
bool FoundFunction = false;
@@ -11118,11 +11284,9 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
<< D.getSourceRange();
break;
}
- if (!Chunk.Fun.hasTrailingReturnType()) {
- Diag(D.getName().getBeginLoc(),
- diag::err_deduction_guide_no_trailing_return_type);
- break;
- }
+ if (!Chunk.Fun.hasTrailingReturnType())
+ return Diag(D.getName().getBeginLoc(),
+ diag::err_deduction_guide_no_trailing_return_type);
// Check that the return type is written as a specialization of
// the template specified as the deduction-guide's name.
@@ -11157,13 +11321,12 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
MightInstantiateToSpecialization = true;
}
- if (!AcceptableReturnType) {
- Diag(TSI->getTypeLoc().getBeginLoc(),
- diag::err_deduction_guide_bad_trailing_return_type)
- << GuidedTemplate << TSI->getType()
- << MightInstantiateToSpecialization
- << TSI->getTypeLoc().getSourceRange();
- }
+ if (!AcceptableReturnType)
+ return Diag(TSI->getTypeLoc().getBeginLoc(),
+ diag::err_deduction_guide_bad_trailing_return_type)
+ << GuidedTemplate << TSI->getType()
+ << MightInstantiateToSpecialization
+ << TSI->getTypeLoc().getSourceRange();
// Keep going to check that we don't have any inner declarator pieces (we
// could still have a function returning a pointer to a function).
@@ -11171,7 +11334,9 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
}
if (D.isFunctionDefinition())
+ // we can still create a valid deduction guide here.
Diag(D.getIdentifierLoc(), diag::err_deduction_guide_defines_function);
+ return false;
}
//===----------------------------------------------------------------------===//
@@ -11223,6 +11388,20 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
NamespaceDecl *PrevNS = nullptr;
if (II) {
+ // C++ [namespace.std]p7:
+ // A translation unit shall not declare namespace std to be an inline
+ // namespace (9.8.2).
+ //
+ // Precondition: the std namespace is in the file scope and is declared to
+ // be inline
+ auto DiagnoseInlineStdNS = [&]() {
+ assert(IsInline && II->isStr("std") &&
+ CurContext->getRedeclContext()->isTranslationUnit() &&
+ "Precondition of DiagnoseInlineStdNS not met");
+ Diag(InlineLoc, diag::err_inline_namespace_std)
+ << SourceRange(InlineLoc, InlineLoc.getLocWithOffset(6));
+ IsInline = false;
+ };
// C++ [namespace.def]p2:
// The identifier in an original-namespace-definition shall not
// have been previously defined in the declarative region in
@@ -11243,7 +11422,10 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
if (PrevNS) {
// This is an extended namespace definition.
- if (IsInline != PrevNS->isInline())
+ if (IsInline && II->isStr("std") &&
+ CurContext->getRedeclContext()->isTranslationUnit())
+ DiagnoseInlineStdNS();
+ else if (IsInline != PrevNS->isInline())
DiagnoseNamespaceInlineMismatch(*this, NamespaceLoc, Loc, II,
&IsInline, PrevNS);
} else if (PrevDecl) {
@@ -11255,6 +11437,8 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
// Continue on to push Namespc as current DeclContext and return it.
} else if (II->isStr("std") &&
CurContext->getRedeclContext()->isTranslationUnit()) {
+ if (IsInline)
+ DiagnoseInlineStdNS();
// This is the first "real" definition of the namespace "std", so update
// our cache of the "std" namespace to point at this definition.
PrevNS = getStdNamespace();
@@ -11386,21 +11570,6 @@ NamespaceDecl *Sema::getStdNamespace() const {
return cast_or_null<NamespaceDecl>(
StdNamespace.get(Context.getExternalSource()));
}
-
-NamespaceDecl *Sema::lookupStdExperimentalNamespace() {
- if (!StdExperimentalNamespaceCache) {
- if (auto Std = getStdNamespace()) {
- LookupResult Result(*this, &PP.getIdentifierTable().get("experimental"),
- SourceLocation(), LookupNamespaceName);
- if (!LookupQualifiedName(Result, Std) ||
- !(StdExperimentalNamespaceCache =
- Result.getAsSingle<NamespaceDecl>()))
- Result.suppressDiagnostics();
- }
- }
- return StdExperimentalNamespaceCache;
-}
-
namespace {
enum UnsupportedSTLSelect {
@@ -11549,6 +11718,10 @@ NamespaceDecl *Sema::getOrCreateStdNamespace() {
&PP.getIdentifierTable().get("std"),
/*PrevDecl=*/nullptr, /*Nested=*/false);
getStdNamespace()->setImplicit(true);
+ // We want the created NamespaceDecl to be available for redeclaration
+ // lookups, but not for regular name lookups.
+ Context.getTranslationUnitDecl()->addDecl(getStdNamespace());
+ getStdNamespace()->clearIdentifierNamespace();
}
return getStdNamespace();
@@ -15654,9 +15827,6 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
MarkFunctionReferenced(ConstructLoc, Constructor);
if (getLangOpts().CUDA && !CheckCUDACall(ConstructLoc, Constructor))
return ExprError();
- if (getLangOpts().SYCLIsDevice &&
- !checkSYCLDeviceFunction(ConstructLoc, Constructor))
- return ExprError();
return CheckForImmediateInvocation(
CXXConstructExpr::Create(
@@ -15684,7 +15854,11 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
return;
CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
-
+ // The result of `LookupDestructor` might be nullptr if the destructor is
+ // invalid, in which case it is marked as `IneligibleOrNotSelected` and
+ // will not be selected by `CXXRecordDecl::getDestructor()`.
+ if (!Destructor)
+ return;
// If this is an array, we'll require the destructor during initialization, so
// we can skip over this. We still want to emit exit-time destructor warnings
// though.
@@ -15714,7 +15888,8 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
}
}
- if (!VD->hasGlobalStorage()) return;
+ if (!VD->hasGlobalStorage() || !VD->needsDestruction(Context))
+ return;
// Emit warning for non-trivial dtor in global scope (a real global,
// class-static, function-static).
@@ -15971,7 +16146,7 @@ bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
if (MethodDecl->isStatic()) {
if (Op == OO_Call || Op == OO_Subscript)
Diag(FnDecl->getLocation(),
- (LangOpts.CPlusPlus2b
+ (LangOpts.CPlusPlus23
? diag::warn_cxx20_compat_operator_overload_static
: diag::ext_operator_overload_static))
<< FnDecl;
@@ -16012,7 +16187,7 @@ bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
}
if (FirstDefaultedParam) {
if (Op == OO_Subscript) {
- Diag(FnDecl->getLocation(), LangOpts.CPlusPlus2b
+ Diag(FnDecl->getLocation(), LangOpts.CPlusPlus23
? diag::ext_subscript_overload
: diag::error_subscript_overload)
<< FnDecl->getDeclName() << 1
@@ -16063,7 +16238,7 @@ bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
}
if (Op == OO_Subscript && NumParams != 2) {
- Diag(FnDecl->getLocation(), LangOpts.CPlusPlus2b
+ Diag(FnDecl->getLocation(), LangOpts.CPlusPlus23
? diag::ext_subscript_overload
: diag::error_subscript_overload)
<< FnDecl->getDeclName() << (NumParams == 1 ? 0 : 2);
@@ -16319,15 +16494,18 @@ bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
}
}
- StringRef LiteralName
- = FnDecl->getDeclName().getCXXLiteralIdentifier()->getName();
- if (LiteralName[0] != '_' &&
+ const IdentifierInfo *II = FnDecl->getDeclName().getCXXLiteralIdentifier();
+ ReservedLiteralSuffixIdStatus Status = II->isReservedLiteralSuffixId();
+ if (Status != ReservedLiteralSuffixIdStatus::NotReserved &&
!getSourceManager().isInSystemHeader(FnDecl->getLocation())) {
- // C++11 [usrlit.suffix]p1:
- // Literal suffix identifiers that do not start with an underscore
- // are reserved for future standardization.
+ // C++23 [usrlit.suffix]p1:
+ // Literal suffix identifiers that do not start with an underscore are
+ // reserved for future standardization. Literal suffix identifiers that
+ // contain a double underscore __ are reserved for use by C++
+ // implementations.
Diag(FnDecl->getLocation(), diag::warn_user_literal_reserved)
- << StringLiteralParser::isValidUDSuffix(getLangOpts(), LiteralName);
+ << static_cast<int>(Status)
+ << StringLiteralParser::isValidUDSuffix(getLangOpts(), II->getName());
}
return false;
@@ -16343,11 +16521,7 @@ Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc) {
StringLiteral *Lit = cast<StringLiteral>(LangStr);
- if (!Lit->isOrdinary()) {
- Diag(LangStr->getExprLoc(), diag::err_language_linkage_spec_not_ascii)
- << LangStr->getSourceRange();
- return nullptr;
- }
+ assert(Lit->isUnevaluated() && "Unexpected string literal kind");
StringRef Lang = Lit->getString();
LinkageSpecDecl::LanguageIDs Language;
@@ -16377,14 +16551,8 @@ Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
/// If the declaration is already in global module fragment, we don't
/// need to attach it again.
if (getLangOpts().CPlusPlusModules && isCurrentModulePurview()) {
- Module *GlobalModule =
- PushGlobalModuleFragment(ExternLoc, /*IsImplicit=*/true);
- /// According to [module.reach]p3.2,
- /// The declaration in global module fragment is reachable if it is not
- /// discarded. And the discarded declaration should be deleted. So it
- /// doesn't matter mark the declaration in global module fragment as
- /// reachable here.
- D->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ReachableWhenImported);
+ Module *GlobalModule = PushImplicitGlobalModuleFragment(
+ ExternLoc, /*IsExported=*/D->isInExportDeclContext());
D->setLocalOwningModule(GlobalModule);
}
@@ -16409,8 +16577,9 @@ Decl *Sema::ActOnFinishLinkageSpecification(Scope *S,
// LinkageSpec isn't in the module created by itself. So we don't
// need to pop it.
if (getLangOpts().CPlusPlusModules && getCurrentModule() &&
- getCurrentModule()->isGlobalModule() && getCurrentModule()->Parent)
- PopGlobalModuleFragment();
+ getCurrentModule()->isImplicitGlobalModule() &&
+ getCurrentModule()->Parent)
+ PopImplicitGlobalModuleFragment();
PopDeclContext();
return LinkageSpec;
@@ -16476,6 +16645,11 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
!BaseType->isDependentType() && RequireCompleteType(Loc, BaseType, DK))
Invalid = true;
+ if (!Invalid && BaseType.isWebAssemblyReferenceType()) {
+ Diag(Loc, diag::err_wasm_reftype_tc) << 1;
+ Invalid = true;
+ }
+
if (!Invalid && Mode != 1 && BaseType->isSizelessType()) {
Diag(Loc, diag::err_catch_sizeless) << (Mode == 2 ? 1 : 0) << BaseType;
Invalid = true;
@@ -16618,14 +16792,11 @@ Decl *Sema::ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc) {
- StringLiteral *AssertMessage =
- AssertMessageExpr ? cast<StringLiteral>(AssertMessageExpr) : nullptr;
-
if (DiagnoseUnexpandedParameterPack(AssertExpr, UPPC_StaticAssertExpression))
return nullptr;
return BuildStaticAssertDeclaration(StaticAssertLoc, AssertExpr,
- AssertMessage, RParenLoc, false);
+ AssertMessageExpr, RParenLoc, false);
}
/// Convert \V to a string we can present to the user in a diagnostic
@@ -16723,7 +16894,8 @@ static bool UsefulToPrintExpr(const Expr *E) {
/// Try to print more useful information about a failed static_assert
/// with expression \E
void Sema::DiagnoseStaticAssertDetails(const Expr *E) {
- if (const auto *Op = dyn_cast<BinaryOperator>(E)) {
+ if (const auto *Op = dyn_cast<BinaryOperator>(E);
+ Op && Op->getOpcode() != BO_LOr) {
const Expr *LHS = Op->getLHS()->IgnoreParenImpCasts();
const Expr *RHS = Op->getRHS()->IgnoreParenImpCasts();
@@ -16759,13 +16931,147 @@ void Sema::DiagnoseStaticAssertDetails(const Expr *E) {
}
}
+bool Sema::EvaluateStaticAssertMessageAsString(Expr *Message,
+ std::string &Result,
+ ASTContext &Ctx,
+ bool ErrorOnInvalidMessage) {
+ assert(Message);
+ assert(!Message->isTypeDependent() && !Message->isValueDependent() &&
+ "can't evaluate a dependant static assert message");
+
+ if (const auto *SL = dyn_cast<StringLiteral>(Message)) {
+ assert(SL->isUnevaluated() && "expected an unevaluated string");
+ Result.assign(SL->getString().begin(), SL->getString().end());
+ return true;
+ }
+
+ SourceLocation Loc = Message->getBeginLoc();
+ QualType T = Message->getType().getNonReferenceType();
+ auto *RD = T->getAsCXXRecordDecl();
+ if (!RD) {
+ Diag(Loc, diag::err_static_assert_invalid_message);
+ return false;
+ }
+
+ auto FindMember = [&](StringRef Member, bool &Empty,
+ bool Diag = false) -> std::optional<LookupResult> {
+ QualType ObjectType = Message->getType();
+ Expr::Classification ObjectClassification =
+ Message->Classify(getASTContext());
+
+ DeclarationName DN = PP.getIdentifierInfo(Member);
+ LookupResult MemberLookup(*this, DN, Loc, Sema::LookupMemberName);
+ LookupQualifiedName(MemberLookup, RD);
+ Empty = MemberLookup.empty();
+ OverloadCandidateSet Candidates(MemberLookup.getNameLoc(),
+ OverloadCandidateSet::CSK_Normal);
+ for (NamedDecl *D : MemberLookup) {
+ AddMethodCandidate(DeclAccessPair::make(D, D->getAccess()), ObjectType,
+ ObjectClassification, /*Args=*/{}, Candidates);
+ }
+ OverloadCandidateSet::iterator Best;
+ switch (Candidates.BestViableFunction(*this, Loc, Best)) {
+ case OR_Success:
+ return std::move(MemberLookup);
+ default:
+ if (Diag)
+ Candidates.NoteCandidates(
+ PartialDiagnosticAt(
+ Loc, PDiag(diag::err_static_assert_invalid_mem_fn_ret_ty)
+ << (Member == "data")),
+ *this, OCD_AllCandidates, /*Args=*/{});
+ }
+ return std::nullopt;
+ };
+
+ bool SizeNotFound, DataNotFound;
+ std::optional<LookupResult> SizeMember = FindMember("size", SizeNotFound);
+ std::optional<LookupResult> DataMember = FindMember("data", DataNotFound);
+ if (SizeNotFound || DataNotFound) {
+ Diag(Loc, diag::err_static_assert_missing_member_function)
+ << ((SizeNotFound && DataNotFound) ? 2
+ : SizeNotFound ? 0
+ : 1);
+ return false;
+ }
+
+ if (!SizeMember || !DataMember) {
+ if (!SizeMember)
+ FindMember("size", SizeNotFound, /*Diag=*/true);
+ if (!DataMember)
+ FindMember("data", DataNotFound, /*Diag=*/true);
+ return false;
+ }
+
+ auto BuildExpr = [&](LookupResult &LR) {
+ ExprResult Res = BuildMemberReferenceExpr(
+ Message, Message->getType(), Message->getBeginLoc(), false,
+ CXXScopeSpec(), SourceLocation(), nullptr, LR, nullptr, nullptr);
+ if (Res.isInvalid())
+ return ExprError();
+ Res = BuildCallExpr(nullptr, Res.get(), Loc, std::nullopt, Loc, nullptr,
+ false, true);
+ if (Res.isInvalid())
+ return ExprError();
+ if (Res.get()->isTypeDependent() || Res.get()->isValueDependent())
+ return ExprError();
+ return TemporaryMaterializationConversion(Res.get());
+ };
+
+ ExprResult SizeE = BuildExpr(*SizeMember);
+ ExprResult DataE = BuildExpr(*DataMember);
+
+ QualType SizeT = Context.getSizeType();
+ QualType ConstCharPtr =
+ Context.getPointerType(Context.getConstType(Context.CharTy));
+
+ ExprResult EvaluatedSize =
+ SizeE.isInvalid() ? ExprError()
+ : BuildConvertedConstantExpression(
+ SizeE.get(), SizeT, CCEK_StaticAssertMessageSize);
+ if (EvaluatedSize.isInvalid()) {
+ Diag(Loc, diag::err_static_assert_invalid_mem_fn_ret_ty) << /*size*/ 0;
+ return false;
+ }
+
+ ExprResult EvaluatedData =
+ DataE.isInvalid()
+ ? ExprError()
+ : BuildConvertedConstantExpression(DataE.get(), ConstCharPtr,
+ CCEK_StaticAssertMessageData);
+ if (EvaluatedData.isInvalid()) {
+ Diag(Loc, diag::err_static_assert_invalid_mem_fn_ret_ty) << /*data*/ 1;
+ return false;
+ }
+
+ if (!ErrorOnInvalidMessage &&
+ Diags.isIgnored(diag::warn_static_assert_message_constexpr, Loc))
+ return true;
+
+ Expr::EvalResult Status;
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ Status.Diag = &Notes;
+ if (!Message->EvaluateCharRangeAsString(Result, EvaluatedSize.get(),
+ EvaluatedData.get(), Ctx, Status) ||
+ !Notes.empty()) {
+ Diag(Message->getBeginLoc(),
+ ErrorOnInvalidMessage ? diag::err_static_assert_message_constexpr
+ : diag::warn_static_assert_message_constexpr);
+ for (const auto &Note : Notes)
+ Diag(Note.first, Note.second);
+ return !ErrorOnInvalidMessage;
+ }
+ return true;
+}
+
Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
- Expr *AssertExpr,
- StringLiteral *AssertMessage,
+ Expr *AssertExpr, Expr *AssertMessage,
SourceLocation RParenLoc,
bool Failed) {
assert(AssertExpr != nullptr && "Expected non-null condition");
if (!AssertExpr->isTypeDependent() && !AssertExpr->isValueDependent() &&
+ (!AssertMessage || (!AssertMessage->isTypeDependent() &&
+ !AssertMessage->isValueDependent())) &&
!Failed) {
// In a static_assert-declaration, the constant-expression shall be a
// constant expression that can be contextually converted to bool.
@@ -16799,17 +17105,32 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
FoldKind).isInvalid())
Failed = true;
- if (!Failed && !Cond) {
+ // If the static_assert passes, only verify that
+ // the message is grammatically valid without evaluating it.
+ if (!Failed && AssertMessage && Cond.getBoolValue()) {
+ std::string Str;
+ EvaluateStaticAssertMessageAsString(AssertMessage, Str, Context,
+ /*ErrorOnInvalidMessage=*/false);
+ }
+
+ // CWG2518
+ // [dcl.pre]/p10 If [...] the expression is evaluated in the context of a
+ // template definition, the declaration has no effect.
+ bool InTemplateDefinition =
+ getLangOpts().CPlusPlus && CurContext->isDependentContext();
+
+ if (!Failed && !Cond && !InTemplateDefinition) {
SmallString<256> MsgBuffer;
llvm::raw_svector_ostream Msg(MsgBuffer);
+ bool HasMessage = AssertMessage;
if (AssertMessage) {
- const auto *MsgStr = cast<StringLiteral>(AssertMessage);
- if (MsgStr->isOrdinary())
- Msg << MsgStr->getString();
- else
- MsgStr->printPretty(Msg, nullptr, getPrintingPolicy());
+ std::string Str;
+ HasMessage =
+ EvaluateStaticAssertMessageAsString(
+ AssertMessage, Str, Context, /*ErrorOnInvalidMessage=*/true) ||
+ !Str.empty();
+ Msg << Str;
}
-
Expr *InnerCond = nullptr;
std::string InnerCondDescription;
std::tie(InnerCond, InnerCondDescription) =
@@ -16817,20 +17138,22 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
if (InnerCond && isa<ConceptSpecializationExpr>(InnerCond)) {
// Drill down into concept specialization expressions to see why they
// weren't satisfied.
- Diag(StaticAssertLoc, diag::err_static_assert_failed)
- << !AssertMessage << Msg.str() << AssertExpr->getSourceRange();
+ Diag(AssertExpr->getBeginLoc(), diag::err_static_assert_failed)
+ << !HasMessage << Msg.str() << AssertExpr->getSourceRange();
ConstraintSatisfaction Satisfaction;
if (!CheckConstraintSatisfaction(InnerCond, Satisfaction))
DiagnoseUnsatisfiedConstraint(Satisfaction);
} else if (InnerCond && !isa<CXXBoolLiteralExpr>(InnerCond)
&& !isa<IntegerLiteral>(InnerCond)) {
- Diag(StaticAssertLoc, diag::err_static_assert_requirement_failed)
- << InnerCondDescription << !AssertMessage
- << Msg.str() << InnerCond->getSourceRange();
+ Diag(InnerCond->getBeginLoc(),
+ diag::err_static_assert_requirement_failed)
+ << InnerCondDescription << !HasMessage << Msg.str()
+ << InnerCond->getSourceRange();
DiagnoseStaticAssertDetails(InnerCond);
} else {
- Diag(StaticAssertLoc, diag::err_static_assert_failed)
- << !AssertMessage << Msg.str() << AssertExpr->getSourceRange();
+ Diag(AssertExpr->getBeginLoc(), diag::err_static_assert_failed)
+ << !HasMessage << Msg.str() << AssertExpr->getSourceRange();
+ PrintContextStack();
}
Failed = true;
}
@@ -17934,7 +18257,7 @@ void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
return;
// Do not mark as used if compiling for the device outside of the target
// region.
- if (TUKind != TU_Prefix && LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
+ if (TUKind != TU_Prefix && LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice &&
!isInOpenMPDeclareTargetContext() &&
!isInOpenMPTargetExecutionDirective()) {
if (!DefinitionRequired)
@@ -17982,7 +18305,7 @@ void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
// immediately. For all other classes, we mark their virtual members
// at the end of the translation unit.
if (Class->isLocalClass())
- MarkVirtualMembersReferenced(Loc, Class);
+ MarkVirtualMembersReferenced(Loc, Class->getDefinition());
else
VTableUses.push_back(std::make_pair(Class, Loc));
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
index a5a57c38bb48..9b7ff5ff8251 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -172,6 +172,12 @@ bool Sema::CheckSpecifiedExceptionType(QualType &T, SourceRange Range) {
RequireCompleteType(Range.getBegin(), PointeeT, DiagID, Kind, Range))
return ReturnValueOnError;
+ // WebAssembly reference types can't be used in exception specifications.
+ if (PointeeT.isWebAssemblyReferenceType()) {
+ Diag(Range.getBegin(), diag::err_wasm_reftype_exception_spec);
+ return true;
+ }
+
// The MSVC compatibility mode doesn't extend to sizeless types,
// so diagnose them separately.
if (PointeeT->isSizelessType() && Kind != 1) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
index 9d865f487098..2716b6677105 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
@@ -41,6 +41,7 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Designator.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
@@ -308,8 +309,6 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
if (getLangOpts().CUDA && !CheckCUDACall(Loc, FD))
return true;
- if (getLangOpts().SYCLIsDevice && !checkSYCLDeviceFunction(Loc, FD))
- return true;
}
if (auto *MD = dyn_cast<CXXMethodDecl>(D)) {
@@ -375,10 +374,21 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
diagnoseUseOfInternalDeclInInlineFunction(*this, D, Loc);
+ if (D->hasAttr<AvailableOnlyInDefaultEvalMethodAttr>()) {
+ if (getLangOpts().getFPEvalMethod() !=
+ LangOptions::FPEvalMethodKind::FEM_UnsetOnCommandLine &&
+ PP.getLastFPEvalPragmaLocation().isValid() &&
+ PP.getCurrentFPEvalMethod() != getLangOpts().getFPEvalMethod())
+ Diag(D->getLocation(),
+ diag::err_type_available_only_in_default_eval_method)
+ << D->getName();
+ }
+
if (auto *VD = dyn_cast<ValueDecl>(D))
checkTypeSupport(VD->getType(), Loc, VD);
- if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice)) {
+ if (LangOpts.SYCLIsDevice ||
+ (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice)) {
if (!Context.getTargetInfo().isTLSSupported())
if (const auto *VD = dyn_cast<VarDecl>(D))
if (VD->getTLSKind() != VarDecl::TLS_None)
@@ -940,6 +950,11 @@ Sema::VarArgKind Sema::isValidVarArgType(const QualType &Ty) {
if (Ty.isDestructedType() == QualType::DK_nontrivial_c_struct)
return VAK_Invalid;
+ if (Context.getTargetInfo().getTriple().isWasm() &&
+ Ty.isWebAssemblyReferenceType()) {
+ return VAK_Invalid;
+ }
+
if (Ty.isCXX98PODType(Context))
return VAK_Valid;
@@ -1607,13 +1622,10 @@ QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
//===----------------------------------------------------------------------===//
-ExprResult
-Sema::ActOnGenericSelectionExpr(SourceLocation KeyLoc,
- SourceLocation DefaultLoc,
- SourceLocation RParenLoc,
- Expr *ControllingExpr,
- ArrayRef<ParsedType> ArgTypes,
- ArrayRef<Expr *> ArgExprs) {
+ExprResult Sema::ActOnGenericSelectionExpr(
+ SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc,
+ bool PredicateIsExpr, void *ControllingExprOrType,
+ ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs) {
unsigned NumAssocs = ArgTypes.size();
assert(NumAssocs == ArgExprs.size());
@@ -1625,42 +1637,64 @@ Sema::ActOnGenericSelectionExpr(SourceLocation KeyLoc,
Types[i] = nullptr;
}
- ExprResult ER =
- CreateGenericSelectionExpr(KeyLoc, DefaultLoc, RParenLoc, ControllingExpr,
- llvm::ArrayRef(Types, NumAssocs), ArgExprs);
+ // If we have a controlling type, we need to convert it from a parsed type
+ // into a semantic type and then pass that along.
+ if (!PredicateIsExpr) {
+ TypeSourceInfo *ControllingType;
+ (void)GetTypeFromParser(ParsedType::getFromOpaquePtr(ControllingExprOrType),
+ &ControllingType);
+ assert(ControllingType && "couldn't get the type out of the parser");
+ ControllingExprOrType = ControllingType;
+ }
+
+ ExprResult ER = CreateGenericSelectionExpr(
+ KeyLoc, DefaultLoc, RParenLoc, PredicateIsExpr, ControllingExprOrType,
+ llvm::ArrayRef(Types, NumAssocs), ArgExprs);
delete [] Types;
return ER;
}
-ExprResult
-Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
- SourceLocation DefaultLoc,
- SourceLocation RParenLoc,
- Expr *ControllingExpr,
- ArrayRef<TypeSourceInfo *> Types,
- ArrayRef<Expr *> Exprs) {
+ExprResult Sema::CreateGenericSelectionExpr(
+ SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc,
+ bool PredicateIsExpr, void *ControllingExprOrType,
+ ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs) {
unsigned NumAssocs = Types.size();
assert(NumAssocs == Exprs.size());
-
- // Decay and strip qualifiers for the controlling expression type, and handle
- // placeholder type replacement. See committee discussion from WG14 DR423.
- {
+ assert(ControllingExprOrType &&
+ "Must have either a controlling expression or a controlling type");
+
+ Expr *ControllingExpr = nullptr;
+ TypeSourceInfo *ControllingType = nullptr;
+ if (PredicateIsExpr) {
+ // Decay and strip qualifiers for the controlling expression type, and
+ // handle placeholder type replacement. See committee discussion from WG14
+ // DR423.
EnterExpressionEvaluationContext Unevaluated(
*this, Sema::ExpressionEvaluationContext::Unevaluated);
- ExprResult R = DefaultFunctionArrayLvalueConversion(ControllingExpr);
+ ExprResult R = DefaultFunctionArrayLvalueConversion(
+ reinterpret_cast<Expr *>(ControllingExprOrType));
if (R.isInvalid())
return ExprError();
ControllingExpr = R.get();
+ } else {
+ // The extension form uses the type directly rather than converting it.
+ ControllingType = reinterpret_cast<TypeSourceInfo *>(ControllingExprOrType);
+ if (!ControllingType)
+ return ExprError();
}
bool TypeErrorFound = false,
- IsResultDependent = ControllingExpr->isTypeDependent(),
- ContainsUnexpandedParameterPack
- = ControllingExpr->containsUnexpandedParameterPack();
+ IsResultDependent = ControllingExpr
+ ? ControllingExpr->isTypeDependent()
+ : ControllingType->getType()->isDependentType(),
+ ContainsUnexpandedParameterPack =
+ ControllingExpr
+ ? ControllingExpr->containsUnexpandedParameterPack()
+ : ControllingType->getType()->containsUnexpandedParameterPack();
// The controlling expression is an unevaluated operand, so side effects are
// likely unintended.
- if (!inTemplateInstantiation() && !IsResultDependent &&
+ if (!inTemplateInstantiation() && !IsResultDependent && ControllingExpr &&
ControllingExpr->HasSideEffects(Context, false))
Diag(ControllingExpr->getExprLoc(),
diag::warn_side_effects_unevaluated_context);
@@ -1676,16 +1710,24 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
if (Types[i]->getType()->isDependentType()) {
IsResultDependent = true;
} else {
+ // We relax the restriction on use of incomplete types and non-object
+ // types with the type-based extension of _Generic. Allowing incomplete
+ // objects means those can be used as "tags" for a type-safe way to map
+ // to a value. Similarly, matching on function types rather than
+ // function pointer types can be useful. However, the restriction on VM
+ // types makes sense to retain as there are open questions about how
+ // the selection can be made at compile time.
+ //
// C11 6.5.1.1p2 "The type name in a generic association shall specify a
// complete object type other than a variably modified type."
unsigned D = 0;
- if (Types[i]->getType()->isIncompleteType())
+ if (ControllingExpr && Types[i]->getType()->isIncompleteType())
D = diag::err_assoc_type_incomplete;
- else if (!Types[i]->getType()->isObjectType())
+ else if (ControllingExpr && !Types[i]->getType()->isObjectType())
D = diag::err_assoc_type_nonobject;
else if (Types[i]->getType()->isVariablyModifiedType())
D = diag::err_assoc_type_variably_modified;
- else {
+ else if (ControllingExpr) {
// Because the controlling expression undergoes lvalue conversion,
// array conversion, and function conversion, an association which is
// of array type, function type, or is qualified can never be
@@ -1700,6 +1742,10 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
// The result of these rules is that all qualified types in an
// association in C are unreachable, and in C++, only qualified non-
// class types are unreachable.
+ //
+ // NB: this does not apply when the first operand is a type rather
+ // than an expression, because the type form does not undergo
+ // conversion.
unsigned Reason = 0;
QualType QT = Types[i]->getType();
if (QT->isArrayType())
@@ -1746,10 +1792,15 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
// If we determined that the generic selection is result-dependent, don't
// try to compute the result expression.
- if (IsResultDependent)
- return GenericSelectionExpr::Create(Context, KeyLoc, ControllingExpr, Types,
+ if (IsResultDependent) {
+ if (ControllingExpr)
+ return GenericSelectionExpr::Create(Context, KeyLoc, ControllingExpr,
+ Types, Exprs, DefaultLoc, RParenLoc,
+ ContainsUnexpandedParameterPack);
+ return GenericSelectionExpr::Create(Context, KeyLoc, ControllingType, Types,
Exprs, DefaultLoc, RParenLoc,
ContainsUnexpandedParameterPack);
+ }
SmallVector<unsigned, 1> CompatIndices;
unsigned DefaultIndex = -1U;
@@ -1759,22 +1810,42 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
for (unsigned i = 0; i < NumAssocs; ++i) {
if (!Types[i])
DefaultIndex = i;
- else if (Context.typesAreCompatible(
+ else if (ControllingExpr &&
+ Context.typesAreCompatible(
ControllingExpr->getType().getCanonicalType(),
- Types[i]->getType()))
+ Types[i]->getType()))
+ CompatIndices.push_back(i);
+ else if (ControllingType &&
+ Context.typesAreCompatible(
+ ControllingType->getType().getCanonicalType(),
+ Types[i]->getType()))
CompatIndices.push_back(i);
}
+ auto GetControllingRangeAndType = [](Expr *ControllingExpr,
+ TypeSourceInfo *ControllingType) {
+ // We strip parens here because the controlling expression is typically
+ // parenthesized in macro definitions.
+ if (ControllingExpr)
+ ControllingExpr = ControllingExpr->IgnoreParens();
+
+ SourceRange SR = ControllingExpr
+ ? ControllingExpr->getSourceRange()
+ : ControllingType->getTypeLoc().getSourceRange();
+ QualType QT = ControllingExpr ? ControllingExpr->getType()
+ : ControllingType->getType();
+
+ return std::make_pair(SR, QT);
+ };
+
// C11 6.5.1.1p2 "The controlling expression of a generic selection shall have
// type compatible with at most one of the types named in its generic
// association list."
if (CompatIndices.size() > 1) {
- // We strip parens here because the controlling expression is typically
- // parenthesized in macro definitions.
- ControllingExpr = ControllingExpr->IgnoreParens();
- Diag(ControllingExpr->getBeginLoc(), diag::err_generic_sel_multi_match)
- << ControllingExpr->getSourceRange() << ControllingExpr->getType()
- << (unsigned)CompatIndices.size();
+ auto P = GetControllingRangeAndType(ControllingExpr, ControllingType);
+ SourceRange SR = P.first;
+ Diag(SR.getBegin(), diag::err_generic_sel_multi_match)
+ << SR << P.second << (unsigned)CompatIndices.size();
for (unsigned I : CompatIndices) {
Diag(Types[I]->getTypeLoc().getBeginLoc(),
diag::note_compat_assoc)
@@ -1788,11 +1859,9 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
// its controlling expression shall have type compatible with exactly one of
// the types named in its generic association list."
if (DefaultIndex == -1U && CompatIndices.size() == 0) {
- // We strip parens here because the controlling expression is typically
- // parenthesized in macro definitions.
- ControllingExpr = ControllingExpr->IgnoreParens();
- Diag(ControllingExpr->getBeginLoc(), diag::err_generic_sel_no_match)
- << ControllingExpr->getSourceRange() << ControllingExpr->getType();
+ auto P = GetControllingRangeAndType(ControllingExpr, ControllingType);
+ SourceRange SR = P.first;
+ Diag(SR.getBegin(), diag::err_generic_sel_no_match) << SR << P.second;
return ExprError();
}
@@ -1804,8 +1873,13 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
unsigned ResultIndex =
CompatIndices.size() ? CompatIndices[0] : DefaultIndex;
+ if (ControllingExpr) {
+ return GenericSelectionExpr::Create(
+ Context, KeyLoc, ControllingExpr, Types, Exprs, DefaultLoc, RParenLoc,
+ ContainsUnexpandedParameterPack, ResultIndex);
+ }
return GenericSelectionExpr::Create(
- Context, KeyLoc, ControllingExpr, Types, Exprs, DefaultLoc, RParenLoc,
+ Context, KeyLoc, ControllingType, Types, Exprs, DefaultLoc, RParenLoc,
ContainsUnexpandedParameterPack, ResultIndex);
}
@@ -1848,6 +1922,30 @@ static ExprResult BuildCookedLiteralOperatorCall(Sema &S, Scope *Scope,
return S.BuildLiteralOperatorCall(R, OpNameInfo, Args, LitEndLoc);
}
+ExprResult Sema::ActOnUnevaluatedStringLiteral(ArrayRef<Token> StringToks) {
+ StringLiteralParser Literal(StringToks, PP,
+ StringLiteralEvalMethod::Unevaluated);
+ if (Literal.hadError)
+ return ExprError();
+
+ SmallVector<SourceLocation, 4> StringTokLocs;
+ for (const Token &Tok : StringToks)
+ StringTokLocs.push_back(Tok.getLocation());
+
+ StringLiteral *Lit = StringLiteral::Create(
+ Context, Literal.GetString(), StringLiteral::Unevaluated, false, {},
+ &StringTokLocs[0], StringTokLocs.size());
+
+ if (!Literal.getUDSuffix().empty()) {
+ SourceLocation UDSuffixLoc =
+ getUDSuffixLoc(*this, StringTokLocs[Literal.getUDSuffixToken()],
+ Literal.getUDSuffixOffset());
+ return ExprError(Diag(UDSuffixLoc, diag::err_invalid_string_udl));
+ }
+
+ return Lit;
+}
+
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz"). The result string has to handle string
/// concatenation ([C99 5.1.1.2, translation phase #6]), so it may come from
@@ -2102,9 +2200,9 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
// b) if the function is a defaulted comparison, we can use the body we
// build when defining it as input to the exception specification
// computation rather than computing a new body.
- if (auto *FPT = Ty->getAs<FunctionProtoType>()) {
+ if (const auto *FPT = Ty->getAs<FunctionProtoType>()) {
if (isUnresolvedExceptionSpec(FPT->getExceptionSpecType())) {
- if (auto *NewFPT = ResolveExceptionSpec(NameInfo.getLoc(), FPT))
+ if (const auto *NewFPT = ResolveExceptionSpec(NameInfo.getLoc(), FPT))
E->setType(Context.getQualifiedType(NewFPT, Ty.getQualifiers()));
}
}
@@ -2114,8 +2212,8 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, E->getBeginLoc()))
getCurFunction()->recordUseOfWeak(E);
- FieldDecl *FD = dyn_cast<FieldDecl>(D);
- if (IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(D))
+ const auto *FD = dyn_cast<FieldDecl>(D);
+ if (const auto *IFD = dyn_cast<IndirectFieldDecl>(D))
FD = IFD->getAnonField();
if (FD) {
UnusedPrivateFields.remove(FD);
@@ -2126,8 +2224,8 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
// C++ [expr.prim]/8: The expression [...] is a bit-field if the identifier
// designates a bit-field.
- if (auto *BD = dyn_cast<BindingDecl>(D))
- if (auto *BE = BD->getBinding())
+ if (const auto *BD = dyn_cast<BindingDecl>(D))
+ if (const auto *BE = BD->getBinding())
E->setObjectKind(BE->getObjectKind());
return E;
@@ -2206,7 +2304,7 @@ static void emitEmptyLookupTypoDiagnostic(
///
/// Return \c true if the error is unrecoverable, or \c false if the caller
/// should attempt to recover using these lookup results.
-bool Sema::DiagnoseDependentMemberLookup(LookupResult &R) {
+bool Sema::DiagnoseDependentMemberLookup(const LookupResult &R) {
// During a default argument instantiation the CurContext points
// to a CXXMethodDecl; but we can't apply a this-> fixit inside a
// function parameter list, hence add an explicit check.
@@ -2214,7 +2312,7 @@ bool Sema::DiagnoseDependentMemberLookup(LookupResult &R) {
!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.back().Kind ==
CodeSynthesisContext::DefaultFunctionArgumentInstantiation;
- CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext);
+ const auto *CurMethod = dyn_cast<CXXMethodDecl>(CurContext);
bool isInstance = CurMethod && CurMethod->isInstance() &&
R.getNamingClass() == CurMethod->getParent() &&
!isDefaultArgument;
@@ -2246,7 +2344,7 @@ bool Sema::DiagnoseDependentMemberLookup(LookupResult &R) {
Diag(R.getNameLoc(), DiagID) << R.getLookupName();
}
- for (NamedDecl *D : R)
+ for (const NamedDecl *D : R)
Diag(D->getLocation(), NoteID);
// Return true if we are inside a default argument instantiation
@@ -3004,7 +3102,7 @@ Sema::PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member) {
- CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Member->getDeclContext());
+ const auto *RD = dyn_cast<CXXRecordDecl>(Member->getDeclContext());
if (!RD)
return From;
@@ -3029,7 +3127,7 @@ Sema::PerformObjectMemberConversion(Expr *From,
DestType = DestRecordType;
FromRecordType = FromType;
}
- } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member)) {
+ } else if (const auto *Method = dyn_cast<CXXMethodDecl>(Member)) {
if (Method->isStatic())
return From;
@@ -3149,7 +3247,7 @@ bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS,
// Turn off ADL when we find certain kinds of declarations during
// normal lookup:
- for (NamedDecl *D : R) {
+ for (const NamedDecl *D : R) {
// C++0x [basic.lookup.argdep]p3:
// -- a declaration of a class member
// Since using decls preserve this property, we check this on the
@@ -3172,9 +3270,7 @@ bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS,
// -- a declaration that is neither a function or a function
// template
// And also for builtin functions.
- if (isa<FunctionDecl>(D)) {
- FunctionDecl *FDecl = cast<FunctionDecl>(D);
-
+ if (const auto *FDecl = dyn_cast<FunctionDecl>(D)) {
// But also builtin functions.
if (FDecl->getBuiltinID() && FDecl->isImplicit())
return false;
@@ -3308,10 +3404,10 @@ ExprResult Sema::BuildDeclarationNameExpr(
// Handle members of anonymous structs and unions. If we got here,
// and the reference is to a class member indirect field, then this
// must be the subject of a pointer-to-member expression.
- if (IndirectFieldDecl *indirectField = dyn_cast<IndirectFieldDecl>(VD))
- if (!indirectField->isCXXClassMember())
- return BuildAnonymousStructUnionMemberReference(SS, NameInfo.getLoc(),
- indirectField);
+ if (auto *IndirectField = dyn_cast<IndirectFieldDecl>(VD);
+ IndirectField && !IndirectField->isCXXClassMember())
+ return BuildAnonymousStructUnionMemberReference(SS, NameInfo.getLoc(),
+ IndirectField);
QualType type = VD->getType();
if (type.isNull())
@@ -3577,7 +3673,8 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
}
}
- return PredefinedExpr::Create(Context, Loc, ResTy, IK, SL);
+ return PredefinedExpr::Create(Context, Loc, ResTy, IK, LangOpts.MicrosoftExt,
+ SL);
}
ExprResult Sema::BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
@@ -3774,7 +3871,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
if (Literal.hasUDSuffix()) {
// We're building a user-defined literal.
- IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix());
+ const IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix());
SourceLocation UDSuffixLoc =
getUDSuffixLoc(*this, Tok.getLocation(), Literal.getUDSuffixOffset());
@@ -3953,13 +4050,13 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
} else {
QualType Ty;
- // 'z/uz' literals are a C++2b feature.
+ // 'z/uz' literals are a C++23 feature.
if (Literal.isSizeT)
Diag(Tok.getLocation(), getLangOpts().CPlusPlus
- ? getLangOpts().CPlusPlus2b
+ ? getLangOpts().CPlusPlus23
? diag::warn_cxx20_compat_size_t_suffix
- : diag::ext_cxx2b_size_t_suffix
- : diag::err_cxx2b_size_t_suffix);
+ : diag::ext_cxx23_size_t_suffix
+ : diag::err_cxx23_size_t_suffix);
// 'wb/uwb' literals are a C2x feature. We support _BitInt as a type in C++,
// but we do not currently support the suffix in C++ mode because it's not
@@ -4039,7 +4136,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
Ty = Context.getBitIntType(Literal.isUnsigned, Width);
}
- // Check C++2b size_t literals.
+ // Check C++23 size_t literals.
if (Literal.isSizeT) {
assert(!Literal.MicrosoftInteger &&
"size_t literals can't be Microsoft literals");
@@ -4237,13 +4334,13 @@ static bool CheckObjCTraitOperandConstraints(Sema &S, QualType T,
/// Check whether E is a pointer from a decayed array type (the decayed
/// pointer type is equal to T) and emit a warning if it is.
static void warnOnSizeofOnArrayDecay(Sema &S, SourceLocation Loc, QualType T,
- Expr *E) {
+ const Expr *E) {
// Don't warn if the operation changed the type.
if (T != E->getType())
return;
// Now look for array decays.
- ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E);
+ const auto *ICE = dyn_cast<ImplicitCastExpr>(E);
if (!ICE || ICE->getCastKind() != CK_ArrayToPointerDecay)
return;
@@ -4294,6 +4391,15 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
E->getSourceRange(), ExprKind))
return false;
+ // WebAssembly tables are always illegal operands to unary expressions and
+ // type traits.
+ if (Context.getTargetInfo().getTriple().isWasm() &&
+ E->getType()->isWebAssemblyTableType()) {
+ Diag(E->getExprLoc(), diag::err_wasm_table_invalid_uett_operand)
+ << getTraitSpelling(ExprKind);
+ return true;
+ }
+
// 'alignof' applied to an expression only requires the base element type of
// the expression to be complete. 'sizeof' requires the expression's type to
// be complete (and will attempt to complete it if it's an array of unknown
@@ -4326,8 +4432,8 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
return true;
if (ExprKind == UETT_SizeOf) {
- if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
- if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(DeclRef->getFoundDecl())) {
+ if (const auto *DeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(DeclRef->getFoundDecl())) {
QualType OType = PVD->getOriginalType();
QualType Type = PVD->getType();
if (Type->isPointerType() && OType->isArrayType()) {
@@ -4341,7 +4447,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
// Warn on "sizeof(array op x)" and "sizeof(x op array)", where the array
// decays into a pointer and returns an unintended result. This is most
// likely a typo for "sizeof(array) op x".
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E->IgnoreParens())) {
+ if (const auto *BO = dyn_cast<BinaryOperator>(E->IgnoreParens())) {
warnOnSizeofOnArrayDecay(*this, BO->getOperatorLoc(), BO->getType(),
BO->getLHS());
warnOnSizeofOnArrayDecay(*this, BO->getOperatorLoc(), BO->getType(),
@@ -4352,70 +4458,6 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
return false;
}
-/// Check the constraints on operands to unary expression and type
-/// traits.
-///
-/// This will complete any types necessary, and validate the various constraints
-/// on those operands.
-///
-/// The UsualUnaryConversions() function is *not* called by this routine.
-/// C99 6.3.2.1p[2-4] all state:
-/// Except when it is the operand of the sizeof operator ...
-///
-/// C++ [expr.sizeof]p4
-/// The lvalue-to-rvalue, array-to-pointer, and function-to-pointer
-/// standard conversions are not applied to the operand of sizeof.
-///
-/// This policy is followed for all of the unary trait expressions.
-bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
- SourceLocation OpLoc,
- SourceRange ExprRange,
- UnaryExprOrTypeTrait ExprKind) {
- if (ExprType->isDependentType())
- return false;
-
- // C++ [expr.sizeof]p2:
- // When applied to a reference or a reference type, the result
- // is the size of the referenced type.
- // C++11 [expr.alignof]p3:
- // When alignof is applied to a reference type, the result
- // shall be the alignment of the referenced type.
- if (const ReferenceType *Ref = ExprType->getAs<ReferenceType>())
- ExprType = Ref->getPointeeType();
-
- // C11 6.5.3.4/3, C++11 [expr.alignof]p3:
- // When alignof or _Alignof is applied to an array type, the result
- // is the alignment of the element type.
- if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf ||
- ExprKind == UETT_OpenMPRequiredSimdAlign)
- ExprType = Context.getBaseElementType(ExprType);
-
- if (ExprKind == UETT_VecStep)
- return CheckVecStepTraitOperandType(*this, ExprType, OpLoc, ExprRange);
-
- // Explicitly list some types as extensions.
- if (!CheckExtensionTraitOperandType(*this, ExprType, OpLoc, ExprRange,
- ExprKind))
- return false;
-
- if (RequireCompleteSizedType(
- OpLoc, ExprType, diag::err_sizeof_alignof_incomplete_or_sizeless_type,
- getTraitSpelling(ExprKind), ExprRange))
- return true;
-
- if (ExprType->isFunctionType()) {
- Diag(OpLoc, diag::err_sizeof_alignof_function_type)
- << getTraitSpelling(ExprKind) << ExprRange;
- return true;
- }
-
- if (CheckObjCTraitOperandConstraints(*this, ExprType, OpLoc, ExprRange,
- ExprKind))
- return true;
-
- return false;
-}
-
static bool CheckAlignOfExpr(Sema &S, Expr *E, UnaryExprOrTypeTrait ExprKind) {
// Cannot know anything else if the expression is dependent.
if (E->isTypeDependent())
@@ -4593,23 +4635,78 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
} while (!T.isNull() && T->isVariablyModifiedType());
}
-/// Build a sizeof or alignof expression given a type operand.
-ExprResult
-Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
- SourceLocation OpLoc,
- UnaryExprOrTypeTrait ExprKind,
- SourceRange R) {
- if (!TInfo)
- return ExprError();
+/// Check the constraints on operands to unary expression and type
+/// traits.
+///
+/// This will complete any types necessary, and validate the various constraints
+/// on those operands.
+///
+/// The UsualUnaryConversions() function is *not* called by this routine.
+/// C99 6.3.2.1p[2-4] all state:
+/// Except when it is the operand of the sizeof operator ...
+///
+/// C++ [expr.sizeof]p4
+/// The lvalue-to-rvalue, array-to-pointer, and function-to-pointer
+/// standard conversions are not applied to the operand of sizeof.
+///
+/// This policy is followed for all of the unary trait expressions.
+bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
+ SourceLocation OpLoc,
+ SourceRange ExprRange,
+ UnaryExprOrTypeTrait ExprKind,
+ StringRef KWName) {
+ if (ExprType->isDependentType())
+ return false;
- QualType T = TInfo->getType();
+ // C++ [expr.sizeof]p2:
+ // When applied to a reference or a reference type, the result
+ // is the size of the referenced type.
+ // C++11 [expr.alignof]p3:
+ // When alignof is applied to a reference type, the result
+ // shall be the alignment of the referenced type.
+ if (const ReferenceType *Ref = ExprType->getAs<ReferenceType>())
+ ExprType = Ref->getPointeeType();
- if (!T->isDependentType() &&
- CheckUnaryExprOrTypeTraitOperand(T, OpLoc, R, ExprKind))
- return ExprError();
+ // C11 6.5.3.4/3, C++11 [expr.alignof]p3:
+ // When alignof or _Alignof is applied to an array type, the result
+ // is the alignment of the element type.
+ if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf ||
+ ExprKind == UETT_OpenMPRequiredSimdAlign)
+ ExprType = Context.getBaseElementType(ExprType);
+
+ if (ExprKind == UETT_VecStep)
+ return CheckVecStepTraitOperandType(*this, ExprType, OpLoc, ExprRange);
+
+ // Explicitly list some types as extensions.
+ if (!CheckExtensionTraitOperandType(*this, ExprType, OpLoc, ExprRange,
+ ExprKind))
+ return false;
+
+ if (RequireCompleteSizedType(
+ OpLoc, ExprType, diag::err_sizeof_alignof_incomplete_or_sizeless_type,
+ KWName, ExprRange))
+ return true;
+
+ if (ExprType->isFunctionType()) {
+ Diag(OpLoc, diag::err_sizeof_alignof_function_type) << KWName << ExprRange;
+ return true;
+ }
+
+ // WebAssembly tables are always illegal operands to unary expressions and
+ // type traits.
+ if (Context.getTargetInfo().getTriple().isWasm() &&
+ ExprType->isWebAssemblyTableType()) {
+ Diag(OpLoc, diag::err_wasm_table_invalid_uett_operand)
+ << getTraitSpelling(ExprKind);
+ return true;
+ }
- if (T->isVariablyModifiedType() && FunctionScopes.size() > 1) {
- if (auto *TT = T->getAs<TypedefType>()) {
+ if (CheckObjCTraitOperandConstraints(*this, ExprType, OpLoc, ExprRange,
+ ExprKind))
+ return true;
+
+ if (ExprType->isVariablyModifiedType() && FunctionScopes.size() > 1) {
+ if (auto *TT = ExprType->getAs<TypedefType>()) {
for (auto I = FunctionScopes.rbegin(),
E = std::prev(FunctionScopes.rend());
I != E; ++I) {
@@ -4626,17 +4723,37 @@ Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
if (DC) {
if (DC->containsDecl(TT->getDecl()))
break;
- captureVariablyModifiedType(Context, T, CSI);
+ captureVariablyModifiedType(Context, ExprType, CSI);
}
}
}
}
- // C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
+ return false;
+}
+
+/// Build a sizeof or alignof expression given a type operand.
+ExprResult Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
+ SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind,
+ SourceRange R) {
+ if (!TInfo)
+ return ExprError();
+
+ QualType T = TInfo->getType();
+
+ if (!T->isDependentType() &&
+ CheckUnaryExprOrTypeTraitOperand(T, OpLoc, R, ExprKind,
+ getTraitSpelling(ExprKind)))
+ return ExprError();
+
+ // Adds overload of TransformToPotentiallyEvaluated for TypeSourceInfo to
+ // properly deal with VLAs in nested calls of sizeof and typeof.
if (isUnevaluatedContext() && ExprKind == UETT_SizeOf &&
TInfo->getType()->isVariablyModifiedType())
TInfo = TransformToPotentiallyEvaluated(TInfo);
+ // C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
return new (Context) UnaryExprOrTypeTraitExpr(
ExprKind, TInfo, Context.getSizeType(), OpLoc, R.getEnd());
}
@@ -4705,6 +4822,29 @@ Sema::ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
return Result;
}
+bool Sema::CheckAlignasTypeArgument(StringRef KWName, TypeSourceInfo *TInfo,
+ SourceLocation OpLoc, SourceRange R) {
+ if (!TInfo)
+ return true;
+ return CheckUnaryExprOrTypeTraitOperand(TInfo->getType(), OpLoc, R,
+ UETT_AlignOf, KWName);
+}
+
+/// ActOnAlignasTypeArgument - Handle @c alignas(type-id) and @c
+/// _Alignas(type-name) .
+/// [dcl.align] An alignment-specifier of the form
+/// alignas(type-id) has the same effect as alignas(alignof(type-id)).
+///
+/// [N1570 6.7.5] _Alignas(type-name) is equivalent to
+/// _Alignas(_Alignof(type-name)).
+bool Sema::ActOnAlignasTypeArgument(StringRef KWName, ParsedType Ty,
+ SourceLocation OpLoc, SourceRange R) {
+ TypeSourceInfo *TInfo;
+ (void)GetTypeFromParser(ParsedType::getFromOpaquePtr(Ty.getAsOpaquePtr()),
+ &TInfo);
+ return CheckAlignasTypeArgument(KWName, TInfo, OpLoc, R);
+}
+
static QualType CheckRealImagOperand(Sema &S, ExprResult &V, SourceLocation Loc,
bool IsReal) {
if (V.get()->isTypeDependent())
@@ -4850,7 +4990,8 @@ ExprResult Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base,
};
// The matrix subscript operator ([][])is considered a single operator.
// Separating the index expressions by parenthesis is not allowed.
- if (base->hasPlaceholderType(BuiltinType::IncompleteMatrixIdx) &&
+ if (base && !base->getType().isNull() &&
+ base->hasPlaceholderType(BuiltinType::IncompleteMatrixIdx) &&
!isa<MatrixSubscriptExpr>(base)) {
Diag(base->getExprLoc(), diag::err_matrix_separate_incomplete_index)
<< SourceRange(base->getBeginLoc(), rbLoc);
@@ -4870,6 +5011,11 @@ ExprResult Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base,
matSubscriptE->getRowIdx(),
ArgExprs.front(), rbLoc);
}
+ if (base->getType()->isWebAssemblyTableType()) {
+ Diag(base->getExprLoc(), diag::err_wasm_table_art)
+ << SourceRange(base->getBeginLoc(), rbLoc) << 3;
+ return ExprError();
+ }
// Handle any non-overload placeholder types in the base and index
// expressions. We can't handle overloads here because the other
@@ -4921,7 +5067,8 @@ ExprResult Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base,
// Build an unanalyzed expression if either operand is type-dependent.
if (getLangOpts().CPlusPlus && ArgExprs.size() == 1 &&
(base->isTypeDependent() ||
- Expr::hasAnyTypeDependentArguments(ArgExprs))) {
+ Expr::hasAnyTypeDependentArguments(ArgExprs)) &&
+ !isa<PackExpansionExpr>(ArgExprs[0])) {
return new (Context) ArraySubscriptExpr(
base, ArgExprs.front(),
getDependentArraySubscriptType(base, ArgExprs.front(), getASTContext()),
@@ -4955,7 +5102,8 @@ ExprResult Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base,
// to overload resolution and so should not take this path.
if (getLangOpts().CPlusPlus && !base->getType()->isObjCObjectPointerType() &&
((base->getType()->isRecordType() ||
- (ArgExprs.size() != 1 || ArgExprs[0]->getType()->isRecordType())))) {
+ (ArgExprs.size() != 1 || isa<PackExpansionExpr>(ArgExprs[0]) ||
+ ArgExprs[0]->getType()->isRecordType())))) {
return CreateOverloadedArraySubscriptExpr(lbLoc, rbLoc, base, ArgExprs);
}
@@ -5826,6 +5974,7 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
if (!ResultType.hasQualifiers())
VK = VK_PRValue;
} else if (!ResultType->isDependentType() &&
+ !ResultType.isWebAssemblyReferenceType() &&
RequireCompleteSizedType(
LLoc, ResultType,
diag::err_subscript_incomplete_or_sizeless_type, BaseExpr))
@@ -5912,22 +6061,34 @@ bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
assert(!InitWithCleanup->getNumObjects() &&
"default argument expression has capturing blocks?");
}
+ // C++ [expr.const]p15.1:
+ // An expression or conversion is in an immediate function context if it is
+ // potentially evaluated and [...] its innermost enclosing non-block scope
+ // is a function parameter scope of an immediate function.
EnterExpressionEvaluationContext EvalContext(
- *this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);
+ *this,
+ FD->isImmediateFunction()
+ ? ExpressionEvaluationContext::ImmediateFunctionContext
+ : ExpressionEvaluationContext::PotentiallyEvaluated,
+ Param);
ExprEvalContexts.back().IsCurrentlyCheckingDefaultArgumentOrInitializer =
SkipImmediateInvocations;
- MarkDeclarationsReferencedInExpr(Init, /*SkipLocalVariables*/ true);
+ runWithSufficientStackSpace(CallLoc, [&] {
+ MarkDeclarationsReferencedInExpr(Init, /*SkipLocalVariables=*/true);
+ });
return false;
}
struct ImmediateCallVisitor : public RecursiveASTVisitor<ImmediateCallVisitor> {
- bool HasImmediateCalls = false;
+ const ASTContext &Context;
+ ImmediateCallVisitor(const ASTContext &Ctx) : Context(Ctx) {}
+ bool HasImmediateCalls = false;
bool shouldVisitImplicitCode() const { return true; }
bool VisitCallExpr(CallExpr *E) {
if (const FunctionDecl *FD = E->getDirectCallee())
- HasImmediateCalls |= FD->isConsteval();
+ HasImmediateCalls |= FD->isImmediateFunction();
return RecursiveASTVisitor<ImmediateCallVisitor>::VisitStmt(E);
}
@@ -6000,8 +6161,16 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
// Mark that we are replacing a default argument first.
// If we are instantiating a template we won't have to
// retransform immediate calls.
+ // C++ [expr.const]p15.1:
+ // An expression or conversion is in an immediate function context if it
+ // is potentially evaluated and [...] its innermost enclosing non-block
+ // scope is a function parameter scope of an immediate function.
EnterExpressionEvaluationContext EvalContext(
- *this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);
+ *this,
+ FD->isImmediateFunction()
+ ? ExpressionEvaluationContext::ImmediateFunctionContext
+ : ExpressionEvaluationContext::PotentiallyEvaluated,
+ Param);
if (Param->hasUninstantiatedDefaultArg()) {
if (InstantiateDefaultArgument(CallLoc, FD, Param))
@@ -6011,15 +6180,18 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
// An immediate invocation that is not evaluated where it appears is
// evaluated and checked for whether it is a constant expression at the
// point where the enclosing initializer is used in a function call.
- ImmediateCallVisitor V;
+ ImmediateCallVisitor V(getASTContext());
if (!NestedDefaultChecking)
V.TraverseDecl(Param);
if (V.HasImmediateCalls) {
ExprEvalContexts.back().DelayedDefaultInitializationContext = {
CallLoc, Param, CurContext};
EnsureImmediateInvocationInDefaultArgs Immediate(*this);
- ExprResult Res = Immediate.TransformInitializer(Param->getInit(),
- /*NotCopy=*/false);
+ ExprResult Res;
+ runWithSufficientStackSpace(CallLoc, [&] {
+ Res = Immediate.TransformInitializer(Param->getInit(),
+ /*NotCopy=*/false);
+ });
if (Res.isInvalid())
return ExprError();
Res = ConvertParamDefaultArgument(Param, Res.get(),
@@ -6046,6 +6218,8 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
if (Field->isInvalidDecl())
return ExprError();
+ CXXThisScopeRAII This(*this, Field->getParent(), Qualifiers());
+
auto *ParentRD = cast<CXXRecordDecl>(Field->getParent());
std::optional<ExpressionEvaluationContextRecord::InitializationContext>
@@ -6089,7 +6263,7 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
// evaluated and checked for whether it is a constant expression at the
// point where the enclosing initializer is used in a [...] a constructor
// definition, or an aggregate initialization.
- ImmediateCallVisitor V;
+ ImmediateCallVisitor V(getASTContext());
if (!NestedDefaultChecking)
V.TraverseDecl(Field);
if (V.HasImmediateCalls) {
@@ -6099,10 +6273,11 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
NestedDefaultChecking;
EnsureImmediateInvocationInDefaultArgs Immediate(*this);
-
- ExprResult Res =
- Immediate.TransformInitializer(Field->getInClassInitializer(),
- /*CXXDirectInit=*/false);
+ ExprResult Res;
+ runWithSufficientStackSpace(Loc, [&] {
+ Res = Immediate.TransformInitializer(Field->getInClassInitializer(),
+ /*CXXDirectInit=*/false);
+ });
if (!Res.isInvalid())
Res = ConvertMemberDefaultInitExpression(Field, Res.get(), Loc);
if (Res.isInvalid()) {
@@ -6115,7 +6290,9 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
if (Field->getInClassInitializer()) {
Expr *E = Init ? Init : Field->getInClassInitializer();
if (!NestedDefaultChecking)
- MarkDeclarationsReferencedInExpr(E, /*SkipLocalVariables=*/false);
+ runWithSufficientStackSpace(Loc, [&] {
+ MarkDeclarationsReferencedInExpr(E, /*SkipLocalVariables=*/false);
+ });
// C++11 [class.base.init]p7:
// The initialization of each base and member constitutes a
// full-expression.
@@ -6296,7 +6473,8 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
// Emit the location of the prototype.
if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
- Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
+ Diag(FDecl->getLocation(), diag::note_callee_decl)
+ << FDecl << FDecl->getParametersSourceRange();
return true;
}
@@ -6341,7 +6519,8 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
// Emit the location of the prototype.
if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
- Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
+ Diag(FDecl->getLocation(), diag::note_callee_decl)
+ << FDecl << FDecl->getParametersSourceRange();
// This deletes the extra arguments.
Call->shrinkNumArgs(NumParams);
@@ -6567,6 +6746,8 @@ static bool isPlaceholderToRemoveAsArg(QualType type) {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define PLACEHOLDER_TYPE(ID, SINGLETON_ID)
#define BUILTIN_TYPE(ID, SINGLETON_ID) case BuiltinType::ID:
#include "clang/AST/BuiltinTypes.def"
@@ -6655,10 +6836,10 @@ static FunctionDecl *rewriteBuiltinFunctionDecl(Sema *Sema, ASTContext &Context,
return nullptr;
Expr *Arg = ArgRes.get();
QualType ArgType = Arg->getType();
- if (!ParamType->isPointerType() ||
- ParamType.hasAddressSpace() ||
+ if (!ParamType->isPointerType() || ParamType.hasAddressSpace() ||
!ArgType->isPointerType() ||
- !ArgType->getPointeeType().hasAddressSpace()) {
+ !ArgType->getPointeeType().hasAddressSpace() ||
+ isPtrSizeAddressSpace(ArgType->getPointeeType().getAddressSpace())) {
OverloadParams.push_back(ParamType);
continue;
}
@@ -7071,13 +7252,8 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
llvm::any_of(ArgExprs,
[](clang::Expr *E) { return E->containsErrors(); })) &&
"should only occur in error-recovery path.");
- QualType ReturnType =
- llvm::isa_and_nonnull<FunctionDecl>(NDecl)
- ? cast<FunctionDecl>(NDecl)->getCallResultType()
- : Context.DependentTy;
- return CallExpr::Create(Context, Fn, ArgExprs, ReturnType,
- Expr::getValueKindForType(ReturnType), RParenLoc,
- CurFPFeatureOverrides());
+ return CallExpr::Create(Context, Fn, ArgExprs, Context.DependentTy,
+ VK_PRValue, RParenLoc, CurFPFeatureOverrides());
}
return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, ArgExprs, RParenLoc,
ExecConfig, IsExecConfig);
@@ -7329,6 +7505,16 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
TheCall->setType(FuncT->getCallResultType(Context));
TheCall->setValueKind(Expr::getValueKindForType(FuncT->getReturnType()));
+ // WebAssembly tables can't be used as arguments.
+ if (Context.getTargetInfo().getTriple().isWasm()) {
+ for (const Expr *Arg : Args) {
+ if (Arg && Arg->getType()->isWebAssemblyTableType()) {
+ return ExprError(Diag(Arg->getExprLoc(),
+ diag::err_wasm_table_as_function_parameter));
+ }
+ }
+ }
+
if (Proto) {
if (ConvertArgumentsForCall(TheCall, Fn, FDecl, Proto, Args, RParenLoc,
IsExecConfig))
@@ -7469,10 +7655,23 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd())))
return ExprError();
if (literalType->isVariableArrayType()) {
- if (!tryToFixVariablyModifiedVarType(TInfo, literalType, LParenLoc,
- diag::err_variable_object_no_init)) {
+ // C2x 6.7.9p4: An entity of variable length array type shall not be
+ // initialized except by an empty initializer.
+ //
+ // The C extension warnings are issued from ParseBraceInitializer() and
+ // do not need to be issued here. However, we continue to issue an error
+ // in the case there are initializers or we are compiling C++. We allow
+ // use of VLAs in C++, but it's not clear we want to allow {} to zero
+ // init a VLA in C++ in all cases (such as with non-trivial constructors).
+ // FIXME: should we allow this construct in C++ when it makes sense to do
+ // so?
+ std::optional<unsigned> NumInits;
+ if (const auto *ILE = dyn_cast<InitListExpr>(LiteralExpr))
+ NumInits = ILE->getNumInits();
+ if ((LangOpts.CPlusPlus || NumInits.value_or(0)) &&
+ !tryToFixVariablyModifiedVarType(TInfo, literalType, LParenLoc,
+ diag::err_variable_object_no_init))
return ExprError();
- }
}
} else if (!literalType->isDependentType() &&
RequireCompleteType(LParenLoc, literalType,
@@ -7930,7 +8129,7 @@ bool Sema::isValidSveBitcast(QualType srcTy, QualType destTy) {
assert(srcTy->isVectorType() || destTy->isVectorType());
auto ValidScalableConversion = [](QualType FirstType, QualType SecondType) {
- if (!FirstType->isSizelessBuiltinType())
+ if (!FirstType->isSVESizelessBuiltinType())
return false;
const auto *VecTy = SecondType->getAs<VectorType>();
@@ -7942,6 +8141,28 @@ bool Sema::isValidSveBitcast(QualType srcTy, QualType destTy) {
ValidScalableConversion(destTy, srcTy);
}
+/// Are the two types RVV-bitcast-compatible types? I.e. is bitcasting from the
+/// first RVV type (e.g. an RVV scalable type) to the second type (e.g. an RVV
+/// VLS type) allowed?
+///
+/// This will also return false if the two given types do not make sense from
+/// the perspective of RVV bitcasts.
+bool Sema::isValidRVVBitcast(QualType srcTy, QualType destTy) {
+ assert(srcTy->isVectorType() || destTy->isVectorType());
+
+ auto ValidScalableConversion = [](QualType FirstType, QualType SecondType) {
+ if (!FirstType->isRVVSizelessBuiltinType())
+ return false;
+
+ const auto *VecTy = SecondType->getAs<VectorType>();
+ return VecTy &&
+ VecTy->getVectorKind() == VectorType::RVVFixedLengthDataVector;
+ };
+
+ return ValidScalableConversion(srcTy, destTy) ||
+ ValidScalableConversion(destTy, srcTy);
+}
+
/// Are the two types matrix types and do they have the same dimensions i.e.
/// do they have the same number of rows and the same number of columns?
bool Sema::areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy) {
@@ -7980,30 +8201,24 @@ bool Sema::anyAltivecTypes(QualType SrcTy, QualType DestTy) {
"expected at least one type to be a vector here");
bool IsSrcTyAltivec =
- SrcTy->isVectorType() && (SrcTy->castAs<VectorType>()->getVectorKind() ==
- VectorType::AltiVecVector);
+ SrcTy->isVectorType() && ((SrcTy->castAs<VectorType>()->getVectorKind() ==
+ VectorType::AltiVecVector) ||
+ (SrcTy->castAs<VectorType>()->getVectorKind() ==
+ VectorType::AltiVecBool) ||
+ (SrcTy->castAs<VectorType>()->getVectorKind() ==
+ VectorType::AltiVecPixel));
+
bool IsDestTyAltivec = DestTy->isVectorType() &&
- (DestTy->castAs<VectorType>()->getVectorKind() ==
- VectorType::AltiVecVector);
+ ((DestTy->castAs<VectorType>()->getVectorKind() ==
+ VectorType::AltiVecVector) ||
+ (DestTy->castAs<VectorType>()->getVectorKind() ==
+ VectorType::AltiVecBool) ||
+ (DestTy->castAs<VectorType>()->getVectorKind() ==
+ VectorType::AltiVecPixel));
return (IsSrcTyAltivec || IsDestTyAltivec);
}
-// This returns true if both vectors have the same element type.
-bool Sema::areSameVectorElemTypes(QualType SrcTy, QualType DestTy) {
- assert((DestTy->isVectorType() || SrcTy->isVectorType()) &&
- "expected at least one type to be a vector here");
-
- uint64_t SrcLen, DestLen;
- QualType SrcEltTy, DestEltTy;
- if (!breakDownVectorType(SrcTy, SrcLen, SrcEltTy))
- return false;
- if (!breakDownVectorType(DestTy, DestLen, DestEltTy))
- return false;
-
- return (SrcEltTy == DestEltTy);
-}
-
/// Are the two types lax-compatible vector types? That is, given
/// that one of them is a vector, do they have equal storage sizes,
/// where the storage size is the number of elements times the element
@@ -8892,8 +9107,14 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
+ // WebAssembly tables are not allowed as conditional LHS or RHS.
QualType LHSTy = LHS.get()->getType();
QualType RHSTy = RHS.get()->getType();
+ if (LHSTy->isWebAssemblyTableType() || RHSTy->isWebAssemblyTableType()) {
+ Diag(QuestionLoc, diag::err_wasm_table_conditional_expression)
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+ }
// Diagnose attempts to convert between __ibm128, __float128 and long double
// where such conversions currently can't be handled.
@@ -9840,8 +10061,9 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
// The default for lax vector conversions with Altivec vectors will
// change, so if we are converting between vector types where
// at least one is an Altivec vector, emit a warning.
- if (anyAltivecTypes(RHSType, LHSType) &&
- !areSameVectorElemTypes(RHSType, LHSType))
+ if (Context.getTargetInfo().getTriple().isPPC() &&
+ anyAltivecTypes(RHSType, LHSType) &&
+ !Context.areCompatibleVectorTypes(RHSType, LHSType))
Diag(RHS.get()->getExprLoc(), diag::warn_deprecated_lax_vec_conv_all)
<< RHSType << LHSType;
Kind = CK_BitCast;
@@ -9857,7 +10079,10 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
const VectorType *VecType = RHSType->getAs<VectorType>();
if (VecType && VecType->getNumElements() == 1 &&
isLaxVectorConversion(RHSType, LHSType)) {
- if (VecType->getVectorKind() == VectorType::AltiVecVector)
+ if (Context.getTargetInfo().getTriple().isPPC() &&
+ (VecType->getVectorKind() == VectorType::AltiVecVector ||
+ VecType->getVectorKind() == VectorType::AltiVecBool ||
+ VecType->getVectorKind() == VectorType::AltiVecPixel))
Diag(RHS.get()->getExprLoc(), diag::warn_deprecated_lax_vec_conv_all)
<< RHSType << LHSType;
ExprResult *VecExpr = &RHS;
@@ -9868,14 +10093,24 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
}
// Allow assignments between fixed-length and sizeless SVE vectors.
- if ((LHSType->isSizelessBuiltinType() && RHSType->isVectorType()) ||
- (LHSType->isVectorType() && RHSType->isSizelessBuiltinType()))
+ if ((LHSType->isSVESizelessBuiltinType() && RHSType->isVectorType()) ||
+ (LHSType->isVectorType() && RHSType->isSVESizelessBuiltinType()))
if (Context.areCompatibleSveTypes(LHSType, RHSType) ||
Context.areLaxCompatibleSveTypes(LHSType, RHSType)) {
Kind = CK_BitCast;
return Compatible;
}
+ // Allow assignments between fixed-length and sizeless RVV vectors.
+ if ((LHSType->isRVVSizelessBuiltinType() && RHSType->isVectorType()) ||
+ (LHSType->isVectorType() && RHSType->isRVVSizelessBuiltinType())) {
+ if (Context.areCompatibleRVVTypes(LHSType, RHSType) ||
+ Context.areLaxCompatibleRVVTypes(LHSType, RHSType)) {
+ Kind = CK_BitCast;
+ return Compatible;
+ }
+ }
+
return Incompatible;
}
@@ -10045,6 +10280,15 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
return Incompatible;
}
+ // Conversion to nullptr_t (C2x only)
+ if (getLangOpts().C2x && LHSType->isNullPtrType() &&
+ RHS.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull)) {
+ // null -> nullptr_t
+ Kind = CK_NullToPointer;
+ return Compatible;
+ }
+
// Conversions from pointers that are not covered by the above.
if (isa<PointerType>(RHSType)) {
// T* -> _Bool
@@ -10257,12 +10501,18 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
return Incompatible;
}
+ // The constraints are expressed in terms of the atomic, qualified, or
+ // unqualified type of the LHS.
+ QualType LHSTypeAfterConversion = LHSType.getAtomicUnqualifiedType();
+
// C99 6.5.16.1p1: the left operand is a pointer and the right is
- // a null pointer constant.
- if ((LHSType->isPointerType() || LHSType->isObjCObjectPointerType() ||
- LHSType->isBlockPointerType()) &&
- RHS.get()->isNullPointerConstant(Context,
- Expr::NPC_ValueDependentIsNull)) {
+ // a null pointer constant <C2x>or its type is nullptr_t;</C2x>.
+ if ((LHSTypeAfterConversion->isPointerType() ||
+ LHSTypeAfterConversion->isObjCObjectPointerType() ||
+ LHSTypeAfterConversion->isBlockPointerType()) &&
+ ((getLangOpts().C2x && RHS.get()->getType()->isNullPtrType()) ||
+ RHS.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull))) {
if (Diagnose || ConvertRHS) {
CastKind Kind;
CXXCastPath Path;
@@ -10273,6 +10523,26 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
}
return Compatible;
}
+ // C2x 6.5.16.1p1: the left operand has type atomic, qualified, or
+ // unqualified bool, and the right operand is a pointer or its type is
+ // nullptr_t.
+ if (getLangOpts().C2x && LHSType->isBooleanType() &&
+ RHS.get()->getType()->isNullPtrType()) {
+ // NB: T* -> _Bool is handled in CheckAssignmentConstraints, this only
+ // only handles nullptr -> _Bool due to needing an extra conversion
+ // step.
+ // We model this by converting from nullptr -> void * and then let the
+ // conversion from void * -> _Bool happen naturally.
+ if (Diagnose || ConvertRHS) {
+ CastKind Kind;
+ CXXCastPath Path;
+ CheckPointerConversion(RHS.get(), Context.VoidPtrTy, Kind, Path,
+ /*IgnoreBaseAccess=*/false, Diagnose);
+ if (ConvertRHS)
+ RHS = ImpCastExprToType(RHS.get(), Context.VoidPtrTy, Kind, VK_PRValue,
+ &Path);
+ }
+ }
// OpenCL queue_t type assignment.
if (LHSType->isQueueT() && RHS.get()->isNullPointerConstant(
@@ -10495,7 +10765,7 @@ static bool canConvertIntToOtherIntTy(Sema &S, ExprResult *Int,
// bits that the vector element type, reject it.
llvm::APSInt Result = EVResult.Val.getInt();
unsigned NumBits = IntSigned
- ? (Result.isNegative() ? Result.getMinSignedBits()
+ ? (Result.isNegative() ? Result.getSignificantBits()
: Result.getActiveBits())
: Result.getActiveBits();
if (Order < 0 && S.Context.getIntWidth(OtherIntTy) < NumBits)
@@ -10645,11 +10915,9 @@ static bool tryGCCVectorConvertAndSplat(Sema &S, ExprResult *Scalar,
return true;
// Adjust scalar if desired.
- if (Scalar) {
- if (ScalarCast != CK_NoOp)
- *Scalar = S.ImpCastExprToType(Scalar->get(), VectorEltTy, ScalarCast);
- *Scalar = S.ImpCastExprToType(Scalar->get(), VectorTy, CK_VectorSplat);
- }
+ if (ScalarCast != CK_NoOp)
+ *Scalar = S.ImpCastExprToType(Scalar->get(), VectorEltTy, ScalarCast);
+ *Scalar = S.ImpCastExprToType(Scalar->get(), VectorTy, CK_VectorSplat);
return false;
}
@@ -10677,10 +10945,6 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
const VectorType *RHSVecType = RHSType->getAs<VectorType>();
assert(LHSVecType || RHSVecType);
- if ((LHSVecType && LHSVecType->getElementType()->isBFloat16Type()) ||
- (RHSVecType && RHSVecType->getElementType()->isBFloat16Type()))
- return ReportInvalid ? InvalidOperands(Loc, LHS, RHS) : QualType();
-
// AltiVec-style "vector bool op vector bool" combinations are allowed
// for some operators but not others.
if (!AllowBothBool &&
@@ -10732,41 +10996,74 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
}
}
- // Expressions containing fixed-length and sizeless SVE vectors are invalid
- // since the ambiguity can affect the ABI.
- auto IsSveConversion = [](QualType FirstType, QualType SecondType) {
+ // Expressions containing fixed-length and sizeless SVE/RVV vectors are
+ // invalid since the ambiguity can affect the ABI.
+ auto IsSveRVVConversion = [](QualType FirstType, QualType SecondType,
+ unsigned &SVEorRVV) {
const VectorType *VecType = SecondType->getAs<VectorType>();
- return FirstType->isSizelessBuiltinType() && VecType &&
- (VecType->getVectorKind() == VectorType::SveFixedLengthDataVector ||
- VecType->getVectorKind() ==
- VectorType::SveFixedLengthPredicateVector);
+ SVEorRVV = 0;
+ if (FirstType->isSizelessBuiltinType() && VecType) {
+ if (VecType->getVectorKind() == VectorType::SveFixedLengthDataVector ||
+ VecType->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ return true;
+ if (VecType->getVectorKind() == VectorType::RVVFixedLengthDataVector) {
+ SVEorRVV = 1;
+ return true;
+ }
+ }
+
+ return false;
};
- if (IsSveConversion(LHSType, RHSType) || IsSveConversion(RHSType, LHSType)) {
- Diag(Loc, diag::err_typecheck_sve_ambiguous) << LHSType << RHSType;
+ unsigned SVEorRVV;
+ if (IsSveRVVConversion(LHSType, RHSType, SVEorRVV) ||
+ IsSveRVVConversion(RHSType, LHSType, SVEorRVV)) {
+ Diag(Loc, diag::err_typecheck_sve_rvv_ambiguous)
+ << SVEorRVV << LHSType << RHSType;
return QualType();
}
- // Expressions containing GNU and SVE (fixed or sizeless) vectors are invalid
- // since the ambiguity can affect the ABI.
- auto IsSveGnuConversion = [](QualType FirstType, QualType SecondType) {
+ // Expressions containing GNU and SVE or RVV (fixed or sizeless) vectors are
+ // invalid since the ambiguity can affect the ABI.
+ auto IsSveRVVGnuConversion = [](QualType FirstType, QualType SecondType,
+ unsigned &SVEorRVV) {
const VectorType *FirstVecType = FirstType->getAs<VectorType>();
const VectorType *SecondVecType = SecondType->getAs<VectorType>();
- if (FirstVecType && SecondVecType)
- return FirstVecType->getVectorKind() == VectorType::GenericVector &&
- (SecondVecType->getVectorKind() ==
- VectorType::SveFixedLengthDataVector ||
- SecondVecType->getVectorKind() ==
- VectorType::SveFixedLengthPredicateVector);
+ SVEorRVV = 0;
+ if (FirstVecType && SecondVecType) {
+ if (FirstVecType->getVectorKind() == VectorType::GenericVector) {
+ if (SecondVecType->getVectorKind() ==
+ VectorType::SveFixedLengthDataVector ||
+ SecondVecType->getVectorKind() ==
+ VectorType::SveFixedLengthPredicateVector)
+ return true;
+ if (SecondVecType->getVectorKind() ==
+ VectorType::RVVFixedLengthDataVector) {
+ SVEorRVV = 1;
+ return true;
+ }
+ }
+ return false;
+ }
- return FirstType->isSizelessBuiltinType() && SecondVecType &&
- SecondVecType->getVectorKind() == VectorType::GenericVector;
+ if (SecondVecType &&
+ SecondVecType->getVectorKind() == VectorType::GenericVector) {
+ if (FirstType->isSVESizelessBuiltinType())
+ return true;
+ if (FirstType->isRVVSizelessBuiltinType()) {
+ SVEorRVV = 1;
+ return true;
+ }
+ }
+
+ return false;
};
- if (IsSveGnuConversion(LHSType, RHSType) ||
- IsSveGnuConversion(RHSType, LHSType)) {
- Diag(Loc, diag::err_typecheck_sve_gnu_ambiguous) << LHSType << RHSType;
+ if (IsSveRVVGnuConversion(LHSType, RHSType, SVEorRVV) ||
+ IsSveRVVGnuConversion(RHSType, LHSType, SVEorRVV)) {
+ Diag(Loc, diag::err_typecheck_sve_rvv_gnu_ambiguous)
+ << SVEorRVV << LHSType << RHSType;
return QualType();
}
@@ -10805,8 +11102,9 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
QualType OtherType = LHSVecType ? RHSType : LHSType;
ExprResult *OtherExpr = LHSVecType ? &RHS : &LHS;
if (isLaxVectorConversion(OtherType, VecType)) {
- if (anyAltivecTypes(RHSType, LHSType) &&
- !areSameVectorElemTypes(RHSType, LHSType))
+ if (Context.getTargetInfo().getTriple().isPPC() &&
+ anyAltivecTypes(RHSType, LHSType) &&
+ !Context.areCompatibleVectorTypes(RHSType, LHSType))
Diag(Loc, diag::warn_deprecated_lax_vec_conv_all) << RHSType << LHSType;
// If we're allowing lax vector conversions, only the total (data) size
// needs to be the same. For non compound assignment, if one of the types is
@@ -11738,7 +12036,7 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
}
llvm::APInt ResultBits =
- static_cast<llvm::APInt&>(Right) + Left.getMinSignedBits();
+ static_cast<llvm::APInt &>(Right) + Left.getSignificantBits();
if (LeftBits.uge(ResultBits))
return;
llvm::APSInt Result = Left.extend(ResultBits.getLimitedValue());
@@ -11761,9 +12059,9 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
}
S.Diag(Loc, diag::warn_shift_result_gt_typewidth)
- << HexResult.str() << Result.getMinSignedBits() << LHSType
- << Left.getBitWidth() << LHS.get()->getSourceRange()
- << RHS.get()->getSourceRange();
+ << HexResult.str() << Result.getSignificantBits() << LHSType
+ << Left.getBitWidth() << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
}
/// Return the resulting type when a vector is shifted
@@ -11876,14 +12174,14 @@ static QualType checkSizelessVectorShift(Sema &S, ExprResult &LHS,
return QualType();
QualType LHSType = LHS.get()->getType();
- const BuiltinType *LHSBuiltinTy = LHSType->getAs<BuiltinType>();
+ const BuiltinType *LHSBuiltinTy = LHSType->castAs<BuiltinType>();
QualType LHSEleType = LHSType->isVLSTBuiltinType()
? LHSBuiltinTy->getSveEltType(S.getASTContext())
: LHSType;
// Note that RHS might not be a vector
QualType RHSType = RHS.get()->getType();
- const BuiltinType *RHSBuiltinTy = RHSType->getAs<BuiltinType>();
+ const BuiltinType *RHSBuiltinTy = RHSType->castAs<BuiltinType>();
QualType RHSEleType = RHSType->isVLSTBuiltinType()
? RHSBuiltinTy->getSveEltType(S.getASTContext())
: RHSType;
@@ -12288,6 +12586,11 @@ static void diagnoseTautologicalComparison(Sema &S, SourceLocation Loc,
S.inTemplateInstantiation())
return;
+ // WebAssembly Tables cannot be compared, therefore shouldn't emit
+ // Tautological diagnostics.
+ if (LHSType->isWebAssemblyTableType() || RHSType->isWebAssemblyTableType())
+ return;
+
// Comparisons between two array types are ill-formed for operator<=>, so
// we shouldn't emit any additional warnings about it.
if (Opc == BO_Cmp && LHSType->isArrayType() && RHSType->isArrayType())
@@ -12674,6 +12977,12 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
(RHSType->isArithmeticType() || RHSType->isEnumeralType()))
return checkArithmeticOrEnumeralCompare(*this, LHS, RHS, Loc, Opc);
+ if ((LHSType->isPointerType() &&
+ LHSType->getPointeeType().isWebAssemblyReferenceType()) ||
+ (RHSType->isPointerType() &&
+ RHSType->getPointeeType().isWebAssemblyReferenceType()))
+ return InvalidOperands(Loc, LHS, RHS);
+
const Expr::NullPointerConstantKind LHSNullKind =
LHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull);
const Expr::NullPointerConstantKind RHSNullKind =
@@ -13571,47 +13880,39 @@ inline QualType Sema::CheckBitwiseOperands(ExprResult &LHS, ExprResult &RHS,
return InvalidOperands(Loc, LHS, RHS);
}
-// C99 6.5.[13,14]
-inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
+// Diagnose cases where the user write a logical and/or but probably meant a
+// bitwise one. We do this when one of the operands is a non-bool integer and
+// the other is a constant.
+void Sema::diagnoseLogicalInsteadOfBitwise(Expr *Op1, Expr *Op2,
SourceLocation Loc,
BinaryOperatorKind Opc) {
- // Check vector operands differently.
- if (LHS.get()->getType()->isVectorType() ||
- RHS.get()->getType()->isVectorType())
- return CheckVectorLogicalOperands(LHS, RHS, Loc);
-
- bool EnumConstantInBoolContext = false;
- for (const ExprResult &HS : {LHS, RHS}) {
- if (const auto *DREHS = dyn_cast<DeclRefExpr>(HS.get())) {
- const auto *ECDHS = dyn_cast<EnumConstantDecl>(DREHS->getDecl());
- if (ECDHS && ECDHS->getInitVal() != 0 && ECDHS->getInitVal() != 1)
- EnumConstantInBoolContext = true;
- }
- }
-
- if (EnumConstantInBoolContext)
- Diag(Loc, diag::warn_enum_constant_in_bool_context);
-
- // Diagnose cases where the user write a logical and/or but probably meant a
- // bitwise one. We do this when the LHS is a non-bool integer and the RHS
- // is a constant.
- if (!EnumConstantInBoolContext && LHS.get()->getType()->isIntegerType() &&
- !LHS.get()->getType()->isBooleanType() &&
- RHS.get()->getType()->isIntegerType() && !RHS.get()->isValueDependent() &&
+ if (Op1->getType()->isIntegerType() && !Op1->getType()->isBooleanType() &&
+ Op2->getType()->isIntegerType() && !Op2->isValueDependent() &&
// Don't warn in macros or template instantiations.
- !Loc.isMacroID() && !inTemplateInstantiation()) {
+ !Loc.isMacroID() && !inTemplateInstantiation() &&
+ !Op2->getExprLoc().isMacroID() &&
+ !Op1->getExprLoc().isMacroID()) {
+ bool IsOp1InMacro = Op1->getExprLoc().isMacroID();
+ bool IsOp2InMacro = Op2->getExprLoc().isMacroID();
+
+ // Exclude the specific expression from triggering the warning.
+ if (!(IsOp1InMacro && IsOp2InMacro && Op1->getSourceRange() == Op2->getSourceRange())) {
+ // If the RHS can be constant folded, and if it constant folds to something
+ // that isn't 0 or 1 (which indicate a potential logical operation that
+ // happened to fold to true/false) then warn.
+ // Parens on the RHS are ignored.
// If the RHS can be constant folded, and if it constant folds to something
// that isn't 0 or 1 (which indicate a potential logical operation that
// happened to fold to true/false) then warn.
// Parens on the RHS are ignored.
Expr::EvalResult EVResult;
- if (RHS.get()->EvaluateAsInt(EVResult, Context)) {
+ if (Op2->EvaluateAsInt(EVResult, Context)) {
llvm::APSInt Result = EVResult.Val.getInt();
- if ((getLangOpts().Bool && !RHS.get()->getType()->isBooleanType() &&
- !RHS.get()->getExprLoc().isMacroID()) ||
+ if ((getLangOpts().Bool && !Op2->getType()->isBooleanType() &&
+ !Op2->getExprLoc().isMacroID()) ||
(Result != 0 && Result != 1)) {
Diag(Loc, diag::warn_logical_instead_of_bitwise)
- << RHS.get()->getSourceRange() << (Opc == BO_LAnd ? "&&" : "||");
+ << Op2->getSourceRange() << (Opc == BO_LAnd ? "&&" : "||");
// Suggest replacing the logical operator with the bitwise version
Diag(Loc, diag::note_logical_instead_of_bitwise_change_operator)
<< (Opc == BO_LAnd ? "&" : "|")
@@ -13621,13 +13922,52 @@ inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
if (Opc == BO_LAnd)
// Suggest replacing "Foo() && kNonZero" with "Foo()"
Diag(Loc, diag::note_logical_instead_of_bitwise_remove_constant)
- << FixItHint::CreateRemoval(
- SourceRange(getLocForEndOfToken(LHS.get()->getEndLoc()),
- RHS.get()->getEndLoc()));
+ << FixItHint::CreateRemoval(SourceRange(
+ getLocForEndOfToken(Op1->getEndLoc()), Op2->getEndLoc()));
+ }
+ }
+ }
}
+}
+
+// C99 6.5.[13,14]
+inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ BinaryOperatorKind Opc) {
+ // Check vector operands differently.
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType())
+ return CheckVectorLogicalOperands(LHS, RHS, Loc);
+
+ bool EnumConstantInBoolContext = false;
+ for (const ExprResult &HS : {LHS, RHS}) {
+ if (const auto *DREHS = dyn_cast<DeclRefExpr>(HS.get())) {
+ const auto *ECDHS = dyn_cast<EnumConstantDecl>(DREHS->getDecl());
+ if (ECDHS && ECDHS->getInitVal() != 0 && ECDHS->getInitVal() != 1)
+ EnumConstantInBoolContext = true;
}
}
+ // WebAssembly tables can't be used with logical operators.
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+ const auto *LHSATy = dyn_cast<ArrayType>(LHSTy);
+ const auto *RHSATy = dyn_cast<ArrayType>(RHSTy);
+ if ((LHSATy && LHSATy->getElementType().isWebAssemblyReferenceType()) ||
+ (RHSATy && RHSATy->getElementType().isWebAssemblyReferenceType())) {
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
+ if (EnumConstantInBoolContext) {
+ // Warn when converting the enum constant to a boolean
+ Diag(Loc, diag::warn_enum_constant_in_bool_context);
+ } else {
+ // Diagnose cases where the user write a logical and/or but probably meant a
+ // bitwise one.
+ diagnoseLogicalInsteadOfBitwise(LHS.get(), RHS.get(), Loc, Opc);
+ diagnoseLogicalInsteadOfBitwise(RHS.get(), LHS.get(), Loc, Opc);
+ }
+
if (!Context.getLangOpts().CPlusPlus) {
// OpenCL v1.1 s6.3.g: The logical operators and (&&), or (||) do
// not operate on the built-in scalar and vector float types.
@@ -14133,6 +14473,12 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
return QualType();
}
+ // WebAssembly tables can't be used on RHS of an assignment expression.
+ if (RHSType->isWebAssemblyTableType()) {
+ Diag(Loc, diag::err_wasm_table_art) << 0;
+ return QualType();
+ }
+
AssignConvertType ConvTy;
if (CompoundType.isNull()) {
Expr *RHSCheck = RHS.get();
@@ -14740,6 +15086,21 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
if (op->getType()->isObjCObjectType())
return Context.getObjCObjectPointerType(op->getType());
+ // Cannot take the address of WebAssembly references or tables.
+ if (Context.getTargetInfo().getTriple().isWasm()) {
+ QualType OpTy = op->getType();
+ if (OpTy.isWebAssemblyReferenceType()) {
+ Diag(OpLoc, diag::err_wasm_ca_reference)
+ << 1 << OrigOp.get()->getSourceRange();
+ return QualType();
+ }
+ if (OpTy->isWebAssemblyTableType()) {
+ Diag(OpLoc, diag::err_wasm_table_pr)
+ << 1 << OrigOp.get()->getSourceRange();
+ return QualType();
+ }
+ }
+
CheckAddressOfPackedMember(op);
return Context.getPointerType(op->getType());
@@ -14808,7 +15169,7 @@ static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK,
// be a pointer to an object type, or a pointer to a function type
LangOptions LO = S.getLangOpts();
if (LO.CPlusPlus)
- S.Diag(OpLoc, diag::ext_typecheck_indirection_through_void_pointer_cpp)
+ S.Diag(OpLoc, diag::err_typecheck_indirection_through_void_pointer_cpp)
<< OpTy << Op->getSourceRange();
else if (!(LO.C99 && IsAfterAmp) && !S.isUnevaluatedContext())
S.Diag(OpLoc, diag::ext_typecheck_indirection_through_void_pointer)
@@ -15603,13 +15964,22 @@ static ExprResult BuildOverloadedBinOp(Sema &S, Scope *Sc, SourceLocation OpLoc,
Expr *LHS, Expr *RHS) {
switch (Opc) {
case BO_Assign:
+ // In the non-overloaded case, we warn about self-assignment (x = x) for
+ // both simple assignment and certain compound assignments where algebra
+ // tells us the operation yields a constant result. When the operator is
+ // overloaded, we can't do the latter because we don't want to assume that
+ // those algebraic identities still apply; for example, a path-building
+ // library might use operator/= to append paths. But it's still reasonable
+ // to assume that simple assignment is just moving/copying values around
+ // and so self-assignment is likely a bug.
+ DiagnoseSelfAssignment(S, LHS, RHS, OpLoc, false);
+ [[fallthrough]];
case BO_DivAssign:
case BO_RemAssign:
case BO_SubAssign:
case BO_AndAssign:
case BO_OrAssign:
case BO_XorAssign:
- DiagnoseSelfAssignment(S, LHS, RHS, OpLoc, false);
CheckIdentityFieldAssignment(LHS, RHS, OpLoc, S);
break;
default:
@@ -15913,6 +16283,13 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
resultType = Context.FloatTy;
}
+  // WebAssembly tables can't be used in unary expressions.
+ if (resultType->isPointerType() &&
+ resultType->getPointeeType().isWebAssemblyReferenceType()) {
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+ }
+
if (resultType->isDependentType())
break;
if (resultType->isScalarType() && !isScopedEnumerationType(resultType)) {
@@ -16121,6 +16498,8 @@ ExprResult Sema::ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
void Sema::ActOnStartStmtExpr() {
PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
+ // Make sure we diagnose jumping into a statement expression.
+ setFunctionHasBranchProtectedScope();
}
void Sema::ActOnStmtExprError() {
@@ -16569,6 +16948,9 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
PushOnScopeChains(AI, CurBlock->TheScope);
}
+
+ if (AI->isInvalidDecl())
+ CurBlock->TheDecl->setInvalidDecl();
}
}
@@ -16769,6 +17151,9 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
if (getCurFunction())
getCurFunction()->addBlock(BD);
+ if (BD->isInvalidDecl())
+ return CreateRecoveryExpr(Result->getBeginLoc(), Result->getEndLoc(),
+ {Result}, Result->getType());
return Result;
}
@@ -16795,7 +17180,7 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
}
// NVPTX does not support va_arg expression.
- if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
Context.getTargetInfo().getTriple().isNVPTX())
targetDiag(E->getBeginLoc(), diag::err_va_arg_in_device);
@@ -17022,7 +17407,9 @@ ExprResult Sema::ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
QualType ResultTy;
switch (Kind) {
case SourceLocExpr::File:
- case SourceLocExpr::Function: {
+ case SourceLocExpr::FileName:
+ case SourceLocExpr::Function:
+ case SourceLocExpr::FuncSig: {
QualType ArrTy = Context.getStringLiteralArrayType(Context.CharTy, 0);
ResultTy =
Context.getPointerType(ArrTy->getAsArrayTypeUnsafe()->getElementType());
@@ -17685,9 +18072,17 @@ Sema::PushExpressionEvaluationContext(
ExprEvalContexts.back().InDiscardedStatement =
ExprEvalContexts[ExprEvalContexts.size() - 2]
.isDiscardedStatementContext();
+
+ // C++23 [expr.const]/p15
+ // An expression or conversion is in an immediate function context if [...]
+ // it is a subexpression of a manifestly constant-evaluated expression or
+ // conversion.
+ const auto &Prev = ExprEvalContexts[ExprEvalContexts.size() - 2];
ExprEvalContexts.back().InImmediateFunctionContext =
- ExprEvalContexts[ExprEvalContexts.size() - 2]
- .isImmediateFunctionContext();
+ Prev.isImmediateFunctionContext() || Prev.isConstantEvaluated();
+
+ ExprEvalContexts.back().InImmediateEscalatingFunctionContext =
+ Prev.InImmediateEscalatingFunctionContext;
Cleanup.reset();
if (!MaybeODRUseExprs.empty())
@@ -17766,9 +18161,30 @@ void Sema::CheckUnusedVolatileAssignment(Expr *E) {
}
}
+void Sema::MarkExpressionAsImmediateEscalating(Expr *E) {
+ assert(!FunctionScopes.empty() && "Expected a function scope");
+ assert(getLangOpts().CPlusPlus20 &&
+ ExprEvalContexts.back().InImmediateEscalatingFunctionContext &&
+ "Cannot mark an immediate escalating expression outside of an "
+ "immediate escalating context");
+ if (auto *Call = dyn_cast<CallExpr>(E->IgnoreImplicit());
+ Call && Call->getCallee()) {
+ if (auto *DeclRef =
+ dyn_cast<DeclRefExpr>(Call->getCallee()->IgnoreImplicit()))
+ DeclRef->setIsImmediateEscalating(true);
+ } else if (auto *Ctr = dyn_cast<CXXConstructExpr>(E->IgnoreImplicit())) {
+ Ctr->setIsImmediateEscalating(true);
+ } else if (auto *DeclRef = dyn_cast<DeclRefExpr>(E->IgnoreImplicit())) {
+ DeclRef->setIsImmediateEscalating(true);
+ } else {
+ assert(false && "expected an immediately escalating expression");
+ }
+ getCurFunction()->FoundImmediateEscalatingExpression = true;
+}
+
ExprResult Sema::CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl) {
if (isUnevaluatedContext() || !E.isUsable() || !Decl ||
- !Decl->isConsteval() || isConstantEvaluated() ||
+ !Decl->isImmediateFunction() || isConstantEvaluated() ||
isCheckingDefaultArgumentOrInitializer() ||
RebuildingImmediateInvocation || isImmediateFunctionContext())
return E;
@@ -17782,13 +18198,59 @@ ExprResult Sema::CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl) {
dyn_cast<DeclRefExpr>(Call->getCallee()->IgnoreImplicit()))
ExprEvalContexts.back().ReferenceToConsteval.erase(DeclRef);
- E = MaybeCreateExprWithCleanups(E);
+ // C++23 [expr.const]/p16
+ // An expression or conversion is immediate-escalating if it is not initially
+ // in an immediate function context and it is [...] an immediate invocation
+ // that is not a constant expression and is not a subexpression of an
+ // immediate invocation.
+ APValue Cached;
+ auto CheckConstantExpressionAndKeepResult = [&]() {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ Expr::EvalResult Eval;
+ Eval.Diag = &Notes;
+ bool Res = E.get()->EvaluateAsConstantExpr(
+ Eval, getASTContext(), ConstantExprKind::ImmediateInvocation);
+ if (Res && Notes.empty()) {
+ Cached = std::move(Eval.Val);
+ return true;
+ }
+ return false;
+ };
+
+ if (!E.get()->isValueDependent() &&
+ ExprEvalContexts.back().InImmediateEscalatingFunctionContext &&
+ !CheckConstantExpressionAndKeepResult()) {
+ MarkExpressionAsImmediateEscalating(E.get());
+ return E;
+ }
+
+ if (Cleanup.exprNeedsCleanups()) {
+ // Since an immediate invocation is a full expression itself - it requires
+    // an additional ExprWithCleanups node, but it can participate in a bigger
+ // full expression which actually requires cleanups to be run after so
+ // create ExprWithCleanups without using MaybeCreateExprWithCleanups as it
+ // may discard cleanups for outer expression too early.
+
+ // Note that ExprWithCleanups created here must always have empty cleanup
+ // objects:
+ // - compound literals do not create cleanup objects in C++ and immediate
+ // invocations are C++-only.
+ // - blocks are not allowed inside constant expressions and compiler will
+ // issue an error if they appear there.
+ //
+ // Hence, in correct code any cleanup objects created inside current
+ // evaluation context must be outside the immediate invocation.
+ E = ExprWithCleanups::Create(getASTContext(), E.get(),
+ Cleanup.cleanupsHaveSideEffects(), {});
+ }
ConstantExpr *Res = ConstantExpr::Create(
getASTContext(), E.get(),
ConstantExpr::getStorageKind(Decl->getReturnType().getTypePtr(),
getASTContext()),
/*IsImmediateInvocation*/ true);
+ if (Cached.hasValue())
+ Res->MoveIntoResult(Cached, getASTContext());
/// Value-dependent constant expressions should not be immediately
/// evaluated until they are instantiated.
if (!Res->isValueDependent())
@@ -17816,14 +18278,17 @@ static void EvaluateAndDiagnoseImmediateInvocation(
FD = Call->getConstructor();
else
llvm_unreachable("unhandled decl kind");
- assert(FD && FD->isConsteval());
- SemaRef.Diag(CE->getBeginLoc(), diag::err_invalid_consteval_call) << FD;
+ assert(FD && FD->isImmediateFunction());
+ SemaRef.Diag(CE->getBeginLoc(), diag::err_invalid_consteval_call)
+ << FD << FD->isConsteval();
if (auto Context =
SemaRef.InnermostDeclarationWithDelayedImmediateInvocations()) {
SemaRef.Diag(Context->Loc, diag::note_invalid_consteval_initializer)
<< Context->Decl;
SemaRef.Diag(Context->Decl->getBeginLoc(), diag::note_declared_at);
}
+ if (!FD->isConsteval())
+ SemaRef.DiagnoseImmediateEscalatingReason(FD);
for (auto &Note : Notes)
SemaRef.Diag(Note.first, Note.second);
return;
@@ -17969,10 +18434,48 @@ HandleImmediateInvocations(Sema &SemaRef,
if (!CE.getInt())
EvaluateAndDiagnoseImmediateInvocation(SemaRef, CE);
for (auto *DR : Rec.ReferenceToConsteval) {
+ // If the expression is immediate escalating, it is not an error;
+ // The outer context itself becomes immediate and further errors,
+ // if any, will be handled by DiagnoseImmediateEscalatingReason.
+ if (DR->isImmediateEscalating())
+ continue;
auto *FD = cast<FunctionDecl>(DR->getDecl());
- SemaRef.Diag(DR->getBeginLoc(), diag::err_invalid_consteval_take_address)
- << FD;
- SemaRef.Diag(FD->getLocation(), diag::note_declared_at);
+ const NamedDecl *ND = FD;
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(ND);
+ MD && (MD->isLambdaStaticInvoker() || isLambdaCallOperator(MD)))
+ ND = MD->getParent();
+
+ // C++23 [expr.const]/p16
+ // An expression or conversion is immediate-escalating if it is not
+ // initially in an immediate function context and it is [...] a
+ // potentially-evaluated id-expression that denotes an immediate function
+ // that is not a subexpression of an immediate invocation.
+ bool ImmediateEscalating = false;
+ bool IsPotentiallyEvaluated =
+ Rec.Context ==
+ Sema::ExpressionEvaluationContext::PotentiallyEvaluated ||
+ Rec.Context ==
+ Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed;
+ if (SemaRef.inTemplateInstantiation() && IsPotentiallyEvaluated)
+ ImmediateEscalating = Rec.InImmediateEscalatingFunctionContext;
+
+ if (!Rec.InImmediateEscalatingFunctionContext ||
+ (SemaRef.inTemplateInstantiation() && !ImmediateEscalating)) {
+ SemaRef.Diag(DR->getBeginLoc(), diag::err_invalid_consteval_take_address)
+ << ND << isa<CXXRecordDecl>(ND) << FD->isConsteval();
+ SemaRef.Diag(ND->getLocation(), diag::note_declared_at);
+ if (auto Context =
+ SemaRef.InnermostDeclarationWithDelayedImmediateInvocations()) {
+ SemaRef.Diag(Context->Loc, diag::note_invalid_consteval_initializer)
+ << Context->Decl;
+ SemaRef.Diag(Context->Decl->getBeginLoc(), diag::note_declared_at);
+ }
+ if (FD->isImmediateEscalating() && !FD->isConsteval())
+ SemaRef.DiagnoseImmediateEscalatingReason(FD);
+
+ } else {
+ SemaRef.MarkExpressionAsImmediateEscalating(DR);
+ }
}
}
@@ -18305,9 +18808,6 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
if (getLangOpts().CUDA)
CheckCUDACall(Loc, Func);
- if (getLangOpts().SYCLIsDevice)
- checkSYCLDeviceFunction(Loc, Func);
-
// If we need a definition, try to create one.
if (NeedDefinition && !Func->getBody()) {
runWithSufficientStackSpace(Loc, [&] {
@@ -18421,9 +18921,17 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
// or of another default member initializer (ie a PotentiallyEvaluatedIfUsed
// context), its initializers may not be referenced yet.
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Func)) {
+ EnterExpressionEvaluationContext EvalContext(
+ *this,
+ Constructor->isImmediateFunction()
+ ? ExpressionEvaluationContext::ImmediateFunctionContext
+ : ExpressionEvaluationContext::PotentiallyEvaluated,
+ Constructor);
for (CXXCtorInitializer *Init : Constructor->inits()) {
if (Init->isInClassMemberInitializer())
- MarkDeclarationsReferencedInExpr(Init->getInit());
+ runWithSufficientStackSpace(Init->getSourceLocation(), [&]() {
+ MarkDeclarationsReferencedInExpr(Init->getInit());
+ });
}
}
@@ -18861,6 +19369,12 @@ static bool captureInLambda(LambdaScopeInfo *LSI, ValueDecl *Var,
Invalid = true;
}
+ if (BuildAndDiagnose && S.Context.getTargetInfo().getTriple().isWasm() &&
+ CaptureType.getNonReferenceType().isWebAssemblyReferenceType()) {
+ S.Diag(Loc, diag::err_wasm_ca_reference) << 0;
+ Invalid = true;
+ }
+
// Compute the type of the field that will capture this variable.
if (ByRef) {
// C++11 [expr.prim.lambda]p15:
@@ -19050,6 +19564,15 @@ bool Sema::tryCaptureVariable(
// An init-capture is notionally from the context surrounding its
// declaration, but its parent DC is the lambda class.
DeclContext *VarDC = Var->getDeclContext();
+ DeclContext *DC = CurContext;
+
+ // tryCaptureVariable is called every time a DeclRef is formed,
+  // it can therefore have a non-negligible impact on performance.
+  // For local variables and when there is no capturing scope,
+  // we can bail out early.
+ if (CapturingFunctionScopes == 0 && (!BuildAndDiagnose || VarDC == DC))
+ return true;
+
const auto *VD = dyn_cast<VarDecl>(Var);
if (VD) {
if (VD->isInitCapture())
@@ -19059,7 +19582,6 @@ bool Sema::tryCaptureVariable(
}
assert(VD && "Cannot capture a null variable");
- DeclContext *DC = CurContext;
const unsigned MaxFunctionScopesIndex = FunctionScopeIndexToStopAt
? *FunctionScopeIndexToStopAt : FunctionScopes.size() - 1;
// We need to sync up the Declaration Context with the
@@ -19072,11 +19594,6 @@ bool Sema::tryCaptureVariable(
}
}
-
- // If the variable is declared in the current context, there is no need to
- // capture it.
- if (VarDC == DC) return true;
-
// Capture global variables if it is required to use private copy of this
// variable.
bool IsGlobal = !VD->hasLocalStorage();
@@ -19102,12 +19619,41 @@ bool Sema::tryCaptureVariable(
bool Explicit = (Kind != TryCapture_Implicit);
unsigned FunctionScopesIndex = MaxFunctionScopesIndex;
do {
+
+ LambdaScopeInfo *LSI = nullptr;
+ if (!FunctionScopes.empty())
+ LSI = dyn_cast_or_null<LambdaScopeInfo>(
+ FunctionScopes[FunctionScopesIndex]);
+
+ bool IsInScopeDeclarationContext =
+ !LSI || LSI->AfterParameterList || CurContext == LSI->CallOperator;
+
+ if (LSI && !LSI->AfterParameterList) {
+      // This allows capturing parameters from a default value, which does not
+      // seem correct
+ if (isa<ParmVarDecl>(Var) && !Var->getDeclContext()->isFunctionOrMethod())
+ return true;
+ }
+ // If the variable is declared in the current context, there is no need to
+ // capture it.
+ if (IsInScopeDeclarationContext &&
+ FunctionScopesIndex == MaxFunctionScopesIndex && VarDC == DC)
+ return true;
+
+ // When evaluating some attributes (like enable_if) we might refer to a
+ // function parameter appertaining to the same declaration as that
+ // attribute.
+ if (const auto *Parm = dyn_cast<ParmVarDecl>(Var);
+ Parm && Parm->getDeclContext() == DC)
+ return true;
+
// Only block literals, captured statements, and lambda expressions can
// capture; other scopes don't work.
- DeclContext *ParentDC = getParentOfCapturingContextOrNull(DC, Var,
- ExprLoc,
- BuildAndDiagnose,
- *this);
+ DeclContext *ParentDC =
+ !IsInScopeDeclarationContext
+ ? DC->getParent()
+ : getParentOfCapturingContextOrNull(DC, Var, ExprLoc,
+ BuildAndDiagnose, *this);
// We need to check for the parent *first* because, if we *have*
// private-captured a global variable, we need to recursively capture it in
// intermediate blocks, lambdas, etc.
@@ -19122,7 +19668,6 @@ bool Sema::tryCaptureVariable(
FunctionScopeInfo *FSI = FunctionScopes[FunctionScopesIndex];
CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FSI);
-
// Check whether we've already captured it.
if (isVariableAlreadyCapturedInScopeInfo(CSI, Var, Nested, CaptureType,
DeclRefType)) {
@@ -19238,10 +19783,10 @@ bool Sema::tryCaptureVariable(
}
return true;
}
-
- FunctionScopesIndex--;
- DC = ParentDC;
Explicit = false;
+ FunctionScopesIndex--;
+ if (IsInScopeDeclarationContext)
+ DC = ParentDC;
} while (!VarDC->Equals(DC));
// Walk back down the scope stack, (e.g. from outer lambda to inner lambda)
@@ -19593,9 +20138,15 @@ static ExprResult rebuildPotentialResultsAsNonOdrUsed(Sema &S, Expr *E,
}
}
+ void *ExOrTy = nullptr;
+ bool IsExpr = GSE->isExprPredicate();
+ if (IsExpr)
+ ExOrTy = GSE->getControllingExpr();
+ else
+ ExOrTy = GSE->getControllingType();
return AnyChanged ? S.CreateGenericSelectionExpr(
GSE->getGenericLoc(), GSE->getDefaultLoc(),
- GSE->getRParenLoc(), GSE->getControllingExpr(),
+ GSE->getRParenLoc(), IsExpr, ExOrTy,
GSE->getAssocTypeSourceInfos(), AssocExprs)
: ExprEmpty();
}
@@ -19843,14 +20394,31 @@ static void DoMarkVarDeclReferenced(
DRE->setDecl(DRE->getDecl());
else if (auto *ME = dyn_cast_or_null<MemberExpr>(E))
ME->setMemberDecl(ME->getMemberDecl());
- } else if (FirstInstantiation ||
- isa<VarTemplateSpecializationDecl>(Var)) {
+ } else if (FirstInstantiation) {
+ SemaRef.PendingInstantiations
+ .push_back(std::make_pair(Var, PointOfInstantiation));
+ } else {
+ bool Inserted = false;
+ for (auto &I : SemaRef.SavedPendingInstantiations) {
+ auto Iter = llvm::find_if(
+ I, [Var](const Sema::PendingImplicitInstantiation &P) {
+ return P.first == Var;
+ });
+ if (Iter != I.end()) {
+ SemaRef.PendingInstantiations.push_back(*Iter);
+ I.erase(Iter);
+ Inserted = true;
+ break;
+ }
+ }
+
// FIXME: For a specialization of a variable template, we don't
// distinguish between "declaration and type implicitly instantiated"
// and "implicit instantiation of definition requested", so we have
// no direct way to avoid enqueueing the pending instantiation
// multiple times.
- SemaRef.PendingInstantiations
+ if (isa<VarTemplateSpecializationDecl>(Var) && !Inserted)
+ SemaRef.PendingInstantiations
.push_back(std::make_pair(Var, PointOfInstantiation));
}
}
@@ -19997,12 +20565,14 @@ void Sema::MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base) {
!Method->getDevirtualizedMethod(Base, getLangOpts().AppleKext))
OdrUse = false;
- if (auto *FD = dyn_cast<FunctionDecl>(E->getDecl()))
+ if (auto *FD = dyn_cast<FunctionDecl>(E->getDecl())) {
if (!isUnevaluatedContext() && !isConstantEvaluated() &&
!isImmediateFunctionContext() &&
- !isCheckingDefaultArgumentOrInitializer() && FD->isConsteval() &&
- !RebuildingImmediateInvocation && !FD->isDependentContext())
+ !isCheckingDefaultArgumentOrInitializer() &&
+ FD->isImmediateFunction() && !RebuildingImmediateInvocation &&
+ !FD->isDependentContext())
ExprEvalContexts.back().ReferenceToConsteval.insert(E);
+ }
MarkExprReferenced(*this, E->getLocation(), E->getDecl(), E, OdrUse,
RefsMinusAssignments);
}
@@ -21106,6 +21676,8 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define BUILTIN_TYPE(Id, SingletonId) case BuiltinType::Id:
#define PLACEHOLDER_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
index abf5a72e7308..423d5372a6f6 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
@@ -31,6 +31,7 @@
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
@@ -42,6 +43,7 @@
#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TypeSize.h"
#include <optional>
@@ -391,7 +393,7 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
//
// also looks for type-name in the scope. Unfortunately, we can't
// reasonably apply this fallback for dependent nested-name-specifiers.
- if (SS.getScopeRep()->getPrefix()) {
+ if (SS.isValid() && SS.getScopeRep()->getPrefix()) {
if (ParsedType T = LookupInScope()) {
Diag(SS.getEndLoc(), diag::ext_qualified_dtor_named_in_lexical_scope)
<< FixItHint::CreateRemoval(SS.getRange());
@@ -500,13 +502,16 @@ bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
IdentifierInfo *II = Name.Identifier;
ReservedIdentifierStatus Status = II->isReserved(PP.getLangOpts());
SourceLocation Loc = Name.getEndLoc();
- if (isReservedInAllContexts(Status) &&
- !PP.getSourceManager().isInSystemHeader(Loc)) {
- Diag(Loc, diag::warn_reserved_extern_symbol)
- << II << static_cast<int>(Status)
- << FixItHint::CreateReplacement(
- Name.getSourceRange(),
- (StringRef("operator\"\"") + II->getName()).str());
+ if (!PP.getSourceManager().isInSystemHeader(Loc)) {
+ if (auto Hint = FixItHint::CreateReplacement(
+ Name.getSourceRange(),
+ (StringRef("operator\"\"") + II->getName()).str());
+ isReservedInAllContexts(Status)) {
+ Diag(Loc, diag::warn_reserved_extern_symbol)
+ << II << static_cast<int>(Status) << Hint;
+ } else {
+ Diag(Loc, diag::warn_deprecated_literal_operator_id) << II << Hint;
+ }
}
}
@@ -975,6 +980,19 @@ bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
Ty = Ptr->getPointeeType();
isPointer = true;
}
+
+ // Cannot throw WebAssembly reference type.
+ if (Ty.isWebAssemblyReferenceType()) {
+ Diag(ThrowLoc, diag::err_wasm_reftype_tc) << 0 << E->getSourceRange();
+ return true;
+ }
+
+ // Cannot throw WebAssembly table.
+ if (isPointer && Ty.isWebAssemblyReferenceType()) {
+ Diag(ThrowLoc, diag::err_wasm_table_art) << 2 << E->getSourceRange();
+ return true;
+ }
+
if (!isPointer || !Ty->isVoidType()) {
if (RequireCompleteType(ThrowLoc, Ty,
isPointer ? diag::err_throw_incomplete_ptr
@@ -1135,8 +1153,7 @@ static QualType adjustCVQualifiersForCXXThisWithinLambda(
auto C = CurLSI->getCXXThisCapture();
if (C.isCopyCapture()) {
- ClassType.removeLocalCVRQualifiers(Qualifiers::CVRMask);
- if (CurLSI->CallOperator->isConst())
+ if (!CurLSI->Mutable)
ClassType.addConst();
return ASTCtx.getPointerType(ClassType);
}
@@ -1175,7 +1192,6 @@ static QualType adjustCVQualifiersForCXXThisWithinLambda(
while (Closure &&
IsThisCaptured(Closure, IsByCopyCapture, IsConstCapture)) {
if (IsByCopyCapture) {
- ClassType.removeLocalCVRQualifiers(Qualifiers::CVRMask);
if (IsConstCapture)
ClassType.addConst();
return ASTCtx.getPointerType(ClassType);
@@ -1362,15 +1378,7 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
// The type of the corresponding data member (not a 'this' pointer if 'by
// copy').
- QualType CaptureType = ThisTy;
- if (ByCopy) {
- // If we are capturing the object referred to by '*this' by copy, ignore
- // any cv qualifiers inherited from the type of the member function for
- // the type of the closure-type's corresponding data member and any use
- // of 'this'.
- CaptureType = ThisTy->getPointeeType();
- CaptureType.removeLocalCVRQualifiers(Qualifiers::CVRMask);
- }
+ QualType CaptureType = ByCopy ? ThisTy->getPointeeType() : ThisTy;
bool isNested = NumCapturingClosures > 1;
CSI->addThisCapture(isNested, Loc, CaptureType, ByCopy);
@@ -1476,10 +1484,10 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
: InitializationKind::CreateValue(TyBeginLoc, LParenOrBraceLoc,
RParenOrBraceLoc);
- // C++1z [expr.type.conv]p1:
+ // C++17 [expr.type.conv]p1:
// If the type is a placeholder for a deduced class type, [...perform class
// template argument deduction...]
- // C++2b:
+ // C++23:
// Otherwise, if the type contains a placeholder type, it is replaced by the
// type determined by placeholder type deduction.
DeducedType *Deduced = Ty->getContainedDeducedType();
@@ -1506,7 +1514,7 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
diag::err_auto_expr_init_multiple_expressions)
<< Ty << FullRange);
}
- if (getLangOpts().CPlusPlus2b) {
+ if (getLangOpts().CPlusPlus23) {
if (Ty->getAs<AutoType>())
Diag(TyBeginLoc, diag::warn_cxx20_compat_auto_expr) << FullRange;
}
@@ -1532,16 +1540,10 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
Entity = InitializedEntity::InitializeTemporary(TInfo, Ty);
}
- if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs)) {
- // FIXME: CXXUnresolvedConstructExpr does not model list-initialization
- // directly. We work around this by dropping the locations of the braces.
- SourceRange Locs = ListInitialization
- ? SourceRange()
- : SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
- return CXXUnresolvedConstructExpr::Create(Context, Ty.getNonReferenceType(),
- TInfo, Locs.getBegin(), Exprs,
- Locs.getEnd());
- }
+ if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs))
+ return CXXUnresolvedConstructExpr::Create(
+ Context, Ty.getNonReferenceType(), TInfo, LParenOrBraceLoc, Exprs,
+ RParenOrBraceLoc, ListInitialization);
// C++ [expr.type.conv]p1:
// If the expression list is a parenthesized single expression, the type
@@ -1590,6 +1592,9 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
Expr *Inner = Result.get();
if (CXXBindTemporaryExpr *BTE = dyn_cast_or_null<CXXBindTemporaryExpr>(Inner))
Inner = BTE->getSubExpr();
+ if (auto *CE = dyn_cast<ConstantExpr>(Inner);
+ CE && CE->isImmediateInvocation())
+ Inner = CE->getSubExpr();
if (!isa<CXXTemporaryObjectExpr>(Inner) &&
!isa<CXXScalarValueInitExpr>(Inner)) {
// If we created a CXXTemporaryObjectExpr, that node also represents the
@@ -2652,11 +2657,10 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// FIXME: Should the Sema create the expression and embed it in the syntax
// tree? Or should the consumer just recalculate the value?
// FIXME: Using a dummy value will interact poorly with attribute enable_if.
- IntegerLiteral Size(
- Context,
- llvm::APInt::getZero(
- Context.getTargetInfo().getPointerWidth(LangAS::Default)),
- Context.getSizeType(), SourceLocation());
+ QualType SizeTy = Context.getSizeType();
+ unsigned SizeTyWidth = Context.getTypeSize(SizeTy);
+ IntegerLiteral Size(Context, llvm::APInt::getZero(SizeTyWidth), SizeTy,
+ SourceLocation());
AllocArgs.push_back(&Size);
QualType AlignValT = Context.VoidTy;
@@ -2979,7 +2983,7 @@ void Sema::DeclareGlobalNewDelete() {
// functions are replaceable ([new.delete]); these are attached to the
// global module ([module.unit]).
if (getLangOpts().CPlusPlusModules && getCurrentModule())
- PushGlobalModuleFragment(SourceLocation(), /*IsImplicit=*/true);
+ PushGlobalModuleFragment(SourceLocation());
// C++ [basic.std.dynamic]p2:
// [...] The following allocation and deallocation functions (18.4) are
@@ -3023,10 +3027,10 @@ void Sema::DeclareGlobalNewDelete() {
// The implicitly declared "std::bad_alloc" should live in global module
// fragment.
- if (GlobalModuleFragment) {
+ if (TheGlobalModuleFragment) {
getStdBadAlloc()->setModuleOwnershipKind(
Decl::ModuleOwnershipKind::ReachableWhenImported);
- getStdBadAlloc()->setLocalOwningModule(GlobalModuleFragment);
+ getStdBadAlloc()->setLocalOwningModule(TheGlobalModuleFragment);
}
}
if (!StdAlignValT && getLangOpts().AlignedAllocation) {
@@ -3038,10 +3042,10 @@ void Sema::DeclareGlobalNewDelete() {
// The implicitly declared "std::align_val_t" should live in global module
// fragment.
- if (GlobalModuleFragment) {
+ if (TheGlobalModuleFragment) {
AlignValT->setModuleOwnershipKind(
Decl::ModuleOwnershipKind::ReachableWhenImported);
- AlignValT->setLocalOwningModule(GlobalModuleFragment);
+ AlignValT->setLocalOwningModule(TheGlobalModuleFragment);
}
AlignValT->setIntegerType(Context.getSizeType());
@@ -3156,7 +3160,8 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
// Global allocation functions should always be visible.
Alloc->setVisibleDespiteOwningModule();
- if (HasBadAllocExceptionSpec && getLangOpts().NewInfallible)
+ if (HasBadAllocExceptionSpec && getLangOpts().NewInfallible &&
+ !getLangOpts().CheckNew)
Alloc->addAttr(
ReturnsNonNullAttr::CreateImplicit(Context, Alloc->getLocation()));
@@ -3170,10 +3175,10 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
// module all the time. But in the implementation, the global module
// is only meaningful when we're in a module unit. So here we attach
// these allocation functions to global module conditionally.
- if (GlobalModuleFragment) {
+ if (TheGlobalModuleFragment) {
Alloc->setModuleOwnershipKind(
Decl::ModuleOwnershipKind::ReachableWhenImported);
- Alloc->setLocalOwningModule(GlobalModuleFragment);
+ Alloc->setLocalOwningModule(TheGlobalModuleFragment);
}
Alloc->addAttr(VisibilityAttr::CreateImplicit(
@@ -4034,7 +4039,7 @@ ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr) {
// The value of a condition that is an expression is the value of the
// expression, implicitly converted to bool.
//
- // C++2b 8.5.2p2
+ // C++23 8.5.2p2
// If the if statement is of the form if constexpr, the value of the condition
// is contextually converted to bool and the converted expression shall be
// a constant expression.
@@ -4085,6 +4090,9 @@ Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) {
case StringLiteral::Wide:
return Context.typesAreCompatible(Context.getWideCharType(),
QualType(ToPointeeType, 0));
+ case StringLiteral::Unevaluated:
+ assert(false && "Unevaluated string literal in expression");
+ break;
}
}
}
@@ -4578,6 +4586,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
break;
case ICK_SVE_Vector_Conversion:
+ case ICK_RVV_Vector_Conversion:
From = ImpCastExprToType(From, ToType, CK_BitCast, VK_PRValue,
/*BasePath=*/nullptr, CCK)
.get();
@@ -4883,8 +4892,11 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
case UTT_IsStandardLayout:
case UTT_IsPOD:
case UTT_IsLiteral:
- // By analogy, is_trivially_relocatable imposes the same constraints.
+ // By analogy, is_trivially_relocatable and is_trivially_equality_comparable
+ // impose the same constraints.
case UTT_IsTriviallyRelocatable:
+ case UTT_IsTriviallyEqualityComparable:
+ case UTT_CanPassInRegs:
// Per the GCC type traits documentation, T shall be a complete type, cv void,
// or an array of unknown bound. But GCC actually imposes the same constraints
// as above.
@@ -5373,15 +5385,27 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return T.isTriviallyRelocatableType(C);
case UTT_IsReferenceable:
return T.isReferenceable();
+ case UTT_CanPassInRegs:
+ if (CXXRecordDecl *RD = T->getAsCXXRecordDecl(); RD && !T.hasQualifiers())
+ return RD->canPassInRegisters();
+ Self.Diag(KeyLoc, diag::err_builtin_pass_in_regs_non_class) << T;
+ return false;
+ case UTT_IsTriviallyEqualityComparable:
+ return T.isTriviallyEqualityComparableType(C);
}
}
static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
QualType RhsT, SourceLocation KeyLoc);
-static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
- ArrayRef<TypeSourceInfo *> Args,
- SourceLocation RParenLoc) {
+static bool EvaluateBooleanTypeTrait(Sema &S, TypeTrait Kind,
+ SourceLocation KWLoc,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool IsDependent) {
+ if (IsDependent)
+ return false;
+
if (Kind <= UTT_Last)
return EvaluateUnaryTypeTrait(S, Kind, KWLoc, Args[0]->getType());
@@ -5549,12 +5573,19 @@ bool Sema::CheckTypeTraitArity(unsigned Arity, SourceLocation Loc, size_t N) {
return true;
}
+enum class TypeTraitReturnType {
+ Bool,
+};
+
+static TypeTraitReturnType GetReturnType(TypeTrait Kind) {
+ return TypeTraitReturnType::Bool;
+}
+
ExprResult Sema::BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc) {
if (!CheckTypeTraitArity(getTypeTraitArity(Kind), KWLoc, Args.size()))
return ExprError();
- QualType ResultType = Context.getLogicalOperationType();
if (Kind <= UTT_Last && !CheckUnaryTypeTraitTypeCompleteness(
*this, Kind, KWLoc, Args[0]->getType()))
@@ -5570,12 +5601,15 @@ ExprResult Sema::BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
}
}
- bool Result = false;
- if (!Dependent)
- Result = evaluateTypeTrait(*this, Kind, KWLoc, Args, RParenLoc);
-
- return TypeTraitExpr::Create(Context, ResultType, KWLoc, Kind, Args,
- RParenLoc, Result);
+ switch (GetReturnType(Kind)) {
+ case TypeTraitReturnType::Bool: {
+ bool Result = EvaluateBooleanTypeTrait(*this, Kind, KWLoc, Args, RParenLoc,
+ Dependent);
+ return TypeTraitExpr::Create(Context, Context.getLogicalOperationType(),
+ KWLoc, Kind, Args, RParenLoc, Result);
+ }
+ }
+ llvm_unreachable("unhandled type trait return type");
}
ExprResult Sema::ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
@@ -6563,6 +6597,13 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
if (IsSizelessVectorConditional)
return CheckSizelessVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);
+ // WebAssembly tables are not allowed as conditional LHS or RHS.
+ if (LTy->isWebAssemblyTableType() || RTy->isWebAssemblyTableType()) {
+ Diag(QuestionLoc, diag::err_wasm_table_conditional_expression)
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
// C++11 [expr.cond]p3
// Otherwise, if the second and third operand have different types, and
// either has (cv) class type [...] an attempt is made to convert each of
@@ -8233,12 +8274,12 @@ static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var,
const VarDecl *DefVD = nullptr;
// If there is no initializer - this can not be a constant expression.
- if (!Var->getAnyInitializer(DefVD)) return true;
+ const Expr *Init = Var->getAnyInitializer(DefVD);
+ if (!Init)
+ return true;
assert(DefVD);
- if (DefVD->isWeak()) return false;
- EvaluatedStmt *Eval = DefVD->ensureEvaluatedStmt();
-
- Expr *Init = cast<Expr>(Eval->Value);
+ if (DefVD->isWeak())
+ return false;
if (Var->getType()->isDependentType() || Init->isValueDependent()) {
// FIXME: Teach the constant evaluator to deal with the non-dependent parts
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
index a3420ac6fdd2..3d14ca3859bb 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
@@ -161,10 +161,13 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
}
CXXRecordDecl *contextClass;
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC))
+ if (auto *MD = dyn_cast<CXXMethodDecl>(DC))
contextClass = MD->getParent()->getCanonicalDecl();
+ else if (auto *RD = dyn_cast<CXXRecordDecl>(DC))
+ contextClass = RD;
else
- contextClass = cast<CXXRecordDecl>(DC);
+ return AbstractInstanceResult ? AbstractInstanceResult
+ : IMA_Error_StaticContext;
// [class.mfct.non-static]p3:
// ...is used in the body of a non-static member function of class X,
@@ -764,7 +767,7 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
QualType RecordTy = BaseType;
if (IsArrow) RecordTy = RecordTy->castAs<PointerType>()->getPointeeType();
if (LookupMemberExprInRecord(
- *this, R, nullptr, RecordTy->getAs<RecordType>(), OpLoc, IsArrow,
+ *this, R, nullptr, RecordTy->castAs<RecordType>(), OpLoc, IsArrow,
SS, TemplateArgs != nullptr, TemplateKWLoc, TE))
return ExprError();
if (TE)
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
index a4372349fff7..5df830e5bee6 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
@@ -2438,6 +2438,9 @@ ExprResult Sema::BuildClassMessageImplicit(QualType ReceiverType,
if (!ReceiverType.isNull())
receiverTypeInfo = Context.getTrivialTypeSourceInfo(ReceiverType);
+ assert(((isSuperReceiver && Loc.isValid()) || receiverTypeInfo) &&
+ "Either the super receiver location needs to be valid or the receiver "
+ "needs valid type source information");
return BuildClassMessage(receiverTypeInfo, ReceiverType,
/*SuperLoc=*/isSuperReceiver ? Loc : SourceLocation(),
Sel, Method, Loc, Loc, Loc, Args,
@@ -4551,6 +4554,7 @@ Expr *Sema::stripARCUnbridgedCast(Expr *e) {
CurFPFeatureOverrides());
} else if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
assert(!gse->isResultDependent());
+ assert(!gse->isTypePredicate());
unsigned n = gse->getNumAssocs();
SmallVector<Expr *, 4> subExprs;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp b/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
index cc8d1405ec55..32c9215184eb 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
@@ -20,12 +20,16 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Designator.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -139,6 +143,9 @@ static StringInitFailureKind IsStringInit(Expr *Init, const ArrayType *AT,
if (IsWideCharCompatible(ElemTy, Context))
return SIF_IncompatWideStringIntoWideChar;
return SIF_Other;
+ case StringLiteral::Unevaluated:
+ assert(false && "Unevaluated string literal in initialization");
+ break;
}
llvm_unreachable("missed a StringLiteral kind?");
@@ -173,6 +180,8 @@ static void updateStringLiteralType(Expr *E, QualType Ty) {
E = GSE->getResultExpr();
} else if (ChooseExpr *CE = dyn_cast<ChooseExpr>(E)) {
E = CE->getChosenSubExpr();
+ } else if (PredefinedExpr *PE = dyn_cast<PredefinedExpr>(E)) {
+ E = PE->getFunctionName();
} else {
llvm_unreachable("unexpected expr in string literal init");
}
@@ -301,6 +310,7 @@ class InitListChecker {
bool InOverloadResolution;
InitListExpr *FullyStructuredList = nullptr;
NoInitExpr *DummyExpr = nullptr;
+ SmallVectorImpl<QualType> *AggrDeductionCandidateParamTypes = nullptr;
NoInitExpr *getDummyInit() {
if (!DummyExpr)
@@ -350,7 +360,7 @@ class InitListChecker {
unsigned &StructuredIndex);
void CheckStructUnionTypes(const InitializedEntity &Entity,
InitListExpr *IList, QualType DeclType,
- CXXRecordDecl::base_class_range Bases,
+ CXXRecordDecl::base_class_const_range Bases,
RecordDecl::field_iterator Field,
bool SubobjectIsDesignatorContext, unsigned &Index,
InitListExpr *StructuredList,
@@ -387,18 +397,22 @@ class InitListChecker {
unsigned ExpectedNumInits);
int numArrayElements(QualType DeclType);
int numStructUnionElements(QualType DeclType);
+ static RecordDecl *getRecordDecl(QualType DeclType);
ExprResult PerformEmptyInit(SourceLocation Loc,
const InitializedEntity &Entity);
/// Diagnose that OldInit (or part thereof) has been overridden by NewInit.
void diagnoseInitOverride(Expr *OldInit, SourceRange NewInitRange,
+ bool UnionOverride = false,
bool FullyOverwritten = true) {
// Overriding an initializer via a designator is valid with C99 designated
// initializers, but ill-formed with C++20 designated initializers.
- unsigned DiagID = SemaRef.getLangOpts().CPlusPlus
- ? diag::ext_initializer_overrides
- : diag::warn_initializer_overrides;
+ unsigned DiagID =
+ SemaRef.getLangOpts().CPlusPlus
+ ? (UnionOverride ? diag::ext_initializer_union_overrides
+ : diag::ext_initializer_overrides)
+ : diag::warn_initializer_overrides;
if (InOverloadResolution && SemaRef.getLangOpts().CPlusPlus) {
// In overload resolution, we have to strictly enforce the rules, and so
@@ -486,9 +500,19 @@ class InitListChecker {
SourceLocation Loc);
public:
+ InitListChecker(
+ Sema &S, const InitializedEntity &Entity, InitListExpr *IL, QualType &T,
+ bool VerifyOnly, bool TreatUnavailableAsInvalid,
+ bool InOverloadResolution = false,
+ SmallVectorImpl<QualType> *AggrDeductionCandidateParamTypes = nullptr);
InitListChecker(Sema &S, const InitializedEntity &Entity, InitListExpr *IL,
- QualType &T, bool VerifyOnly, bool TreatUnavailableAsInvalid,
- bool InOverloadResolution = false);
+ QualType &T,
+ SmallVectorImpl<QualType> &AggrDeductionCandidateParamTypes)
+ : InitListChecker(S, Entity, IL, T, /*VerifyOnly=*/true,
+ /*TreatUnavailableAsInvalid=*/false,
+ /*InOverloadResolution=*/false,
+ &AggrDeductionCandidateParamTypes){};
+
bool HadError() { return hadError; }
// Retrieves the fully-structured initializer list used for
@@ -805,7 +829,7 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
// order to leave them uninitialized, the ILE is expanded and the extra
// fields are then filled with NoInitExpr.
unsigned NumElems = numStructUnionElements(ILE->getType());
- if (RDecl->hasFlexibleArrayMember())
+ if (!RDecl->isUnion() && RDecl->hasFlexibleArrayMember())
++NumElems;
if (!VerifyOnly && ILE->getNumInits() < NumElems)
ILE->resizeInits(SemaRef.Context, NumElems);
@@ -948,18 +972,19 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
static bool hasAnyDesignatedInits(const InitListExpr *IL) {
for (const Stmt *Init : *IL)
- if (Init && isa<DesignatedInitExpr>(Init))
+ if (isa_and_nonnull<DesignatedInitExpr>(Init))
return true;
return false;
}
-InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity,
- InitListExpr *IL, QualType &T, bool VerifyOnly,
- bool TreatUnavailableAsInvalid,
- bool InOverloadResolution)
+InitListChecker::InitListChecker(
+ Sema &S, const InitializedEntity &Entity, InitListExpr *IL, QualType &T,
+ bool VerifyOnly, bool TreatUnavailableAsInvalid, bool InOverloadResolution,
+ SmallVectorImpl<QualType> *AggrDeductionCandidateParamTypes)
: SemaRef(S), VerifyOnly(VerifyOnly),
TreatUnavailableAsInvalid(TreatUnavailableAsInvalid),
- InOverloadResolution(InOverloadResolution) {
+ InOverloadResolution(InOverloadResolution),
+ AggrDeductionCandidateParamTypes(AggrDeductionCandidateParamTypes) {
if (!VerifyOnly || hasAnyDesignatedInits(IL)) {
FullyStructuredList =
createInitListExpr(T, IL->getSourceRange(), IL->getNumInits());
@@ -973,7 +998,7 @@ InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity,
CheckExplicitInitList(Entity, IL, T, FullyStructuredList,
/*TopLevelObject=*/true);
- if (!hadError && FullyStructuredList) {
+ if (!hadError && !AggrDeductionCandidateParamTypes && FullyStructuredList) {
bool RequiresSecondPass = false;
FillInEmptyInitializations(Entity, FullyStructuredList, RequiresSecondPass,
/*OuterILE=*/nullptr, /*OuterIndex=*/0);
@@ -1009,6 +1034,14 @@ int InitListChecker::numStructUnionElements(QualType DeclType) {
return InitializableMembers - structDecl->hasFlexibleArrayMember();
}
+RecordDecl *InitListChecker::getRecordDecl(QualType DeclType) {
+ if (const auto *RT = DeclType->getAs<RecordType>())
+ return RT->getDecl();
+ if (const auto *Inject = DeclType->getAs<InjectedClassNameType>())
+ return Inject->getDecl();
+ return nullptr;
+}
+
/// Determine whether Entity is an entity for which it is idiomatic to elide
/// the braces in aggregate initialization.
static bool isIdiomaticBraceElisionEntity(const InitializedEntity &Entity) {
@@ -1303,15 +1336,18 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
} else if (DeclType->isVectorType()) {
CheckVectorType(Entity, IList, DeclType, Index,
StructuredList, StructuredIndex);
- } else if (DeclType->isRecordType()) {
- assert(DeclType->isAggregateType() &&
- "non-aggregate records should be handed in CheckSubElementType");
- RecordDecl *RD = DeclType->castAs<RecordType>()->getDecl();
+ } else if (const RecordDecl *RD = getRecordDecl(DeclType)) {
auto Bases =
- CXXRecordDecl::base_class_range(CXXRecordDecl::base_class_iterator(),
- CXXRecordDecl::base_class_iterator());
- if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- Bases = CXXRD->bases();
+ CXXRecordDecl::base_class_const_range(CXXRecordDecl::base_class_const_iterator(),
+ CXXRecordDecl::base_class_const_iterator());
+ if (DeclType->isRecordType()) {
+ assert(DeclType->isAggregateType() &&
+ "non-aggregate records should be handed in CheckSubElementType");
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ Bases = CXXRD->bases();
+ } else {
+ Bases = cast<CXXRecordDecl>(RD)->bases();
+ }
CheckStructUnionTypes(Entity, IList, DeclType, Bases, RD->field_begin(),
SubobjectIsDesignatorContext, Index, StructuredList,
StructuredIndex, TopLevelObject);
@@ -1341,6 +1377,13 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
// Checks for scalar type are sufficient for these types too.
CheckScalarType(Entity, IList, DeclType, Index, StructuredList,
StructuredIndex);
+ } else if (DeclType->isDependentType()) {
+ // C++ [over.match.class.deduct]p1.5:
+ // brace elision is not considered for any aggregate element that has a
+ // dependent non-array type or an array type with a value-dependent bound
+ ++Index;
+ assert(AggrDeductionCandidateParamTypes);
+ AggrDeductionCandidateParamTypes->push_back(DeclType);
} else {
if (!VerifyOnly)
SemaRef.Diag(IList->getBeginLoc(), diag::err_illegal_initializer_type)
@@ -1398,31 +1441,46 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
? InitializedEntity::InitializeTemporary(ElemType)
: Entity;
- InitializationSequence Seq(SemaRef, TmpEntity, Kind, expr,
- /*TopLevelOfInitList*/ true);
+ if (TmpEntity.getType()->isDependentType()) {
+ // C++ [over.match.class.deduct]p1.5:
+ // brace elision is not considered for any aggregate element that has a
+ // dependent non-array type or an array type with a value-dependent
+ // bound
+ assert(AggrDeductionCandidateParamTypes);
+ if (!isa_and_nonnull<ConstantArrayType>(
+ SemaRef.Context.getAsArrayType(ElemType))) {
+ ++Index;
+ AggrDeductionCandidateParamTypes->push_back(ElemType);
+ return;
+ }
+ } else {
+ InitializationSequence Seq(SemaRef, TmpEntity, Kind, expr,
+ /*TopLevelOfInitList*/ true);
+ // C++14 [dcl.init.aggr]p13:
+ // If the assignment-expression can initialize a member, the member is
+ // initialized. Otherwise [...] brace elision is assumed
+ //
+ // Brace elision is never performed if the element is not an
+ // assignment-expression.
+ if (Seq || isa<InitListExpr>(expr)) {
+ if (!VerifyOnly) {
+ ExprResult Result = Seq.Perform(SemaRef, TmpEntity, Kind, expr);
+ if (Result.isInvalid())
+ hadError = true;
- // C++14 [dcl.init.aggr]p13:
- // If the assignment-expression can initialize a member, the member is
- // initialized. Otherwise [...] brace elision is assumed
- //
- // Brace elision is never performed if the element is not an
- // assignment-expression.
- if (Seq || isa<InitListExpr>(expr)) {
- if (!VerifyOnly) {
- ExprResult Result = Seq.Perform(SemaRef, TmpEntity, Kind, expr);
- if (Result.isInvalid())
+ UpdateStructuredListElement(StructuredList, StructuredIndex,
+ Result.getAs<Expr>());
+ } else if (!Seq) {
hadError = true;
-
- UpdateStructuredListElement(StructuredList, StructuredIndex,
- Result.getAs<Expr>());
- } else if (!Seq) {
- hadError = true;
- } else if (StructuredList) {
- UpdateStructuredListElement(StructuredList, StructuredIndex,
- getDummyInit());
+ } else if (StructuredList) {
+ UpdateStructuredListElement(StructuredList, StructuredIndex,
+ getDummyInit());
+ }
+ ++Index;
+ if (AggrDeductionCandidateParamTypes)
+ AggrDeductionCandidateParamTypes->push_back(ElemType);
+ return;
}
- ++Index;
- return;
}
// Fall through for subaggregate initialization
@@ -1537,7 +1595,7 @@ void InitListChecker::CheckComplexType(const InitializedEntity &Entity,
// the element type of the complex type. The first element initializes
// the real part, and the second element intitializes the imaginary part.
- if (IList->getNumInits() != 2)
+ if (IList->getNumInits() < 2)
return CheckScalarType(Entity, IList, DeclType, Index, StructuredList,
StructuredIndex);
@@ -1566,20 +1624,23 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
unsigned &StructuredIndex) {
if (Index >= IList->getNumInits()) {
if (!VerifyOnly) {
- if (DeclType->isSizelessBuiltinType())
- SemaRef.Diag(IList->getBeginLoc(),
- SemaRef.getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_empty_sizeless_initializer
- : diag::err_empty_sizeless_initializer)
- << DeclType << IList->getSourceRange();
- else
- SemaRef.Diag(IList->getBeginLoc(),
- SemaRef.getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_empty_scalar_initializer
- : diag::err_empty_scalar_initializer)
- << IList->getSourceRange();
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ if (DeclType->isSizelessBuiltinType())
+ SemaRef.Diag(IList->getBeginLoc(),
+ SemaRef.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_empty_sizeless_initializer
+ : diag::err_empty_sizeless_initializer)
+ << DeclType << IList->getSourceRange();
+ else
+ SemaRef.Diag(IList->getBeginLoc(),
+ SemaRef.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_empty_scalar_initializer
+ : diag::err_empty_scalar_initializer)
+ << IList->getSourceRange();
+ }
}
- hadError = !SemaRef.getLangOpts().CPlusPlus11;
+ hadError =
+ SemaRef.getLangOpts().CPlusPlus && !SemaRef.getLangOpts().CPlusPlus11;
++Index;
++StructuredIndex;
return;
@@ -1635,6 +1696,8 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
}
UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr);
++Index;
+ if (AggrDeductionCandidateParamTypes)
+ AggrDeductionCandidateParamTypes->push_back(DeclType);
}
void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
@@ -1690,6 +1753,8 @@ void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
++Index;
+ if (AggrDeductionCandidateParamTypes)
+ AggrDeductionCandidateParamTypes->push_back(DeclType);
}
void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
@@ -1741,6 +1806,8 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
}
UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr);
++Index;
+ if (AggrDeductionCandidateParamTypes)
+ AggrDeductionCandidateParamTypes->push_back(elementType);
return;
}
@@ -1902,6 +1969,8 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
StructuredList->resizeInits(SemaRef.Context, StructuredIndex);
}
++Index;
+ if (AggrDeductionCandidateParamTypes)
+ AggrDeductionCandidateParamTypes->push_back(DeclType);
return;
}
}
@@ -1909,11 +1978,24 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
// Check for VLAs; in standard C it would be possible to check this
// earlier, but I don't know where clang accepts VLAs (gcc accepts
// them in all sorts of strange places).
- if (!VerifyOnly)
- SemaRef.Diag(VAT->getSizeExpr()->getBeginLoc(),
- diag::err_variable_object_no_init)
- << VAT->getSizeExpr()->getSourceRange();
- hadError = true;
+ bool HasErr = IList->getNumInits() != 0 || SemaRef.getLangOpts().CPlusPlus;
+ if (!VerifyOnly) {
+ // C2x 6.7.9p4: An entity of variable length array type shall not be
+ // initialized except by an empty initializer.
+ //
+ // The C extension warnings are issued from ParseBraceInitializer() and
+ // do not need to be issued here. However, we continue to issue an error
+ // in the case there are initializers or we are compiling C++. We allow
+ // use of VLAs in C++, but it's not clear we want to allow {} to zero
+ // init a VLA in C++ in all cases (such as with non-trivial constructors).
+ // FIXME: should we allow this construct in C++ when it makes sense to do
+ // so?
+ if (HasErr)
+ SemaRef.Diag(VAT->getSizeExpr()->getBeginLoc(),
+ diag::err_variable_object_no_init)
+ << VAT->getSizeExpr()->getSourceRange();
+ }
+ hadError = HasErr;
++Index;
++StructuredIndex;
return;
@@ -2044,24 +2126,22 @@ bool InitListChecker::CheckFlexibleArrayInit(const InitializedEntity &Entity,
void InitListChecker::CheckStructUnionTypes(
const InitializedEntity &Entity, InitListExpr *IList, QualType DeclType,
- CXXRecordDecl::base_class_range Bases, RecordDecl::field_iterator Field,
+ CXXRecordDecl::base_class_const_range Bases, RecordDecl::field_iterator Field,
bool SubobjectIsDesignatorContext, unsigned &Index,
InitListExpr *StructuredList, unsigned &StructuredIndex,
bool TopLevelObject) {
- RecordDecl *structDecl = DeclType->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD = getRecordDecl(DeclType);
// If the record is invalid, some of it's members are invalid. To avoid
// confusion, we forgo checking the initializer for the entire record.
- if (structDecl->isInvalidDecl()) {
+ if (RD->isInvalidDecl()) {
// Assume it was supposed to consume a single initializer.
++Index;
hadError = true;
return;
}
- if (DeclType->isUnionType() && IList->getNumInits() == 0) {
- RecordDecl *RD = DeclType->castAs<RecordType>()->getDecl();
-
+ if (RD->isUnion() && IList->getNumInits() == 0) {
if (!VerifyOnly)
for (FieldDecl *FD : RD->fields()) {
QualType ET = SemaRef.Context.getBaseElementType(FD->getType());
@@ -2105,7 +2185,8 @@ void InitListChecker::CheckStructUnionTypes(
bool InitializedSomething = false;
// If we have any base classes, they are initialized prior to the fields.
- for (auto &Base : Bases) {
+ for (auto I = Bases.begin(), E = Bases.end(); I != E; ++I) {
+ auto &Base = *I;
Expr *Init = Index < IList->getNumInits() ? IList->getInit(Index) : nullptr;
// Designated inits always initialize fields, so if we see one, all
@@ -2113,6 +2194,34 @@ void InitListChecker::CheckStructUnionTypes(
if (Init && isa<DesignatedInitExpr>(Init))
Init = nullptr;
+ // C++ [over.match.class.deduct]p1.6:
+ // each non-trailing aggregate element that is a pack expansion is assumed
+ // to correspond to no elements of the initializer list, and (1.7) a
+ // trailing aggregate element that is a pack expansion is assumed to
+ // correspond to all remaining elements of the initializer list (if any).
+
+ // C++ [over.match.class.deduct]p1.9:
+ // ... except that additional parameter packs of the form P_j... are
+ // inserted into the parameter list in their original aggregate element
+ // position corresponding to each non-trailing aggregate element of
+ // type P_j that was skipped because it was a parameter pack, and the
+ // trailing sequence of parameters corresponding to a trailing
+ // aggregate element that is a pack expansion (if any) is replaced
+ // by a single parameter of the form T_n....
+ if (AggrDeductionCandidateParamTypes && Base.isPackExpansion()) {
+ AggrDeductionCandidateParamTypes->push_back(
+ SemaRef.Context.getPackExpansionType(Base.getType(), std::nullopt));
+
+ // Trailing pack expansion
+ if (I + 1 == E && RD->field_empty()) {
+ if (Index < IList->getNumInits())
+ Index = IList->getNumInits();
+ return;
+ }
+
+ continue;
+ }
+
SourceLocation InitLoc = Init ? Init->getBeginLoc() : IList->getEndLoc();
InitializedEntity BaseEntity = InitializedEntity::InitializeBase(
SemaRef.Context, &Base, false, &Entity);
@@ -2135,7 +2244,6 @@ void InitListChecker::CheckStructUnionTypes(
// anything except look at designated initializers; That's okay,
// because an error should get printed out elsewhere. It might be
// worthwhile to skip over the rest of the initializer, though.
- RecordDecl *RD = DeclType->castAs<RecordType>()->getDecl();
RecordDecl::field_iterator FieldEnd = RD->field_end();
size_t NumRecordDecls = llvm::count_if(RD->decls(), [&](const Decl *D) {
return isa<FieldDecl>(D) || isa<RecordDecl>(D);
@@ -2219,7 +2327,7 @@ void InitListChecker::CheckStructUnionTypes(
}
// We've already initialized a member of a union. We're done.
- if (InitializedSomething && DeclType->isUnionType())
+ if (InitializedSomething && RD->isUnion())
break;
// If we've hit the flexible array member at the end, we're done.
@@ -2260,7 +2368,7 @@ void InitListChecker::CheckStructUnionTypes(
StructuredList, StructuredIndex);
InitializedSomething = true;
- if (DeclType->isUnionType() && StructuredList) {
+ if (RD->isUnion() && StructuredList) {
// Initialize the first field within the union.
StructuredList->setInitializedFieldInUnion(*Field);
}
@@ -2271,7 +2379,7 @@ void InitListChecker::CheckStructUnionTypes(
// Emit warnings for missing struct field initializers.
if (!VerifyOnly && InitializedSomething && CheckForMissingFields &&
Field != FieldEnd && !Field->getType()->isIncompleteArrayType() &&
- !DeclType->isUnionType()) {
+ !RD->isUnion()) {
// It is possible we have one or more unnamed bitfields remaining.
// Find first (if any) named field and emit warning.
for (RecordDecl::field_iterator it = Field, end = RD->field_end();
@@ -2286,7 +2394,7 @@ void InitListChecker::CheckStructUnionTypes(
// Check that any remaining fields can be value-initialized if we're not
// building a structured list. (If we are, we'll check this later.)
- if (!StructuredList && Field != FieldEnd && !DeclType->isUnionType() &&
+ if (!StructuredList && Field != FieldEnd && !RD->isUnion() &&
!Field->getType()->isIncompleteArrayType()) {
for (; Field != FieldEnd && !hadError; ++Field) {
if (!Field->isUnnamedBitfield() && !Field->hasInClassInitializer())
@@ -2325,7 +2433,8 @@ void InitListChecker::CheckStructUnionTypes(
InitializedEntity MemberEntity =
InitializedEntity::InitializeMember(*Field, &Entity);
- if (isa<InitListExpr>(IList->getInit(Index)))
+ if (isa<InitListExpr>(IList->getInit(Index)) ||
+ AggrDeductionCandidateParamTypes)
CheckSubElementType(MemberEntity, IList, Field->getType(), Index,
StructuredList, StructuredIndex);
else
@@ -2348,14 +2457,14 @@ static void ExpandAnonymousFieldDesignator(Sema &SemaRef,
for (IndirectFieldDecl::chain_iterator PI = IndirectField->chain_begin(),
PE = IndirectField->chain_end(); PI != PE; ++PI) {
if (PI + 1 == PE)
- Replacements.push_back(Designator((IdentifierInfo *)nullptr,
- DIE->getDesignator(DesigIdx)->getDotLoc(),
- DIE->getDesignator(DesigIdx)->getFieldLoc()));
+ Replacements.push_back(Designator::CreateFieldDesignator(
+ (IdentifierInfo *)nullptr, DIE->getDesignator(DesigIdx)->getDotLoc(),
+ DIE->getDesignator(DesigIdx)->getFieldLoc()));
else
- Replacements.push_back(Designator((IdentifierInfo *)nullptr,
- SourceLocation(), SourceLocation()));
+ Replacements.push_back(Designator::CreateFieldDesignator(
+ (IdentifierInfo *)nullptr, SourceLocation(), SourceLocation()));
assert(isa<FieldDecl>(*PI));
- Replacements.back().setField(cast<FieldDecl>(*PI));
+ Replacements.back().setFieldDecl(cast<FieldDecl>(*PI));
}
// Expand the current designator into the set of replacement
@@ -2383,7 +2492,7 @@ namespace {
// the given struct or union.
class FieldInitializerValidatorCCC final : public CorrectionCandidateCallback {
public:
- explicit FieldInitializerValidatorCCC(RecordDecl *RD)
+ explicit FieldInitializerValidatorCCC(const RecordDecl *RD)
: Record(RD) {}
bool ValidateCandidate(const TypoCorrection &candidate) override {
@@ -2396,7 +2505,7 @@ class FieldInitializerValidatorCCC final : public CorrectionCandidateCallback {
}
private:
- RecordDecl *Record;
+ const RecordDecl *Record;
};
} // end anonymous namespace
@@ -2472,6 +2581,8 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
Result.get());
}
++Index;
+ if (AggrDeductionCandidateParamTypes)
+ AggrDeductionCandidateParamTypes->push_back(CurrentObjectType);
return !Seq;
}
@@ -2529,6 +2640,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// subobject [0].b.
diagnoseInitOverride(ExistingInit,
SourceRange(D->getBeginLoc(), DIE->getEndLoc()),
+ /*UnionOverride=*/false,
/*FullyOverwritten=*/false);
if (!VerifyOnly) {
@@ -2564,8 +2676,8 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// then the current object (defined below) shall have
// structure or union type and the identifier shall be the
// name of a member of that type.
- const RecordType *RT = CurrentObjectType->getAs<RecordType>();
- if (!RT) {
+ RecordDecl *RD = getRecordDecl(CurrentObjectType);
+ if (!RD) {
SourceLocation Loc = D->getDotLoc();
if (Loc.isInvalid())
Loc = D->getFieldLoc();
@@ -2576,10 +2688,10 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
return true;
}
- FieldDecl *KnownField = D->getField();
+ FieldDecl *KnownField = D->getFieldDecl();
if (!KnownField) {
- IdentifierInfo *FieldName = D->getFieldName();
- DeclContext::lookup_result Lookup = RT->getDecl()->lookup(FieldName);
+ const IdentifierInfo *FieldName = D->getFieldName();
+ DeclContext::lookup_result Lookup = RD->lookup(FieldName);
for (NamedDecl *ND : Lookup) {
if (auto *FD = dyn_cast<FieldDecl>(ND)) {
KnownField = FD;
@@ -2613,11 +2725,11 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Name lookup didn't find anything.
// Determine whether this was a typo for another field name.
- FieldInitializerValidatorCCC CCC(RT->getDecl());
+ FieldInitializerValidatorCCC CCC(RD);
if (TypoCorrection Corrected = SemaRef.CorrectTypo(
DeclarationNameInfo(FieldName, D->getFieldLoc()),
Sema::LookupMemberName, /*Scope=*/nullptr, /*SS=*/nullptr, CCC,
- Sema::CTK_ErrorRecovery, RT->getDecl())) {
+ Sema::CTK_ErrorRecovery, RD)) {
SemaRef.diagnoseTypo(
Corrected,
SemaRef.PDiag(diag::err_field_designator_unknown_suggest)
@@ -2626,8 +2738,15 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
hadError = true;
} else {
// Typo correction didn't find anything.
- SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_unknown)
- << FieldName << CurrentObjectType;
+ SourceLocation Loc = D->getFieldLoc();
+
+ // The loc can be invalid with a "null" designator (i.e. an anonymous
+ // union/struct). Do our best to approximate the location.
+ if (Loc.isInvalid())
+ Loc = IList->getBeginLoc();
+
+ SemaRef.Diag(Loc, diag::err_field_designator_unknown)
+ << FieldName << CurrentObjectType << DIE->getSourceRange();
++Index;
return true;
}
@@ -2635,12 +2754,12 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
}
unsigned NumBases = 0;
- if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
NumBases = CXXRD->getNumBases();
unsigned FieldIndex = NumBases;
- for (auto *FI : RT->getDecl()->fields()) {
+ for (auto *FI : RD->fields()) {
if (FI->isUnnamedBitfield())
continue;
if (declaresSameEntity(KnownField, FI)) {
@@ -2655,7 +2774,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// All of the fields of a union are located at the same place in
// the initializer list.
- if (RT->getDecl()->isUnion()) {
+ if (RD->isUnion()) {
FieldIndex = 0;
if (StructuredList) {
FieldDecl *CurrentField = StructuredList->getInitializedFieldInUnion();
@@ -2667,7 +2786,10 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
if (ExistingInit) {
// We're about to throw away an initializer, emit warning.
diagnoseInitOverride(
- ExistingInit, SourceRange(D->getBeginLoc(), DIE->getEndLoc()));
+ ExistingInit, SourceRange(D->getBeginLoc(), DIE->getEndLoc()),
+ /*UnionOverride=*/true,
+ /*FullyOverwritten=*/SemaRef.getLangOpts().CPlusPlus ? false
+ : true);
}
// remove existing initializer
@@ -2707,15 +2829,14 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// cases where a designator takes us backwards too.
if (IsFirstDesignator && !VerifyOnly && SemaRef.getLangOpts().CPlusPlus &&
NextField &&
- (*NextField == RT->getDecl()->field_end() ||
+ (*NextField == RD->field_end() ||
(*NextField)->getFieldIndex() > Field->getFieldIndex() + 1)) {
// Find the field that we just initialized.
FieldDecl *PrevField = nullptr;
- for (auto FI = RT->getDecl()->field_begin();
- FI != RT->getDecl()->field_end(); ++FI) {
+ for (auto FI = RD->field_begin(); FI != RD->field_end(); ++FI) {
if (FI->isUnnamedBitfield())
continue;
- if (*NextField != RT->getDecl()->field_end() &&
+ if (*NextField != RD->field_end() &&
declaresSameEntity(*FI, **NextField))
break;
PrevField = *FI;
@@ -2726,7 +2847,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
SemaRef.Diag(DIE->getBeginLoc(), diag::ext_designated_init_reordered)
<< KnownField << PrevField << DIE->getSourceRange();
- unsigned OldIndex = NumBases + PrevField->getFieldIndex();
+ unsigned OldIndex = StructuredIndex - 1;
if (StructuredList && OldIndex <= StructuredList->getNumInits()) {
if (Expr *PrevInit = StructuredList->getInit(OldIndex)) {
SemaRef.Diag(PrevInit->getBeginLoc(),
@@ -2740,7 +2861,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Update the designator with the field declaration.
if (!VerifyOnly)
- D->setField(*Field);
+ D->setFieldDecl(*Field);
// Make sure that our non-designated initializer list has space
// for a subobject corresponding to this field.
@@ -2830,8 +2951,12 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// If this the first designator, our caller will continue checking
// the rest of this struct/class/union subobject.
if (IsFirstDesignator) {
+ if (Field != RD->field_end() && Field->isUnnamedBitfield())
+ ++Field;
+
if (NextField)
*NextField = Field;
+
StructuredIndex = FieldIndex;
return false;
}
@@ -2840,7 +2965,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
return false;
// We've already initialized something in the union; we're done.
- if (RT->getDecl()->isUnion())
+ if (RD->isUnion())
return hadError;
// Check the remaining fields within this class/struct/union subobject.
@@ -3148,6 +3273,8 @@ InitListChecker::createInitListExpr(QualType CurrentObjectType,
NumElements = VType->getNumElements();
} else if (CurrentObjectType->isRecordType()) {
NumElements = numStructUnionElements(CurrentObjectType);
+ } else if (CurrentObjectType->isDependentType()) {
+ NumElements = 1;
}
Result->reserveInits(SemaRef.Context, NumElements);
@@ -3226,13 +3353,11 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
// Build designators and check array designator expressions.
for (unsigned Idx = 0; Idx < Desig.getNumDesignators(); ++Idx) {
const Designator &D = Desig.getDesignator(Idx);
- switch (D.getKind()) {
- case Designator::FieldDesignator:
- Designators.push_back(ASTDesignator(D.getField(), D.getDotLoc(),
- D.getFieldLoc()));
- break;
- case Designator::ArrayDesignator: {
+ if (D.isFieldDesignator()) {
+ Designators.push_back(ASTDesignator::CreateFieldDesignator(
+ D.getFieldDecl(), D.getDotLoc(), D.getFieldLoc()));
+ } else if (D.isArrayDesignator()) {
Expr *Index = static_cast<Expr *>(D.getArrayIndex());
llvm::APSInt IndexValue;
if (!Index->isTypeDependent() && !Index->isValueDependent())
@@ -3240,15 +3365,11 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
if (!Index)
Invalid = true;
else {
- Designators.push_back(ASTDesignator(InitExpressions.size(),
- D.getLBracketLoc(),
- D.getRBracketLoc()));
+ Designators.push_back(ASTDesignator::CreateArrayDesignator(
+ InitExpressions.size(), D.getLBracketLoc(), D.getRBracketLoc()));
InitExpressions.push_back(Index);
}
- break;
- }
-
- case Designator::ArrayRangeDesignator: {
+ } else if (D.isArrayRangeDesignator()) {
Expr *StartIndex = static_cast<Expr *>(D.getArrayRangeStart());
Expr *EndIndex = static_cast<Expr *>(D.getArrayRangeEnd());
llvm::APSInt StartValue;
@@ -3280,25 +3401,19 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
<< StartIndex->getSourceRange() << EndIndex->getSourceRange();
Invalid = true;
} else {
- Designators.push_back(ASTDesignator(InitExpressions.size(),
- D.getLBracketLoc(),
- D.getEllipsisLoc(),
- D.getRBracketLoc()));
+ Designators.push_back(ASTDesignator::CreateArrayRangeDesignator(
+ InitExpressions.size(), D.getLBracketLoc(), D.getEllipsisLoc(),
+ D.getRBracketLoc()));
InitExpressions.push_back(StartIndex);
InitExpressions.push_back(EndIndex);
}
}
- break;
- }
}
}
if (Invalid || Init.isInvalid())
return ExprError();
- // Clear out the expressions within the designation.
- Desig.ClearExprs(*this);
-
return DesignatedInitExpr::Create(Context, Designators, InitExpressions,
EqualOrColonLoc, GNUSyntax,
Init.getAs<Expr>());
@@ -3598,6 +3713,7 @@ bool InitializationSequence::isAmbiguous() const {
case FK_ExplicitConstructor:
case FK_AddressOfUnaddressableFunction:
case FK_ParenthesizedListInitFailed:
+ case FK_DesignatedInitForNonAggregate:
return false;
case FK_ReferenceInitOverloadFailed:
@@ -4435,6 +4551,22 @@ static void TryListInitialization(Sema &S,
return;
}
+ // C++20 [dcl.init.list]p3:
+ // - If the braced-init-list contains a designated-initializer-list, T shall
+ // be an aggregate class. [...] Aggregate initialization is performed.
+ //
+ // We allow arrays here too in order to support array designators.
+ //
+ // FIXME: This check should precede the handling of reference initialization.
+ // We follow other compilers in allowing things like 'Aggr &&a = {.x = 1};'
+ // as a tentative DR resolution.
+ bool IsDesignatedInit = InitList->hasDesignatedInit();
+ if (!DestType->isAggregateType() && IsDesignatedInit) {
+ Sequence.SetFailed(
+ InitializationSequence::FK_DesignatedInitForNonAggregate);
+ return;
+ }
+
// C++11 [dcl.init.list]p3, per DR1467:
// - If T is a class type and the initializer list has a single element of
// type cv U, where U is T or a class derived from T, the object is
@@ -4446,7 +4578,8 @@ static void TryListInitialization(Sema &S,
// (8.5.2 [dcl.init.string]), initialization is performed as described
// in that section.
// - Otherwise, if T is an aggregate, [...] (continue below).
- if (S.getLangOpts().CPlusPlus11 && InitList->getNumInits() == 1) {
+ if (S.getLangOpts().CPlusPlus11 && InitList->getNumInits() == 1 &&
+ !IsDesignatedInit) {
if (DestType->isRecordType()) {
QualType InitType = InitList->getInit(0)->getType();
if (S.Context.hasSameUnqualifiedType(InitType, DestType) ||
@@ -4488,7 +4621,7 @@ static void TryListInitialization(Sema &S,
// - If T is an aggregate, aggregate initialization is performed.
if ((DestType->isRecordType() && !DestType->isAggregateType()) ||
(S.getLangOpts().CPlusPlus11 &&
- S.isStdInitializerList(DestType, nullptr))) {
+ S.isStdInitializerList(DestType, nullptr) && !IsDesignatedInit)) {
if (S.getLangOpts().CPlusPlus11) {
// - Otherwise, if the initializer list has no elements and T is a
// class type with a default constructor, the object is
@@ -5460,7 +5593,8 @@ static void TryOrBuildParenListInitialization(
// C++ [dcl.init]p16.6.2.2
// The remaining elements are initialized with their default
// member initializers, if any
- ExprResult DIE = S.BuildCXXDefaultInitExpr(FD->getLocation(), FD);
+ ExprResult DIE = S.BuildCXXDefaultInitExpr(
+ Kind.getParenOrBraceRange().getEnd(), FD);
if (DIE.isInvalid())
return;
S.checkInitializerLifetime(SubEntity, DIE.get());
@@ -6207,6 +6341,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
// We're at the end of the line for C: it's either a write-back conversion
// or it's a C assignment. There's no need to check anything else.
if (!S.getLangOpts().CPlusPlus) {
+ assert(Initializer && "Initializer must be non-null");
// If allowed, check whether this is an Objective-C writeback conversion.
if (allowObjCWritebackConversion &&
tryObjCWritebackConversion(S, *this, Entity, Initializer)) {
@@ -6233,7 +6368,8 @@ void InitializationSequence::InitializeFrom(Sema &S,
if (Kind.getKind() == InitializationKind::IK_Direct ||
(Kind.getKind() == InitializationKind::IK_Copy &&
(Context.hasSameUnqualifiedType(SourceType, DestType) ||
- S.IsDerivedFrom(Initializer->getBeginLoc(), SourceType, DestType)))) {
+ (Initializer && S.IsDerivedFrom(Initializer->getBeginLoc(),
+ SourceType, DestType))))) {
TryConstructorInitialization(S, Entity, Kind, Args, DestType, DestType,
*this);
@@ -6277,6 +6413,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
// function is used) to a derived class thereof are enumerated as
// described in 13.3.1.4, and the best one is chosen through
// overload resolution (13.3).
+ assert(Initializer && "Initializer must be non-null");
TryUserDefinedConversion(S, DestType, Kind, Initializer, *this,
TopLevelOfInitList);
}
@@ -6328,6 +6465,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
// - Otherwise, if the source type is a (possibly cv-qualified) class
// type, conversion functions are considered.
if (!SourceType.isNull() && SourceType->isRecordType()) {
+ assert(Initializer && "Initializer must be non-null");
// For a conversion to _Atomic(T) from either T or a class type derived
// from T, initialize the T object then convert to _Atomic type.
bool NeedAtomicConversion = false;
@@ -6915,7 +7053,7 @@ PerformConstructorInitialization(Sema &S,
if (isExplicitTemporary(Entity, Kind, NumArgs)) {
// An explicitly-constructed temporary, e.g., X(1, 2).
- if (S.DiagnoseUseOfDecl(Constructor, Loc))
+ if (S.DiagnoseUseOfDecl(Step.Function.FoundDecl, Loc))
return ExprError();
TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
@@ -6930,8 +7068,6 @@ PerformConstructorInitialization(Sema &S,
if (auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(
Step.Function.FoundDecl.getDecl())) {
CalleeDecl = S.findInheritingConstructor(Loc, Constructor, Shadow);
- if (S.DiagnoseUseOfDecl(CalleeDecl, Loc))
- return ExprError();
}
S.MarkFunctionReferenced(Loc, CalleeDecl);
@@ -8155,7 +8291,7 @@ void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
case IndirectLocalPathEntry::DefaultInit: {
auto *FD = cast<FieldDecl>(Elem.D);
- Diag(FD->getLocation(), diag::note_init_with_default_member_initalizer)
+ Diag(FD->getLocation(), diag::note_init_with_default_member_initializer)
<< FD << nextPathEntryRange(Path, I + 1, L);
break;
}
@@ -8473,6 +8609,15 @@ ExprResult InitializationSequence::Perform(Sema &S,
<< Init->getSourceRange();
}
+ if (S.getLangOpts().MicrosoftExt && Args.size() == 1 &&
+ isa<PredefinedExpr>(Args[0]) && Entity.getType()->isArrayType()) {
+ // Produce a Microsoft compatibility warning when initializing from a
+ // predefined expression since MSVC treats predefined expressions as string
+ // literals.
+ Expr *Init = Args[0];
+ S.Diag(Init->getBeginLoc(), diag::ext_init_from_predefined) << Init;
+ }
+
// OpenCL v2.0 s6.13.11.1. atomic variables can be initialized in global scope
QualType ETy = Entity.getType();
bool HasGlobalAS = ETy.hasAddressSpace() &&
@@ -9254,10 +9399,13 @@ ExprResult InitializationSequence::Perform(Sema &S,
}
}
+ Expr *Init = CurInit.get();
+ if (!Init)
+ return ExprError();
+
// Check whether the initializer has a shorter lifetime than the initialized
// entity, and if not, either lifetime-extend or warn as appropriate.
- if (auto *Init = CurInit.get())
- S.checkInitializerLifetime(Entity, Init);
+ S.checkInitializerLifetime(Entity, Init);
// Diagnose non-fatal problems with the completed initialization.
if (InitializedEntity::EntityKind EK = Entity.getKind();
@@ -9265,16 +9413,13 @@ ExprResult InitializationSequence::Perform(Sema &S,
EK == InitializedEntity::EK_ParenAggInitMember) &&
cast<FieldDecl>(Entity.getDecl())->isBitField())
S.CheckBitFieldInitialization(Kind.getLocation(),
- cast<FieldDecl>(Entity.getDecl()),
- CurInit.get());
+ cast<FieldDecl>(Entity.getDecl()), Init);
// Check for std::move on construction.
- if (const Expr *E = CurInit.get()) {
- CheckMoveOnConstruction(S, E,
- Entity.getKind() == InitializedEntity::EK_Result);
- }
+ CheckMoveOnConstruction(S, Init,
+ Entity.getKind() == InitializedEntity::EK_Result);
- return CurInit;
+ return Init;
}
/// Somewhere within T there is an uninitialized reference subobject.
@@ -9860,6 +10005,12 @@ bool InitializationSequence::Diagnose(Sema &S,
TryOrBuildParenListInitialization(S, Entity, Kind, Args, *this,
/*VerifyOnly=*/false);
break;
+
+ case FK_DesignatedInitForNonAggregate:
+ InitListExpr *InitList = cast<InitListExpr>(Args[0]);
+ S.Diag(Kind.getLocation(), diag::err_designated_init_for_non_aggregate)
+ << Entity.getType() << InitList->getSourceRange();
+ break;
}
PrintInitLocationNote(S, Entity);
@@ -10030,6 +10181,10 @@ void InitializationSequence::dump(raw_ostream &OS) const {
case FK_ParenthesizedListInitFailed:
OS << "parenthesized list initialization failed";
break;
+
+ case FK_DesignatedInitForNonAggregate:
+ OS << "designated initializer for non-aggregate type";
+ break;
}
OS << '\n';
return;
@@ -10402,7 +10557,7 @@ static bool isOrIsDerivedFromSpecializationOf(CXXRecordDecl *RD,
QualType Sema::DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TSInfo, const InitializedEntity &Entity,
- const InitializationKind &Kind, MultiExprArg Inits) {
+ const InitializationKind &Kind, MultiExprArg Inits, ParenListExpr *PL) {
auto *DeducedTST = dyn_cast<DeducedTemplateSpecializationType>(
TSInfo->getType()->getContainedDeducedType());
assert(DeducedTST && "not a deduced template specialization type");
@@ -10472,13 +10627,137 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
OverloadCandidateSet::CSK_Normal);
OverloadCandidateSet::iterator Best;
- bool HasAnyDeductionGuide = false;
bool AllowExplicit = !Kind.isCopyInit() || ListInit;
- auto tryToResolveOverload =
+ // Return true is the candidate is added successfully, false otherwise.
+ auto addDeductionCandidate = [&](FunctionTemplateDecl *TD,
+ CXXDeductionGuideDecl *GD,
+ DeclAccessPair FoundDecl,
+ bool OnlyListConstructors,
+ bool AllowAggregateDeductionCandidate) {
+ // C++ [over.match.ctor]p1: (non-list copy-initialization from non-class)
+ // For copy-initialization, the candidate functions are all the
+ // converting constructors (12.3.1) of that class.
+ // C++ [over.match.copy]p1: (non-list copy-initialization from class)
+ // The converting constructors of T are candidate functions.
+ if (!AllowExplicit) {
+ // Overload resolution checks whether the deduction guide is declared
+ // explicit for us.
+
+ // When looking for a converting constructor, deduction guides that
+ // could never be called with one argument are not interesting to
+ // check or note.
+ if (GD->getMinRequiredArguments() > 1 ||
+ (GD->getNumParams() == 0 && !GD->isVariadic()))
+ return;
+ }
+
+ // C++ [over.match.list]p1.1: (first phase list initialization)
+ // Initially, the candidate functions are the initializer-list
+ // constructors of the class T
+ if (OnlyListConstructors && !isInitListConstructor(GD))
+ return;
+
+ if (!AllowAggregateDeductionCandidate &&
+ GD->getDeductionCandidateKind() == DeductionCandidate::Aggregate)
+ return;
+
+ // C++ [over.match.list]p1.2: (second phase list initialization)
+ // the candidate functions are all the constructors of the class T
+ // C++ [over.match.ctor]p1: (all other cases)
+ // the candidate functions are all the constructors of the class of
+ // the object being initialized
+
+ // C++ [over.best.ics]p4:
+ // When [...] the constructor [...] is a candidate by
+ // - [over.match.copy] (in all cases)
+ // FIXME: The "second phase of [over.match.list] case can also
+ // theoretically happen here, but it's not clear whether we can
+ // ever have a parameter of the right type.
+ bool SuppressUserConversions = Kind.isCopyInit();
+
+ if (TD) {
+ SmallVector<Expr *, 8> TmpInits;
+ for (Expr *E : Inits)
+ if (auto *DI = dyn_cast<DesignatedInitExpr>(E))
+ TmpInits.push_back(DI->getInit());
+ else
+ TmpInits.push_back(E);
+ AddTemplateOverloadCandidate(
+ TD, FoundDecl, /*ExplicitArgs=*/nullptr, TmpInits, Candidates,
+ SuppressUserConversions,
+ /*PartialOverloading=*/false, AllowExplicit, ADLCallKind::NotADL,
+ /*PO=*/{}, AllowAggregateDeductionCandidate);
+ } else {
+ AddOverloadCandidate(GD, FoundDecl, Inits, Candidates,
+ SuppressUserConversions,
+ /*PartialOverloading=*/false, AllowExplicit);
+ }
+ };
+
+ bool FoundDeductionGuide = false;
+
+ auto TryToResolveOverload =
[&](bool OnlyListConstructors) -> OverloadingResult {
Candidates.clear(OverloadCandidateSet::CSK_Normal);
- HasAnyDeductionGuide = false;
+ bool HasAnyDeductionGuide = false;
+
+ auto SynthesizeAggrGuide = [&](InitListExpr *ListInit) {
+ auto *RD = cast<CXXRecordDecl>(Template->getTemplatedDecl());
+ if (!(RD->getDefinition() && RD->isAggregate()))
+ return;
+ QualType Ty = Context.getRecordType(RD);
+ SmallVector<QualType, 8> ElementTypes;
+
+ InitListChecker CheckInitList(*this, Entity, ListInit, Ty, ElementTypes);
+ if (!CheckInitList.HadError()) {
+ // C++ [over.match.class.deduct]p1.8:
+ // if e_i is of array type and x_i is a braced-init-list, T_i is an
+ // rvalue reference to the declared type of e_i and
+ // C++ [over.match.class.deduct]p1.9:
+ // if e_i is of array type and x_i is a bstring-literal, T_i is an
+ // lvalue reference to the const-qualified declared type of e_i and
+ // C++ [over.match.class.deduct]p1.10:
+ // otherwise, T_i is the declared type of e_i
+ for (int I = 0, E = ListInit->getNumInits();
+ I < E && !isa<PackExpansionType>(ElementTypes[I]); ++I)
+ if (ElementTypes[I]->isArrayType()) {
+ if (isa<InitListExpr>(ListInit->getInit(I)))
+ ElementTypes[I] = Context.getRValueReferenceType(ElementTypes[I]);
+ else if (isa<StringLiteral>(
+ ListInit->getInit(I)->IgnoreParenImpCasts()))
+ ElementTypes[I] =
+ Context.getLValueReferenceType(ElementTypes[I].withConst());
+ }
+
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(Template);
+ for (auto &T : ElementTypes)
+ T.getCanonicalType().Profile(ID);
+ unsigned Hash = ID.ComputeHash();
+ if (AggregateDeductionCandidates.count(Hash) == 0) {
+ if (FunctionTemplateDecl *TD =
+ DeclareImplicitDeductionGuideFromInitList(
+ Template, ElementTypes,
+ TSInfo->getTypeLoc().getEndLoc())) {
+ auto *GD = cast<CXXDeductionGuideDecl>(TD->getTemplatedDecl());
+ GD->setDeductionCandidateKind(DeductionCandidate::Aggregate);
+ AggregateDeductionCandidates[Hash] = GD;
+ addDeductionCandidate(TD, GD, DeclAccessPair::make(TD, AS_public),
+ OnlyListConstructors,
+ /*AllowAggregateDeductionCandidate=*/true);
+ }
+ } else {
+ CXXDeductionGuideDecl *GD = AggregateDeductionCandidates[Hash];
+ FunctionTemplateDecl *TD = GD->getDescribedFunctionTemplate();
+ assert(TD && "aggregate deduction candidate is function template");
+ addDeductionCandidate(TD, GD, DeclAccessPair::make(TD, AS_public),
+ OnlyListConstructors,
+ /*AllowAggregateDeductionCandidate=*/true);
+ }
+ HasAnyDeductionGuide = true;
+ }
+ };
for (auto I = Guides.begin(), E = Guides.end(); I != E; ++I) {
NamedDecl *D = (*I)->getUnderlyingDecl();
@@ -10486,7 +10765,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
continue;
auto *TD = dyn_cast<FunctionTemplateDecl>(D);
- auto *GD = dyn_cast_or_null<CXXDeductionGuideDecl>(
+ auto *GD = dyn_cast_if_present<CXXDeductionGuideDecl>(
TD ? TD->getTemplatedDecl() : dyn_cast<FunctionDecl>(D));
if (!GD)
continue;
@@ -10494,53 +10773,30 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
if (!GD->isImplicit())
HasAnyDeductionGuide = true;
- // C++ [over.match.ctor]p1: (non-list copy-initialization from non-class)
- // For copy-initialization, the candidate functions are all the
- // converting constructors (12.3.1) of that class.
- // C++ [over.match.copy]p1: (non-list copy-initialization from class)
- // The converting constructors of T are candidate functions.
- if (!AllowExplicit) {
- // Overload resolution checks whether the deduction guide is declared
- // explicit for us.
-
- // When looking for a converting constructor, deduction guides that
- // could never be called with one argument are not interesting to
- // check or note.
- if (GD->getMinRequiredArguments() > 1 ||
- (GD->getNumParams() == 0 && !GD->isVariadic()))
- continue;
+ addDeductionCandidate(TD, GD, I.getPair(), OnlyListConstructors,
+ /*AllowAggregateDeductionCandidate=*/false);
+ }
+
+ // C++ [over.match.class.deduct]p1.4:
+ // if C is defined and its definition satisfies the conditions for an
+ // aggregate class ([dcl.init.aggr]) with the assumption that any
+ // dependent base class has no virtual functions and no virtual base
+ // classes, and the initializer is a non-empty braced-init-list or
+ // parenthesized expression-list, and there are no deduction-guides for
+ // C, the set contains an additional function template, called the
+ // aggregate deduction candidate, defined as follows.
+ if (getLangOpts().CPlusPlus20 && !HasAnyDeductionGuide) {
+ if (ListInit && ListInit->getNumInits()) {
+ SynthesizeAggrGuide(ListInit);
+ } else if (PL && PL->getNumExprs()) {
+ InitListExpr TempListInit(getASTContext(), PL->getLParenLoc(),
+ PL->exprs(), PL->getRParenLoc());
+ SynthesizeAggrGuide(&TempListInit);
}
+ }
- // C++ [over.match.list]p1.1: (first phase list initialization)
- // Initially, the candidate functions are the initializer-list
- // constructors of the class T
- if (OnlyListConstructors && !isInitListConstructor(GD))
- continue;
+ FoundDeductionGuide = FoundDeductionGuide || HasAnyDeductionGuide;
- // C++ [over.match.list]p1.2: (second phase list initialization)
- // the candidate functions are all the constructors of the class T
- // C++ [over.match.ctor]p1: (all other cases)
- // the candidate functions are all the constructors of the class of
- // the object being initialized
-
- // C++ [over.best.ics]p4:
- // When [...] the constructor [...] is a candidate by
- // - [over.match.copy] (in all cases)
- // FIXME: The "second phase of [over.match.list] case can also
- // theoretically happen here, but it's not clear whether we can
- // ever have a parameter of the right type.
- bool SuppressUserConversions = Kind.isCopyInit();
-
- if (TD)
- AddTemplateOverloadCandidate(TD, I.getPair(), /*ExplicitArgs*/ nullptr,
- Inits, Candidates, SuppressUserConversions,
- /*PartialOverloading*/ false,
- AllowExplicit);
- else
- AddOverloadCandidate(GD, I.getPair(), Inits, Candidates,
- SuppressUserConversions,
- /*PartialOverloading*/ false, AllowExplicit);
- }
return Candidates.BestViableFunction(*this, Kind.getLocation(), Best);
};
@@ -10576,7 +10832,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
}
if (TryListConstructors)
- Result = tryToResolveOverload(/*OnlyListConstructor*/true);
+ Result = TryToResolveOverload(/*OnlyListConstructor*/true);
// Then unwrap the initializer list and try again considering all
// constructors.
Inits = MultiExprArg(ListInit->getInits(), ListInit->getNumInits());
@@ -10585,7 +10841,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
// If list-initialization fails, or if we're doing any other kind of
// initialization, we (eventually) consider constructors.
if (Result == OR_No_Viable_Function)
- Result = tryToResolveOverload(/*OnlyListConstructor*/false);
+ Result = TryToResolveOverload(/*OnlyListConstructor*/false);
switch (Result) {
case OR_Ambiguous:
@@ -10639,7 +10895,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
// Make sure we didn't select an unusable deduction guide, and mark it
// as referenced.
- DiagnoseUseOfDecl(Best->Function, Kind.getLocation());
+ DiagnoseUseOfDecl(Best->FoundDecl, Kind.getLocation());
MarkFunctionReferenced(Kind.getLocation(), Best->Function);
break;
}
@@ -10655,7 +10911,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
// Warn if CTAD was used on a type that does not have any user-defined
// deduction guides.
- if (!HasAnyDeductionGuide) {
+ if (!FoundDeductionGuide) {
Diag(TSInfo->getTypeLoc().getBeginLoc(),
diag::warn_ctad_maybe_unsupported)
<< TemplateName;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
index 00ab6ba580bf..06fc53591a76 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
@@ -247,8 +247,9 @@ Sema::createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info,
DeclContext *DC = CurContext;
while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext()))
DC = DC->getParent();
- bool IsGenericLambda = getGenericLambdaTemplateParameterList(getCurLambda(),
- *this);
+
+ bool IsGenericLambda =
+ Info && getGenericLambdaTemplateParameterList(getCurLambda(), *this);
// Start constructing the lambda class.
CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(
Context, DC, Info, IntroducerRange.getBegin(), LambdaDependencyKind,
@@ -282,12 +283,14 @@ Sema::getCurrentMangleNumberContext(const DeclContext *DC) {
Normal,
DefaultArgument,
DataMember,
- StaticDataMember,
InlineVariable,
- VariableTemplate,
+ TemplatedVariable,
Concept
} Kind = Normal;
+ bool IsInNonspecializedTemplate =
+ inTemplateInstantiation() || CurContext->isDependentContext();
+
// Default arguments of member function parameters that appear in a class
// definition, as well as the initializers of data members, receive special
// treatment. Identify them.
@@ -298,15 +301,15 @@ Sema::getCurrentMangleNumberContext(const DeclContext *DC) {
if (LexicalDC->isRecord())
Kind = DefaultArgument;
} else if (VarDecl *Var = dyn_cast<VarDecl>(ManglingContextDecl)) {
- if (Var->getDeclContext()->isRecord())
- Kind = StaticDataMember;
- else if (Var->getMostRecentDecl()->isInline())
+ if (Var->getMostRecentDecl()->isInline())
Kind = InlineVariable;
+ else if (Var->getDeclContext()->isRecord() && IsInNonspecializedTemplate)
+ Kind = TemplatedVariable;
else if (Var->getDescribedVarTemplate())
- Kind = VariableTemplate;
+ Kind = TemplatedVariable;
else if (auto *VTS = dyn_cast<VarTemplateSpecializationDecl>(Var)) {
if (!VTS->isExplicitSpecialization())
- Kind = VariableTemplate;
+ Kind = TemplatedVariable;
}
} else if (isa<FieldDecl>(ManglingContextDecl)) {
Kind = DataMember;
@@ -318,12 +321,9 @@ Sema::getCurrentMangleNumberContext(const DeclContext *DC) {
// Itanium ABI [5.1.7]:
// In the following contexts [...] the one-definition rule requires closure
// types in different translation units to "correspond":
- bool IsInNonspecializedTemplate =
- inTemplateInstantiation() || CurContext->isDependentContext();
switch (Kind) {
case Normal: {
- // -- the bodies of non-exported nonspecialized template functions
- // -- the bodies of inline functions
+ // -- the bodies of inline or templated functions
if ((IsInNonspecializedTemplate &&
!(ManglingContextDecl && isa<ParmVarDecl>(ManglingContextDecl))) ||
isInInlineFunction(CurContext)) {
@@ -340,21 +340,13 @@ Sema::getCurrentMangleNumberContext(const DeclContext *DC) {
// however the ManglingContextDecl is important for the purposes of
// re-forming the template argument list of the lambda for constraint
// evaluation.
- case StaticDataMember:
- // -- the initializers of nonspecialized static members of template classes
- if (!IsInNonspecializedTemplate)
- return std::make_tuple(nullptr, ManglingContextDecl);
- // Fall through to get the current context.
- [[fallthrough]];
-
case DataMember:
- // -- the in-class initializers of class members
+ // -- default member initializers
case DefaultArgument:
// -- default arguments appearing in class definitions
case InlineVariable:
- // -- the initializers of inline variables
- case VariableTemplate:
- // -- the initializers of templated variables
+ case TemplatedVariable:
+ // -- the initializers of inline or templated variables
return std::make_tuple(
&Context.getManglingNumberContext(ASTContext::NeedExtraManglingDecl,
ManglingContextDecl),
@@ -364,14 +356,13 @@ Sema::getCurrentMangleNumberContext(const DeclContext *DC) {
llvm_unreachable("unexpected context");
}
-CXXMethodDecl *Sema::startLambdaDefinition(
- CXXRecordDecl *Class, SourceRange IntroducerRange,
- TypeSourceInfo *MethodTypeInfo, SourceLocation EndLoc,
- ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind,
- StorageClass SC, Expr *TrailingRequiresClause) {
+static QualType
+buildTypeForLambdaCallOperator(Sema &S, clang::CXXRecordDecl *Class,
+ TemplateParameterList *TemplateParams,
+ TypeSourceInfo *MethodTypeInfo) {
+ assert(MethodTypeInfo && "expected a non null type");
+
QualType MethodType = MethodTypeInfo->getType();
- TemplateParameterList *TemplateParams =
- getGenericLambdaTemplateParameterList(getCurLambda(), *this);
// If a lambda appears in a dependent context or is a generic lambda (has
// template parameters) and has an 'auto' return type, deduce it to a
// dependent type.
@@ -379,75 +370,24 @@ CXXMethodDecl *Sema::startLambdaDefinition(
const FunctionProtoType *FPT = MethodType->castAs<FunctionProtoType>();
QualType Result = FPT->getReturnType();
if (Result->isUndeducedType()) {
- Result = SubstAutoTypeDependent(Result);
- MethodType = Context.getFunctionType(Result, FPT->getParamTypes(),
- FPT->getExtProtoInfo());
+ Result = S.SubstAutoTypeDependent(Result);
+ MethodType = S.Context.getFunctionType(Result, FPT->getParamTypes(),
+ FPT->getExtProtoInfo());
}
}
-
- // C++11 [expr.prim.lambda]p5:
- // The closure type for a lambda-expression has a public inline function
- // call operator (13.5.4) whose parameters and return type are described by
- // the lambda-expression's parameter-declaration-clause and
- // trailing-return-type respectively.
- DeclarationName MethodName
- = Context.DeclarationNames.getCXXOperatorName(OO_Call);
- DeclarationNameLoc MethodNameLoc =
- DeclarationNameLoc::makeCXXOperatorNameLoc(IntroducerRange);
- CXXMethodDecl *Method = CXXMethodDecl::Create(
- Context, Class, EndLoc,
- DeclarationNameInfo(MethodName, IntroducerRange.getBegin(),
- MethodNameLoc),
- MethodType, MethodTypeInfo, SC, getCurFPFeatures().isFPConstrained(),
- /*isInline=*/true, ConstexprKind, EndLoc, TrailingRequiresClause);
- Method->setAccess(AS_public);
- if (!TemplateParams)
- Class->addDecl(Method);
-
- // Temporarily set the lexical declaration context to the current
- // context, so that the Scope stack matches the lexical nesting.
- Method->setLexicalDeclContext(CurContext);
- // Create a function template if we have a template parameter list
- FunctionTemplateDecl *const TemplateMethod = TemplateParams ?
- FunctionTemplateDecl::Create(Context, Class,
- Method->getLocation(), MethodName,
- TemplateParams,
- Method) : nullptr;
- if (TemplateMethod) {
- TemplateMethod->setAccess(AS_public);
- Method->setDescribedFunctionTemplate(TemplateMethod);
- Class->addDecl(TemplateMethod);
- TemplateMethod->setLexicalDeclContext(CurContext);
- }
-
- // Add parameters.
- if (!Params.empty()) {
- Method->setParams(Params);
- CheckParmsForFunctionDef(Params,
- /*CheckParameterNames=*/false);
-
- for (auto *P : Method->parameters())
- P->setOwningFunction(Method);
- }
-
- return Method;
+ return MethodType;
}
void Sema::handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
- std::optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling) {
- if (Mangling) {
- bool HasKnownInternalLinkage;
- unsigned ManglingNumber, DeviceManglingNumber;
- Decl *ManglingContextDecl;
- std::tie(HasKnownInternalLinkage, ManglingNumber, DeviceManglingNumber,
- ManglingContextDecl) = *Mangling;
- Class->setLambdaMangling(ManglingNumber, ManglingContextDecl,
- HasKnownInternalLinkage);
- Class->setDeviceLambdaManglingNumber(DeviceManglingNumber);
+ std::optional<CXXRecordDecl::LambdaNumbering> NumberingOverride) {
+ if (NumberingOverride) {
+ Class->setLambdaNumbering(*NumberingOverride);
return;
}
+ ContextRAII ManglingContext(*this, Class->getDeclContext());
+
auto getMangleNumberingContext =
[this](CXXRecordDecl *Class,
Decl *ManglingContextDecl) -> MangleNumberingContext * {
@@ -462,11 +402,10 @@ void Sema::handleLambdaNumbering(
return &Context.getManglingNumberContext(DC);
};
+ CXXRecordDecl::LambdaNumbering Numbering;
MangleNumberingContext *MCtx;
- Decl *ManglingContextDecl;
- std::tie(MCtx, ManglingContextDecl) =
+ std::tie(MCtx, Numbering.ContextDecl) =
getCurrentMangleNumberContext(Class->getDeclContext());
- bool HasKnownInternalLinkage = false;
if (!MCtx && (getLangOpts().CUDA || getLangOpts().SYCLIsDevice ||
getLangOpts().SYCLIsHost)) {
// Force lambda numbering in CUDA/HIP as we need to name lambdas following
@@ -476,26 +415,41 @@ void Sema::handleLambdaNumbering(
// Also force for SYCL, since we need this for the
// __builtin_sycl_unique_stable_name implementation, which depends on lambda
// mangling.
- MCtx = getMangleNumberingContext(Class, ManglingContextDecl);
+ MCtx = getMangleNumberingContext(Class, Numbering.ContextDecl);
assert(MCtx && "Retrieving mangle numbering context failed!");
- HasKnownInternalLinkage = true;
+ Numbering.HasKnownInternalLinkage = true;
}
if (MCtx) {
- unsigned ManglingNumber = MCtx->getManglingNumber(Method);
- Class->setLambdaMangling(ManglingNumber, ManglingContextDecl,
- HasKnownInternalLinkage);
- Class->setDeviceLambdaManglingNumber(MCtx->getDeviceManglingNumber(Method));
+ Numbering.IndexInContext = MCtx->getNextLambdaIndex();
+ Numbering.ManglingNumber = MCtx->getManglingNumber(Method);
+ Numbering.DeviceManglingNumber = MCtx->getDeviceManglingNumber(Method);
+ Class->setLambdaNumbering(Numbering);
+
+ if (auto *Source =
+ dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
+ Source->AssignedLambdaNumbering(Class);
+ }
+}
+
+static void buildLambdaScopeReturnType(Sema &S, LambdaScopeInfo *LSI,
+ CXXMethodDecl *CallOperator,
+ bool ExplicitResultType) {
+ if (ExplicitResultType) {
+ LSI->HasImplicitReturnType = false;
+ LSI->ReturnType = CallOperator->getReturnType();
+ if (!LSI->ReturnType->isDependentType() && !LSI->ReturnType->isVoidType())
+ S.RequireCompleteType(CallOperator->getBeginLoc(), LSI->ReturnType,
+ diag::err_lambda_incomplete_result);
+ } else {
+ LSI->HasImplicitReturnType = true;
}
}
-void Sema::buildLambdaScope(LambdaScopeInfo *LSI,
- CXXMethodDecl *CallOperator,
- SourceRange IntroducerRange,
- LambdaCaptureDefault CaptureDefault,
- SourceLocation CaptureDefaultLoc,
- bool ExplicitParams,
- bool ExplicitResultType,
- bool Mutable) {
+void Sema::buildLambdaScope(LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ SourceLocation CaptureDefaultLoc,
+ bool ExplicitParams, bool Mutable) {
LSI->CallOperator = CallOperator;
CXXRecordDecl *LambdaClass = CallOperator->getParent();
LSI->Lambda = LambdaClass;
@@ -507,30 +461,16 @@ void Sema::buildLambdaScope(LambdaScopeInfo *LSI,
LSI->IntroducerRange = IntroducerRange;
LSI->ExplicitParams = ExplicitParams;
LSI->Mutable = Mutable;
-
- if (ExplicitResultType) {
- LSI->ReturnType = CallOperator->getReturnType();
-
- if (!LSI->ReturnType->isDependentType() &&
- !LSI->ReturnType->isVoidType()) {
- if (RequireCompleteType(CallOperator->getBeginLoc(), LSI->ReturnType,
- diag::err_lambda_incomplete_result)) {
- // Do nothing.
- }
- }
- } else {
- LSI->HasImplicitReturnType = true;
- }
}
void Sema::finishLambdaExplicitCaptures(LambdaScopeInfo *LSI) {
LSI->finishedExplicitCaptures();
}
-void Sema::ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
- ArrayRef<NamedDecl *> TParams,
- SourceLocation RAngleLoc,
- ExprResult RequiresClause) {
+void Sema::ActOnLambdaExplicitTemplateParameterList(
+ LambdaIntroducer &Intro, SourceLocation LAngleLoc,
+ ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc,
+ ExprResult RequiresClause) {
LambdaScopeInfo *LSI = getCurLambda();
assert(LSI && "Expected a lambda scope");
assert(LSI->NumExplicitTemplateParams == 0 &&
@@ -546,35 +486,6 @@ void Sema::ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
LSI->RequiresClause = RequiresClause;
}
-void Sema::addLambdaParameters(
- ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
- CXXMethodDecl *CallOperator, Scope *CurScope) {
- // Introduce our parameters into the function scope
- for (unsigned p = 0, NumParams = CallOperator->getNumParams();
- p < NumParams; ++p) {
- ParmVarDecl *Param = CallOperator->getParamDecl(p);
-
- // If this has an identifier, add it to the scope stack.
- if (CurScope && Param->getIdentifier()) {
- bool Error = false;
- // Resolution of CWG 2211 in C++17 renders shadowing ill-formed, but we
- // retroactively apply it.
- for (const auto &Capture : Captures) {
- if (Capture.Id == Param->getIdentifier()) {
- Error = true;
- Diag(Param->getLocation(), diag::err_parameter_shadow_capture);
- Diag(Capture.Loc, diag::note_var_explicitly_captured_here)
- << Capture.Id << true;
- }
- }
- if (!Error)
- CheckShadow(CurScope, Param);
-
- PushOnScopeChains(Param, CurScope);
- }
- }
-}
-
/// If this expression is an enumerator-like expression of some type
/// T, return the type T; otherwise, return null.
///
@@ -861,11 +772,9 @@ QualType Sema::buildLambdaInitCaptureInitialization(
return DeducedType;
}
-VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc,
- QualType InitCaptureType,
- SourceLocation EllipsisLoc,
- IdentifierInfo *Id,
- unsigned InitStyle, Expr *Init) {
+VarDecl *Sema::createLambdaInitCaptureVarDecl(
+ SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc,
+ IdentifierInfo *Id, unsigned InitStyle, Expr *Init, DeclContext *DeclCtx) {
// FIXME: Retain the TypeSourceInfo from buildLambdaInitCaptureInitialization
// rather than reconstructing it here.
TypeSourceInfo *TSI = Context.getTrivialTypeSourceInfo(InitCaptureType, Loc);
@@ -876,8 +785,8 @@ VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc,
// used as a variable, and only exists as a way to name and refer to the
// init-capture.
// FIXME: Pass in separate source locations for '&' and identifier.
- VarDecl *NewVD = VarDecl::Create(Context, CurContext, Loc,
- Loc, Id, InitCaptureType, TSI, SC_Auto);
+ VarDecl *NewVD = VarDecl::Create(Context, DeclCtx, Loc, Loc, Id,
+ InitCaptureType, TSI, SC_Auto);
NewVD->setInitCapture(true);
NewVD->setReferenced(true);
// FIXME: Pass in a VarDecl::InitializationStyle.
@@ -889,43 +798,53 @@ VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc,
return NewVD;
}
-void Sema::addInitCapture(LambdaScopeInfo *LSI, VarDecl *Var,
- bool isReferenceType) {
+void Sema::addInitCapture(LambdaScopeInfo *LSI, VarDecl *Var, bool ByRef) {
assert(Var->isInitCapture() && "init capture flag should be set");
- LSI->addCapture(Var, /*isBlock*/ false, isReferenceType,
- /*isNested*/ false, Var->getLocation(), SourceLocation(),
- Var->getType(), /*Invalid*/ false);
+ LSI->addCapture(Var, /*isBlock=*/false, ByRef,
+ /*isNested=*/false, Var->getLocation(), SourceLocation(),
+ Var->getType(), /*Invalid=*/false);
}
-void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
- Declarator &ParamInfo,
- Scope *CurScope) {
- LambdaScopeInfo *const LSI = getCurLambda();
- assert(LSI && "LambdaScopeInfo should be on stack!");
+// Unlike getCurLambda, getCurrentLambdaScopeUnsafe doesn't
+// check that the current lambda is in a consistent or fully constructed state.
+static LambdaScopeInfo *getCurrentLambdaScopeUnsafe(Sema &S) {
+ assert(!S.FunctionScopes.empty());
+ return cast<LambdaScopeInfo>(S.FunctionScopes[S.FunctionScopes.size() - 1]);
+}
- // Determine if we're within a context where we know that the lambda will
- // be dependent, because there are template parameters in scope.
- CXXRecordDecl::LambdaDependencyKind LambdaDependencyKind =
- CXXRecordDecl::LDK_Unknown;
- if (LSI->NumExplicitTemplateParams > 0) {
- auto *TemplateParamScope = CurScope->getTemplateParamParent();
- assert(TemplateParamScope &&
- "Lambda with explicit template param list should establish a "
- "template param scope");
- assert(TemplateParamScope->getParent());
- if (TemplateParamScope->getParent()->getTemplateParamParent() != nullptr)
- LambdaDependencyKind = CXXRecordDecl::LDK_AlwaysDependent;
- } else if (CurScope->getTemplateParamParent() != nullptr) {
- LambdaDependencyKind = CXXRecordDecl::LDK_AlwaysDependent;
- }
+static TypeSourceInfo *
+getDummyLambdaType(Sema &S, SourceLocation Loc = SourceLocation()) {
+ // C++11 [expr.prim.lambda]p4:
+ // If a lambda-expression does not include a lambda-declarator, it is as
+ // if the lambda-declarator were ().
+ FunctionProtoType::ExtProtoInfo EPI(S.Context.getDefaultCallingConvention(
+ /*IsVariadic=*/false, /*IsCXXMethod=*/true));
+ EPI.HasTrailingReturn = true;
+ EPI.TypeQuals.addConst();
+ LangAS AS = S.getDefaultCXXMethodAddrSpace();
+ if (AS != LangAS::Default)
+ EPI.TypeQuals.addAddressSpace(AS);
+
+ // C++1y [expr.prim.lambda]:
+ // The lambda return type is 'auto', which is replaced by the
+ // trailing-return type if provided and/or deduced from 'return'
+ // statements
+ // We don't do this before C++1y, because we don't support deduced return
+ // types there.
+ QualType DefaultTypeForNoTrailingReturn = S.getLangOpts().CPlusPlus14
+ ? S.Context.getAutoDeductType()
+ : S.Context.DependentTy;
+ QualType MethodTy = S.Context.getFunctionType(DefaultTypeForNoTrailingReturn,
+ std::nullopt, EPI);
+ return S.Context.getTrivialTypeSourceInfo(MethodTy, Loc);
+}
- // Determine the signature of the call operator.
- TypeSourceInfo *MethodTyInfo;
- bool ExplicitParams = true;
- bool ExplicitResultType = true;
- bool ContainsUnexpandedParameterPack = false;
- SourceLocation EndLoc;
- SmallVector<ParmVarDecl *, 8> Params;
+static TypeSourceInfo *getLambdaType(Sema &S, LambdaIntroducer &Intro,
+ Declarator &ParamInfo, Scope *CurScope,
+ SourceLocation Loc,
+ bool &ExplicitResultType) {
+
+ ExplicitResultType = false;
assert(
(ParamInfo.getDeclSpec().getStorageClassSpec() ==
@@ -935,146 +854,172 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
bool IsLambdaStatic =
ParamInfo.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static;
+ TypeSourceInfo *MethodTyInfo;
+
if (ParamInfo.getNumTypeObjects() == 0) {
- // C++11 [expr.prim.lambda]p4:
- // If a lambda-expression does not include a lambda-declarator, it is as
- // if the lambda-declarator were ().
- FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
- /*IsVariadic=*/false, /*IsCXXMethod=*/true));
- EPI.HasTrailingReturn = true;
- EPI.TypeQuals.addConst();
- LangAS AS = getDefaultCXXMethodAddrSpace();
- if (AS != LangAS::Default)
- EPI.TypeQuals.addAddressSpace(AS);
-
- // C++1y [expr.prim.lambda]:
- // The lambda return type is 'auto', which is replaced by the
- // trailing-return type if provided and/or deduced from 'return'
- // statements
- // We don't do this before C++1y, because we don't support deduced return
- // types there.
- QualType DefaultTypeForNoTrailingReturn =
- getLangOpts().CPlusPlus14 ? Context.getAutoDeductType()
- : Context.DependentTy;
- QualType MethodTy = Context.getFunctionType(DefaultTypeForNoTrailingReturn,
- std::nullopt, EPI);
- MethodTyInfo = Context.getTrivialTypeSourceInfo(MethodTy);
- ExplicitParams = false;
- ExplicitResultType = false;
- EndLoc = Intro.Range.getEnd();
+ MethodTyInfo = getDummyLambdaType(S, Loc);
} else {
- assert(ParamInfo.isFunctionDeclarator() &&
- "lambda-declarator is a function");
DeclaratorChunk::FunctionTypeInfo &FTI = ParamInfo.getFunctionTypeInfo();
-
- // C++11 [expr.prim.lambda]p5:
- // This function call operator is declared const (9.3.1) if and only if
- // the lambda-expression's parameter-declaration-clause is not followed
- // by mutable. It is neither virtual nor declared volatile. [...]
- if (!FTI.hasMutableQualifier() && !IsLambdaStatic) {
- FTI.getOrCreateMethodQualifiers().SetTypeQual(DeclSpec::TQ_const,
- SourceLocation());
- }
-
- MethodTyInfo = GetTypeForDeclarator(ParamInfo, CurScope);
- assert(MethodTyInfo && "no type from lambda-declarator");
- EndLoc = ParamInfo.getSourceRange().getEnd();
-
ExplicitResultType = FTI.hasTrailingReturnType();
+ if (!FTI.hasMutableQualifier() && !IsLambdaStatic)
+ FTI.getOrCreateMethodQualifiers().SetTypeQual(DeclSpec::TQ_const, Loc);
- if (ExplicitResultType && getLangOpts().HLSL) {
+ if (ExplicitResultType && S.getLangOpts().HLSL) {
QualType RetTy = FTI.getTrailingReturnType().get();
if (!RetTy.isNull()) {
// HLSL does not support specifying an address space on a lambda return
// type.
LangAS AddressSpace = RetTy.getAddressSpace();
if (AddressSpace != LangAS::Default)
- Diag(FTI.getTrailingReturnTypeLoc(),
- diag::err_return_value_with_address_space);
+ S.Diag(FTI.getTrailingReturnTypeLoc(),
+ diag::err_return_value_with_address_space);
}
}
- if (FTIHasNonVoidParameters(FTI)) {
- Params.reserve(FTI.NumParams);
- for (unsigned i = 0, e = FTI.NumParams; i != e; ++i)
- Params.push_back(cast<ParmVarDecl>(FTI.Params[i].Param));
- }
+ MethodTyInfo = S.GetTypeForDeclarator(ParamInfo, CurScope);
+ assert(MethodTyInfo && "no type from lambda-declarator");
// Check for unexpanded parameter packs in the method type.
if (MethodTyInfo->getType()->containsUnexpandedParameterPack())
- DiagnoseUnexpandedParameterPack(Intro.Range.getBegin(), MethodTyInfo,
- UPPC_DeclarationType);
+ S.DiagnoseUnexpandedParameterPack(Intro.Range.getBegin(), MethodTyInfo,
+ S.UPPC_DeclarationType);
}
+ return MethodTyInfo;
+}
- CXXRecordDecl *Class = createLambdaClosureType(
- Intro.Range, MethodTyInfo, LambdaDependencyKind, Intro.Default);
- CXXMethodDecl *Method =
- startLambdaDefinition(Class, Intro.Range, MethodTyInfo, EndLoc, Params,
- ParamInfo.getDeclSpec().getConstexprSpecifier(),
- IsLambdaStatic ? SC_Static : SC_None,
- ParamInfo.getTrailingRequiresClause());
- if (ExplicitParams)
- CheckCXXDefaultArguments(Method);
+CXXMethodDecl *Sema::CreateLambdaCallOperator(SourceRange IntroducerRange,
+ CXXRecordDecl *Class) {
+
+ // C++20 [expr.prim.lambda.closure]p3:
+ // The closure type for a lambda-expression has a public inline function
+ // call operator (for a non-generic lambda) or function call operator
+ // template (for a generic lambda) whose parameters and return type are
+ // described by the lambda-expression's parameter-declaration-clause
+ // and trailing-return-type respectively.
+ DeclarationName MethodName =
+ Context.DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclarationNameLoc MethodNameLoc =
+ DeclarationNameLoc::makeCXXOperatorNameLoc(IntroducerRange.getBegin());
+ CXXMethodDecl *Method = CXXMethodDecl::Create(
+ Context, Class, SourceLocation(),
+ DeclarationNameInfo(MethodName, IntroducerRange.getBegin(),
+ MethodNameLoc),
+ QualType(), /*Tinfo=*/nullptr, SC_None,
+ getCurFPFeatures().isFPConstrained(),
+ /*isInline=*/true, ConstexprSpecKind::Unspecified, SourceLocation(),
+ /*TrailingRequiresClause=*/nullptr);
+ Method->setAccess(AS_public);
+ return Method;
+}
- // This represents the function body for the lambda function, check if we
- // have to apply optnone due to a pragma.
- AddRangeBasedOptnone(Method);
+void Sema::CompleteLambdaCallOperator(
+ CXXMethodDecl *Method, SourceLocation LambdaLoc,
+ SourceLocation CallOperatorLoc, Expr *TrailingRequiresClause,
+ TypeSourceInfo *MethodTyInfo, ConstexprSpecKind ConstexprKind,
+ StorageClass SC, ArrayRef<ParmVarDecl *> Params,
+ bool HasExplicitResultType) {
- // code_seg attribute on lambda apply to the method.
- if (Attr *A = getImplicitCodeSegOrSectionAttrForFunction(Method, /*IsDefinition=*/true))
- Method->addAttr(A);
+ LambdaScopeInfo *LSI = getCurrentLambdaScopeUnsafe(*this);
- // Attributes on the lambda apply to the method.
- ProcessDeclAttributes(CurScope, Method, ParamInfo);
+ if (TrailingRequiresClause)
+ Method->setTrailingRequiresClause(TrailingRequiresClause);
- // CUDA lambdas get implicit host and device attributes.
- if (getLangOpts().CUDA)
- CUDASetLambdaAttrs(Method);
+ TemplateParameterList *TemplateParams =
+ getGenericLambdaTemplateParameterList(LSI, *this);
+
+ DeclContext *DC = Method->getLexicalDeclContext();
+ Method->setLexicalDeclContext(LSI->Lambda);
+ if (TemplateParams) {
+ FunctionTemplateDecl *TemplateMethod = FunctionTemplateDecl::Create(
+ Context, LSI->Lambda, Method->getLocation(), Method->getDeclName(),
+ TemplateParams, Method);
+ TemplateMethod->setAccess(AS_public);
+ Method->setDescribedFunctionTemplate(TemplateMethod);
+ LSI->Lambda->addDecl(TemplateMethod);
+ TemplateMethod->setLexicalDeclContext(DC);
+ } else {
+ LSI->Lambda->addDecl(Method);
+ }
+ LSI->Lambda->setLambdaIsGeneric(TemplateParams);
+ LSI->Lambda->setLambdaTypeInfo(MethodTyInfo);
+
+ Method->setLexicalDeclContext(DC);
+ Method->setLocation(LambdaLoc);
+ Method->setInnerLocStart(CallOperatorLoc);
+ Method->setTypeSourceInfo(MethodTyInfo);
+ Method->setType(buildTypeForLambdaCallOperator(*this, LSI->Lambda,
+ TemplateParams, MethodTyInfo));
+ Method->setConstexprKind(ConstexprKind);
+ Method->setStorageClass(SC);
+ if (!Params.empty()) {
+ CheckParmsForFunctionDef(Params, /*CheckParameterNames=*/false);
+ Method->setParams(Params);
+ for (auto P : Method->parameters()) {
+ assert(P && "null in a parameter list");
+ P->setOwningFunction(Method);
+ }
+ }
- // OpenMP lambdas might get assumumption attributes.
- if (LangOpts.OpenMP)
- ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Method);
+ buildLambdaScopeReturnType(*this, LSI, Method, HasExplicitResultType);
+}
- // Number the lambda for linkage purposes if necessary.
- handleLambdaNumbering(Class, Method);
+void Sema::ActOnLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro,
+ Scope *CurrentScope) {
- // Introduce the function call operator as the current declaration context.
- PushDeclContext(CurScope, Method);
+ LambdaScopeInfo *LSI = getCurLambda();
+ assert(LSI && "LambdaScopeInfo should be on stack!");
- // Build the lambda scope.
- buildLambdaScope(LSI, Method, Intro.Range, Intro.Default, Intro.DefaultLoc,
- ExplicitParams, ExplicitResultType, !Method->isConst());
+ if (Intro.Default == LCD_ByCopy)
+ LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByval;
+ else if (Intro.Default == LCD_ByRef)
+ LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByref;
+ LSI->CaptureDefaultLoc = Intro.DefaultLoc;
+ LSI->IntroducerRange = Intro.Range;
+ LSI->AfterParameterList = false;
- // C++11 [expr.prim.lambda]p9:
- // A lambda-expression whose smallest enclosing scope is a block scope is a
- // local lambda expression; any other lambda expression shall not have a
- // capture-default or simple-capture in its lambda-introducer.
- //
- // For simple-captures, this is covered by the check below that any named
- // entity is a variable that can be captured.
- //
- // For DR1632, we also allow a capture-default in any context where we can
- // odr-use 'this' (in particular, in a default initializer for a non-static
- // data member).
- if (Intro.Default != LCD_None && !Class->getParent()->isFunctionOrMethod() &&
- (getCurrentThisType().isNull() ||
- CheckCXXThisCapture(SourceLocation(), /*Explicit*/true,
- /*BuildAndDiagnose*/false)))
- Diag(Intro.DefaultLoc, diag::err_capture_default_non_local);
+ assert(LSI->NumExplicitTemplateParams == 0);
+
+ // Determine if we're within a context where we know that the lambda will
+ // be dependent, because there are template parameters in scope.
+ CXXRecordDecl::LambdaDependencyKind LambdaDependencyKind =
+ CXXRecordDecl::LDK_Unknown;
+ if (LSI->NumExplicitTemplateParams > 0) {
+ Scope *TemplateParamScope = CurScope->getTemplateParamParent();
+ assert(TemplateParamScope &&
+ "Lambda with explicit template param list should establish a "
+ "template param scope");
+ assert(TemplateParamScope->getParent());
+ if (TemplateParamScope->getParent()->getTemplateParamParent() != nullptr)
+ LambdaDependencyKind = CXXRecordDecl::LDK_AlwaysDependent;
+ } else if (CurScope->getTemplateParamParent() != nullptr) {
+ LambdaDependencyKind = CXXRecordDecl::LDK_AlwaysDependent;
+ }
+
+ CXXRecordDecl *Class = createLambdaClosureType(
+ Intro.Range, /*Info=*/nullptr, LambdaDependencyKind, Intro.Default);
+ LSI->Lambda = Class;
+
+ CXXMethodDecl *Method = CreateLambdaCallOperator(Intro.Range, Class);
+ LSI->CallOperator = Method;
+ Method->setLexicalDeclContext(CurContext);
+
+ PushDeclContext(CurScope, Method);
+
+ bool ContainsUnexpandedParameterPack = false;
// Distinct capture names, for diagnostics.
- llvm::SmallSet<IdentifierInfo*, 8> CaptureNames;
+ llvm::DenseMap<IdentifierInfo *, ValueDecl *> CaptureNames;
// Handle explicit captures.
- SourceLocation PrevCaptureLoc
- = Intro.Default == LCD_None? Intro.Range.getBegin() : Intro.DefaultLoc;
+ SourceLocation PrevCaptureLoc =
+ Intro.Default == LCD_None ? Intro.Range.getBegin() : Intro.DefaultLoc;
for (auto C = Intro.Captures.begin(), E = Intro.Captures.end(); C != E;
PrevCaptureLoc = C->Loc, ++C) {
if (C->Kind == LCK_This || C->Kind == LCK_StarThis) {
if (C->Kind == LCK_StarThis)
Diag(C->Loc, !getLangOpts().CPlusPlus17
- ? diag::ext_star_this_lambda_capture_cxx17
- : diag::warn_cxx14_compat_star_this_lambda_capture);
+ ? diag::ext_star_this_lambda_capture_cxx17
+ : diag::warn_cxx14_compat_star_this_lambda_capture);
// C++11 [expr.prim.lambda]p8:
// An identifier or this shall not appear more than once in a
@@ -1087,7 +1032,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
continue;
}
- // C++2a [expr.prim.lambda]p8:
+ // C++20 [expr.prim.lambda]p8:
// If a lambda-capture includes a capture-default that is =,
// each simple-capture of that lambda-capture shall be of the form
// "&identifier", "this", or "* this". [ Note: The form [&,this] is
@@ -1153,13 +1098,11 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
}
Var = createLambdaInitCaptureVarDecl(C->Loc, C->InitCaptureType.get(),
C->EllipsisLoc, C->Id, InitStyle,
- C->Init.get());
- // C++1y [expr.prim.lambda]p11:
- // An init-capture behaves as if it declares and explicitly
- // captures a variable [...] whose declarative region is the
- // lambda-expression's compound-statement
- if (Var)
- PushOnScopeChains(Var, CurScope, false);
+ C->Init.get(), Method);
+ assert(Var && "createLambdaInitCaptureVarDecl returned a null VarDecl?");
+ if (auto *V = dyn_cast<VarDecl>(Var))
+ CheckShadow(CurrentScope, V);
+ PushOnScopeChains(Var, CurrentScope, false);
} else {
assert(C->InitKind == LambdaCaptureInitKind::NoInit &&
"init capture has valid but null init?");
@@ -1205,31 +1148,33 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
continue;
}
+ // C++11 [expr.prim.lambda]p10:
+ // [...] each such lookup shall find a variable with automatic storage
+ // duration declared in the reaching scope of the local lambda expression.
+ // Note that the 'reaching scope' check happens in tryCaptureVariable().
+ if (!Var) {
+ Diag(C->Loc, diag::err_capture_does_not_name_variable) << C->Id;
+ continue;
+ }
+
// C++11 [expr.prim.lambda]p8:
// An identifier or this shall not appear more than once in a
// lambda-capture.
- if (!CaptureNames.insert(C->Id).second) {
- if (Var && LSI->isCaptured(Var)) {
+ if (auto [It, Inserted] = CaptureNames.insert(std::pair{C->Id, Var});
+ !Inserted) {
+ if (C->InitKind == LambdaCaptureInitKind::NoInit &&
+ !Var->isInitCapture()) {
Diag(C->Loc, diag::err_capture_more_than_once)
- << C->Id << SourceRange(LSI->getCapture(Var).getLocation())
+ << C->Id << It->second->getBeginLoc()
<< FixItHint::CreateRemoval(
SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc));
} else
// Previous capture captured something different (one or both was
- // an init-cpature): no fixit.
+ // an init-capture): no fixit.
Diag(C->Loc, diag::err_capture_more_than_once) << C->Id;
continue;
}
- // C++11 [expr.prim.lambda]p10:
- // [...] each such lookup shall find a variable with automatic storage
- // duration declared in the reaching scope of the local lambda expression.
- // Note that the 'reaching scope' check happens in tryCaptureVariable().
- if (!Var) {
- Diag(C->Loc, diag::err_capture_does_not_name_variable) << C->Id;
- continue;
- }
-
// Ignore invalid decls; they'll just confuse the code later.
if (Var->isInvalidDecl())
continue;
@@ -1261,20 +1206,214 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
if (C->Init.isUsable()) {
addInitCapture(LSI, cast<VarDecl>(Var), C->Kind == LCK_ByRef);
+ PushOnScopeChains(Var, CurScope, false);
} else {
- TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef :
- TryCapture_ExplicitByVal;
+ TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef
+ : TryCapture_ExplicitByVal;
tryCaptureVariable(Var, C->Loc, Kind, EllipsisLoc);
}
if (!LSI->Captures.empty())
LSI->ExplicitCaptureRanges[LSI->Captures.size() - 1] = C->ExplicitRange;
}
finishLambdaExplicitCaptures(LSI);
-
LSI->ContainsUnexpandedParameterPack |= ContainsUnexpandedParameterPack;
+ PopDeclContext();
+}
+
+void Sema::ActOnLambdaClosureQualifiers(LambdaIntroducer &Intro,
+ SourceLocation MutableLoc) {
+
+ LambdaScopeInfo *LSI = getCurrentLambdaScopeUnsafe(*this);
+ LSI->Mutable = MutableLoc.isValid();
+ ContextRAII Context(*this, LSI->CallOperator, /*NewThisContext*/ false);
+
+ // C++11 [expr.prim.lambda]p9:
+ // A lambda-expression whose smallest enclosing scope is a block scope is a
+ // local lambda expression; any other lambda expression shall not have a
+ // capture-default or simple-capture in its lambda-introducer.
+ //
+ // For simple-captures, this is covered by the check below that any named
+ // entity is a variable that can be captured.
+ //
+ // For DR1632, we also allow a capture-default in any context where we can
+ // odr-use 'this' (in particular, in a default initializer for a non-static
+ // data member).
+ if (Intro.Default != LCD_None &&
+ !LSI->Lambda->getParent()->isFunctionOrMethod() &&
+ (getCurrentThisType().isNull() ||
+ CheckCXXThisCapture(SourceLocation(), /*Explicit=*/true,
+ /*BuildAndDiagnose=*/false)))
+ Diag(Intro.DefaultLoc, diag::err_capture_default_non_local);
+}
- // Add lambda parameters into scope.
- addLambdaParameters(Intro.Captures, Method, CurScope);
+void Sema::ActOnLambdaClosureParameters(
+ Scope *LambdaScope, MutableArrayRef<DeclaratorChunk::ParamInfo> Params) {
+ LambdaScopeInfo *LSI = getCurrentLambdaScopeUnsafe(*this);
+ PushDeclContext(LambdaScope, LSI->CallOperator);
+
+ for (const DeclaratorChunk::ParamInfo &P : Params) {
+ auto *Param = cast<ParmVarDecl>(P.Param);
+ Param->setOwningFunction(LSI->CallOperator);
+ if (Param->getIdentifier())
+ PushOnScopeChains(Param, LambdaScope, false);
+ }
+
+ LSI->AfterParameterList = true;
+}
+
+void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
+ Declarator &ParamInfo,
+ const DeclSpec &DS) {
+
+ LambdaScopeInfo *LSI = getCurrentLambdaScopeUnsafe(*this);
+ LSI->CallOperator->setConstexprKind(DS.getConstexprSpecifier());
+
+ SmallVector<ParmVarDecl *, 8> Params;
+ bool ExplicitResultType;
+
+ SourceLocation TypeLoc, CallOperatorLoc;
+ if (ParamInfo.getNumTypeObjects() == 0) {
+ CallOperatorLoc = TypeLoc = Intro.Range.getEnd();
+ } else {
+ unsigned Index;
+ ParamInfo.isFunctionDeclarator(Index);
+ const auto &Object = ParamInfo.getTypeObject(Index);
+ TypeLoc =
+ Object.Loc.isValid() ? Object.Loc : ParamInfo.getSourceRange().getEnd();
+ CallOperatorLoc = ParamInfo.getSourceRange().getEnd();
+ }
+
+ CXXRecordDecl *Class = LSI->Lambda;
+ CXXMethodDecl *Method = LSI->CallOperator;
+
+ TypeSourceInfo *MethodTyInfo = getLambdaType(
+ *this, Intro, ParamInfo, getCurScope(), TypeLoc, ExplicitResultType);
+
+ LSI->ExplicitParams = ParamInfo.getNumTypeObjects() != 0;
+
+ if (ParamInfo.isFunctionDeclarator() != 0 &&
+ !FTIHasSingleVoidParameter(ParamInfo.getFunctionTypeInfo())) {
+ const auto &FTI = ParamInfo.getFunctionTypeInfo();
+ Params.reserve(Params.size());
+ for (unsigned I = 0; I < FTI.NumParams; ++I) {
+ auto *Param = cast<ParmVarDecl>(FTI.Params[I].Param);
+ Param->setScopeInfo(0, Params.size());
+ Params.push_back(Param);
+ }
+ }
+
+ bool IsLambdaStatic =
+ ParamInfo.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static;
+
+ CompleteLambdaCallOperator(
+ Method, Intro.Range.getBegin(), CallOperatorLoc,
+ ParamInfo.getTrailingRequiresClause(), MethodTyInfo,
+ ParamInfo.getDeclSpec().getConstexprSpecifier(),
+ IsLambdaStatic ? SC_Static : SC_None, Params, ExplicitResultType);
+
+ CheckCXXDefaultArguments(Method);
+
+ // This represents the function body for the lambda function, check if we
+ // have to apply optnone due to a pragma.
+ AddRangeBasedOptnone(Method);
+
+ // code_seg attribute on lambda apply to the method.
+ if (Attr *A = getImplicitCodeSegOrSectionAttrForFunction(
+ Method, /*IsDefinition=*/true))
+ Method->addAttr(A);
+
+ // Attributes on the lambda apply to the method.
+ ProcessDeclAttributes(CurScope, Method, ParamInfo);
+
+ // CUDA lambdas get implicit host and device attributes.
+ if (getLangOpts().CUDA)
+ CUDASetLambdaAttrs(Method);
+
+ // OpenMP lambdas might get assumumption attributes.
+ if (LangOpts.OpenMP)
+ ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Method);
+
+ handleLambdaNumbering(Class, Method);
+
+ for (auto &&C : LSI->Captures) {
+ if (!C.isVariableCapture())
+ continue;
+ ValueDecl *Var = C.getVariable();
+ if (Var && Var->isInitCapture()) {
+ PushOnScopeChains(Var, CurScope, false);
+ }
+ }
+
+ auto CheckRedefinition = [&](ParmVarDecl *Param) {
+ for (const auto &Capture : Intro.Captures) {
+ if (Capture.Id == Param->getIdentifier()) {
+ Diag(Param->getLocation(), diag::err_parameter_shadow_capture);
+ Diag(Capture.Loc, diag::note_var_explicitly_captured_here)
+ << Capture.Id << true;
+ return false;
+ }
+ }
+ return true;
+ };
+
+ for (ParmVarDecl *P : Params) {
+ if (!P->getIdentifier())
+ continue;
+ if (CheckRedefinition(P))
+ CheckShadow(CurScope, P);
+ PushOnScopeChains(P, CurScope);
+ }
+
+ // C++23 [expr.prim.lambda.capture]p5:
+ // If an identifier in a capture appears as the declarator-id of a parameter
+ // of the lambda-declarator's parameter-declaration-clause or as the name of a
+ // template parameter of the lambda-expression's template-parameter-list, the
+ // program is ill-formed.
+ TemplateParameterList *TemplateParams =
+ getGenericLambdaTemplateParameterList(LSI, *this);
+ if (TemplateParams) {
+ for (const auto *TP : TemplateParams->asArray()) {
+ if (!TP->getIdentifier())
+ continue;
+ for (const auto &Capture : Intro.Captures) {
+ if (Capture.Id == TP->getIdentifier()) {
+ Diag(Capture.Loc, diag::err_template_param_shadow) << Capture.Id;
+ Diag(TP->getLocation(), diag::note_template_param_here);
+ }
+ }
+ }
+ }
+
+ // C++20: dcl.decl.general p4:
+ // The optional requires-clause ([temp.pre]) in an init-declarator or
+ // member-declarator shall be present only if the declarator declares a
+ // templated function ([dcl.fct]).
+ if (Expr *TRC = Method->getTrailingRequiresClause()) {
+ // [temp.pre]/8:
+ // An entity is templated if it is
+ // - a template,
+ // - an entity defined ([basic.def]) or created ([class.temporary]) in a
+ // templated entity,
+ // - a member of a templated entity,
+ // - an enumerator for an enumeration that is a templated entity, or
+ // - the closure type of a lambda-expression ([expr.prim.lambda.closure])
+ // appearing in the declaration of a templated entity. [Note 6: A local
+ // class, a local or block variable, or a friend function defined in a
+ // templated entity is a templated entity. — end note]
+ //
+ // A templated function is a function template or a function that is
+ // templated. A templated class is a class template or a class that is
+ // templated. A templated variable is a variable template or a variable
+ // that is templated.
+
+ // Note: we only have to check if this is defined in a template entity, OR
+ // if we are a template, since the rest don't apply. The requires clause
+ // applies to the call operator, which we already know is a member function,
+ // AND defined.
+ if (!Method->getDescribedFunctionTemplate() && !Method->isTemplated()) {
+ Diag(TRC->getBeginLoc(), diag::err_constrained_non_templated_function);
+ }
+ }
// Enter a new evaluation context to insulate the lambda from any
// cleanups from the enclosing full-expression.
@@ -1282,6 +1421,10 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
LSI->CallOperator->isConsteval()
? ExpressionEvaluationContext::ImmediateFunctionContext
: ExpressionEvaluationContext::PotentiallyEvaluated);
+ ExprEvalContexts.back().InImmediateFunctionContext =
+ LSI->CallOperator->isConsteval();
+ ExprEvalContexts.back().InImmediateEscalatingFunctionContext =
+ getLangOpts().CPlusPlus20 && LSI->CallOperator->isImmediateEscalating();
}
void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
@@ -1490,6 +1633,11 @@ static void addFunctionPointerConversion(Sema &S, SourceRange IntroducerRange,
Conversion->setAccess(AS_public);
Conversion->setImplicit(true);
+ // A non-generic lambda may still be a templated entity. We need to preserve
+ // constraints when converting the lambda to a function pointer. See GH63181.
+ if (Expr *Requires = CallOperator->getTrailingRequiresClause())
+ Conversion->setTrailingRequiresClause(Requires);
+
if (Class->isGenericLambda()) {
// Create a template version of the conversion operator, using the template
// parameter list of the function call operator.
@@ -1529,7 +1677,7 @@ static void addFunctionPointerConversion(Sema &S, SourceRange IntroducerRange,
S.Context, Class, Loc, DeclarationNameInfo(InvokerName, Loc),
InvokerFunctionTy, CallOperator->getTypeSourceInfo(), SC_Static,
S.getCurFPFeatures().isFPConstrained(),
- /*isInline=*/true, ConstexprSpecKind::Unspecified,
+ /*isInline=*/true, CallOperator->getConstexprKind(),
CallOperator->getBody()->getEndLoc());
for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I)
InvokerParams[I]->setOwningFunction(Invoke);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
index b2e943699c5f..c4f4edb6666c 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
@@ -199,7 +199,7 @@ namespace {
const_iterator end() const { return list.end(); }
llvm::iterator_range<const_iterator>
- getNamespacesFor(DeclContext *DC) const {
+ getNamespacesFor(const DeclContext *DC) const {
return llvm::make_range(std::equal_range(begin(), end(),
DC->getPrimaryContext(),
UnqualUsingEntry::Comparator()));
@@ -351,12 +351,12 @@ void LookupResult::deletePaths(CXXBasePaths *Paths) {
/// Get a representative context for a declaration such that two declarations
/// will have the same context if they were found within the same scope.
-static DeclContext *getContextForScopeMatching(Decl *D) {
+static const DeclContext *getContextForScopeMatching(const Decl *D) {
// For function-local declarations, use that function as the context. This
// doesn't account for scopes within the function; the caller must deal with
// those.
- DeclContext *DC = D->getLexicalDeclContext();
- if (DC->isFunctionOrMethod())
+ if (const DeclContext *DC = D->getLexicalDeclContext();
+ DC->isFunctionOrMethod())
return DC;
// Otherwise, look at the semantic context of the declaration. The
@@ -367,15 +367,16 @@ static DeclContext *getContextForScopeMatching(Decl *D) {
/// Determine whether \p D is a better lookup result than \p Existing,
/// given that they declare the same entity.
static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
- NamedDecl *D, NamedDecl *Existing) {
+ const NamedDecl *D,
+ const NamedDecl *Existing) {
// When looking up redeclarations of a using declaration, prefer a using
// shadow declaration over any other declaration of the same entity.
if (Kind == Sema::LookupUsingDeclName && isa<UsingShadowDecl>(D) &&
!isa<UsingShadowDecl>(Existing))
return true;
- auto *DUnderlying = D->getUnderlyingDecl();
- auto *EUnderlying = Existing->getUnderlyingDecl();
+ const auto *DUnderlying = D->getUnderlyingDecl();
+ const auto *EUnderlying = Existing->getUnderlyingDecl();
// If they have different underlying declarations, prefer a typedef over the
// original type (this happens when two type declarations denote the same
@@ -397,8 +398,8 @@ static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
// FIXME: In the presence of ambiguous default arguments, we should keep both,
// so we can diagnose the ambiguity if the default argument is needed.
// See C++ [over.match.best]p3.
- if (auto *DFD = dyn_cast<FunctionDecl>(DUnderlying)) {
- auto *EFD = cast<FunctionDecl>(EUnderlying);
+ if (const auto *DFD = dyn_cast<FunctionDecl>(DUnderlying)) {
+ const auto *EFD = cast<FunctionDecl>(EUnderlying);
unsigned DMin = DFD->getMinRequiredArguments();
unsigned EMin = EFD->getMinRequiredArguments();
// If D has more default arguments, it is preferred.
@@ -409,8 +410,8 @@ static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
}
// Pick the template with more default template arguments.
- if (auto *DTD = dyn_cast<TemplateDecl>(DUnderlying)) {
- auto *ETD = cast<TemplateDecl>(EUnderlying);
+ if (const auto *DTD = dyn_cast<TemplateDecl>(DUnderlying)) {
+ const auto *ETD = cast<TemplateDecl>(EUnderlying);
unsigned DMin = DTD->getTemplateParameters()->getMinRequiredArguments();
unsigned EMin = ETD->getTemplateParameters()->getMinRequiredArguments();
// If D has more default arguments, it is preferred. Note that default
@@ -433,8 +434,8 @@ static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
// VarDecl can have incomplete array types, prefer the one with more complete
// array type.
- if (VarDecl *DVD = dyn_cast<VarDecl>(DUnderlying)) {
- VarDecl *EVD = cast<VarDecl>(EUnderlying);
+ if (const auto *DVD = dyn_cast<VarDecl>(DUnderlying)) {
+ const auto *EVD = cast<VarDecl>(EUnderlying);
if (EVD->getType()->isIncompleteType() &&
!DVD->getType()->isIncompleteType()) {
// Prefer the decl with a more complete type if visible.
@@ -451,7 +452,7 @@ static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
}
// Pick the newer declaration; it might have a more precise type.
- for (Decl *Prev = DUnderlying->getPreviousDecl(); Prev;
+ for (const Decl *Prev = DUnderlying->getPreviousDecl(); Prev;
Prev = Prev->getPreviousDecl())
if (Prev == EUnderlying)
return true;
@@ -459,7 +460,7 @@ static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
}
/// Determine whether \p D can hide a tag declaration.
-static bool canHideTag(NamedDecl *D) {
+static bool canHideTag(const NamedDecl *D) {
// C++ [basic.scope.declarative]p4:
// Given a set of declarations in a single declarative region [...]
// exactly one declaration shall declare a class name or enumeration name
@@ -492,7 +493,7 @@ void LookupResult::resolveKind() {
// If there's a single decl, we need to examine it to decide what
// kind of lookup this is.
if (N == 1) {
- NamedDecl *D = (*Decls.begin())->getUnderlyingDecl();
+ const NamedDecl *D = (*Decls.begin())->getUnderlyingDecl();
if (isa<FunctionTemplateDecl>(D))
ResultKind = FoundOverloaded;
else if (isa<UnresolvedUsingValueDecl>(D))
@@ -503,37 +504,58 @@ void LookupResult::resolveKind() {
// Don't do any extra resolution if we've already resolved as ambiguous.
if (ResultKind == Ambiguous) return;
- llvm::SmallDenseMap<NamedDecl*, unsigned, 16> Unique;
+ llvm::SmallDenseMap<const NamedDecl *, unsigned, 16> Unique;
llvm::SmallDenseMap<QualType, unsigned, 16> UniqueTypes;
bool Ambiguous = false;
bool HasTag = false, HasFunction = false;
bool HasFunctionTemplate = false, HasUnresolved = false;
- NamedDecl *HasNonFunction = nullptr;
-
- llvm::SmallVector<NamedDecl*, 4> EquivalentNonFunctions;
+ const NamedDecl *HasNonFunction = nullptr;
- unsigned UniqueTagIndex = 0;
+ llvm::SmallVector<const NamedDecl *, 4> EquivalentNonFunctions;
+ llvm::BitVector RemovedDecls(N);
- unsigned I = 0;
- while (I < N) {
- NamedDecl *D = Decls[I]->getUnderlyingDecl();
+ for (unsigned I = 0; I < N; I++) {
+ const NamedDecl *D = Decls[I]->getUnderlyingDecl();
D = cast<NamedDecl>(D->getCanonicalDecl());
// Ignore an invalid declaration unless it's the only one left.
// Also ignore HLSLBufferDecl which not have name conflict with other Decls.
- if ((D->isInvalidDecl() || isa<HLSLBufferDecl>(D)) && !(I == 0 && N == 1)) {
- Decls[I] = Decls[--N];
+ if ((D->isInvalidDecl() || isa<HLSLBufferDecl>(D)) &&
+ N - RemovedDecls.count() > 1) {
+ RemovedDecls.set(I);
continue;
}
+ // C++ [basic.scope.hiding]p2:
+ // A class name or enumeration name can be hidden by the name of
+ // an object, function, or enumerator declared in the same
+ // scope. If a class or enumeration name and an object, function,
+ // or enumerator are declared in the same scope (in any order)
+ // with the same name, the class or enumeration name is hidden
+ // wherever the object, function, or enumerator name is visible.
+ if (HideTags && isa<TagDecl>(D)) {
+ bool Hidden = false;
+ for (auto *OtherDecl : Decls) {
+ if (canHideTag(OtherDecl) &&
+ getContextForScopeMatching(OtherDecl)->Equals(
+ getContextForScopeMatching(Decls[I]))) {
+ RemovedDecls.set(I);
+ Hidden = true;
+ break;
+ }
+ }
+ if (Hidden)
+ continue;
+ }
+
std::optional<unsigned> ExistingI;
// Redeclarations of types via typedef can occur both within a scope
// and, through using declarations and directives, across scopes. There is
// no ambiguity if they all refer to the same type, so unique based on the
// canonical type.
- if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) {
+ if (const auto *TD = dyn_cast<TypeDecl>(D)) {
QualType T = getSema().Context.getTypeDeclType(TD);
auto UniqueResult = UniqueTypes.insert(
std::make_pair(getSema().Context.getCanonicalType(T), I));
@@ -559,7 +581,7 @@ void LookupResult::resolveKind() {
if (isPreferredLookupResult(getSema(), getLookupKind(), Decls[I],
Decls[*ExistingI]))
Decls[*ExistingI] = Decls[I];
- Decls[I] = Decls[--N];
+ RemovedDecls.set(I);
continue;
}
@@ -570,7 +592,6 @@ void LookupResult::resolveKind() {
} else if (isa<TagDecl>(D)) {
if (HasTag)
Ambiguous = true;
- UniqueTagIndex = I;
HasTag = true;
} else if (isa<FunctionTemplateDecl>(D)) {
HasFunction = true;
@@ -586,7 +607,7 @@ void LookupResult::resolveKind() {
if (getSema().isEquivalentInternalLinkageDeclaration(HasNonFunction,
D)) {
EquivalentNonFunctions.push_back(D);
- Decls[I] = Decls[--N];
+ RemovedDecls.set(I);
continue;
}
@@ -594,28 +615,6 @@ void LookupResult::resolveKind() {
}
HasNonFunction = D;
}
- I++;
- }
-
- // C++ [basic.scope.hiding]p2:
- // A class name or enumeration name can be hidden by the name of
- // an object, function, or enumerator declared in the same
- // scope. If a class or enumeration name and an object, function,
- // or enumerator are declared in the same scope (in any order)
- // with the same name, the class or enumeration name is hidden
- // wherever the object, function, or enumerator name is visible.
- // But it's still an error if there are distinct tag types found,
- // even if they're not visible. (ref?)
- if (N > 1 && HideTags && HasTag && !Ambiguous &&
- (HasFunction || HasNonFunction || HasUnresolved)) {
- NamedDecl *OtherDecl = Decls[UniqueTagIndex ? 0 : N - 1];
- if (isa<TagDecl>(Decls[UniqueTagIndex]->getUnderlyingDecl()) &&
- getContextForScopeMatching(Decls[UniqueTagIndex])->Equals(
- getContextForScopeMatching(OtherDecl)) &&
- canHideTag(OtherDecl))
- Decls[UniqueTagIndex] = Decls[--N];
- else
- Ambiguous = true;
}
// FIXME: This diagnostic should really be delayed until we're done with
@@ -624,9 +623,15 @@ void LookupResult::resolveKind() {
getSema().diagnoseEquivalentInternalLinkageDeclarations(
getNameLoc(), HasNonFunction, EquivalentNonFunctions);
+ // Remove decls by replacing them with decls from the end (which
+ // means that we need to iterate from the end) and then truncating
+ // to the new size.
+ for (int I = RemovedDecls.find_last(); I >= 0; I = RemovedDecls.find_prev(I))
+ Decls[I] = Decls[--N];
Decls.truncate(N);
- if (HasNonFunction && (HasFunction || HasUnresolved))
+ if ((HasNonFunction && (HasFunction || HasUnresolved)) ||
+ (HideTags && HasTag && (HasFunction || HasNonFunction || HasUnresolved)))
Ambiguous = true;
if (Ambiguous)
@@ -932,10 +937,12 @@ bool Sema::LookupBuiltin(LookupResult &R) {
}
}
- if (DeclareRISCVVBuiltins) {
+ if (DeclareRISCVVBuiltins || DeclareRISCVSiFiveVectorBuiltins) {
if (!RVIntrinsicManager)
RVIntrinsicManager = CreateRISCVIntrinsicManager(*this);
+ RVIntrinsicManager->InitIntrinsicList();
+
if (RVIntrinsicManager->CreateIntrinsicIfFound(R, II, PP))
return true;
}
@@ -1197,9 +1204,9 @@ static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
}
// Performs C++ unqualified lookup into the given file context.
-static bool
-CppNamespaceLookup(Sema &S, LookupResult &R, ASTContext &Context,
- DeclContext *NS, UnqualUsingDirectiveSet &UDirs) {
+static bool CppNamespaceLookup(Sema &S, LookupResult &R, ASTContext &Context,
+ const DeclContext *NS,
+ UnqualUsingDirectiveSet &UDirs) {
assert(NS && NS->isFileContext() && "CppNamespaceLookup() requires namespace!");
@@ -1333,8 +1340,7 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
if (!SearchNamespaceScope) {
R.resolveKind();
if (S->isClassScope())
- if (CXXRecordDecl *Record =
- dyn_cast_or_null<CXXRecordDecl>(S->getEntity()))
+ if (auto *Record = dyn_cast_if_present<CXXRecordDecl>(S->getEntity()))
R.setNamingClass(Record);
return true;
}
@@ -1579,7 +1585,8 @@ bool Sema::isUsableModule(const Module *M) {
// [module.global.frag]p1:
// The global module fragment can be used to provide declarations that are
// attached to the global module and usable within the module unit.
- if (M == GlobalModuleFragment ||
+ if (M == TheGlobalModuleFragment || M == TheImplicitGlobalModuleFragment ||
+ M == TheExportedImplicitGlobalModuleFragment ||
// If M is the module we're parsing, it should be usable. This covers the
// private module fragment. The private module fragment is usable only if
// it is within the current module unit. And it must be the current
@@ -1602,14 +1609,14 @@ bool Sema::isUsableModule(const Module *M) {
return false;
}
-bool Sema::hasVisibleMergedDefinition(NamedDecl *Def) {
+bool Sema::hasVisibleMergedDefinition(const NamedDecl *Def) {
for (const Module *Merged : Context.getModulesWithMergedDefinition(Def))
if (isModuleVisible(Merged))
return true;
return false;
}
-bool Sema::hasMergedDefinitionInCurrentModule(NamedDecl *Def) {
+bool Sema::hasMergedDefinitionInCurrentModule(const NamedDecl *Def) {
for (const Module *Merged : Context.getModulesWithMergedDefinition(Def))
if (isUsableModule(Merged))
return true;
@@ -1858,19 +1865,6 @@ bool LookupResult::isAcceptableSlow(Sema &SemaRef, NamedDecl *D,
}
bool Sema::isModuleVisible(const Module *M, bool ModulePrivate) {
- // [module.global.frag]p2:
- // A global-module-fragment specifies the contents of the global module
- // fragment for a module unit. The global module fragment can be used to
- // provide declarations that are attached to the global module and usable
- // within the module unit.
- //
- // Global module fragment is special. Global Module fragment is only usable
- // within the module unit it got defined [module.global.frag]p2. So here we
- // check if the Module is the global module fragment in current translation
- // unit.
- if (M->isGlobalModule() && M != this->GlobalModuleFragment)
- return false;
-
// The module might be ordinarily visible. For a module-private query, that
// means it is part of the current module.
if (ModulePrivate && isUsableModule(M))
@@ -1893,6 +1887,12 @@ bool Sema::isModuleVisible(const Module *M, bool ModulePrivate) {
if (LookupModules.count(M))
return true;
+ // The global module fragments are visible to their corresponding module units.
+ // So the global module fragment should be visible if its corresponding
+ // module unit is visible.
+ if (M->isGlobalModule() && LookupModules.count(M->getTopLevelModule()))
+ return true;
+
// For a module-private query, that's everywhere we get to look.
if (ModulePrivate)
return false;
@@ -1911,14 +1911,11 @@ bool LookupResult::isReachableSlow(Sema &SemaRef, NamedDecl *D) {
Module *DeclModule = SemaRef.getOwningModule(D);
assert(DeclModule && "hidden decl has no owning module");
- // Entities in module map modules are reachable only if they're visible.
- if (DeclModule->isModuleMapModule())
+ // Entities in header-like modules are reachable only if they're visible.
+ if (DeclModule->isHeaderLikeModule())
return false;
- // If D comes from a module and SemaRef doesn't own a module, it implies D
- // comes from another TU. In case SemaRef owns a module, we could judge if D
- // comes from another TU by comparing the module unit.
- if (SemaRef.isModuleUnitOfCurrentTU(DeclModule))
+ if (!D->isInAnotherModuleUnit())
return true;
// [module.reach]/p3:
@@ -2433,8 +2430,9 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool oldVal;
DeclContext *Context;
// Set flag in DeclContext informing debugger that we're looking for qualified name
- QualifiedLookupInScope(DeclContext *ctx) : Context(ctx) {
- oldVal = ctx->setUseQualifiedLookup();
+ QualifiedLookupInScope(DeclContext *ctx)
+ : oldVal(ctx->shouldUseQualifiedLookup()), Context(ctx) {
+ ctx->setUseQualifiedLookup();
}
~QualifiedLookupInScope() {
Context->setUseQualifiedLookup(oldVal);
@@ -3767,8 +3765,8 @@ Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
// operator template, but not both.
if (FoundRaw && FoundTemplate) {
Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call) << R.getLookupName();
- for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
- NoteOverloadCandidate(*I, (*I)->getUnderlyingDecl()->getAsFunction());
+ for (const NamedDecl *D : R)
+ NoteOverloadCandidate(D, D->getUnderlyingDecl()->getAsFunction());
return LOLR_Error;
}
@@ -3883,10 +3881,14 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
if (isVisible(D)) {
Visible = true;
break;
- } else if (getLangOpts().CPlusPlusModules &&
- D->isInExportDeclContext()) {
- // C++20 [basic.lookup.argdep] p4.3 .. are exported ...
+ }
+
+ if (!getLangOpts().CPlusPlusModules)
+ continue;
+
+ if (D->isInExportDeclContext()) {
Module *FM = D->getOwningModule();
+ // C++20 [basic.lookup.argdep] p4.3 .. are exported ...
// exports are only valid in module purview and outside of any
// PMF (although a PMF should not even be present in a module
// with an import).
@@ -3894,14 +3896,12 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
"bad export context");
// .. are attached to a named module M, do not appear in the
// translation unit containing the point of the lookup..
- if (!isModuleUnitOfCurrentTU(FM) &&
+ if (D->isInAnotherModuleUnit() &&
llvm::any_of(AssociatedClasses, [&](auto *E) {
// ... and have the same innermost enclosing non-inline
// namespace scope as a declaration of an associated entity
// attached to M
- if (!E->hasOwningModule() ||
- E->getOwningModule()->getTopLevelModuleName() !=
- FM->getTopLevelModuleName())
+ if (E->getOwningModule() != FM)
return false;
// TODO: maybe this could be cached when generating the
// associated namespaces / entities.
@@ -4155,22 +4155,21 @@ private:
// Enumerate all of the results in this context.
for (DeclContextLookupResult R :
Load ? Ctx->lookups()
- : Ctx->noload_lookups(/*PreserveInternalState=*/false)) {
- for (auto *D : R) {
- if (auto *ND = Result.getAcceptableDecl(D)) {
- // Rather than visit immediately, we put ND into a vector and visit
- // all decls, in order, outside of this loop. The reason is that
- // Consumer.FoundDecl() may invalidate the iterators used in the two
- // loops above.
- DeclsToVisit.push_back(ND);
- }
+ : Ctx->noload_lookups(/*PreserveInternalState=*/false))
+ for (auto *D : R)
+ // Rather than visit immediately, we put D into a vector and visit
+ // all decls, in order, outside of this loop. The reason is that
+ // Consumer.FoundDecl() and LookupResult::getAcceptableDecl(D)
+ // may invalidate the iterators used in the two
+ // loops above.
+ DeclsToVisit.push_back(D);
+
+ for (auto *D : DeclsToVisit)
+ if (auto *ND = Result.getAcceptableDecl(D)) {
+ Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
+ Visited.add(ND);
}
- }
- for (auto *ND : DeclsToVisit) {
- Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
- Visited.add(ND);
- }
DeclsToVisit.clear();
// Traverse using directives for qualified name lookup.
@@ -5615,15 +5614,15 @@ bool FunctionCallFilterCCC::ValidateCandidate(const TypoCorrection &candidate) {
// unless the method being corrected--or the current DeclContext, if the
// function being corrected is not a method--is a method in the same class
// or a descendent class of the candidate's parent class.
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (MemberFn || !MD->isStatic()) {
- CXXMethodDecl *CurMD =
+ const auto *CurMD =
MemberFn
- ? dyn_cast_or_null<CXXMethodDecl>(MemberFn->getMemberDecl())
- : dyn_cast_or_null<CXXMethodDecl>(CurContext);
- CXXRecordDecl *CurRD =
+ ? dyn_cast_if_present<CXXMethodDecl>(MemberFn->getMemberDecl())
+ : dyn_cast_if_present<CXXMethodDecl>(CurContext);
+ const CXXRecordDecl *CurRD =
CurMD ? CurMD->getParent()->getCanonicalDecl() : nullptr;
- CXXRecordDecl *RD = MD->getParent()->getCanonicalDecl();
+ const CXXRecordDecl *RD = MD->getParent()->getCanonicalDecl();
if (!CurRD || (CurRD != RD && !CurRD->isDerivedFrom(RD)))
continue;
}
@@ -5642,28 +5641,28 @@ void Sema::diagnoseTypo(const TypoCorrection &Correction,
/// Find which declaration we should import to provide the definition of
/// the given declaration.
-static NamedDecl *getDefinitionToImport(NamedDecl *D) {
- if (VarDecl *VD = dyn_cast<VarDecl>(D))
+static const NamedDecl *getDefinitionToImport(const NamedDecl *D) {
+ if (const auto *VD = dyn_cast<VarDecl>(D))
return VD->getDefinition();
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
return FD->getDefinition();
- if (TagDecl *TD = dyn_cast<TagDecl>(D))
+ if (const auto *TD = dyn_cast<TagDecl>(D))
return TD->getDefinition();
- if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D))
+ if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(D))
return ID->getDefinition();
- if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D))
+ if (const auto *PD = dyn_cast<ObjCProtocolDecl>(D))
return PD->getDefinition();
- if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
- if (NamedDecl *TTD = TD->getTemplatedDecl())
+ if (const auto *TD = dyn_cast<TemplateDecl>(D))
+ if (const NamedDecl *TTD = TD->getTemplatedDecl())
return getDefinitionToImport(TTD);
return nullptr;
}
-void Sema::diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
+void Sema::diagnoseMissingImport(SourceLocation Loc, const NamedDecl *Decl,
MissingImportKind MIK, bool Recover) {
// Suggest importing a module providing the definition of this entity, if
// possible.
- NamedDecl *Def = getDefinitionToImport(Decl);
+ const NamedDecl *Def = getDefinitionToImport(Decl);
if (!Def)
Def = Decl;
@@ -5689,7 +5688,7 @@ static std::string getHeaderNameForHeader(Preprocessor &PP, const FileEntry *E,
return (IsSystem ? '<' : '"') + Path + (IsSystem ? '>' : '"');
}
-void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
+void Sema::diagnoseMissingImport(SourceLocation UseLoc, const NamedDecl *Decl,
SourceLocation DeclLoc,
ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover) {
@@ -5740,7 +5739,7 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
if (Modules.size() > 1) {
std::string ModuleList;
unsigned N = 0;
- for (Module *M : Modules) {
+ for (const auto *M : Modules) {
ModuleList += "\n ";
if (++N == 5 && N != Modules.size()) {
ModuleList += "[...]";
@@ -5843,3 +5842,7 @@ void Sema::ActOnPragmaDump(Scope *S, SourceLocation IILoc, IdentifierInfo *II) {
LookupName(R, S);
R.dump();
}
+
+void Sema::ActOnPragmaDump(Expr *E) {
+ E->dump();
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
index 194239ab0e10..cd38cd4cf69d 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
@@ -15,6 +15,7 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/SemaInternal.h"
+#include "llvm/ADT/StringExtras.h"
#include <optional>
using namespace clang;
@@ -74,20 +75,9 @@ static std::string stringFromPath(ModuleIdPath Path) {
Sema::DeclGroupPtrTy
Sema::ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc) {
- if (!ModuleScopes.empty() &&
- ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment) {
- // Under -std=c++2a -fmodules-ts, we can find an explicit 'module;' after
- // already implicitly entering the global module fragment. That's OK.
- assert(getLangOpts().CPlusPlusModules && getLangOpts().ModulesTS &&
- "unexpectedly encountered multiple global module fragment decls");
- ModuleScopes.back().BeginLoc = ModuleLoc;
- return nullptr;
- }
-
- // We start in the global module; all those declarations are implicitly
- // module-private (though they do not have module linkage).
+ // We start in the global module;
Module *GlobalModule =
- PushGlobalModuleFragment(ModuleLoc, /*IsImplicit=*/false);
+ PushGlobalModuleFragment(ModuleLoc);
// All declarations created from now on are owned by the global module.
auto *TU = Context.getTranslationUnitDecl();
@@ -135,7 +125,6 @@ void Sema::HandleStartOfHeaderUnit() {
ModuleScopes.back().BeginLoc = StartOfTU;
ModuleScopes.back().Module = Mod;
ModuleScopes.back().ModuleInterface = true;
- ModuleScopes.back().IsPartition = false;
VisibleModules.setVisible(Mod, StartOfTU);
// From now on, we have an owning module for all declarations we see.
@@ -168,19 +157,24 @@ static bool DiagReservedModuleName(Sema &S, const IdentifierInfo *II,
if (Reason == Reserved && S.getSourceManager().isInSystemHeader(Loc))
Reason = Valid;
- if (Reason != Valid) {
- S.Diag(Loc, diag::err_invalid_module_name) << II << (int)Reason;
- return true;
+ switch (Reason) {
+ case Valid:
+ return false;
+ case Invalid:
+ return S.Diag(Loc, diag::err_invalid_module_name) << II;
+ case Reserved:
+ S.Diag(Loc, diag::warn_reserved_module_name) << II;
+ return false;
}
- return false;
+ llvm_unreachable("fell off a fully covered switch");
}
Sema::DeclGroupPtrTy
Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
ModuleDeclKind MDK, ModuleIdPath Path,
ModuleIdPath Partition, ModuleImportState &ImportState) {
- assert((getLangOpts().ModulesTS || getLangOpts().CPlusPlusModules) &&
- "should only have module decl in Modules TS or C++20");
+ assert(getLangOpts().CPlusPlusModules &&
+ "should only have module decl in standard C++ modules");
bool IsFirstDecl = ImportState == ModuleImportState::FirstDecl;
bool SeenGMF = ImportState == ModuleImportState::GlobalFragment;
@@ -244,8 +238,8 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
return nullptr;
}
- assert((!getLangOpts().CPlusPlusModules || getLangOpts().ModulesTS ||
- SeenGMF == (bool)this->GlobalModuleFragment) &&
+ assert((!getLangOpts().CPlusPlusModules ||
+ SeenGMF == (bool)this->TheGlobalModuleFragment) &&
"mismatched global module state");
// In C++20, the module-declaration must be the first declaration if there
@@ -262,7 +256,7 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
}
}
- // C++2b [module.unit]p1: ... The identifiers module and import shall not
+ // C++23 [module.unit]p1: ... The identifiers module and import shall not
// appear as identifiers in a module-name or module-partition. All
// module-names either beginning with an identifier consisting of std
// followed by zero or more digits or containing a reserved identifier
@@ -275,11 +269,8 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
if (!getSourceManager().isInSystemHeader(Path[0].second) &&
(FirstComponentName == "std" ||
(FirstComponentName.startswith("std") &&
- llvm::all_of(FirstComponentName.drop_front(3), &llvm::isDigit)))) {
- Diag(Path[0].second, diag::err_invalid_module_name)
- << Path[0].first << /*reserved*/ 1;
- return nullptr;
- }
+ llvm::all_of(FirstComponentName.drop_front(3), &llvm::isDigit))))
+ Diag(Path[0].second, diag::warn_reserved_module_name) << Path[0].first;
// Then test all of the components in the path to see if any of them are
// using another kind of reserved or invalid identifier.
@@ -310,8 +301,8 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
const_cast<LangOptions&>(getLangOpts()).CurrentModule = ModuleName;
auto &Map = PP.getHeaderSearchInfo().getModuleMap();
- Module *Mod;
-
+ Module *Mod; // The module we are creating.
+ Module *Interface = nullptr; // The interface for an implementation.
switch (MDK) {
case ModuleDeclKind::Interface:
case ModuleDeclKind::PartitionInterface: {
@@ -348,18 +339,19 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
// we're building if `LangOpts.CurrentModule` equals to 'ModuleName'.
// Change the value for `LangOpts.CurrentModule` temporarily to make the
// module loader work properly.
- const_cast<LangOptions&>(getLangOpts()).CurrentModule = "";
- Mod = getModuleLoader().loadModule(ModuleLoc, {ModuleNameLoc},
- Module::AllVisible,
- /*IsInclusionDirective=*/false);
+ const_cast<LangOptions &>(getLangOpts()).CurrentModule = "";
+ Interface = getModuleLoader().loadModule(ModuleLoc, {ModuleNameLoc},
+ Module::AllVisible,
+ /*IsInclusionDirective=*/false);
const_cast<LangOptions&>(getLangOpts()).CurrentModule = ModuleName;
- if (!Mod) {
+ if (!Interface) {
Diag(ModuleLoc, diag::err_module_not_defined) << ModuleName;
// Create an empty module interface unit for error recovery.
Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName);
+ } else {
+ Mod = Map.createModuleForImplementationUnit(ModuleLoc, ModuleName);
}
-
} break;
case ModuleDeclKind::PartitionImplementation:
@@ -370,7 +362,7 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
break;
}
- if (!this->GlobalModuleFragment) {
+ if (!this->TheGlobalModuleFragment) {
ModuleScopes.push_back({});
if (getLangOpts().ModulesLocalVisibility)
ModuleScopes.back().OuterVisibleModules = std::move(VisibleModules);
@@ -383,7 +375,6 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
ModuleScopes.back().BeginLoc = StartLoc;
ModuleScopes.back().Module = Mod;
ModuleScopes.back().ModuleInterface = MDK != ModuleDeclKind::Implementation;
- ModuleScopes.back().IsPartition = IsPartition;
VisibleModules.setVisible(Mod, ModuleLoc);
// From now on, we have an owning module for all declarations we see.
@@ -399,17 +390,32 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
// statements, so imports are allowed.
ImportState = ModuleImportState::ImportAllowed;
- // For an implementation, We already made an implicit import (its interface).
- // Make and return the import decl to be added to the current TU.
- if (MDK == ModuleDeclKind::Implementation) {
- // Make the import decl for the interface.
- ImportDecl *Import =
- ImportDecl::Create(Context, CurContext, ModuleLoc, Mod, Path[0].second);
- // and return it to be added.
+ getASTContext().setCurrentNamedModule(Mod);
+
+ // We already potentially made an implicit import (in the case of a module
+ // implementation unit importing its interface). Make this module visible
+ // and return the import decl to be added to the current TU.
+ if (Interface) {
+
+ VisibleModules.setVisible(Interface, ModuleLoc);
+ VisibleModules.makeTransitiveImportsVisible(Interface, ModuleLoc);
+
+ // Make the import decl for the interface in the impl module.
+ ImportDecl *Import = ImportDecl::Create(Context, CurContext, ModuleLoc,
+ Interface, Path[0].second);
+ CurContext->addDecl(Import);
+
+ // Sequence initialization of the imported module before that of the current
+ // module, if any.
+ Context.addModuleInitializer(ModuleScopes.back().Module, Import);
+ Mod->Imports.insert(Interface); // As if we imported it.
+ // Also save this as a shortcut to checking for decls in the interface
+ ThePrimaryInterface = Interface;
+ // If we made an implicit import of the module interface, then return the
+ // imported module decl.
return ConvertDeclToDeclGroup(Import);
}
- // FIXME: Create a ModuleDecl.
return nullptr;
}
@@ -419,10 +425,11 @@ Sema::ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
// C++20 [basic.link]/2:
// A private-module-fragment shall appear only in a primary module
// interface unit.
- switch (ModuleScopes.empty() ? Module::GlobalModuleFragment
+ switch (ModuleScopes.empty() ? Module::ExplicitGlobalModuleFragment
: ModuleScopes.back().Module->Kind) {
case Module::ModuleMapModule:
- case Module::GlobalModuleFragment:
+ case Module::ExplicitGlobalModuleFragment:
+ case Module::ImplicitGlobalModuleFragment:
case Module::ModulePartitionImplementation:
case Module::ModulePartitionInterface:
case Module::ModuleHeaderUnit:
@@ -434,19 +441,17 @@ Sema::ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
Diag(ModuleScopes.back().BeginLoc, diag::note_previous_definition);
return nullptr;
- case Module::ModuleInterfaceUnit:
- break;
- }
-
- if (!ModuleScopes.back().ModuleInterface) {
+ case Module::ModuleImplementationUnit:
Diag(PrivateLoc, diag::err_private_module_fragment_not_module_interface);
Diag(ModuleScopes.back().BeginLoc,
diag::note_not_module_interface_add_export)
<< FixItHint::CreateInsertion(ModuleScopes.back().BeginLoc, "export ");
return nullptr;
+
+ case Module::ModuleInterfaceUnit:
+ break;
}
- // FIXME: Check this isn't a module interface partition.
// FIXME: Check that this translation unit does not import any partitions;
// such imports would violate [basic.link]/2's "shall be the only module unit"
// restriction.
@@ -482,9 +487,8 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path,
bool IsPartition) {
-
- bool Cxx20Mode = getLangOpts().CPlusPlusModules || getLangOpts().ModulesTS;
- assert((!IsPartition || Cxx20Mode) && "partition seen in non-C++20 code?");
+ assert((!IsPartition || getLangOpts().CPlusPlusModules) &&
+ "partition seen in non-C++20 code?");
// For a C++20 module name, flatten into a single identifier with the source
// location of the first component.
@@ -502,7 +506,7 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
ModuleName += stringFromPath(Path);
ModuleNameLoc = {PP.getIdentifierInfo(ModuleName), Path[0].second};
Path = ModuleIdPath(ModuleNameLoc);
- } else if (Cxx20Mode) {
+ } else if (getLangOpts().CPlusPlusModules) {
ModuleName = stringFromPath(Path);
ModuleNameLoc = {PP.getIdentifierInfo(ModuleName), Path[0].second};
Path = ModuleIdPath(ModuleNameLoc);
@@ -542,6 +546,9 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *Mod,
ModuleIdPath Path) {
+ if (Mod->isHeaderUnit())
+ Diag(ImportLoc, diag::warn_experimental_header_unit);
+
VisibleModules.setVisible(Mod, ImportLoc);
checkModuleImportContext(*this, Mod, ImportLoc, CurContext);
@@ -550,8 +557,7 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
// of the same top-level module. Until we do, make it an error rather than
// silently ignoring the import.
// FIXME: Should we warn on a redundant import of the current module?
- if (Mod->isForBuilding(getLangOpts()) &&
- (getLangOpts().isCompilingModule() || !getLangOpts().ModulesTS)) {
+ if (Mod->isForBuilding(getLangOpts())) {
Diag(ImportLoc, getLangOpts().isCompilingModule()
? diag::err_module_self_import
: diag::err_module_import_in_implementation)
@@ -611,16 +617,9 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
// [module.interface]p1:
// An export-declaration shall inhabit a namespace scope and appear in the
// purview of a module interface unit.
- Diag(ExportLoc, diag::err_export_not_in_module_interface)
- << (!ModuleScopes.empty() &&
- !ModuleScopes.back().ImplicitGlobalModuleFragment);
+ Diag(ExportLoc, diag::err_export_not_in_module_interface);
}
- // In some cases we need to know if an entity was present in a directly-
- // imported module (as opposed to a transitive import). This avoids
- // searching both Imports and Exports.
- DirectModuleImports.insert(Mod);
-
return Import;
}
@@ -639,11 +638,9 @@ void Sema::BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
TUKind == TU_Module &&
getSourceManager().isWrittenInMainFile(DirectiveLoc);
- bool ShouldAddImport = !IsInModuleIncludes;
-
- // If this module import was due to an inclusion directive, create an
- // implicit import declaration to capture it in the AST.
- if (ShouldAddImport) {
+ // If we are really importing a module (not just checking layering) due to an
+ // #include in the main file, synthesize an ImportDecl.
+ if (getLangOpts().Modules && !IsInModuleIncludes) {
TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU,
DirectiveLoc, Mod,
@@ -819,76 +816,22 @@ Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
return D;
}
-static bool checkExportedDeclContext(Sema &S, DeclContext *DC,
- SourceLocation BlockStart);
-
-namespace {
-enum class UnnamedDeclKind {
- Empty,
- StaticAssert,
- Asm,
- UsingDirective,
- Namespace,
- Context
-};
-}
-
-static std::optional<UnnamedDeclKind> getUnnamedDeclKind(Decl *D) {
- if (isa<EmptyDecl>(D))
- return UnnamedDeclKind::Empty;
- if (isa<StaticAssertDecl>(D))
- return UnnamedDeclKind::StaticAssert;
- if (isa<FileScopeAsmDecl>(D))
- return UnnamedDeclKind::Asm;
- if (isa<UsingDirectiveDecl>(D))
- return UnnamedDeclKind::UsingDirective;
- // Everything else either introduces one or more names or is ill-formed.
- return std::nullopt;
-}
-
-unsigned getUnnamedDeclDiag(UnnamedDeclKind UDK, bool InBlock) {
- switch (UDK) {
- case UnnamedDeclKind::Empty:
- case UnnamedDeclKind::StaticAssert:
- // Allow empty-declarations and static_asserts in an export block as an
- // extension.
- return InBlock ? diag::ext_export_no_name_block : diag::err_export_no_name;
-
- case UnnamedDeclKind::UsingDirective:
- // Allow exporting using-directives as an extension.
- return diag::ext_export_using_directive;
-
- case UnnamedDeclKind::Namespace:
- // Anonymous namespace with no content.
- return diag::introduces_no_names;
+static bool checkExportedDecl(Sema &, Decl *, SourceLocation);
- case UnnamedDeclKind::Context:
- // Allow exporting DeclContexts that transitively contain no declarations
- // as an extension.
- return diag::ext_export_no_names;
-
- case UnnamedDeclKind::Asm:
- return diag::err_export_no_name;
- }
- llvm_unreachable("unknown kind");
-}
-
-static void diagExportedUnnamedDecl(Sema &S, UnnamedDeclKind UDK, Decl *D,
- SourceLocation BlockStart) {
- S.Diag(D->getLocation(), getUnnamedDeclDiag(UDK, BlockStart.isValid()))
- << (unsigned)UDK;
- if (BlockStart.isValid())
- S.Diag(BlockStart, diag::note_export);
+/// Check that it's valid to export all the declarations in \p DC.
+static bool checkExportedDeclContext(Sema &S, DeclContext *DC,
+ SourceLocation BlockStart) {
+ bool AllUnnamed = true;
+ for (auto *D : DC->decls())
+ AllUnnamed &= checkExportedDecl(S, D, BlockStart);
+ return AllUnnamed;
}
/// Check that it's valid to export \p D.
static bool checkExportedDecl(Sema &S, Decl *D, SourceLocation BlockStart) {
- // C++2a [module.interface]p3:
- // An exported declaration shall declare at least one name
- if (auto UDK = getUnnamedDeclKind(D))
- diagExportedUnnamedDecl(S, *UDK, D, BlockStart);
- // [...] shall not declare a name with internal linkage.
+ // C++20 [module.interface]p3:
+ // [...] it shall not declare a name with internal linkage.
bool HasName = false;
if (auto *ND = dyn_cast<NamedDecl>(D)) {
// Don't diagnose anonymous union objects; we'll diagnose their members
@@ -898,6 +841,7 @@ static bool checkExportedDecl(Sema &S, Decl *D, SourceLocation BlockStart) {
S.Diag(ND->getLocation(), diag::err_export_internal) << ND;
if (BlockStart.isValid())
S.Diag(BlockStart, diag::note_export);
+ return false;
}
}
@@ -913,31 +857,29 @@ static bool checkExportedDecl(Sema &S, Decl *D, SourceLocation BlockStart) {
S.Diag(Target->getLocation(), diag::note_using_decl_target);
if (BlockStart.isValid())
S.Diag(BlockStart, diag::note_export);
+ return false;
}
}
// Recurse into namespace-scope DeclContexts. (Only namespace-scope
- // declarations are exported.).
+ // declarations are exported).
if (auto *DC = dyn_cast<DeclContext>(D)) {
- if (isa<NamespaceDecl>(D) && DC->decls().empty()) {
- if (!HasName)
- // We don't allow an empty anonymous namespace (we don't allow decls
- // in them either, but that's handled in the recursion).
- diagExportedUnnamedDecl(S, UnnamedDeclKind::Namespace, D, BlockStart);
- // We allow an empty named namespace decl.
- } else if (DC->getRedeclContext()->isFileContext() && !isa<EnumDecl>(D))
- return checkExportedDeclContext(S, DC, BlockStart);
- }
- return false;
-}
-
-/// Check that it's valid to export all the declarations in \p DC.
-static bool checkExportedDeclContext(Sema &S, DeclContext *DC,
- SourceLocation BlockStart) {
- bool AllUnnamed = true;
- for (auto *D : DC->decls())
- AllUnnamed &= checkExportedDecl(S, D, BlockStart);
- return AllUnnamed;
+ if (!isa<NamespaceDecl>(D))
+ return true;
+
+ if (auto *ND = dyn_cast<NamedDecl>(D)) {
+ if (!ND->getDeclName()) {
+ S.Diag(ND->getLocation(), diag::err_export_anon_ns_internal);
+ if (BlockStart.isValid())
+ S.Diag(BlockStart, diag::note_export);
+ return false;
+ } else if (!DC->decls().empty() &&
+ DC->getRedeclContext()->isFileContext()) {
+ return checkExportedDeclContext(S, DC, BlockStart);
+ }
+ }
+ }
+ return true;
}
/// Complete the definition of an export declaration.
@@ -952,12 +894,7 @@ Decl *Sema::ActOnFinishExportDecl(Scope *S, Decl *D, SourceLocation RBraceLoc) {
SourceLocation BlockStart =
ED->hasBraces() ? ED->getBeginLoc() : SourceLocation();
for (auto *Child : ED->decls()) {
- if (checkExportedDecl(*this, Child, BlockStart)) {
- // If a top-level child is a linkage-spec declaration, it might contain
- // no declarations (transitively), in which case it's ill-formed.
- diagExportedUnnamedDecl(*this, UnnamedDeclKind::Context, Child,
- BlockStart);
- }
+ checkExportedDecl(*this, Child, BlockStart);
if (auto *FD = dyn_cast<FunctionDecl>(Child)) {
// [dcl.inline]/7
// If an inline function or variable that is attached to a named module
@@ -975,44 +912,55 @@ Decl *Sema::ActOnFinishExportDecl(Scope *S, Decl *D, SourceLocation RBraceLoc) {
return D;
}
-Module *Sema::PushGlobalModuleFragment(SourceLocation BeginLoc,
- bool IsImplicit) {
+Module *Sema::PushGlobalModuleFragment(SourceLocation BeginLoc) {
// We shouldn't create new global module fragment if there is already
// one.
- if (!GlobalModuleFragment) {
+ if (!TheGlobalModuleFragment) {
ModuleMap &Map = PP.getHeaderSearchInfo().getModuleMap();
- GlobalModuleFragment = Map.createGlobalModuleFragmentForModuleUnit(
+ TheGlobalModuleFragment = Map.createGlobalModuleFragmentForModuleUnit(
BeginLoc, getCurrentModule());
}
- assert(GlobalModuleFragment && "module creation should not fail");
+ assert(TheGlobalModuleFragment && "module creation should not fail");
// Enter the scope of the global module.
- ModuleScopes.push_back({BeginLoc, GlobalModuleFragment,
+ ModuleScopes.push_back({BeginLoc, TheGlobalModuleFragment,
/*ModuleInterface=*/false,
- /*IsPartition=*/false,
- /*ImplicitGlobalModuleFragment=*/IsImplicit,
/*OuterVisibleModules=*/{}});
- VisibleModules.setVisible(GlobalModuleFragment, BeginLoc);
+ VisibleModules.setVisible(TheGlobalModuleFragment, BeginLoc);
- return GlobalModuleFragment;
+ return TheGlobalModuleFragment;
}
void Sema::PopGlobalModuleFragment() {
- assert(!ModuleScopes.empty() && getCurrentModule()->isGlobalModule() &&
+ assert(!ModuleScopes.empty() &&
+ getCurrentModule()->isExplicitGlobalModule() &&
"left the wrong module scope, which is not global module fragment");
ModuleScopes.pop_back();
}
-bool Sema::isModuleUnitOfCurrentTU(const Module *M) const {
- assert(M);
-
- Module *CurrentModuleUnit = getCurrentModule();
+Module *Sema::PushImplicitGlobalModuleFragment(SourceLocation BeginLoc,
+ bool IsExported) {
+ Module **M = IsExported ? &TheExportedImplicitGlobalModuleFragment
+ : &TheImplicitGlobalModuleFragment;
+ if (!*M) {
+ ModuleMap &Map = PP.getHeaderSearchInfo().getModuleMap();
+ *M = Map.createImplicitGlobalModuleFragmentForModuleUnit(
+ BeginLoc, IsExported, getCurrentModule());
+ }
+ assert(*M && "module creation should not fail");
- // If we are not in a module currently, M must not be the module unit of
- // current TU.
- if (!CurrentModuleUnit)
- return false;
+ // Enter the scope of the global module.
+ ModuleScopes.push_back({BeginLoc, *M,
+ /*ModuleInterface=*/false,
+ /*OuterVisibleModules=*/{}});
+ VisibleModules.setVisible(*M, BeginLoc);
+ return *M;
+}
- return M->isSubModuleOf(CurrentModuleUnit->getTopLevelModule());
+void Sema::PopImplicitGlobalModuleFragment() {
+ assert(!ModuleScopes.empty() &&
+ getCurrentModule()->isImplicitGlobalModule() &&
+ "left the wrong module scope, which is not global module fragment");
+ ModuleScopes.pop_back();
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
index 584c4a31793c..7e5dc3a71cbb 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
@@ -1363,10 +1363,9 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (!Context.hasSameType(PropertyIvarType, IvarType)) {
if (isa<ObjCObjectPointerType>(PropertyIvarType)
&& isa<ObjCObjectPointerType>(IvarType))
- compat =
- Context.canAssignObjCInterfaces(
- PropertyIvarType->getAs<ObjCObjectPointerType>(),
- IvarType->getAs<ObjCObjectPointerType>());
+ compat = Context.canAssignObjCInterfaces(
+ PropertyIvarType->castAs<ObjCObjectPointerType>(),
+ IvarType->castAs<ObjCObjectPointerType>());
else {
compat = (CheckAssignmentConstraints(PropertyIvarLoc, PropertyIvarType,
IvarType)
@@ -2508,8 +2507,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
if (const SectionAttr *SA = property->getAttr<SectionAttr>())
GetterMethod->addAttr(SectionAttr::CreateImplicit(
- Context, SA->getName(), Loc, AttributeCommonInfo::AS_GNU,
- SectionAttr::GNU_section));
+ Context, SA->getName(), Loc, SectionAttr::GNU_section));
if (getLangOpts().ObjCAutoRefCount)
CheckARCMethodDecl(GetterMethod);
@@ -2581,8 +2579,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
CD->addDecl(SetterMethod);
if (const SectionAttr *SA = property->getAttr<SectionAttr>())
SetterMethod->addAttr(SectionAttr::CreateImplicit(
- Context, SA->getName(), Loc, AttributeCommonInfo::AS_GNU,
- SectionAttr::GNU_section));
+ Context, SA->getName(), Loc, SectionAttr::GNU_section));
// It's possible for the user to have set a very odd custom
// setter selector that causes it to have a method family.
if (getLangOpts().ObjCAutoRefCount)
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
index c767341d922b..cf805987b378 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
@@ -27,6 +27,7 @@
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
@@ -87,8 +88,7 @@ public:
};
using OperatorOffsetTy =
llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
- using DoacrossDependMapTy =
- llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
+ using DoacrossClauseMapTy = llvm::DenseMap<OMPClause *, OperatorOffsetTy>;
/// Kind of the declaration used in the uses_allocators clauses.
enum class UsesAllocatorsDeclKind {
/// Predefined allocator
@@ -169,7 +169,7 @@ private:
/// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
/// get the data (loop counters etc.) about enclosing loop-based construct.
/// This data is required during codegen.
- DoacrossDependMapTy DoacrossDepends;
+ DoacrossClauseMapTy DoacrossDepends;
/// First argument (Expr *) contains optional argument of the
/// 'ordered' clause, the second one is true if the regions has 'ordered'
/// clause, false otherwise.
@@ -1054,17 +1054,16 @@ public:
assert(!isStackEmpty());
return getStackSize() - 1;
}
- void addDoacrossDependClause(OMPDependClause *C,
- const OperatorOffsetTy &OpsOffs) {
+ void addDoacrossDependClause(OMPClause *C, const OperatorOffsetTy &OpsOffs) {
SharingMapTy *Parent = getSecondOnStackOrNull();
assert(Parent && isOpenMPWorksharingDirective(Parent->Directive));
Parent->DoacrossDepends.try_emplace(C, OpsOffs);
}
- llvm::iterator_range<DoacrossDependMapTy::const_iterator>
+ llvm::iterator_range<DoacrossClauseMapTy::const_iterator>
getDoacrossDependClauses() const {
const SharingMapTy &StackElem = getTopOfStack();
if (isOpenMPWorksharingDirective(StackElem.Directive)) {
- const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
+ const DoacrossClauseMapTy &Ref = StackElem.DoacrossDepends;
return llvm::make_range(Ref.begin(), Ref.end());
}
return llvm::make_range(StackElem.DoacrossDepends.end(),
@@ -2011,7 +2010,7 @@ void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
}
static bool isOpenMPDeviceDelayedContext(Sema &S) {
- assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
+ assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsTargetDevice &&
"Expected OpenMP device compilation.");
return !S.isInOpenMPTargetExecutionDirective();
}
@@ -2025,10 +2024,10 @@ enum class FunctionEmissionStatus {
};
} // anonymous namespace
-Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
- unsigned DiagID,
- FunctionDecl *FD) {
- assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
+Sema::SemaDiagnosticBuilder
+Sema::diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID,
+ const FunctionDecl *FD) {
+ assert(LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice &&
"Expected OpenMP device compilation.");
SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
@@ -2065,8 +2064,8 @@ Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID,
- FunctionDecl *FD) {
- assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
+ const FunctionDecl *FD) {
+ assert(LangOpts.OpenMP && !LangOpts.OpenMPIsTargetDevice &&
"Expected OpenMP host compilation.");
SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
@@ -2203,11 +2202,14 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
++EI;
if (EI == EE)
return false;
-
- if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
- isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
+ auto Last = std::prev(EE);
+ const auto *UO =
+ dyn_cast<UnaryOperator>(Last->getAssociatedExpression());
+ if ((UO && UO->getOpcode() == UO_Deref) ||
+ isa<ArraySubscriptExpr>(Last->getAssociatedExpression()) ||
+ isa<OMPArraySectionExpr>(Last->getAssociatedExpression()) ||
isa<MemberExpr>(EI->getAssociatedExpression()) ||
- isa<OMPArrayShapingExpr>(EI->getAssociatedExpression())) {
+ isa<OMPArrayShapingExpr>(Last->getAssociatedExpression())) {
IsVariableAssociatedWithSection = true;
// There is nothing more we need to know about this variable.
return true;
@@ -2270,10 +2272,10 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
// and alignment, because the runtime library only deals with uintptr types.
// If it does not fit the uintptr size, we need to pass the data by reference
// instead.
- if (!IsByRef &&
- (Ctx.getTypeSizeInChars(Ty) >
- Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
- Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
+ if (!IsByRef && (Ctx.getTypeSizeInChars(Ty) >
+ Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
+ Ctx.getAlignOfGlobalVarInChars(Ty) >
+ Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
IsByRef = true;
}
@@ -2547,7 +2549,8 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
}
}
}
- if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
+ if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()) &&
+ !isOpenMPLoopTransformationDirective(DSAStack->getCurrentDirective())) {
if (DSAStack->getAssociatedLoops() > 0 && !DSAStack->isLoopStarted()) {
DSAStack->resetPossibleLoopCounter(D);
DSAStack->loopStart();
@@ -2699,16 +2702,16 @@ void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(Caller->getMostRecentDecl());
// Ignore host functions during device analyzis.
- if (LangOpts.OpenMPIsDevice &&
+ if (LangOpts.OpenMPIsTargetDevice &&
(!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host))
return;
// Ignore nohost functions during host analyzis.
- if (!LangOpts.OpenMPIsDevice && DevTy &&
+ if (!LangOpts.OpenMPIsTargetDevice && DevTy &&
*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
return;
const FunctionDecl *FD = Callee->getMostRecentDecl();
DevTy = OMPDeclareTargetDeclAttr::getDeviceType(FD);
- if (LangOpts.OpenMPIsDevice && DevTy &&
+ if (LangOpts.OpenMPIsTargetDevice && DevTy &&
*DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
// Diagnose host function called during device codegen.
StringRef HostDevTy =
@@ -2719,8 +2722,8 @@ void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
<< HostDevTy;
return;
}
- if (!LangOpts.OpenMPIsDevice && !LangOpts.OpenMPOffloadMandatory && DevTy &&
- *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
+ if (!LangOpts.OpenMPIsTargetDevice && !LangOpts.OpenMPOffloadMandatory &&
+ DevTy && *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
// In OpenMP 5.2 or later, if the function has a host variant then allow
// that to be called instead
auto &&HasHostAttr = [](const FunctionDecl *Callee) {
@@ -3383,7 +3386,7 @@ Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList,
// allocate directives that appear in a target region must specify an
// allocator clause unless a requires directive with the dynamic_allocators
// clause is present in the same compilation unit.
- if (LangOpts.OpenMPIsDevice &&
+ if (LangOpts.OpenMPIsTargetDevice &&
!DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
targetDiag(Loc, diag::err_expected_allocator_clause);
} else {
@@ -4196,7 +4199,6 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_loop:
case OMPD_target_parallel_loop:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd: {
@@ -4224,8 +4226,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
Sema::CapturedParamNameType ParamsTarget[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
@@ -4269,8 +4270,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
std::make_pair(StringRef(), QualType()),
/*OpenMPCaptureLevel=*/1);
@@ -4330,8 +4330,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
break;
}
case OMPD_taskloop:
@@ -4377,8 +4376,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
break;
}
case OMPD_parallel_masked_taskloop:
@@ -4430,8 +4428,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
break;
}
case OMPD_distribute_parallel_for_simd:
@@ -4450,6 +4447,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Params);
break;
}
+ case OMPD_target_teams_loop:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
@@ -4477,8 +4475,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
Sema::CapturedParamNameType ParamsTarget[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
@@ -4509,22 +4506,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
break;
}
- case OMPD_teams_loop: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
-
- Sema::CapturedParamNameType ParamsTeams[] = {
- std::make_pair(".global_tid.", KmpInt32PtrTy),
- std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
- // Start a captured region for 'teams'.
- ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- ParamsTeams, /*OpenMPCaptureLevel=*/0);
- break;
- }
-
+ case OMPD_teams_loop:
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
@@ -4580,8 +4562,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
break;
}
case OMPD_threadprivate:
@@ -4668,11 +4649,12 @@ static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
CaptureExpr->getExprLoc());
}
-static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
+static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref,
+ StringRef Name) {
CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
if (!Ref) {
OMPCapturedExprDecl *CD = buildCaptureDecl(
- S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
+ S, &S.getASTContext().Idents.get(Name), CaptureExpr,
/*WithInit=*/true, S.CurContext, /*AsExpression=*/true);
Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
CaptureExpr->getExprLoc());
@@ -6118,6 +6100,11 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
BindKind, StartLoc))
return StmtError();
+ // Report affected OpenMP target offloading behavior when in HIP lang-mode.
+ if (getLangOpts().HIP && (isOpenMPTargetExecutionDirective(Kind) ||
+ isOpenMPTargetDataManagementDirective(Kind)))
+ Diag(StartLoc, diag::warn_hip_omp_target_directives);
+
llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
VarsWithInheritedDSAType VarsWithInheritedDSA;
bool ErrorFound = false;
@@ -7259,7 +7246,7 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
return Call;
if (LangOpts.OpenMP >= 51 && CalleeFnDecl->getIdentifier() &&
- CalleeFnDecl->getName().startswith_insensitive("omp_")) {
+ CalleeFnDecl->getName().starts_with_insensitive("omp_")) {
// checking for any calls inside an Order region
if (Scope && Scope->isOpenMPOrderClauseScope())
Diag(LParenLoc, diag::err_omp_unexpected_call_to_omp_runtime_api);
@@ -8441,7 +8428,8 @@ bool OpenMPIterationSpaceChecker::checkAndSetInc(Expr *S) {
static ExprResult
tryBuildCapture(Sema &SemaRef, Expr *Capture,
- llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
+ StringRef Name = ".capture_expr.") {
if (SemaRef.CurContext->isDependentContext() || Capture->containsErrors())
return Capture;
if (Capture->isEvaluatable(SemaRef.Context, Expr::SE_AllowSideEffects))
@@ -8450,9 +8438,9 @@ tryBuildCapture(Sema &SemaRef, Expr *Capture,
/*AllowExplicit=*/true);
auto I = Captures.find(Capture);
if (I != Captures.end())
- return buildCapture(SemaRef, Capture, I->second);
+ return buildCapture(SemaRef, Capture, I->second, Name);
DeclRefExpr *Ref = nullptr;
- ExprResult Res = buildCapture(SemaRef, Capture, Ref);
+ ExprResult Res = buildCapture(SemaRef, Capture, Ref, Name);
Captures[Capture] = Ref;
return Res;
}
@@ -8464,7 +8452,7 @@ calculateNumIters(Sema &SemaRef, Scope *S, SourceLocation DefaultLoc,
Expr *Lower, Expr *Upper, Expr *Step, QualType LCTy,
bool TestIsStrictOp, bool RoundToStep,
llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
- ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
+ ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures, ".new_step");
if (!NewStep.isUsable())
return nullptr;
llvm::APSInt LRes, SRes;
@@ -8640,8 +8628,8 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
return nullptr;
Expr *LBVal = LB;
Expr *UBVal = UB;
- // LB = TestIsLessOp.getValue() ? min(LB(MinVal), LB(MaxVal)) :
- // max(LB(MinVal), LB(MaxVal))
+ // OuterVar = (LB = TestIsLessOp.getValue() ? min(LB(MinVal), LB(MaxVal)) :
+ // max(LB(MinVal), LB(MaxVal)))
if (InitDependOnLC) {
const LoopIterationSpace &IS = ResultIterSpaces[*InitDependOnLC - 1];
if (!IS.MinValue || !IS.MaxValue)
@@ -8686,8 +8674,10 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
if (!LBMaxVal.isUsable())
return nullptr;
- Expr *LBMin = tryBuildCapture(SemaRef, LBMinVal.get(), Captures).get();
- Expr *LBMax = tryBuildCapture(SemaRef, LBMaxVal.get(), Captures).get();
+ Expr *LBMin =
+ tryBuildCapture(SemaRef, LBMinVal.get(), Captures, ".lb_min").get();
+ Expr *LBMax =
+ tryBuildCapture(SemaRef, LBMaxVal.get(), Captures, ".lb_max").get();
if (!LBMin || !LBMax)
return nullptr;
// LB(MinVal) < LB(MaxVal)
@@ -8696,7 +8686,8 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
if (!MinLessMaxRes.isUsable())
return nullptr;
Expr *MinLessMax =
- tryBuildCapture(SemaRef, MinLessMaxRes.get(), Captures).get();
+ tryBuildCapture(SemaRef, MinLessMaxRes.get(), Captures, ".min_less_max")
+ .get();
if (!MinLessMax)
return nullptr;
if (*TestIsLessOp) {
@@ -8716,6 +8707,12 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
return nullptr;
LBVal = MaxLB.get();
}
+ // OuterVar = LB
+ LBMinVal =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign, IS.CounterVar, LBVal);
+ if (!LBMinVal.isUsable())
+ return nullptr;
+ LBVal = LBMinVal.get();
}
// UB = TestIsLessOp.getValue() ? max(UB(MinVal), UB(MaxVal)) :
// min(UB(MinVal), UB(MaxVal))
@@ -8763,8 +8760,10 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
if (!UBMaxVal.isUsable())
return nullptr;
- Expr *UBMin = tryBuildCapture(SemaRef, UBMinVal.get(), Captures).get();
- Expr *UBMax = tryBuildCapture(SemaRef, UBMaxVal.get(), Captures).get();
+ Expr *UBMin =
+ tryBuildCapture(SemaRef, UBMinVal.get(), Captures, ".ub_min").get();
+ Expr *UBMax =
+ tryBuildCapture(SemaRef, UBMaxVal.get(), Captures, ".ub_max").get();
if (!UBMin || !UBMax)
return nullptr;
// UB(MinVal) > UB(MaxVal)
@@ -8772,8 +8771,9 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
SemaRef.BuildBinOp(S, DefaultLoc, BO_GT, UBMin, UBMax);
if (!MinGreaterMaxRes.isUsable())
return nullptr;
- Expr *MinGreaterMax =
- tryBuildCapture(SemaRef, MinGreaterMaxRes.get(), Captures).get();
+ Expr *MinGreaterMax = tryBuildCapture(SemaRef, MinGreaterMaxRes.get(),
+ Captures, ".min_greater_max")
+ .get();
if (!MinGreaterMax)
return nullptr;
if (*TestIsLessOp) {
@@ -8796,8 +8796,8 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
}
Expr *UBExpr = *TestIsLessOp ? UBVal : LBVal;
Expr *LBExpr = *TestIsLessOp ? LBVal : UBVal;
- Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
- Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
+ Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures, ".upper").get();
+ Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures, ".lower").get();
if (!Upper || !Lower)
return nullptr;
@@ -8891,7 +8891,7 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
+ ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures, ".new_step");
if (!NewStep.isUsable())
return std::make_pair(nullptr, nullptr);
Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Mul, Diff.get(), NewStep.get());
@@ -9165,6 +9165,22 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
}
}
+namespace {
+// Utility for openmp doacross clause kind
+class OMPDoacrossKind {
+public:
+ bool isSource(const OMPDoacrossClause *C) {
+ return C->getDependenceType() == OMPC_DOACROSS_source ||
+ C->getDependenceType() == OMPC_DOACROSS_source_omp_cur_iteration;
+ }
+ bool isSink(const OMPDoacrossClause *C) {
+ return C->getDependenceType() == OMPC_DOACROSS_sink;
+ }
+ bool isSinkIter(const OMPDoacrossClause *C) {
+ return C->getDependenceType() == OMPC_DOACROSS_sink_omp_cur_iteration;
+ }
+};
+} // namespace
/// Called on a for stmt to check and extract its iteration space
/// for further processing (such as collapsing).
static bool checkOpenMPIterationSpace(
@@ -9318,30 +9334,61 @@ static bool checkOpenMPIterationSpace(
}
}
for (auto &Pair : DSA.getDoacrossDependClauses()) {
- if (CurrentNestedLoopCount >= Pair.first->getNumLoops()) {
+ auto *DependC = dyn_cast<OMPDependClause>(Pair.first);
+ auto *DoacrossC = dyn_cast<OMPDoacrossClause>(Pair.first);
+ unsigned NumLoops =
+ DependC ? DependC->getNumLoops() : DoacrossC->getNumLoops();
+ if (CurrentNestedLoopCount >= NumLoops) {
// Erroneous case - clause has some problems.
continue;
}
- if (Pair.first->getDependencyKind() == OMPC_DEPEND_sink &&
+ if (DependC && DependC->getDependencyKind() == OMPC_DEPEND_sink &&
Pair.second.size() <= CurrentNestedLoopCount) {
// Erroneous case - clause has some problems.
- Pair.first->setLoopData(CurrentNestedLoopCount, nullptr);
+ DependC->setLoopData(CurrentNestedLoopCount, nullptr);
+ continue;
+ }
+ OMPDoacrossKind ODK;
+ if (DoacrossC && ODK.isSink(DoacrossC) &&
+ Pair.second.size() <= CurrentNestedLoopCount) {
+ // Erroneous case - clause has some problems.
+ DoacrossC->setLoopData(CurrentNestedLoopCount, nullptr);
continue;
}
Expr *CntValue;
- if (Pair.first->getDependencyKind() == OMPC_DEPEND_source)
+ SourceLocation DepLoc =
+ DependC ? DependC->getDependencyLoc() : DoacrossC->getDependenceLoc();
+ if ((DependC && DependC->getDependencyKind() == OMPC_DEPEND_source) ||
+ (DoacrossC && ODK.isSource(DoacrossC)))
CntValue = ISC.buildOrderedLoopData(
DSA.getCurScope(),
ResultIterSpaces[CurrentNestedLoopCount].CounterVar, Captures,
- Pair.first->getDependencyLoc());
- else
+ DepLoc);
+ else if (DoacrossC && ODK.isSinkIter(DoacrossC)) {
+ Expr *Cnt = SemaRef
+ .DefaultLvalueConversion(
+ ResultIterSpaces[CurrentNestedLoopCount].CounterVar)
+ .get();
+ if (!Cnt)
+ continue;
+ // build CounterVar - 1
+ Expr *Inc =
+ SemaRef.ActOnIntegerConstant(DoacrossC->getColonLoc(), /*Val=*/1)
+ .get();
+ CntValue = ISC.buildOrderedLoopData(
+ DSA.getCurScope(),
+ ResultIterSpaces[CurrentNestedLoopCount].CounterVar, Captures,
+ DepLoc, Inc, clang::OO_Minus);
+ } else
CntValue = ISC.buildOrderedLoopData(
DSA.getCurScope(),
ResultIterSpaces[CurrentNestedLoopCount].CounterVar, Captures,
- Pair.first->getDependencyLoc(),
- Pair.second[CurrentNestedLoopCount].first,
+ DepLoc, Pair.second[CurrentNestedLoopCount].first,
Pair.second[CurrentNestedLoopCount].second);
- Pair.first->setLoopData(CurrentNestedLoopCount, CntValue);
+ if (DependC)
+ DependC->setLoopData(CurrentNestedLoopCount, CntValue);
+ else
+ DoacrossC->setLoopData(CurrentNestedLoopCount, CntValue);
}
}
@@ -10164,10 +10211,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Built.DependentInits[Cnt] = nullptr;
Built.FinalsConditions[Cnt] = nullptr;
if (IS.IsNonRectangularLB || IS.IsNonRectangularUB) {
- Built.DependentCounters[Cnt] =
- Built.Counters[NestedLoopCount - 1 - IS.LoopDependentIdx];
- Built.DependentInits[Cnt] =
- Built.Inits[NestedLoopCount - 1 - IS.LoopDependentIdx];
+ Built.DependentCounters[Cnt] = Built.Counters[IS.LoopDependentIdx - 1];
+ Built.DependentInits[Cnt] = Built.Inits[IS.LoopDependentIdx - 1];
Built.FinalsConditions[Cnt] = IS.FinalCondition;
}
}
@@ -11259,33 +11304,48 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
const OMPClause *DependFound = nullptr;
const OMPClause *DependSourceClause = nullptr;
const OMPClause *DependSinkClause = nullptr;
+ const OMPClause *DoacrossFound = nullptr;
+ const OMPClause *DoacrossSourceClause = nullptr;
+ const OMPClause *DoacrossSinkClause = nullptr;
bool ErrorFound = false;
const OMPThreadsClause *TC = nullptr;
const OMPSIMDClause *SC = nullptr;
for (const OMPClause *C : Clauses) {
- if (auto *DC = dyn_cast<OMPDependClause>(C)) {
- DependFound = C;
- if (DC->getDependencyKind() == OMPC_DEPEND_source) {
- if (DependSourceClause) {
+ auto DOC = dyn_cast<OMPDoacrossClause>(C);
+ auto DC = dyn_cast<OMPDependClause>(C);
+ if (DC || DOC) {
+ DependFound = DC ? C : nullptr;
+ DoacrossFound = DOC ? C : nullptr;
+ OMPDoacrossKind ODK;
+ if ((DC && DC->getDependencyKind() == OMPC_DEPEND_source) ||
+ (DOC && (ODK.isSource(DOC)))) {
+ if ((DC && DependSourceClause) || (DOC && DoacrossSourceClause)) {
Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(OMPD_ordered)
- << getOpenMPClauseName(OMPC_depend) << 2;
+ << getOpenMPClauseName(DC ? OMPC_depend : OMPC_doacross) << 2;
ErrorFound = true;
} else {
- DependSourceClause = C;
+ if (DC)
+ DependSourceClause = C;
+ else
+ DoacrossSourceClause = C;
}
- if (DependSinkClause) {
- Diag(C->getBeginLoc(), diag::err_omp_depend_sink_source_not_allowed)
- << 0;
+ if ((DC && DependSinkClause) || (DOC && DoacrossSinkClause)) {
+ Diag(C->getBeginLoc(), diag::err_omp_sink_and_source_not_allowed)
+ << (DC ? "depend" : "doacross") << 0;
ErrorFound = true;
}
- } else if (DC->getDependencyKind() == OMPC_DEPEND_sink) {
- if (DependSourceClause) {
- Diag(C->getBeginLoc(), diag::err_omp_depend_sink_source_not_allowed)
- << 1;
+ } else if ((DC && DC->getDependencyKind() == OMPC_DEPEND_sink) ||
+ (DOC && (ODK.isSink(DOC) || ODK.isSinkIter(DOC)))) {
+ if (DependSourceClause || DoacrossSourceClause) {
+ Diag(C->getBeginLoc(), diag::err_omp_sink_and_source_not_allowed)
+ << (DC ? "depend" : "doacross") << 1;
ErrorFound = true;
}
- DependSinkClause = C;
+ if (DC)
+ DependSinkClause = C;
+ else
+ DoacrossSinkClause = C;
}
} else if (C->getClauseKind() == OMPC_threads) {
TC = cast<OMPThreadsClause>(C);
@@ -11301,13 +11361,19 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Diag(StartLoc, diag::err_omp_prohibited_region_simd)
<< (LangOpts.OpenMP >= 50 ? 1 : 0);
ErrorFound = true;
- } else if (DependFound && (TC || SC)) {
- Diag(DependFound->getBeginLoc(), diag::err_omp_depend_clause_thread_simd)
+ } else if ((DependFound || DoacrossFound) && (TC || SC)) {
+ SourceLocation Loc =
+ DependFound ? DependFound->getBeginLoc() : DoacrossFound->getBeginLoc();
+ Diag(Loc, diag::err_omp_depend_clause_thread_simd)
+ << getOpenMPClauseName(DependFound ? OMPC_depend : OMPC_doacross)
<< getOpenMPClauseName(TC ? TC->getClauseKind() : SC->getClauseKind());
ErrorFound = true;
- } else if (DependFound && !DSAStack->getParentOrderedRegionParam().first) {
- Diag(DependFound->getBeginLoc(),
- diag::err_omp_ordered_directive_without_param);
+ } else if ((DependFound || DoacrossFound) &&
+ !DSAStack->getParentOrderedRegionParam().first) {
+ SourceLocation Loc =
+ DependFound ? DependFound->getBeginLoc() : DoacrossFound->getBeginLoc();
+ Diag(Loc, diag::err_omp_ordered_directive_without_param)
+ << getOpenMPClauseName(DependFound ? OMPC_depend : OMPC_doacross);
ErrorFound = true;
} else if (TC || Clauses.empty()) {
if (const Expr *Param = DSAStack->getParentOrderedRegionParam().first) {
@@ -11318,7 +11384,7 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
ErrorFound = true;
}
}
- if ((!AStmt && !DependFound) || ErrorFound)
+ if ((!AStmt && !DependFound && !DoacrossFound) || ErrorFound)
return StmtError();
// OpenMP 5.0, 2.17.9, ordered Construct, Restrictions.
@@ -11326,7 +11392,7 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
// within a worksharing-loop, simd, or worksharing-loop SIMD region, a thread
// must not execute more than one ordered region corresponding to an ordered
// construct without a depend clause.
- if (!DependFound) {
+ if (!DependFound && !DoacrossFound) {
if (DSAStack->doesParentHasOrderedDirective()) {
Diag(StartLoc, diag::err_omp_several_directives_in_region) << "ordered";
Diag(DSAStack->getParentOrderedDirectiveLoc(),
@@ -13271,6 +13337,10 @@ StmtResult Sema::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
+ // Report affected OpenMP target offloading behavior when in HIP lang-mode.
+ if (getLangOpts().HIP && (DSAStack->getParentDirective() == OMPD_target))
+ Diag(StartLoc, diag::warn_hip_omp_target_directives);
+
auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
@@ -15329,6 +15399,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
break;
}
[[fallthrough]];
+ case OMPD_target_teams_loop:
case OMPD_target_teams_distribute_parallel_for:
// If this clause applies to the nested 'parallel' region, capture within
// the 'teams' region, otherwise do not capture.
@@ -15421,7 +15492,6 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target:
case OMPD_target_teams:
case OMPD_target_teams_distribute:
- case OMPD_target_teams_loop:
case OMPD_distribute_parallel_for:
case OMPD_task:
case OMPD_taskloop:
@@ -17662,6 +17732,13 @@ OMPClause *Sema::ActOnOpenMPDestroyClause(Expr *InteropVar,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc) {
+ if (!InteropVar && LangOpts.OpenMP >= 52 &&
+ DSAStack->getCurrentDirective() == OMPD_depobj) {
+ Diag(StartLoc, diag::err_omp_expected_clause_argument)
+ << getOpenMPClauseName(OMPC_destroy)
+ << getOpenMPDirectiveName(OMPD_depobj);
+ return nullptr;
+ }
if (InteropVar &&
!isValidInteropVariable(*this, InteropVar, VarLoc, OMPC_destroy))
return nullptr;
@@ -17878,6 +17955,11 @@ OMPClause *Sema::ActOnOpenMPVarListClause(OpenMPClauseKind Kind,
Res = ActOnOpenMPAffinityClause(StartLoc, LParenLoc, ColonLoc, EndLoc,
Data.DepModOrTailExpr, VarList);
break;
+ case OMPC_doacross:
+ Res = ActOnOpenMPDoacrossClause(
+ static_cast<OpenMPDoacrossClauseModifier>(ExtraModifier),
+ ExtraModifierLoc, ColonLoc, VarList, StartLoc, LParenLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_depobj:
case OMPC_final:
@@ -19087,9 +19169,17 @@ static bool actOnOMPReductionKindClause(
// operators: +, -, *, &, |, ^, && and ||
switch (OOK) {
case OO_Plus:
- case OO_Minus:
BOK = BO_Add;
break;
+ case OO_Minus:
+ // Minus(-) operator is not supported in TR11 (OpenMP 6.0). Setting BOK to
+ // BO_Comma will automatically diagnose it for OpenMP > 52 as not allowed
+ // reduction identifier.
+ if (S.LangOpts.OpenMP > 52)
+ BOK = BO_Comma;
+ else
+ BOK = BO_Add;
+ break;
case OO_Star:
BOK = BO_Mul;
break;
@@ -19156,6 +19246,12 @@ static bool actOnOMPReductionKindClause(
}
break;
}
+
+ // OpenMP 5.2, 5.5.5 (see page 627, line 18) reduction Clause, Restrictions
+ // A reduction clause with the minus (-) operator was deprecated
+ if (OOK == OO_Minus && S.LangOpts.OpenMP == 52)
+ S.Diag(ReductionId.getLoc(), diag::warn_omp_minus_in_reduction_deprecated);
+
SourceRange ReductionIdRange;
if (ReductionIdScopeSpec.isValid())
ReductionIdRange.setBegin(ReductionIdScopeSpec.getBeginLoc());
@@ -19324,9 +19420,14 @@ static bool actOnOMPReductionKindClause(
}
if (BOK == BO_Comma && DeclareReductionRef.isUnset()) {
// Not allowed reduction identifier is found.
- S.Diag(ReductionId.getBeginLoc(),
- diag::err_omp_unknown_reduction_identifier)
- << Type << ReductionIdRange;
+ if (S.LangOpts.OpenMP > 52)
+ S.Diag(ReductionId.getBeginLoc(),
+ diag::err_omp_unknown_reduction_identifier_since_omp_6_0)
+ << Type << ReductionIdRange;
+ else
+ S.Diag(ReductionId.getBeginLoc(),
+ diag::err_omp_unknown_reduction_identifier_prior_omp_6_0)
+ << Type << ReductionIdRange;
continue;
}
@@ -20487,71 +20588,35 @@ OMPClause *Sema::ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
return OMPDepobjClause::Create(Context, StartLoc, LParenLoc, EndLoc, Depobj);
}
-OMPClause *
-Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
- Expr *DepModifier, ArrayRef<Expr *> VarList,
- SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc) {
- OpenMPDependClauseKind DepKind = Data.DepKind;
- SourceLocation DepLoc = Data.DepLoc;
- if (DSAStack->getCurrentDirective() == OMPD_ordered &&
- DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink) {
- Diag(DepLoc, diag::err_omp_unexpected_clause_value)
- << "'source' or 'sink'" << getOpenMPClauseName(OMPC_depend);
- return nullptr;
- }
- if (DSAStack->getCurrentDirective() == OMPD_taskwait &&
- DepKind == OMPC_DEPEND_mutexinoutset) {
- Diag(DepLoc, diag::err_omp_taskwait_depend_mutexinoutset_not_allowed);
- return nullptr;
- }
- if ((DSAStack->getCurrentDirective() != OMPD_ordered ||
- DSAStack->getCurrentDirective() == OMPD_depobj) &&
- (DepKind == OMPC_DEPEND_unknown || DepKind == OMPC_DEPEND_source ||
- DepKind == OMPC_DEPEND_sink ||
- ((LangOpts.OpenMP < 50 ||
- DSAStack->getCurrentDirective() == OMPD_depobj) &&
- DepKind == OMPC_DEPEND_depobj))) {
- SmallVector<unsigned, 6> Except = {OMPC_DEPEND_source, OMPC_DEPEND_sink,
- OMPC_DEPEND_outallmemory,
- OMPC_DEPEND_inoutallmemory};
- if (LangOpts.OpenMP < 50 || DSAStack->getCurrentDirective() == OMPD_depobj)
- Except.push_back(OMPC_DEPEND_depobj);
- if (LangOpts.OpenMP < 51)
- Except.push_back(OMPC_DEPEND_inoutset);
- std::string Expected = (LangOpts.OpenMP >= 50 && !DepModifier)
- ? "depend modifier(iterator) or "
- : "";
- Diag(DepLoc, diag::err_omp_unexpected_clause_value)
- << Expected + getListOfPossibleValues(OMPC_depend, /*First=*/0,
- /*Last=*/OMPC_DEPEND_unknown,
- Except)
- << getOpenMPClauseName(OMPC_depend);
- return nullptr;
- }
- if (DepModifier &&
- (DepKind == OMPC_DEPEND_source || DepKind == OMPC_DEPEND_sink)) {
- Diag(DepModifier->getExprLoc(),
- diag::err_omp_depend_sink_source_with_modifier);
- return nullptr;
- }
- if (DepModifier &&
- !DepModifier->getType()->isSpecificBuiltinType(BuiltinType::OMPIterator))
- Diag(DepModifier->getExprLoc(), diag::err_omp_depend_modifier_not_iterator);
+namespace {
+// Utility struct that gathers the related info for doacross clause.
+struct DoacrossDataInfoTy {
+ // The list of expressions.
+ SmallVector<Expr *, 8> Vars;
+ // The OperatorOffset for doacross loop.
+ DSAStackTy::OperatorOffsetTy OpsOffs;
+ // The depended loop count.
+ llvm::APSInt TotalDepCount;
+};
+} // namespace
+static DoacrossDataInfoTy
+ProcessOpenMPDoacrossClauseCommon(Sema &SemaRef, bool IsSource,
+ ArrayRef<Expr *> VarList, DSAStackTy *Stack,
+ SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
DSAStackTy::OperatorOffsetTy OpsOffs;
llvm::APSInt DepCounter(/*BitWidth=*/32);
llvm::APSInt TotalDepCount(/*BitWidth=*/32);
- if (DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) {
- if (const Expr *OrderedCountExpr =
- DSAStack->getParentOrderedRegionParam().first) {
- TotalDepCount = OrderedCountExpr->EvaluateKnownConstInt(Context);
- TotalDepCount.setIsUnsigned(/*Val=*/true);
- }
+
+ if (const Expr *OrderedCountExpr =
+ Stack->getParentOrderedRegionParam().first) {
+ TotalDepCount = OrderedCountExpr->EvaluateKnownConstInt(SemaRef.Context);
+ TotalDepCount.setIsUnsigned(/*Val=*/true);
}
+
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP shared clause.");
+ assert(RefExpr && "NULL expr in OpenMP doacross clause.");
if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -20560,10 +20625,10 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
SourceLocation ELoc = RefExpr->getExprLoc();
Expr *SimpleExpr = RefExpr->IgnoreParenCasts();
- if (DepKind == OMPC_DEPEND_sink) {
- if (DSAStack->getParentOrderedRegionParam().first &&
+ if (!IsSource) {
+ if (Stack->getParentOrderedRegionParam().first &&
DepCounter >= TotalDepCount) {
- Diag(ELoc, diag::err_omp_depend_sink_unexpected_expr);
+ SemaRef.Diag(ELoc, diag::err_omp_depend_sink_unexpected_expr);
continue;
}
++DepCounter;
@@ -20575,7 +20640,7 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
// directive, xi denotes the loop iteration variable of the i-th nested
// loop associated with the loop directive, and di is a constant
// non-negative integer.
- if (CurContext->isDependentContext()) {
+ if (SemaRef.CurContext->isDependentContext()) {
// It will be analyzed later.
Vars.push_back(RefExpr);
continue;
@@ -20606,7 +20671,7 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
}
SourceLocation ELoc;
SourceRange ERange;
- auto Res = getPrivateItem(*this, LHS, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, LHS, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -20616,129 +20681,213 @@ Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
continue;
if (OOK != OO_Plus && OOK != OO_Minus && (RHS || OOK != OO_None)) {
- Diag(OOLoc, diag::err_omp_depend_sink_expected_plus_minus);
+ SemaRef.Diag(OOLoc, diag::err_omp_depend_sink_expected_plus_minus);
continue;
}
if (RHS) {
- ExprResult RHSRes = VerifyPositiveIntegerConstantInClause(
+ ExprResult RHSRes = SemaRef.VerifyPositiveIntegerConstantInClause(
RHS, OMPC_depend, /*StrictlyPositive=*/false);
if (RHSRes.isInvalid())
continue;
}
- if (!CurContext->isDependentContext() &&
- DSAStack->getParentOrderedRegionParam().first &&
- DepCounter != DSAStack->isParentLoopControlVariable(D).first) {
+ if (!SemaRef.CurContext->isDependentContext() &&
+ Stack->getParentOrderedRegionParam().first &&
+ DepCounter != Stack->isParentLoopControlVariable(D).first) {
const ValueDecl *VD =
- DSAStack->getParentLoopControlVariable(DepCounter.getZExtValue());
+ Stack->getParentLoopControlVariable(DepCounter.getZExtValue());
if (VD)
- Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration)
+ SemaRef.Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration)
<< 1 << VD;
else
- Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration) << 0;
+ SemaRef.Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration)
+ << 0;
continue;
}
OpsOffs.emplace_back(RHS, OOK);
- } else {
- bool OMPDependTFound = LangOpts.OpenMP >= 50;
- if (OMPDependTFound)
- OMPDependTFound = findOMPDependT(*this, StartLoc, DSAStack,
- DepKind == OMPC_DEPEND_depobj);
- if (DepKind == OMPC_DEPEND_depobj) {
- // OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
- // List items used in depend clauses with the depobj dependence type
- // must be expressions of the omp_depend_t type.
- if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
- !RefExpr->isInstantiationDependent() &&
- !RefExpr->containsUnexpandedParameterPack() &&
- (OMPDependTFound &&
- !Context.hasSameUnqualifiedType(DSAStack->getOMPDependT(),
- RefExpr->getType()))) {
- Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue)
- << 0 << RefExpr->getType() << RefExpr->getSourceRange();
- continue;
- }
- if (!RefExpr->isLValue()) {
- Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue)
- << 1 << RefExpr->getType() << RefExpr->getSourceRange();
- continue;
- }
- } else {
- // OpenMP 5.0 [2.17.11, Restrictions]
- // List items used in depend clauses cannot be zero-length array
- // sections.
- QualType ExprTy = RefExpr->getType().getNonReferenceType();
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(SimpleExpr);
- if (OASE) {
- QualType BaseType =
- OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
- if (const auto *ATy = BaseType->getAsArrayTypeUnsafe())
- ExprTy = ATy->getElementType();
- else
- ExprTy = BaseType->getPointeeType();
- ExprTy = ExprTy.getNonReferenceType();
- const Expr *Length = OASE->getLength();
- Expr::EvalResult Result;
- if (Length && !Length->isValueDependent() &&
- Length->EvaluateAsInt(Result, Context) &&
- Result.Val.getInt().isZero()) {
- Diag(ELoc,
- diag::err_omp_depend_zero_length_array_section_not_allowed)
- << SimpleExpr->getSourceRange();
+ }
+ Vars.push_back(RefExpr->IgnoreParenImpCasts());
+ }
+ if (!SemaRef.CurContext->isDependentContext() && !IsSource &&
+ TotalDepCount > VarList.size() &&
+ Stack->getParentOrderedRegionParam().first &&
+ Stack->getParentLoopControlVariable(VarList.size() + 1)) {
+ SemaRef.Diag(EndLoc, diag::err_omp_depend_sink_expected_loop_iteration)
+ << 1 << Stack->getParentLoopControlVariable(VarList.size() + 1);
+ }
+ return {Vars, OpsOffs, TotalDepCount};
+}
+
+OMPClause *
+Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
+ Expr *DepModifier, ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ OpenMPDependClauseKind DepKind = Data.DepKind;
+ SourceLocation DepLoc = Data.DepLoc;
+ if (DSAStack->getCurrentDirective() == OMPD_ordered &&
+ DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink) {
+ Diag(DepLoc, diag::err_omp_unexpected_clause_value)
+ << "'source' or 'sink'" << getOpenMPClauseName(OMPC_depend);
+ return nullptr;
+ }
+ if (DSAStack->getCurrentDirective() == OMPD_taskwait &&
+ DepKind == OMPC_DEPEND_mutexinoutset) {
+ Diag(DepLoc, diag::err_omp_taskwait_depend_mutexinoutset_not_allowed);
+ return nullptr;
+ }
+ if ((DSAStack->getCurrentDirective() != OMPD_ordered ||
+ DSAStack->getCurrentDirective() == OMPD_depobj) &&
+ (DepKind == OMPC_DEPEND_unknown || DepKind == OMPC_DEPEND_source ||
+ DepKind == OMPC_DEPEND_sink ||
+ ((LangOpts.OpenMP < 50 ||
+ DSAStack->getCurrentDirective() == OMPD_depobj) &&
+ DepKind == OMPC_DEPEND_depobj))) {
+ SmallVector<unsigned, 6> Except = {OMPC_DEPEND_source, OMPC_DEPEND_sink,
+ OMPC_DEPEND_outallmemory,
+ OMPC_DEPEND_inoutallmemory};
+ if (LangOpts.OpenMP < 50 || DSAStack->getCurrentDirective() == OMPD_depobj)
+ Except.push_back(OMPC_DEPEND_depobj);
+ if (LangOpts.OpenMP < 51)
+ Except.push_back(OMPC_DEPEND_inoutset);
+ std::string Expected = (LangOpts.OpenMP >= 50 && !DepModifier)
+ ? "depend modifier(iterator) or "
+ : "";
+ Diag(DepLoc, diag::err_omp_unexpected_clause_value)
+ << Expected + getListOfPossibleValues(OMPC_depend, /*First=*/0,
+ /*Last=*/OMPC_DEPEND_unknown,
+ Except)
+ << getOpenMPClauseName(OMPC_depend);
+ return nullptr;
+ }
+ if (DepModifier &&
+ (DepKind == OMPC_DEPEND_source || DepKind == OMPC_DEPEND_sink)) {
+ Diag(DepModifier->getExprLoc(),
+ diag::err_omp_depend_sink_source_with_modifier);
+ return nullptr;
+ }
+ if (DepModifier &&
+ !DepModifier->getType()->isSpecificBuiltinType(BuiltinType::OMPIterator))
+ Diag(DepModifier->getExprLoc(), diag::err_omp_depend_modifier_not_iterator);
+
+ SmallVector<Expr *, 8> Vars;
+ DSAStackTy::OperatorOffsetTy OpsOffs;
+ llvm::APSInt TotalDepCount(/*BitWidth=*/32);
+
+ if (DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) {
+ DoacrossDataInfoTy VarOffset = ProcessOpenMPDoacrossClauseCommon(
+ *this, DepKind == OMPC_DEPEND_source, VarList, DSAStack, EndLoc);
+ Vars = VarOffset.Vars;
+ OpsOffs = VarOffset.OpsOffs;
+ TotalDepCount = VarOffset.TotalDepCount;
+ } else {
+ for (Expr *RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP shared clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ continue;
+ }
+
+ SourceLocation ELoc = RefExpr->getExprLoc();
+ Expr *SimpleExpr = RefExpr->IgnoreParenCasts();
+ if (DepKind != OMPC_DEPEND_sink && DepKind != OMPC_DEPEND_source) {
+ bool OMPDependTFound = LangOpts.OpenMP >= 50;
+ if (OMPDependTFound)
+ OMPDependTFound = findOMPDependT(*this, StartLoc, DSAStack,
+ DepKind == OMPC_DEPEND_depobj);
+ if (DepKind == OMPC_DEPEND_depobj) {
+ // OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
+ // List items used in depend clauses with the depobj dependence type
+ // must be expressions of the omp_depend_t type.
+ if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
+ !RefExpr->isInstantiationDependent() &&
+ !RefExpr->containsUnexpandedParameterPack() &&
+ (OMPDependTFound &&
+ !Context.hasSameUnqualifiedType(DSAStack->getOMPDependT(),
+ RefExpr->getType()))) {
+ Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue)
+ << 0 << RefExpr->getType() << RefExpr->getSourceRange();
continue;
}
- }
+ if (!RefExpr->isLValue()) {
+ Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue)
+ << 1 << RefExpr->getType() << RefExpr->getSourceRange();
+ continue;
+ }
+ } else {
+ // OpenMP 5.0 [2.17.11, Restrictions]
+ // List items used in depend clauses cannot be zero-length array
+ // sections.
+ QualType ExprTy = RefExpr->getType().getNonReferenceType();
+ const auto *OASE = dyn_cast<OMPArraySectionExpr>(SimpleExpr);
+ if (OASE) {
+ QualType BaseType =
+ OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ if (const auto *ATy = BaseType->getAsArrayTypeUnsafe())
+ ExprTy = ATy->getElementType();
+ else
+ ExprTy = BaseType->getPointeeType();
+ ExprTy = ExprTy.getNonReferenceType();
+ const Expr *Length = OASE->getLength();
+ Expr::EvalResult Result;
+ if (Length && !Length->isValueDependent() &&
+ Length->EvaluateAsInt(Result, Context) &&
+ Result.Val.getInt().isZero()) {
+ Diag(ELoc,
+ diag::err_omp_depend_zero_length_array_section_not_allowed)
+ << SimpleExpr->getSourceRange();
+ continue;
+ }
+ }
- // OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
- // List items used in depend clauses with the in, out, inout,
- // inoutset, or mutexinoutset dependence types cannot be
- // expressions of the omp_depend_t type.
- if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
- !RefExpr->isInstantiationDependent() &&
- !RefExpr->containsUnexpandedParameterPack() &&
- (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
- (OMPDependTFound &&
- DSAStack->getOMPDependT().getTypePtr() == ExprTy.getTypePtr()))) {
- Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << (LangOpts.OpenMP >= 50 ? 1 : 0)
- << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
- continue;
- }
+ // OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
+ // List items used in depend clauses with the in, out, inout,
+ // inoutset, or mutexinoutset dependence types cannot be
+ // expressions of the omp_depend_t type.
+ if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
+ !RefExpr->isInstantiationDependent() &&
+ !RefExpr->containsUnexpandedParameterPack() &&
+ (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
+ (OMPDependTFound && DSAStack->getOMPDependT().getTypePtr() ==
+ ExprTy.getTypePtr()))) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
+ continue;
+ }
- auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
- if (ASE && !ASE->getBase()->isTypeDependent() &&
- !ASE->getBase()->getType().getNonReferenceType()->isPointerType() &&
- !ASE->getBase()->getType().getNonReferenceType()->isArrayType()) {
- Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << (LangOpts.OpenMP >= 50 ? 1 : 0)
- << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
- continue;
- }
+ auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
+ if (ASE && !ASE->getBase()->isTypeDependent() &&
+ !ASE->getBase()
+ ->getType()
+ .getNonReferenceType()
+ ->isPointerType() &&
+ !ASE->getBase()->getType().getNonReferenceType()->isArrayType()) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
+ continue;
+ }
- ExprResult Res;
- {
- Sema::TentativeAnalysisScope Trap(*this);
- Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf,
- RefExpr->IgnoreParenImpCasts());
- }
- if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr) &&
- !isa<OMPArrayShapingExpr>(SimpleExpr)) {
- Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << (LangOpts.OpenMP >= 50 ? 1 : 0)
- << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
- continue;
+ ExprResult Res;
+ {
+ Sema::TentativeAnalysisScope Trap(*this);
+ Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf,
+ RefExpr->IgnoreParenImpCasts());
+ }
+ if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr) &&
+ !isa<OMPArrayShapingExpr>(SimpleExpr)) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
+ continue;
+ }
}
}
+ Vars.push_back(RefExpr->IgnoreParenImpCasts());
}
- Vars.push_back(RefExpr->IgnoreParenImpCasts());
}
- if (!CurContext->isDependentContext() && DepKind == OMPC_DEPEND_sink &&
- TotalDepCount > VarList.size() &&
- DSAStack->getParentOrderedRegionParam().first &&
- DSAStack->getParentLoopControlVariable(VarList.size() + 1)) {
- Diag(EndLoc, diag::err_omp_depend_sink_expected_loop_iteration)
- << 1 << DSAStack->getParentLoopControlVariable(VarList.size() + 1);
- }
if (DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink &&
DepKind != OMPC_DEPEND_outallmemory &&
DepKind != OMPC_DEPEND_inoutallmemory && Vars.empty())
@@ -22833,6 +22982,11 @@ bool Sema::ActOnStartOpenMPDeclareTargetContext(
Diag(DTCI.Loc, diag::err_omp_region_not_file_context);
return false;
}
+
+ // Report affected OpenMP target offloading behavior when in HIP lang-mode.
+ if (getLangOpts().HIP)
+ Diag(DTCI.Loc, diag::warn_hip_omp_target_directives);
+
DeclareTargetNesting.push_back(DTCI);
return true;
}
@@ -22905,6 +23059,10 @@ void Sema::ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
(ND->isUsed(/*CheckUsedAttr=*/false) || ND->isReferenced()))
Diag(Loc, diag::warn_omp_declare_target_after_first_use);
+ // Report affected OpenMP target offloading behavior when in HIP lang-mode.
+ if (getLangOpts().HIP)
+ Diag(Loc, diag::warn_hip_omp_target_directives);
+
// Explicit declare target lists have precedence.
const unsigned Level = -1;
@@ -23054,6 +23212,55 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
checkDeclInTargetContext(E->getExprLoc(), E->getSourceRange(), *this, D);
}
+/// This class visits every VarDecl that the initializer references and adds
+/// OMPDeclareTargetDeclAttr to each of them.
+class GlobalDeclRefChecker final
+ : public StmtVisitor<GlobalDeclRefChecker> {
+ SmallVector<VarDecl *> DeclVector;
+ Attr *A;
+
+public:
+ /// A StmtVisitor class function that visits all DeclRefExpr and adds
+ /// OMPDeclareTargetDeclAttr to them.
+ void VisitDeclRefExpr(DeclRefExpr *Node) {
+ if (auto *VD = dyn_cast<VarDecl>(Node->getDecl())) {
+ VD->addAttr(A);
+ DeclVector.push_back(VD);
+ }
+ }
+ /// A function that iterates across each of the Expr's children.
+ void VisitExpr(Expr *Ex) {
+ for (auto *Child : Ex->children()) {
+ Visit(Child);
+ }
+ }
+ /// A function that keeps a record of all the Decls that are variables, has
+ /// OMPDeclareTargetDeclAttr, and has global storage in the DeclVector. Pop
+ /// each Decl one at a time and use the inherited 'visit' functions to look
+ /// for DeclRefExpr.
+ void declareTargetInitializer(Decl *TD) {
+ A = TD->getAttr<OMPDeclareTargetDeclAttr>();
+ DeclVector.push_back(cast<VarDecl>(TD));
+ while (!DeclVector.empty()) {
+ VarDecl *TargetVarDecl = DeclVector.pop_back_val();
+ if (TargetVarDecl->hasAttr<OMPDeclareTargetDeclAttr>() &&
+ TargetVarDecl->hasInit() && TargetVarDecl->hasGlobalStorage()) {
+ if (Expr *Ex = TargetVarDecl->getInit())
+ Visit(Ex);
+ }
+ }
+ }
+};
+
+/// Adding OMPDeclareTargetDeclAttr to variables with static storage
+/// duration that are referenced in the initializer expression list of
+/// variables with static storage duration in declare target directive.
+void Sema::ActOnOpenMPDeclareTargetInitializer(Decl *TargetDecl) {
+ GlobalDeclRefChecker Checker;
+ if (isa<VarDecl>(TargetDecl))
+ Checker.declareTargetInitializer(TargetDecl);
+}
+
OMPClause *Sema::ActOnOpenMPToClause(
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
@@ -23459,7 +23666,7 @@ OMPClause *Sema::ActOnOpenMPAllocateClause(
// target region must specify an allocator expression unless a requires
// directive with the dynamic_allocators clause is present in the same
// compilation unit.
- if (LangOpts.OpenMPIsDevice &&
+ if (LangOpts.OpenMPIsTargetDevice &&
!DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
targetDiag(StartLoc, diag::err_expected_allocator_expression);
}
@@ -23851,3 +24058,38 @@ OMPClause *Sema::ActOnOpenMPXDynCGroupMemClause(Expr *Size,
return new (Context) OMPXDynCGroupMemClause(
ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
}
+
+OMPClause *Sema::ActOnOpenMPDoacrossClause(
+ OpenMPDoacrossClauseModifier DepType, SourceLocation DepLoc,
+ SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc) {
+
+ if (DSAStack->getCurrentDirective() == OMPD_ordered &&
+ DepType != OMPC_DOACROSS_source && DepType != OMPC_DOACROSS_sink &&
+ DepType != OMPC_DOACROSS_sink_omp_cur_iteration &&
+ DepType != OMPC_DOACROSS_source_omp_cur_iteration &&
+ DepType != OMPC_DOACROSS_source) {
+ Diag(DepLoc, diag::err_omp_unexpected_clause_value)
+ << "'source' or 'sink'" << getOpenMPClauseName(OMPC_doacross);
+ return nullptr;
+ }
+
+ SmallVector<Expr *, 8> Vars;
+ DSAStackTy::OperatorOffsetTy OpsOffs;
+ llvm::APSInt TotalDepCount(/*BitWidth=*/32);
+ DoacrossDataInfoTy VarOffset = ProcessOpenMPDoacrossClauseCommon(
+ *this,
+ DepType == OMPC_DOACROSS_source ||
+ DepType == OMPC_DOACROSS_source_omp_cur_iteration ||
+ DepType == OMPC_DOACROSS_sink_omp_cur_iteration,
+ VarList, DSAStack, EndLoc);
+ Vars = VarOffset.Vars;
+ OpsOffs = VarOffset.OpsOffs;
+ TotalDepCount = VarOffset.TotalDepCount;
+ auto *C = OMPDoacrossClause::Create(Context, StartLoc, LParenLoc, EndLoc,
+ DepType, DepLoc, ColonLoc, Vars,
+ TotalDepCount.getZExtValue());
+ if (DSAStack->isParentOrderedRegion())
+ DSAStack->addDoacrossDependClause(C, OpsOffs);
+ return C;
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
index d68337a26d97..a3d9abb15377 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
@@ -26,6 +27,7 @@
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
@@ -120,7 +122,7 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
/// corresponding to the given implicit conversion kind.
ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) {
static const ImplicitConversionRank
- Rank[(int)ICK_Num_Conversion_Kinds] = {
+ Rank[] = {
ICR_Exact_Match,
ICR_Exact_Match,
ICR_Exact_Match,
@@ -141,6 +143,7 @@ ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) {
ICR_Conversion,
ICR_Conversion,
ICR_Conversion,
+ ICR_Conversion,
ICR_OCL_Scalar_Widening,
ICR_Complex_Real_Conversion,
ICR_Conversion,
@@ -149,16 +152,20 @@ ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) {
ICR_Exact_Match, // NOTE(gbiv): This may not be completely right --
// it was omitted by the patch that added
// ICK_Zero_Event_Conversion
+ ICR_Exact_Match, // NOTE(ctopper): This may not be completely right --
+ // it was omitted by the patch that added
+ // ICK_Zero_Queue_Conversion
ICR_C_Conversion,
ICR_C_Conversion_Extension
};
+ static_assert(std::size(Rank) == (int)ICK_Num_Conversion_Kinds);
return Rank[(int)Kind];
}
/// GetImplicitConversionName - Return the name of this kind of
/// implicit conversion.
static const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
- static const char* const Name[(int)ICK_Num_Conversion_Kinds] = {
+ static const char* const Name[] = {
"No conversion",
"Lvalue-to-rvalue",
"Array-to-pointer",
@@ -179,15 +186,18 @@ static const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
"Derived-to-base conversion",
"Vector conversion",
"SVE Vector conversion",
+ "RVV Vector conversion",
"Vector splat",
"Complex-real conversion",
"Block Pointer conversion",
"Transparent Union Conversion",
"Writeback conversion",
"OpenCL Zero Event Conversion",
+ "OpenCL Zero Queue Conversion",
"C specific type conversion",
"Incompatible pointer conversion"
};
+ static_assert(std::size(Name) == (int)ICK_Num_Conversion_Kinds);
return Name[Kind];
}
@@ -1155,15 +1165,6 @@ Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old,
!shouldLinkPossiblyHiddenDecl(*I, New))
continue;
- // C++20 [temp.friend] p9: A non-template friend declaration with a
- // requires-clause shall be a definition. A friend function template
- // with a constraint that depends on a template parameter from an
- // enclosing template shall be a definition. Such a constrained friend
- // function or function template declaration does not declare the same
- // function or function template as a declaration in any other scope.
- if (Context.FriendsDifferByConstraints(OldF, New))
- continue;
-
Match = *I;
return Ovl_Match;
}
@@ -1280,6 +1281,12 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
!FunctionParamTypesAreEqual(OldType, NewType)))
return true;
+ // For member-like friends, the enclosing class is part of the signature.
+ if ((New->isMemberLikeConstrainedFriend() ||
+ Old->isMemberLikeConstrainedFriend()) &&
+ !New->getLexicalDeclContext()->Equals(Old->getLexicalDeclContext()))
+ return true;
+
if (NewTemplate) {
// C++ [temp.over.link]p4:
// The signature of a function template consists of its function
@@ -1291,7 +1298,7 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
// We check the return type and template parameter lists for function
// templates first; the remaining checks follow.
bool SameTemplateParameterList = TemplateParameterListsAreEqual(
- NewTemplate->getTemplateParameters(),
+ NewTemplate, NewTemplate->getTemplateParameters(), OldTemplate,
OldTemplate->getTemplateParameters(), false, TPL_TemplateMatch);
bool SameReturnType = Context.hasSameType(Old->getDeclaredReturnType(),
New->getDeclaredReturnType());
@@ -1750,13 +1757,22 @@ static bool IsVectorConversion(Sema &S, QualType FromType, QualType ToType,
}
}
- if (ToType->isSizelessBuiltinType() || FromType->isSizelessBuiltinType())
+ if (ToType->isSVESizelessBuiltinType() ||
+ FromType->isSVESizelessBuiltinType())
if (S.Context.areCompatibleSveTypes(FromType, ToType) ||
S.Context.areLaxCompatibleSveTypes(FromType, ToType)) {
ICK = ICK_SVE_Vector_Conversion;
return true;
}
+ if (ToType->isRVVSizelessBuiltinType() ||
+ FromType->isRVVSizelessBuiltinType())
+ if (S.Context.areCompatibleRVVTypes(FromType, ToType) ||
+ S.Context.areLaxCompatibleRVVTypes(FromType, ToType)) {
+ ICK = ICK_RVV_Vector_Conversion;
+ return true;
+ }
+
// We can perform the conversion between vector types in the following cases:
// 1)vector types are equivalent AltiVec and GCC vector types
// 2)lax vector conversions are permitted and the vector types are of the
@@ -1768,9 +1784,10 @@ static bool IsVectorConversion(Sema &S, QualType FromType, QualType ToType,
if (S.Context.areCompatibleVectorTypes(FromType, ToType) ||
(S.isLaxVectorConversion(FromType, ToType) &&
!ToType->hasAttr(attr::ArmMveStrictPolymorphism))) {
- if (S.isLaxVectorConversion(FromType, ToType) &&
+ if (S.getASTContext().getTargetInfo().getTriple().isPPC() &&
+ S.isLaxVectorConversion(FromType, ToType) &&
S.anyAltivecTypes(FromType, ToType) &&
- !S.areSameVectorElemTypes(FromType, ToType) &&
+ !S.Context.areCompatibleVectorTypes(FromType, ToType) &&
!InOverloadResolution && !CStyle) {
S.Diag(From->getBeginLoc(), diag::warn_deprecated_lax_vec_conv_all)
<< FromType << ToType;
@@ -1979,8 +1996,11 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// if their representation is different until there is back end support
// We of course allow this conversion if long double is really double.
- // Conversions between bfloat and other floats are not permitted.
- if (FromType == S.Context.BFloat16Ty || ToType == S.Context.BFloat16Ty)
+ // Conversions between bfloat16 and float16 are currently not supported.
+ if ((FromType->isBFloat16Type() &&
+ (ToType->isFloat16Type() || ToType->isHalfType())) ||
+ (ToType->isBFloat16Type() &&
+ (FromType->isFloat16Type() || FromType->isHalfType())))
return false;
// Conversions between IEEE-quad and IBM-extended semantics are not
@@ -2001,9 +2021,6 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
ToType->isIntegralType(S.Context)) ||
(FromType->isIntegralOrUnscopedEnumerationType() &&
ToType->isRealFloatingType())) {
- // Conversions between bfloat and int are not permitted.
- if (FromType->isBFloat16Type() || ToType->isBFloat16Type())
- return false;
// Floating-integral conversions (C++ 4.9).
SCS.Second = ICK_Floating_Integral;
@@ -4322,6 +4339,20 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
: ImplicitConversionSequence::Worse;
}
+ if (SCS1.Second == ICK_RVV_Vector_Conversion &&
+ SCS2.Second == ICK_RVV_Vector_Conversion) {
+ bool SCS1IsCompatibleRVVVectorConversion =
+ S.Context.areCompatibleRVVTypes(SCS1.getFromType(), SCS1.getToType(2));
+ bool SCS2IsCompatibleRVVVectorConversion =
+ S.Context.areCompatibleRVVTypes(SCS2.getFromType(), SCS2.getToType(2));
+
+ if (SCS1IsCompatibleRVVVectorConversion !=
+ SCS2IsCompatibleRVVVectorConversion)
+ return SCS1IsCompatibleRVVVectorConversion
+ ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ }
+
return ImplicitConversionSequence::Indistinguishable;
}
@@ -5125,6 +5156,18 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
if (!S.isCompleteType(From->getBeginLoc(), InitTy))
return Result;
+ // C++20 [over.ics.list]/2:
+ // If the initializer list is a designated-initializer-list, a conversion
+ // is only possible if the parameter has an aggregate type
+ //
+ // FIXME: The exception for reference initialization here is not part of the
+ // language rules, but follow other compilers in adding it as a tentative DR
+ // resolution.
+ bool IsDesignatedInit = From->hasDesignatedInit();
+ if (!ToType->isAggregateType() && !ToType->isReferenceType() &&
+ IsDesignatedInit)
+ return Result;
+
// Per DR1467:
// If the parameter type is a class X and the initializer list has a single
// element of type cv U, where U is X or a class derived from X, the
@@ -5135,7 +5178,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
// and the initializer list has a single element that is an
// appropriately-typed string literal (8.5.2 [dcl.init.string]), the
// implicit conversion sequence is the identity conversion.
- if (From->getNumInits() == 1) {
+ if (From->getNumInits() == 1 && !IsDesignatedInit) {
if (ToType->isRecordType()) {
QualType InitType = From->getInit(0)->getType();
if (S.Context.hasSameUnqualifiedType(InitType, ToType) ||
@@ -5173,7 +5216,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
// default-constructible, and if all the elements of the initializer list
// can be implicitly converted to X, the implicit conversion sequence is
// the worst conversion necessary to convert an element of the list to X.
- if (AT || S.isStdInitializerList(ToType, &InitTy)) {
+ if ((AT || S.isStdInitializerList(ToType, &InitTy)) && !IsDesignatedInit) {
unsigned e = From->getNumInits();
ImplicitConversionSequence DfltElt;
DfltElt.setBad(BadConversionSequence::no_conversion, QualType(),
@@ -5315,7 +5358,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
// If the initializer list has a single element that is reference-related
// to the parameter type, we initialize the reference from that.
- if (From->getNumInits() == 1) {
+ if (From->getNumInits() == 1 && !IsDesignatedInit) {
Expr *Init = From->getInit(0);
QualType T2 = Init->getType();
@@ -5748,6 +5791,7 @@ static bool CheckConvertedConstantConversions(Sema &S,
case ICK_Derived_To_Base:
case ICK_Vector_Conversion:
case ICK_SVE_Vector_Conversion:
+ case ICK_RVV_Vector_Conversion:
case ICK_Vector_Splat:
case ICK_Complex_Real:
case ICK_Block_Pointer_Conversion:
@@ -5774,14 +5818,14 @@ static bool CheckConvertedConstantConversions(Sema &S,
llvm_unreachable("unknown conversion kind");
}
-/// CheckConvertedConstantExpression - Check that the expression From is a
-/// converted constant expression of type T, perform the conversion and produce
-/// the converted expression, per C++11 [expr.const]p3.
-static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
- QualType T, APValue &Value,
+/// BuildConvertedConstantExpression - Check that the expression From is a
+/// converted constant expression of type T, perform the conversion but
+/// does not evaluate the expression
+static ExprResult BuildConvertedConstantExpression(Sema &S, Expr *From,
+ QualType T,
Sema::CCEKind CCE,
- bool RequireInt,
- NamedDecl *Dest) {
+ NamedDecl *Dest,
+ APValue &PreNarrowingValue) {
assert(S.getLangOpts().CPlusPlus11 &&
"converted constant expression outside C++11");
@@ -5865,7 +5909,6 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
// Check for a narrowing implicit conversion.
bool ReturnPreNarrowingValue = false;
- APValue PreNarrowingValue;
QualType PreNarrowingType;
switch (SCS->getNarrowingKind(S.Context, Result.get(), PreNarrowingValue,
PreNarrowingType)) {
@@ -5899,12 +5942,19 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
<< CCE << /*Constant*/ 0 << From->getType() << T;
break;
}
+ if (!ReturnPreNarrowingValue)
+ PreNarrowingValue = {};
- if (Result.get()->isValueDependent()) {
- Value = APValue();
- return Result;
- }
+ return Result;
+}
+/// EvaluateConvertedConstantExpression - Evaluate an Expression
+/// That is a converted constant expression
+/// (which was built with BuildConvertedConstantExpression)
+static ExprResult EvaluateConvertedConstantExpression(
+ Sema &S, Expr *E, QualType T, APValue &Value, Sema::CCEKind CCE,
+ bool RequireInt, const APValue &PreNarrowingValue) {
+ ExprResult Result = E;
// Check the expression is a constant expression.
SmallVector<PartialDiagnosticAt, 8> Notes;
Expr::EvalResult Eval;
@@ -5918,7 +5968,7 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
else
Kind = ConstantExprKind::Normal;
- if (!Result.get()->EvaluateAsConstantExpr(Eval, S.Context, Kind) ||
+ if (!E->EvaluateAsConstantExpr(Eval, S.Context, Kind) ||
(RequireInt && !Eval.Val.isInt())) {
// The expression can't be folded, so we can't keep it at this position in
// the AST.
@@ -5929,7 +5979,7 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
if (Notes.empty()) {
// It's a constant expression.
Expr *E = ConstantExpr::Create(S.Context, Result.get(), Value);
- if (ReturnPreNarrowingValue)
+ if (!PreNarrowingValue.isAbsent())
Value = std::move(PreNarrowingValue);
return E;
}
@@ -5945,14 +5995,42 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
for (unsigned I = 0; I < Notes.size(); ++I)
S.Diag(Notes[I].first, Notes[I].second);
} else {
- S.Diag(From->getBeginLoc(), diag::err_expr_not_cce)
- << CCE << From->getSourceRange();
+ S.Diag(E->getBeginLoc(), diag::err_expr_not_cce)
+ << CCE << E->getSourceRange();
for (unsigned I = 0; I < Notes.size(); ++I)
S.Diag(Notes[I].first, Notes[I].second);
}
return ExprError();
}
+/// CheckConvertedConstantExpression - Check that the expression From is a
+/// converted constant expression of type T, perform the conversion and produce
+/// the converted expression, per C++11 [expr.const]p3.
+static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
+ QualType T, APValue &Value,
+ Sema::CCEKind CCE,
+ bool RequireInt,
+ NamedDecl *Dest) {
+
+ APValue PreNarrowingValue;
+ ExprResult Result = BuildConvertedConstantExpression(S, From, T, CCE, Dest,
+ PreNarrowingValue);
+ if (Result.isInvalid() || Result.get()->isValueDependent()) {
+ Value = APValue();
+ return Result;
+ }
+ return EvaluateConvertedConstantExpression(S, Result.get(), T, Value, CCE,
+ RequireInt, PreNarrowingValue);
+}
+
+ExprResult Sema::BuildConvertedConstantExpression(Expr *From, QualType T,
+ CCEKind CCE,
+ NamedDecl *Dest) {
+ APValue PreNarrowingValue;
+ return ::BuildConvertedConstantExpression(*this, From, T, CCE, Dest,
+ PreNarrowingValue);
+}
+
ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE,
NamedDecl *Dest) {
@@ -6425,7 +6503,7 @@ void Sema::AddOverloadCandidate(
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions,
bool PartialOverloading, bool AllowExplicit, bool AllowExplicitConversions,
ADLCallKind IsADLCandidate, ConversionSequenceList EarlyConversions,
- OverloadCandidateParamOrder PO) {
+ OverloadCandidateParamOrder PO, bool AggregateCandidateDeduction) {
const FunctionProtoType *Proto
= dyn_cast<FunctionProtoType>(Function->getType()->getAs<FunctionType>());
assert(Proto && "Functions without a prototype cannot be overloaded");
@@ -6500,23 +6578,20 @@ void Sema::AddOverloadCandidate(
}
// Functions with internal linkage are only viable in the same module unit.
- if (auto *MF = Function->getOwningModule()) {
- if (getLangOpts().CPlusPlusModules && !MF->isModuleMapModule() &&
- !isModuleUnitOfCurrentTU(MF)) {
- /// FIXME: Currently, the semantics of linkage in clang is slightly
- /// different from the semantics in C++ spec. In C++ spec, only names
- /// have linkage. So that all entities of the same should share one
- /// linkage. But in clang, different entities of the same could have
- /// different linkage.
- NamedDecl *ND = Function;
- if (auto *SpecInfo = Function->getTemplateSpecializationInfo())
- ND = SpecInfo->getTemplate();
-
- if (ND->getFormalLinkage() == Linkage::InternalLinkage) {
- Candidate.Viable = false;
- Candidate.FailureKind = ovl_fail_module_mismatched;
- return;
- }
+ if (getLangOpts().CPlusPlusModules && Function->isInAnotherModuleUnit()) {
+ /// FIXME: Currently, the semantics of linkage in clang is slightly
+ /// different from the semantics in C++ spec. In C++ spec, only names
+ /// have linkage. So that all entities of the same should share one
+ /// linkage. But in clang, different entities of the same could have
+ /// different linkage.
+ NamedDecl *ND = Function;
+ if (auto *SpecInfo = Function->getTemplateSpecializationInfo())
+ ND = SpecInfo->getTemplate();
+
+ if (ND->getFormalLinkage() == Linkage::InternalLinkage) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_module_mismatched;
+ return;
}
}
@@ -6595,7 +6670,8 @@ void Sema::AddOverloadCandidate(
// parameter list is truncated on the right, so that there are
// exactly m parameters.
unsigned MinRequiredArgs = Function->getMinRequiredArguments();
- if (Args.size() < MinRequiredArgs && !PartialOverloading) {
+ if (!AggregateCandidateDeduction && Args.size() < MinRequiredArgs &&
+ !PartialOverloading) {
// Not enough arguments.
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_few_arguments;
@@ -7221,7 +7297,8 @@ void Sema::AddMethodTemplateCandidate(
ConversionSequenceList Conversions;
if (TemplateDeductionResult Result = DeduceTemplateArguments(
MethodTmpl, ExplicitTemplateArgs, Args, Specialization, Info,
- PartialOverloading, [&](ArrayRef<QualType> ParamTypes) {
+ PartialOverloading, /*AggregateDeductionCandidate=*/false,
+ [&](ArrayRef<QualType> ParamTypes) {
return CheckNonDependentConversions(
MethodTmpl, ParamTypes, Args, CandidateSet, Conversions,
SuppressUserConversions, ActingContext, ObjectType,
@@ -7274,7 +7351,7 @@ void Sema::AddTemplateOverloadCandidate(
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions,
bool PartialOverloading, bool AllowExplicit, ADLCallKind IsADLCandidate,
- OverloadCandidateParamOrder PO) {
+ OverloadCandidateParamOrder PO, bool AggregateCandidateDeduction) {
if (!CandidateSet.isNewCandidate(FunctionTemplate, PO))
return;
@@ -7304,7 +7381,8 @@ void Sema::AddTemplateOverloadCandidate(
ConversionSequenceList Conversions;
if (TemplateDeductionResult Result = DeduceTemplateArguments(
FunctionTemplate, ExplicitTemplateArgs, Args, Specialization, Info,
- PartialOverloading, [&](ArrayRef<QualType> ParamTypes) {
+ PartialOverloading, AggregateCandidateDeduction,
+ [&](ArrayRef<QualType> ParamTypes) {
return CheckNonDependentConversions(
FunctionTemplate, ParamTypes, Args, CandidateSet, Conversions,
SuppressUserConversions, nullptr, QualType(), {}, PO);
@@ -7340,7 +7418,8 @@ void Sema::AddTemplateOverloadCandidate(
AddOverloadCandidate(
Specialization, FoundDecl, Args, CandidateSet, SuppressUserConversions,
PartialOverloading, AllowExplicit,
- /*AllowExplicitConversions*/ false, IsADLCandidate, Conversions, PO);
+ /*AllowExplicitConversions=*/false, IsADLCandidate, Conversions, PO,
+ Info.AggregateDeductionCandidateHasMismatchedArity);
}
/// Check that implicit conversion sequences can be formed for each argument
@@ -7807,6 +7886,17 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
}
}
+ if (Conversion->getTrailingRequiresClause()) {
+ ConstraintSatisfaction Satisfaction;
+ if (CheckFunctionConstraints(Conversion, Satisfaction, /*Loc*/ {},
+ /*ForOverloadResolution*/ true) ||
+ !Satisfaction.IsSatisfied) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_constraints_not_satisfied;
+ return;
+ }
+ }
+
if (EnableIfAttr *FailedAttr =
CheckEnableIf(Conversion, CandidateSet.getLocation(), std::nullopt)) {
Candidate.Viable = false;
@@ -7984,8 +8074,7 @@ namespace {
/// enumeration types.
class BuiltinCandidateTypeSet {
/// TypeSet - A set of types.
- typedef llvm::SetVector<QualType, SmallVector<QualType, 8>,
- llvm::SmallPtrSet<QualType, 8>> TypeSet;
+ typedef llvm::SmallSetVector<QualType, 8> TypeSet;
/// PointerTypes - The set of pointer types that will be used in the
/// built-in candidates.
@@ -9886,7 +9975,7 @@ bool clang::isBetterOverloadCandidate(
}
}
- // C++ [over.match.best]p1: (Changed in C++2b)
+ // C++ [over.match.best]p1: (Changed in C++23)
//
// -- if F is a static member function, ICS1(F) is defined such
// that ICS1(F) is neither better nor worse than ICS1(G) for
@@ -10108,7 +10197,7 @@ bool clang::isBetterOverloadCandidate(
return Guide2->isImplicit();
// -- F1 is the copy deduction candidate(16.3.1.8) and F2 is not
- if (Guide1->isCopyDeductionCandidate())
+ if (Guide1->getDeductionCandidateKind() == DeductionCandidate::Copy)
return true;
}
}
@@ -10368,7 +10457,8 @@ enum OverloadCandidateSelect {
};
static std::pair<OverloadCandidateKind, OverloadCandidateSelect>
-ClassifyOverloadCandidate(Sema &S, NamedDecl *Found, FunctionDecl *Fn,
+ClassifyOverloadCandidate(Sema &S, const NamedDecl *Found,
+ const FunctionDecl *Fn,
OverloadCandidateRewriteKind CRK,
std::string &Description) {
@@ -10392,7 +10482,7 @@ ClassifyOverloadCandidate(Sema &S, NamedDecl *Found, FunctionDecl *Fn,
if (CRK & CRK_Reversed)
return oc_reversed_binary_operator;
- if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Fn)) {
+ if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(Fn)) {
if (!Ctor->isImplicit()) {
if (isa<ConstructorUsingShadowDecl>(Found))
return oc_inherited_constructor;
@@ -10411,7 +10501,7 @@ ClassifyOverloadCandidate(Sema &S, NamedDecl *Found, FunctionDecl *Fn,
return oc_implicit_copy_constructor;
}
- if (CXXMethodDecl *Meth = dyn_cast<CXXMethodDecl>(Fn)) {
+ if (const auto *Meth = dyn_cast<CXXMethodDecl>(Fn)) {
// This actually gets spelled 'candidate function' for now, but
// it doesn't hurt to split it out.
if (!Meth->isImplicit())
@@ -10433,10 +10523,10 @@ ClassifyOverloadCandidate(Sema &S, NamedDecl *Found, FunctionDecl *Fn,
return std::make_pair(Kind, Select);
}
-void MaybeEmitInheritedConstructorNote(Sema &S, Decl *FoundDecl) {
+void MaybeEmitInheritedConstructorNote(Sema &S, const Decl *FoundDecl) {
// FIXME: It'd be nice to only emit a note once per using-decl per overload
// set.
- if (auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(FoundDecl))
+ if (const auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(FoundDecl))
S.Diag(FoundDecl->getLocation(),
diag::note_ovl_candidate_inherited_constructor)
<< Shadow->getNominatedBaseClass();
@@ -10543,7 +10633,7 @@ bool Sema::checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
// Don't print candidates other than the one that matches the calling
// convention of the call operator, since that is guaranteed to exist.
-static bool shouldSkipNotingLambdaConversionDecl(FunctionDecl *Fn) {
+static bool shouldSkipNotingLambdaConversionDecl(const FunctionDecl *Fn) {
const auto *ConvD = dyn_cast<CXXConversionDecl>(Fn);
if (!ConvD)
@@ -10563,7 +10653,7 @@ static bool shouldSkipNotingLambdaConversionDecl(FunctionDecl *Fn) {
}
// Notes the location of an overload candidate.
-void Sema::NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
+void Sema::NoteOverloadCandidate(const NamedDecl *Found, const FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind,
QualType DestType, bool TakingAddress) {
if (TakingAddress && !checkAddressOfCandidateIsAvailable(*this, Fn))
@@ -10709,6 +10799,8 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
Expr *FromExpr = Conv.Bad.FromExpr;
QualType FromTy = Conv.Bad.getFromType();
QualType ToTy = Conv.Bad.getToType();
+ SourceRange ToParamRange =
+ !isObjectArgument ? Fn->getParamDecl(I)->getSourceRange() : SourceRange();
if (FromTy == S.Context.OverloadTy) {
assert(FromExpr && "overload set argument came from implicit argument?");
@@ -10719,8 +10811,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_overload)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << ToTy
- << Name << I + 1;
+ << ToParamRange << ToTy << Name << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -10749,14 +10840,12 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (isObjectArgument)
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_addrspace_this)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
- << FnDesc << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromQs.getAddressSpace() << ToQs.getAddressSpace();
+ << FnDesc << FromQs.getAddressSpace() << ToQs.getAddressSpace();
else
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_addrspace)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
- << FnDesc << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromQs.getAddressSpace() << ToQs.getAddressSpace()
- << ToTy->isReferenceType() << I + 1;
+ << FnDesc << ToParamRange << FromQs.getAddressSpace()
+ << ToQs.getAddressSpace() << ToTy->isReferenceType() << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -10764,9 +10853,8 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_ownership)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << FromQs.getObjCLifetime() << ToQs.getObjCLifetime()
- << (unsigned)isObjectArgument << I + 1;
+ << ToParamRange << FromTy << FromQs.getObjCLifetime()
+ << ToQs.getObjCLifetime() << (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -10774,18 +10862,8 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (FromQs.getObjCGCAttr() != ToQs.getObjCGCAttr()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_gc)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << FromQs.getObjCGCAttr() << ToQs.getObjCGCAttr()
- << (unsigned)isObjectArgument << I + 1;
- MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
- return;
- }
-
- if (FromQs.hasUnaligned() != ToQs.hasUnaligned()) {
- S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_unaligned)
- << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << FromQs.hasUnaligned() << I + 1;
+ << ToParamRange << FromTy << FromQs.getObjCGCAttr()
+ << ToQs.getObjCGCAttr() << (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -10796,13 +10874,11 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (isObjectArgument) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_cvr_this)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << (CVR - 1);
+ << FromTy << (CVR - 1);
} else {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_cvr)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << (CVR - 1) << I + 1;
+ << ToParamRange << FromTy << (CVR - 1) << I + 1;
}
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
@@ -10814,7 +10890,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (unsigned)isObjectArgument << I + 1
<< (Conv.Bad.Kind == BadConversionSequence::rvalue_ref_to_lvalue)
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange());
+ << ToParamRange;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -10824,8 +10900,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (FromExpr && isa<InitListExpr>(FromExpr)) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_list_argument)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << ToTy << (unsigned)isObjectArgument << I + 1
+ << ToParamRange << FromTy << ToTy << (unsigned)isObjectArgument << I + 1
<< (Conv.Bad.Kind == BadConversionSequence::too_few_initializers ? 1
: Conv.Bad.Kind == BadConversionSequence::too_many_initializers
? 2
@@ -10844,8 +10919,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
// Emit the generic diagnostic and, optionally, add the hints to it.
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_conv_incomplete)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << ToTy << (unsigned)isObjectArgument << I + 1
+ << ToParamRange << FromTy << ToTy << (unsigned)isObjectArgument << I + 1
<< (unsigned)(Cand->Fix.Kind);
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
@@ -10886,24 +10960,24 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (BaseToDerivedConversion) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_base_to_derived_conv)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << (BaseToDerivedConversion - 1) << FromTy << ToTy << I + 1;
+ << ToParamRange << (BaseToDerivedConversion - 1) << FromTy << ToTy
+ << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
if (isa<ObjCObjectPointerType>(CFromTy) &&
isa<PointerType>(CToTy)) {
- Qualifiers FromQs = CFromTy.getQualifiers();
- Qualifiers ToQs = CToTy.getQualifiers();
- if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
- S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_arc_conv)
- << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
- << FnDesc << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromTy << ToTy << (unsigned)isObjectArgument << I + 1;
- MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
- return;
- }
+ Qualifiers FromQs = CFromTy.getQualifiers();
+ Qualifiers ToQs = CToTy.getQualifiers();
+ if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_arc_conv)
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << ToParamRange << FromTy << ToTy << (unsigned)isObjectArgument
+ << I + 1;
+ MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
+ return;
+ }
}
if (TakingCandidateAddress &&
@@ -10913,8 +10987,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
// Emit the generic diagnostic and, optionally, add the hints to it.
PartialDiagnostic FDiag = S.PDiag(diag::note_ovl_candidate_bad_conv);
FDiag << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << ToTy << (unsigned)isObjectArgument << I + 1
+ << ToParamRange << FromTy << ToTy << (unsigned)isObjectArgument << I + 1
<< (unsigned)(Cand->Fix.Kind);
// Check that location of Fn is not in system header.
@@ -10997,11 +11070,13 @@ static void DiagnoseArityMismatch(Sema &S, NamedDecl *Found, Decl *D,
if (modeCount == 1 && Fn->getParamDecl(0)->getDeclName())
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity_one)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
- << Description << mode << Fn->getParamDecl(0) << NumFormalArgs;
+ << Description << mode << Fn->getParamDecl(0) << NumFormalArgs
+ << Fn->getParametersSourceRange();
else
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
- << Description << mode << modeCount << NumFormalArgs;
+ << Description << mode << modeCount << NumFormalArgs
+ << Fn->getParametersSourceRange();
MaybeEmitInheritedConstructorNote(S, Found);
}
@@ -11583,8 +11658,18 @@ static void NoteSurrogateCandidate(Sema &S, OverloadCandidate *Cand) {
if (isRValueReference) FnType = S.Context.getRValueReferenceType(FnType);
if (isLValueReference) FnType = S.Context.getLValueReferenceType(FnType);
- S.Diag(Cand->Surrogate->getLocation(), diag::note_ovl_surrogate_cand)
- << FnType;
+ if (!Cand->Viable &&
+ Cand->FailureKind == ovl_fail_constraints_not_satisfied) {
+ S.Diag(Cand->Surrogate->getLocation(),
+ diag::note_ovl_surrogate_constraints_not_satisfied)
+ << Cand->Surrogate;
+ ConstraintSatisfaction Satisfaction;
+ if (S.CheckFunctionConstraints(Cand->Surrogate, Satisfaction))
+ S.DiagnoseUnsatisfiedConstraint(Satisfaction);
+ } else {
+ S.Diag(Cand->Surrogate->getLocation(), diag::note_ovl_surrogate_cand)
+ << FnType;
+ }
}
static void NoteBuiltinOperatorCandidate(Sema &S, StringRef Opc,
@@ -11979,7 +12064,16 @@ void OverloadCandidateSet::NoteCandidates(
S.Diag(PD.first, PD.second, shouldDeferDiags(S, Args, OpLoc));
- NoteCandidates(S, Args, Cands, Opc, OpLoc);
+ // In WebAssembly we don't want to emit further diagnostics if a table is
+ // passed as an argument to a function.
+ bool NoteCands = true;
+ for (const Expr *Arg : Args) {
+ if (Arg->getType()->isWebAssemblyTableType())
+ NoteCands = false;
+ }
+
+ if (NoteCands)
+ NoteCandidates(S, Args, Cands, Opc, OpLoc);
if (OCD == OCD_AmbiguousCandidates)
MaybeDiagnoseAmbiguousConstraints(S, {begin(), end()});
@@ -12756,10 +12850,9 @@ bool Sema::resolveAndFixAddressOfSingleOverloadCandidate(
///
/// If no template-ids are found, no diagnostics are emitted and NULL is
/// returned.
-FunctionDecl *
-Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
- bool Complain,
- DeclAccessPair *FoundResult) {
+FunctionDecl *Sema::ResolveSingleFunctionTemplateSpecialization(
+ OverloadExpr *ovl, bool Complain, DeclAccessPair *FoundResult,
+ TemplateSpecCandidateSet *FailedTSC) {
// C++ [over.over]p1:
// [...] [Note: any redundant set of parentheses surrounding the
// overloaded function name is ignored (5.1). ]
@@ -12773,7 +12866,6 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
TemplateArgumentListInfo ExplicitTemplateArgs;
ovl->copyTemplateArgumentsInto(ExplicitTemplateArgs);
- TemplateSpecCandidateSet FailedCandidates(ovl->getNameLoc());
// Look through all of the overloaded functions, searching for one
// whose type matches exactly.
@@ -12796,16 +12888,16 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
// function template specialization, which is added to the set of
// overloaded functions considered.
FunctionDecl *Specialization = nullptr;
- TemplateDeductionInfo Info(FailedCandidates.getLocation());
+ TemplateDeductionInfo Info(ovl->getNameLoc());
if (TemplateDeductionResult Result
= DeduceTemplateArguments(FunctionTemplate, &ExplicitTemplateArgs,
Specialization, Info,
/*IsAddressOfFunction*/true)) {
// Make a note of the failed deduction for diagnostics.
- // TODO: Actually use the failed-deduction info?
- FailedCandidates.addCandidate()
- .set(I.getPair(), FunctionTemplate->getTemplatedDecl(),
- MakeDeductionFailureInfo(Context, Result, Info));
+ if (FailedTSC)
+ FailedTSC->addCandidate().set(
+ I.getPair(), FunctionTemplate->getTemplatedDecl(),
+ MakeDeductionFailureInfo(Context, Result, Info));
continue;
}
@@ -13962,8 +14054,8 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
Diag(FnDecl->getLocation(),
diag::note_ovl_ambiguous_oper_binary_reversed_self);
// Mark member== const or provide matching != to disallow reversed
- // args. Eg.
- // struct S { bool operator==(const S&); };
+ // args. Eg.
+ // struct S { bool operator==(const S&); };
// S()==S();
if (auto *MD = dyn_cast<CXXMethodDecl>(FnDecl))
if (Op == OverloadedOperatorKind::OO_EqualEqual &&
@@ -14900,6 +14992,22 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
/*SuppressUserConversion=*/false);
}
+ // When calling a lambda, both the call operator, and
+ // the conversion operator to function pointer
+ // are considered. But when constraint checking
+ // on the call operator fails, it will also fail on the
+ // conversion operator as the constraints are always the same.
+ // As the user probably does not intend to perform a surrogate call,
+ // we filter them out to produce better error diagnostics, ie to avoid
+ // showing 2 failed overloads instead of one.
+ bool IgnoreSurrogateFunctions = false;
+ if (CandidateSet.size() == 1 && Record->getAsCXXRecordDecl()->isLambda()) {
+ const OverloadCandidate &Candidate = *CandidateSet.begin();
+ if (!Candidate.Viable &&
+ Candidate.FailureKind == ovl_fail_constraints_not_satisfied)
+ IgnoreSurrogateFunctions = true;
+ }
+
// C++ [over.call.object]p2:
// In addition, for each (non-explicit in C++0x) conversion function
// declared in T of the form
@@ -14919,7 +15027,8 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
// within T by another intervening declaration.
const auto &Conversions =
cast<CXXRecordDecl>(Record->getDecl())->getVisibleConversionFunctions();
- for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
+ for (auto I = Conversions.begin(), E = Conversions.end();
+ !IgnoreSurrogateFunctions && I != E; ++I) {
NamedDecl *D = *I;
CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext());
if (isa<UsingShadowDecl>(D))
@@ -15055,7 +15164,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
bool IsError = false;
// Initialize the implicit object parameter if needed.
- // Since C++2b, this could also be a call to a static call operator
+ // Since C++23, this could also be a call to a static call operator
// which we emit as a regular CallExpr.
if (Method->isInstance()) {
ExprResult ObjRes = PerformObjectArgumentInitialization(
@@ -15421,8 +15530,14 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
unsigned ResultIdx = GSE->getResultIndex();
AssocExprs[ResultIdx] = SubExpr;
+ if (GSE->isExprPredicate())
+ return GenericSelectionExpr::Create(
+ Context, GSE->getGenericLoc(), GSE->getControllingExpr(),
+ GSE->getAssocTypeSourceInfos(), AssocExprs, GSE->getDefaultLoc(),
+ GSE->getRParenLoc(), GSE->containsUnexpandedParameterPack(),
+ ResultIdx);
return GenericSelectionExpr::Create(
- Context, GSE->getGenericLoc(), GSE->getControllingExpr(),
+ Context, GSE->getGenericLoc(), GSE->getControllingType(),
GSE->getAssocTypeSourceInfos(), AssocExprs, GSE->getDefaultLoc(),
GSE->getRParenLoc(), GSE->containsUnexpandedParameterPack(),
ResultIdx);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp b/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp
index abbdc12e7047..408f71044fa3 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp
@@ -152,8 +152,13 @@ namespace {
assocTypes.push_back(assoc.getTypeSourceInfo());
}
+ if (gse->isExprPredicate())
+ return GenericSelectionExpr::Create(
+ S.Context, gse->getGenericLoc(), gse->getControllingExpr(),
+ assocTypes, assocExprs, gse->getDefaultLoc(), gse->getRParenLoc(),
+ gse->containsUnexpandedParameterPack(), resultIndex);
return GenericSelectionExpr::Create(
- S.Context, gse->getGenericLoc(), gse->getControllingExpr(),
+ S.Context, gse->getGenericLoc(), gse->getControllingType(),
assocTypes, assocExprs, gse->getDefaultLoc(), gse->getRParenLoc(),
gse->containsUnexpandedParameterPack(), resultIndex);
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp
index fedc314f2965..db2059e68b3d 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -28,6 +28,8 @@ using namespace llvm;
using namespace clang;
using namespace clang::RISCV;
+using IntrinsicKind = sema::RISCVIntrinsicManager::IntrinsicKind;
+
namespace {
// Function definition of a RVV intrinsic.
@@ -58,16 +60,34 @@ static const PrototypeDescriptor RVVSignatureTable[] = {
#undef DECL_SIGNATURE_TABLE
};
+static const PrototypeDescriptor RVSiFiveVectorSignatureTable[] = {
+#define DECL_SIGNATURE_TABLE
+#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
+#undef DECL_SIGNATURE_TABLE
+};
+
static const RVVIntrinsicRecord RVVIntrinsicRecords[] = {
#define DECL_INTRINSIC_RECORDS
#include "clang/Basic/riscv_vector_builtin_sema.inc"
#undef DECL_INTRINSIC_RECORDS
};
+static const RVVIntrinsicRecord RVSiFiveVectorIntrinsicRecords[] = {
+#define DECL_INTRINSIC_RECORDS
+#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
+#undef DECL_INTRINSIC_RECORDS
+};
+
// Get subsequence of signature table.
-static ArrayRef<PrototypeDescriptor> ProtoSeq2ArrayRef(uint16_t Index,
- uint8_t Length) {
- return ArrayRef(&RVVSignatureTable[Index], Length);
+static ArrayRef<PrototypeDescriptor>
+ProtoSeq2ArrayRef(IntrinsicKind K, uint16_t Index, uint8_t Length) {
+ switch (K) {
+ case IntrinsicKind::RVV:
+ return ArrayRef(&RVVSignatureTable[Index], Length);
+ case IntrinsicKind::SIFIVE_VECTOR:
+ return ArrayRef(&RVSiFiveVectorSignatureTable[Index], Length);
+ }
+ llvm_unreachable("Unhandled IntrinsicKind");
}
static QualType RVVType2Qual(ASTContext &Context, const RVVType *Type) {
@@ -115,8 +135,12 @@ static QualType RVVType2Qual(ASTContext &Context, const RVVType *Type) {
case Invalid:
llvm_unreachable("Unhandled type.");
}
- if (Type->isVector())
- QT = Context.getScalableVectorType(QT, *Type->getScale());
+ if (Type->isVector()) {
+ if (Type->isTuple())
+ QT = Context.getScalableVectorType(QT, *Type->getScale(), Type->getNF());
+ else
+ QT = Context.getScalableVectorType(QT, *Type->getScale());
+ }
if (Type->isConstant())
QT = Context.getConstType(QT);
@@ -134,6 +158,8 @@ private:
Sema &S;
ASTContext &Context;
RVVTypeCache TypeCache;
+ bool ConstructedRISCVVBuiltins;
+ bool ConstructedRISCVSiFiveVectorBuiltins;
// List of all RVV intrinsic.
std::vector<RVVIntrinsicDef> IntrinsicList;
@@ -142,8 +168,6 @@ private:
// Mapping function name to RVVOverloadIntrinsicDef.
StringMap<RVVOverloadIntrinsicDef> OverloadIntrinsics;
- // Create IntrinsicList
- void InitIntrinsicList();
// Create RVVIntrinsicDef.
void InitRVVIntrinsic(const RVVIntrinsicRecord &Record, StringRef SuffixStr,
@@ -155,11 +179,18 @@ private:
Preprocessor &PP, unsigned Index,
bool IsOverload);
+ void ConstructRVVIntrinsics(ArrayRef<RVVIntrinsicRecord> Recs,
+ IntrinsicKind K);
+
public:
RISCVIntrinsicManagerImpl(clang::Sema &S) : S(S), Context(S.Context) {
- InitIntrinsicList();
+ ConstructedRISCVVBuiltins = false;
+ ConstructedRISCVSiFiveVectorBuiltins = false;
}
+ // Initialize IntrinsicList
+ void InitIntrinsicList() override;
+
// Create RISC-V vector intrinsic and insert into symbol table if found, and
// return true, otherwise return false.
bool CreateIntrinsicIfFound(LookupResult &LR, IdentifierInfo *II,
@@ -167,25 +198,21 @@ public:
};
} // namespace
-void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
+void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
+ ArrayRef<RVVIntrinsicRecord> Recs, IntrinsicKind K) {
const TargetInfo &TI = Context.getTargetInfo();
- bool HasVectorFloat32 = TI.hasFeature("zve32f");
- bool HasVectorFloat64 = TI.hasFeature("zve64d");
- bool HasZvfh = TI.hasFeature("experimental-zvfh");
bool HasRV64 = TI.hasFeature("64bit");
- bool HasFullMultiply = TI.hasFeature("v");
-
// Construction of RVVIntrinsicRecords need to sync with createRVVIntrinsics
// in RISCVVEmitter.cpp.
- for (auto &Record : RVVIntrinsicRecords) {
+ for (auto &Record : Recs) {
// Create Intrinsics for each type and LMUL.
BasicType BaseType = BasicType::Unknown;
ArrayRef<PrototypeDescriptor> BasicProtoSeq =
- ProtoSeq2ArrayRef(Record.PrototypeIndex, Record.PrototypeLength);
+ ProtoSeq2ArrayRef(K, Record.PrototypeIndex, Record.PrototypeLength);
ArrayRef<PrototypeDescriptor> SuffixProto =
- ProtoSeq2ArrayRef(Record.SuffixIndex, Record.SuffixLength);
+ ProtoSeq2ArrayRef(K, Record.SuffixIndex, Record.SuffixLength);
ArrayRef<PrototypeDescriptor> OverloadedSuffixProto = ProtoSeq2ArrayRef(
- Record.OverloadedSuffixIndex, Record.OverloadedSuffixSize);
+ K, Record.OverloadedSuffixIndex, Record.OverloadedSuffixSize);
PolicyScheme UnMaskedPolicyScheme =
static_cast<PolicyScheme>(Record.UnMaskedPolicyScheme);
@@ -195,15 +222,16 @@ void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
const Policy DefaultPolicy;
llvm::SmallVector<PrototypeDescriptor> ProtoSeq =
- RVVIntrinsic::computeBuiltinTypes(BasicProtoSeq, /*IsMasked=*/false,
- /*HasMaskedOffOperand=*/false,
- Record.HasVL, Record.NF,
- UnMaskedPolicyScheme, DefaultPolicy);
+ RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/false,
+ /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
+ UnMaskedPolicyScheme, DefaultPolicy, Record.IsTuple);
llvm::SmallVector<PrototypeDescriptor> ProtoMaskSeq =
RVVIntrinsic::computeBuiltinTypes(
BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
- Record.HasVL, Record.NF, MaskedPolicyScheme, DefaultPolicy);
+ Record.HasVL, Record.NF, MaskedPolicyScheme, DefaultPolicy,
+ Record.IsTuple);
bool UnMaskedHasPolicy = UnMaskedPolicyScheme != PolicyScheme::SchemeNone;
bool MaskedHasPolicy = MaskedPolicyScheme != PolicyScheme::SchemeNone;
@@ -223,25 +251,10 @@ void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
continue;
// Check requirement.
- if (BaseType == BasicType::Float16 && !HasZvfh)
- continue;
-
- if (BaseType == BasicType::Float32 && !HasVectorFloat32)
- continue;
-
- if (BaseType == BasicType::Float64 && !HasVectorFloat64)
- continue;
-
if (((Record.RequiredExtensions & RVV_REQ_RV64) == RVV_REQ_RV64) &&
!HasRV64)
continue;
- if ((BaseType == BasicType::Int64) &&
- ((Record.RequiredExtensions & RVV_REQ_FullMultiply) ==
- RVV_REQ_FullMultiply) &&
- !HasFullMultiply)
- continue;
-
// Expanded with different LMUL.
for (int Log2LMUL = -3; Log2LMUL <= 3; Log2LMUL++) {
if (!(Record.Log2LMULMask & (1 << (Log2LMUL + 3))))
@@ -270,7 +283,7 @@ void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
RVVIntrinsic::computeBuiltinTypes(
BasicProtoSeq, /*IsMasked=*/false,
/*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
- UnMaskedPolicyScheme, P);
+ UnMaskedPolicyScheme, P, Record.IsTuple);
std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
BaseType, Log2LMUL, Record.NF, PolicyPrototype);
InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
@@ -292,14 +305,30 @@ void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
RVVIntrinsic::computeBuiltinTypes(
BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
- Record.HasVL, Record.NF, MaskedPolicyScheme, P);
+ Record.HasVL, Record.NF, MaskedPolicyScheme, P,
+ Record.IsTuple);
std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
BaseType, Log2LMUL, Record.NF, PolicyPrototype);
InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
/*IsMask=*/true, *PolicyTypes, MaskedHasPolicy, P);
}
} // End for different LMUL
- } // End for different TypeRange
+ } // End for different TypeRange
+ }
+}
+
+void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
+
+ if (S.DeclareRISCVVBuiltins && !ConstructedRISCVVBuiltins) {
+ ConstructedRISCVVBuiltins = true;
+ ConstructRVVIntrinsics(RVVIntrinsicRecords,
+ IntrinsicKind::RVV);
+ }
+ if (S.DeclareRISCVSiFiveVectorBuiltins &&
+ !ConstructedRISCVSiFiveVectorBuiltins) {
+ ConstructedRISCVSiFiveVectorBuiltins = true;
+ ConstructRVVIntrinsics(RVSiFiveVectorIntrinsicRecords,
+ IntrinsicKind::SIFIVE_VECTOR);
}
}
@@ -326,7 +355,8 @@ void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
std::string BuiltinName = "__builtin_rvv_" + std::string(Record.Name);
RVVIntrinsic::updateNamesAndPolicy(IsMasked, HasPolicy, Name, BuiltinName,
- OverloadedName, PolicyAttrs);
+ OverloadedName, PolicyAttrs,
+ Record.HasFRMRoundModeOp);
// Put into IntrinsicList.
size_t Index = IntrinsicList.size();
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp b/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
index f8c713c8545d..ca0254d29e7f 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
@@ -33,22 +33,6 @@ Sema::SemaDiagnosticBuilder Sema::SYCLDiagIfDeviceCode(SourceLocation Loc,
return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, FD, *this);
}
-bool Sema::checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee) {
- assert(getLangOpts().SYCLIsDevice &&
- "Should only be called during SYCL compilation");
- assert(Callee && "Callee may not be null.");
-
- // Errors in an unevaluated context don't need to be generated,
- // so we can safely skip them.
- if (isUnevaluatedContext() || isConstantEvaluated())
- return true;
-
- SemaDiagnosticBuilder::Kind DiagKind = SemaDiagnosticBuilder::K_Nop;
-
- return DiagKind != SemaDiagnosticBuilder::K_Immediate &&
- DiagKind != SemaDiagnosticBuilder::K_ImmediateWithCallStack;
-}
-
static bool isZeroSizedArray(Sema &SemaRef, QualType Ty) {
if (const auto *CAT = SemaRef.getASTContext().getAsConstantArrayType(Ty))
return CAT->getSize() == 0;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
index f15603fd0bd4..70a549938d08 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
@@ -39,6 +39,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace sema;
@@ -932,11 +933,12 @@ StmtResult Sema::ActOnIfStmt(SourceLocation IfLoc,
}
if (ConstevalOrNegatedConsteval) {
- bool Immediate = isImmediateFunctionContext();
+ bool Immediate = ExprEvalContexts.back().Context ==
+ ExpressionEvaluationContext::ImmediateFunctionContext;
if (CurContext->isFunctionOrMethod()) {
const auto *FD =
dyn_cast<FunctionDecl>(Decl::castFromDeclContext(CurContext));
- if (FD && FD->isConsteval())
+ if (FD && FD->isImmediateFunction())
Immediate = true;
}
if (isUnevaluatedContext() || Immediate)
@@ -1728,9 +1730,7 @@ Sema::ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
namespace {
// Use SetVector since the diagnostic cares about the ordering of the Decl's.
- using DeclSetVector =
- llvm::SetVector<VarDecl *, llvm::SmallVector<VarDecl *, 8>,
- llvm::SmallPtrSet<VarDecl *, 8>>;
+ using DeclSetVector = llvm::SmallSetVector<VarDecl *, 8>;
// This visitor will traverse a conditional statement and store all
// the evaluated decls into a vector. Simple is set to true if none
@@ -3364,7 +3364,7 @@ Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
/// might be modified by the implementation.
///
/// \param Mode Overrides detection of current language mode
-/// and uses the rules for C++2b.
+/// and uses the rules for C++23.
///
/// \returns An aggregate which contains the Candidate and isMoveEligible
/// and isCopyElidable methods. If Candidate is non-null, it means
@@ -3385,7 +3385,7 @@ Sema::NamedReturnInfo Sema::getNamedReturnInfo(Expr *&E,
if (Res.Candidate && !E->isXValue() &&
(Mode == SimplerImplicitMoveMode::ForceOn ||
(Mode != SimplerImplicitMoveMode::ForceOff &&
- getLangOpts().CPlusPlus2b))) {
+ getLangOpts().CPlusPlus23))) {
E = ImplicitCastExpr::Create(Context, VD->getType().getNonReferenceType(),
CK_NoOp, E, nullptr, VK_XValue,
FPOptionsOverride());
@@ -3529,7 +3529,7 @@ ExprResult Sema::PerformMoveOrCopyInitialization(
const InitializedEntity &Entity, const NamedReturnInfo &NRInfo, Expr *Value,
bool SupressSimplerImplicitMoves) {
if (getLangOpts().CPlusPlus &&
- (!getLangOpts().CPlusPlus2b || SupressSimplerImplicitMoves) &&
+ (!getLangOpts().CPlusPlus23 || SupressSimplerImplicitMoves) &&
NRInfo.isMoveEligible()) {
ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack, Value->getType(),
CK_NoOp, Value, VK_XValue, FPOptionsOverride());
@@ -3730,6 +3730,11 @@ StmtResult Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc,
if (FunctionScopes.back()->FirstReturnLoc.isInvalid())
FunctionScopes.back()->FirstReturnLoc = ReturnLoc;
+ if (auto *CurBlock = dyn_cast<BlockScopeInfo>(CurCap);
+ CurBlock && CurCap->HasImplicitReturnType && RetValExp &&
+ RetValExp->containsErrors())
+ CurBlock->TheDecl->setInvalidDecl();
+
return Result;
}
@@ -3825,9 +3830,18 @@ bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
{
// Otherwise, [...] deduce a value for U using the rules of template
// argument deduction.
- TemplateDeductionInfo Info(RetExpr->getExprLoc());
- TemplateDeductionResult Res =
- DeduceAutoType(OrigResultType, RetExpr, Deduced, Info);
+ auto RetExprLoc = RetExpr->getExprLoc();
+ TemplateDeductionInfo Info(RetExprLoc);
+ SourceLocation TemplateSpecLoc;
+ if (RetExpr->getType() == Context.OverloadTy) {
+ auto FindResult = OverloadExpr::find(RetExpr);
+ if (FindResult.Expression)
+ TemplateSpecLoc = FindResult.Expression->getNameLoc();
+ }
+ TemplateSpecCandidateSet FailedTSC(TemplateSpecLoc);
+ TemplateDeductionResult Res = DeduceAutoType(
+ OrigResultType, RetExpr, Deduced, Info, /*DependentDeduction=*/false,
+ /*IgnoreConstraints=*/false, &FailedTSC);
if (Res != TDK_Success && FD->isInvalidDecl())
return true;
switch (Res) {
@@ -3853,6 +3867,7 @@ bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
default:
Diag(RetExpr->getExprLoc(), diag::err_auto_fn_deduction_failure)
<< OrigResultType.getType() << RetExpr->getType();
+ FailedTSC.NoteCandidates(*this, RetExprLoc);
return true;
}
}
@@ -3902,7 +3917,7 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
static bool CheckSimplerImplicitMovesMSVCWorkaround(const Sema &S,
const Expr *E) {
- if (!E || !S.getLangOpts().CPlusPlus2b || !S.getLangOpts().MSVCCompat)
+ if (!E || !S.getLangOpts().CPlusPlus23 || !S.getLangOpts().MSVCCompat)
return false;
const Decl *D = E->getReferencedDeclOfCallee();
if (!D || !S.SourceMgr.isInSystemHeader(D->getLocation()))
@@ -3969,6 +3984,14 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
} else // If we don't have a function/method context, bail.
return StmtError();
+ if (RetValExp) {
+ const auto *ATy = dyn_cast<ArrayType>(RetValExp->getType());
+ if (ATy && ATy->getElementType().isWebAssemblyReferenceType()) {
+ Diag(ReturnLoc, diag::err_wasm_table_art) << 1;
+ return StmtError();
+ }
+ }
+
// C++1z: discarded return statements are not considered when deducing a
// return type.
if (ExprEvalContexts.back().isDiscardedStatementContext() &&
@@ -4351,9 +4374,9 @@ public:
if (QT->isPointerType())
IsPointer = true;
+ QT = QT.getUnqualifiedType();
if (IsPointer || QT->isReferenceType())
QT = QT->getPointeeType();
- QT = QT.getUnqualifiedType();
}
/// Used when creating a CatchHandlerType from a base class type; pretends the
@@ -4401,32 +4424,42 @@ template <> struct DenseMapInfo<CatchHandlerType> {
namespace {
class CatchTypePublicBases {
- ASTContext &Ctx;
- const llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> &TypesToCheck;
- const bool CheckAgainstPointer;
+ const llvm::DenseMap<QualType, CXXCatchStmt *> &TypesToCheck;
CXXCatchStmt *FoundHandler;
- CanQualType FoundHandlerType;
+ QualType FoundHandlerType;
+ QualType TestAgainstType;
public:
- CatchTypePublicBases(
- ASTContext &Ctx,
- const llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> &T, bool C)
- : Ctx(Ctx), TypesToCheck(T), CheckAgainstPointer(C),
- FoundHandler(nullptr) {}
+ CatchTypePublicBases(const llvm::DenseMap<QualType, CXXCatchStmt *> &T,
+ QualType QT)
+ : TypesToCheck(T), FoundHandler(nullptr), TestAgainstType(QT) {}
CXXCatchStmt *getFoundHandler() const { return FoundHandler; }
- CanQualType getFoundHandlerType() const { return FoundHandlerType; }
+ QualType getFoundHandlerType() const { return FoundHandlerType; }
bool operator()(const CXXBaseSpecifier *S, CXXBasePath &) {
if (S->getAccessSpecifier() == AccessSpecifier::AS_public) {
- CatchHandlerType Check(S->getType(), CheckAgainstPointer);
+ QualType Check = S->getType().getCanonicalType();
const auto &M = TypesToCheck;
auto I = M.find(Check);
if (I != M.end()) {
- FoundHandler = I->second;
- FoundHandlerType = Ctx.getCanonicalType(S->getType());
- return true;
+ // We're pretty sure we found what we need to find. However, we still
+ // need to make sure that we properly compare for pointers and
+ // references, to handle cases like:
+ //
+ // } catch (Base *b) {
+ // } catch (Derived &d) {
+ // }
+ //
+ // where there is a qualification mismatch that disqualifies this
+ // handler as a potential problem.
+ if (I->second->getCaughtType()->isPointerType() ==
+ TestAgainstType->isPointerType()) {
+ FoundHandler = I->second;
+ FoundHandlerType = Check;
+ return true;
+ }
}
}
return false;
@@ -4465,6 +4498,7 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
assert(!Handlers.empty() &&
"The parser shouldn't call this if there are no handlers.");
+ llvm::DenseMap<QualType, CXXCatchStmt *> HandledBaseTypes;
llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> HandledTypes;
for (unsigned i = 0; i < NumHandlers; ++i) {
CXXCatchStmt *H = cast<CXXCatchStmt>(Handlers[i]);
@@ -4482,8 +4516,7 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
// Walk the type hierarchy to diagnose when this type has already been
// handled (duplication), or cannot be handled (derivation inversion). We
// ignore top-level cv-qualifiers, per [except.handle]p3
- CatchHandlerType HandlerCHT =
- (QualType)Context.getCanonicalType(H->getCaughtType());
+ CatchHandlerType HandlerCHT = H->getCaughtType().getCanonicalType();
// We can ignore whether the type is a reference or a pointer; we need the
// underlying declaration type in order to get at the underlying record
@@ -4499,10 +4532,12 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
// as the original type.
CXXBasePaths Paths;
Paths.setOrigin(RD);
- CatchTypePublicBases CTPB(Context, HandledTypes, HandlerCHT.isPointer());
+ CatchTypePublicBases CTPB(HandledBaseTypes,
+ H->getCaughtType().getCanonicalType());
if (RD->lookupInBases(CTPB, Paths)) {
const CXXCatchStmt *Problem = CTPB.getFoundHandler();
- if (!Paths.isAmbiguous(CTPB.getFoundHandlerType())) {
+ if (!Paths.isAmbiguous(
+ CanQualType::CreateUnsafe(CTPB.getFoundHandlerType()))) {
Diag(H->getExceptionDecl()->getTypeSpecStartLoc(),
diag::warn_exception_caught_by_earlier_handler)
<< H->getCaughtType();
@@ -4511,11 +4546,16 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
<< Problem->getCaughtType();
}
}
+ // Strip the qualifiers here because we're going to be comparing this
+ // type to the base type specifiers of a class, which are ignored in a
+ // base specifier per [class.derived.general]p2.
+ HandledBaseTypes[Underlying.getUnqualifiedType()] = H;
}
// Add the type the list of ones we have handled; diagnose if we've already
// handled it.
- auto R = HandledTypes.insert(std::make_pair(H->getCaughtType(), H));
+ auto R = HandledTypes.insert(
+ std::make_pair(H->getCaughtType().getCanonicalType(), H));
if (!R.second) {
const CXXCatchStmt *Problem = R.first->second;
Diag(H->getExceptionDecl()->getTypeSpecStartLoc(),
@@ -4529,7 +4569,8 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
FSI->setHasCXXTry(TryLoc);
- return CXXTryStmt::Create(Context, TryLoc, TryBlock, Handlers);
+ return CXXTryStmt::Create(Context, TryLoc, cast<CompoundStmt>(TryBlock),
+ Handlers);
}
StmtResult Sema::ActOnSEHTryBlock(bool IsCXXTry, SourceLocation TryLoc,
@@ -4725,6 +4766,7 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
PushExpressionEvaluationContext(
ExpressionEvaluationContext::PotentiallyEvaluated);
+ ExprEvalContexts.back().InImmediateEscalatingFunctionContext = false;
}
void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
index 97400483c63a..2acb269f0423 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
@@ -22,6 +22,7 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include <optional>
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
index 6d443837a4c5..ad20bc8871f1 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
@@ -215,6 +215,59 @@ static Attr *handleNoMergeAttr(Sema &S, Stmt *St, const ParsedAttr &A,
return ::new (S.Context) NoMergeAttr(S.Context, A);
}
+template <typename OtherAttr, int DiagIdx>
+static bool CheckStmtInlineAttr(Sema &SemaRef, const Stmt *OrigSt,
+ const Stmt *CurSt,
+ const AttributeCommonInfo &A) {
+ CallExprFinder OrigCEF(SemaRef, OrigSt);
+ CallExprFinder CEF(SemaRef, CurSt);
+
+ // If the call expressions lists are equal in size, we can skip
+ // previously emitted diagnostics. However, if the statement has a pack
+ // expansion, we have no way of telling which CallExpr is the instantiated
+ // version of the other. In this case, we will end up re-diagnosing in the
+ // instantiation.
+ // ie: [[clang::always_inline]] non_dependent(), (other_call<Pack>()...)
+ // will diagnose nondependent again.
+ bool CanSuppressDiag =
+ OrigSt && CEF.getCallExprs().size() == OrigCEF.getCallExprs().size();
+
+ if (!CEF.foundCallExpr()) {
+ return SemaRef.Diag(CurSt->getBeginLoc(),
+ diag::warn_attribute_ignored_no_calls_in_stmt)
+ << A;
+ }
+
+ for (const auto &Tup :
+ llvm::zip_longest(OrigCEF.getCallExprs(), CEF.getCallExprs())) {
+ // If the original call expression already had a callee, we already
+ // diagnosed this, so skip it here. We can't skip if there isn't a 1:1
+ // relationship between the two lists of call expressions.
+ if (!CanSuppressDiag || !(*std::get<0>(Tup))->getCalleeDecl()) {
+ const Decl *Callee = (*std::get<1>(Tup))->getCalleeDecl();
+ if (Callee &&
+ (Callee->hasAttr<OtherAttr>() || Callee->hasAttr<FlattenAttr>())) {
+ SemaRef.Diag(CurSt->getBeginLoc(),
+ diag::warn_function_stmt_attribute_precedence)
+ << A << (Callee->hasAttr<OtherAttr>() ? DiagIdx : 1);
+ SemaRef.Diag(Callee->getBeginLoc(), diag::note_conflicting_attribute);
+ }
+ }
+ }
+
+ return false;
+}
+
+bool Sema::CheckNoInlineAttr(const Stmt *OrigSt, const Stmt *CurSt,
+ const AttributeCommonInfo &A) {
+ return CheckStmtInlineAttr<AlwaysInlineAttr, 0>(*this, OrigSt, CurSt, A);
+}
+
+bool Sema::CheckAlwaysInlineAttr(const Stmt *OrigSt, const Stmt *CurSt,
+ const AttributeCommonInfo &A) {
+ return CheckStmtInlineAttr<NoInlineAttr, 2>(*this, OrigSt, CurSt, A);
+}
+
static Attr *handleNoInlineAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
NoInlineAttr NIA(S.Context, A);
@@ -224,19 +277,8 @@ static Attr *handleNoInlineAttr(Sema &S, Stmt *St, const ParsedAttr &A,
return nullptr;
}
- CallExprFinder CEF(S, St);
- if (!CEF.foundCallExpr()) {
- S.Diag(St->getBeginLoc(), diag::warn_attribute_ignored_no_calls_in_stmt)
- << A;
+ if (S.CheckNoInlineAttr(/*OrigSt=*/nullptr, St, A))
return nullptr;
- }
-
- for (const auto *CallExpr : CEF.getCallExprs()) {
- const Decl *Decl = CallExpr->getCalleeDecl();
- if (Decl->hasAttr<AlwaysInlineAttr>() || Decl->hasAttr<FlattenAttr>())
- S.Diag(St->getBeginLoc(), diag::warn_function_stmt_attribute_precedence)
- << A << (Decl->hasAttr<AlwaysInlineAttr>() ? 0 : 1);
- }
return ::new (S.Context) NoInlineAttr(S.Context, A);
}
@@ -250,19 +292,8 @@ static Attr *handleAlwaysInlineAttr(Sema &S, Stmt *St, const ParsedAttr &A,
return nullptr;
}
- CallExprFinder CEF(S, St);
- if (!CEF.foundCallExpr()) {
- S.Diag(St->getBeginLoc(), diag::warn_attribute_ignored_no_calls_in_stmt)
- << A;
+ if (S.CheckAlwaysInlineAttr(/*OrigSt=*/nullptr, St, A))
return nullptr;
- }
-
- for (const auto *CallExpr : CEF.getCallExprs()) {
- const Decl *Decl = CallExpr->getCalleeDecl();
- if (Decl->hasAttr<NoInlineAttr>() || Decl->hasAttr<FlattenAttr>())
- S.Diag(St->getBeginLoc(), diag::warn_function_stmt_attribute_precedence)
- << A << (Decl->hasAttr<NoInlineAttr>() ? 2 : 1);
- }
return ::new (S.Context) AlwaysInlineAttr(S.Context, A);
}
@@ -459,7 +490,9 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
!(A.existsInTarget(S.Context.getTargetInfo()) ||
(S.Context.getLangOpts().SYCLIsDevice && Aux &&
A.existsInTarget(*Aux)))) {
- S.Diag(A.getLoc(), A.isDeclspecAttribute()
+ S.Diag(A.getLoc(), A.isRegularKeywordAttribute()
+ ? (unsigned)diag::err_keyword_not_supported_on_target
+ : A.isDeclspecAttribute()
? (unsigned)diag::warn_unhandled_ms_attribute_ignored
: (unsigned)diag::warn_unknown_attribute_ignored)
<< A << A.getRange();
@@ -495,7 +528,7 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
// declaration attribute is not written on a statement, but this code is
// needed for attributes in Attr.td that do not list any subjects.
S.Diag(A.getRange().getBegin(), diag::err_decl_attribute_invalid_on_stmt)
- << A << St->getBeginLoc();
+ << A << A.isRegularKeywordAttribute() << St->getBeginLoc();
return nullptr;
}
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
index 890cea1dfb0e..a1f0f5732b2b 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
@@ -26,6 +26,7 @@
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
@@ -315,9 +316,8 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
}
bool Sema::isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
- SourceLocation NameLoc,
- ParsedTemplateTy *Template) {
- CXXScopeSpec SS;
+ SourceLocation NameLoc, CXXScopeSpec &SS,
+ ParsedTemplateTy *Template /*=nullptr*/) {
bool MemberOfUnknownSpecialization = false;
// We could use redeclaration lookup here, but we don't need to: the
@@ -1107,19 +1107,8 @@ makeTemplateArgumentListInfo(Sema &S, TemplateIdAnnotation &TemplateId) {
return TemplateArgs;
}
-bool Sema::ActOnTypeConstraint(const CXXScopeSpec &SS,
- TemplateIdAnnotation *TypeConstr,
- TemplateTypeParmDecl *ConstrainedParameter,
- SourceLocation EllipsisLoc) {
- return BuildTypeConstraint(SS, TypeConstr, ConstrainedParameter, EllipsisLoc,
- false);
-}
+bool Sema::CheckTypeConstraint(TemplateIdAnnotation *TypeConstr) {
-bool Sema::BuildTypeConstraint(const CXXScopeSpec &SS,
- TemplateIdAnnotation *TypeConstr,
- TemplateTypeParmDecl *ConstrainedParameter,
- SourceLocation EllipsisLoc,
- bool AllowUnexpandedPack) {
TemplateName TN = TypeConstr->Template.get();
ConceptDecl *CD = cast<ConceptDecl>(TN.getAsTemplateDecl());
@@ -1137,9 +1126,32 @@ bool Sema::BuildTypeConstraint(const CXXScopeSpec &SS,
if (!WereArgsSpecified &&
CD->getTemplateParameters()->getMinRequiredArguments() > 1) {
Diag(TypeConstr->TemplateNameLoc,
- diag::err_type_constraint_missing_arguments) << CD;
+ diag::err_type_constraint_missing_arguments)
+ << CD;
return true;
}
+ return false;
+}
+
+bool Sema::ActOnTypeConstraint(const CXXScopeSpec &SS,
+ TemplateIdAnnotation *TypeConstr,
+ TemplateTypeParmDecl *ConstrainedParameter,
+ SourceLocation EllipsisLoc) {
+ return BuildTypeConstraint(SS, TypeConstr, ConstrainedParameter, EllipsisLoc,
+ false);
+}
+
+bool Sema::BuildTypeConstraint(const CXXScopeSpec &SS,
+ TemplateIdAnnotation *TypeConstr,
+ TemplateTypeParmDecl *ConstrainedParameter,
+ SourceLocation EllipsisLoc,
+ bool AllowUnexpandedPack) {
+
+ if (CheckTypeConstraint(TypeConstr))
+ return true;
+
+ TemplateName TN = TypeConstr->Template.get();
+ ConceptDecl *CD = cast<ConceptDecl>(TN.getAsTemplateDecl());
DeclarationNameInfo ConceptName(DeclarationName(TypeConstr->Name),
TypeConstr->TemplateNameLoc);
@@ -1251,35 +1263,41 @@ bool Sema::AttachTypeConstraint(NestedNameSpecifierLoc NS,
return false;
}
-bool Sema::AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *NTTP,
+bool Sema::AttachTypeConstraint(AutoTypeLoc TL,
+ NonTypeTemplateParmDecl *NewConstrainedParm,
+ NonTypeTemplateParmDecl *OrigConstrainedParm,
SourceLocation EllipsisLoc) {
- if (NTTP->getType() != TL.getType() ||
+ if (NewConstrainedParm->getType() != TL.getType() ||
TL.getAutoKeyword() != AutoTypeKeyword::Auto) {
- Diag(NTTP->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
+ Diag(NewConstrainedParm->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
diag::err_unsupported_placeholder_constraint)
- << NTTP->getTypeSourceInfo()->getTypeLoc().getSourceRange();
+ << NewConstrainedParm->getTypeSourceInfo()
+ ->getTypeLoc()
+ .getSourceRange();
return true;
}
// FIXME: Concepts: This should be the type of the placeholder, but this is
// unclear in the wording right now.
DeclRefExpr *Ref =
- BuildDeclRefExpr(NTTP, NTTP->getType(), VK_PRValue, NTTP->getLocation());
+ BuildDeclRefExpr(OrigConstrainedParm, OrigConstrainedParm->getType(),
+ VK_PRValue, OrigConstrainedParm->getLocation());
if (!Ref)
return true;
ExprResult ImmediatelyDeclaredConstraint = formImmediatelyDeclaredConstraint(
*this, TL.getNestedNameSpecifierLoc(), TL.getConceptNameInfo(),
TL.getNamedConcept(), TL.getLAngleLoc(), TL.getRAngleLoc(),
- BuildDecltypeType(Ref), NTTP->getLocation(),
+ BuildDecltypeType(Ref), OrigConstrainedParm->getLocation(),
[&](TemplateArgumentListInfo &ConstraintArgs) {
for (unsigned I = 0, C = TL.getNumArgs(); I != C; ++I)
ConstraintArgs.addArgument(TL.getArgLoc(I));
},
EllipsisLoc);
if (ImmediatelyDeclaredConstraint.isInvalid() ||
- !ImmediatelyDeclaredConstraint.isUsable())
+ !ImmediatelyDeclaredConstraint.isUsable())
return true;
- NTTP->setPlaceholderTypeConstraint(ImmediatelyDeclaredConstraint.get());
+ NewConstrainedParm->setPlaceholderTypeConstraint(
+ ImmediatelyDeclaredConstraint.get());
return false;
}
@@ -1559,7 +1577,7 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
if (AutoTypeLoc TL = TInfo->getTypeLoc().getContainedAutoTypeLoc())
if (TL.isConstrained())
- if (AttachTypeConstraint(TL, Param, D.getEllipsisLoc()))
+ if (AttachTypeConstraint(TL, Param, Param, D.getEllipsisLoc()))
Invalid = true;
if (Invalid)
@@ -1592,16 +1610,6 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
if (DiagnoseUnexpandedParameterPack(Default, UPPC_DefaultArgument))
return Param;
- TemplateArgument SugaredConverted, CanonicalConverted;
- ExprResult DefaultRes = CheckTemplateArgument(
- Param, Param->getType(), Default, SugaredConverted, CanonicalConverted,
- CTAK_Specified);
- if (DefaultRes.isInvalid()) {
- Param->setInvalidDecl();
- return Param;
- }
- Default = DefaultRes.get();
-
Param->setDefaultArgument(Default);
}
@@ -2562,12 +2570,47 @@ private:
};
}
+FunctionTemplateDecl *Sema::DeclareImplicitDeductionGuideFromInitList(
+ TemplateDecl *Template, MutableArrayRef<QualType> ParamTypes,
+ SourceLocation Loc) {
+ if (CXXRecordDecl *DefRecord =
+ cast<CXXRecordDecl>(Template->getTemplatedDecl())->getDefinition()) {
+ if (TemplateDecl *DescribedTemplate =
+ DefRecord->getDescribedClassTemplate())
+ Template = DescribedTemplate;
+ }
+
+ DeclContext *DC = Template->getDeclContext();
+ if (DC->isDependentContext())
+ return nullptr;
+
+ ConvertConstructorToDeductionGuideTransform Transform(
+ *this, cast<ClassTemplateDecl>(Template));
+ if (!isCompleteType(Loc, Transform.DeducedType))
+ return nullptr;
+
+ // In case we were expanding a pack when we attempted to declare deduction
+ // guides, turn off pack expansion for everything we're about to do.
+ ArgumentPackSubstitutionIndexRAII SubstIndex(*this,
+ /*NewSubstitutionIndex=*/-1);
+ // Create a template instantiation record to track the "instantiation" of
+ // constructors into deduction guides.
+ InstantiatingTemplate BuildingDeductionGuides(
+ *this, Loc, Template,
+ Sema::InstantiatingTemplate::BuildingDeductionGuidesTag{});
+ if (BuildingDeductionGuides.isInvalid())
+ return nullptr;
+
+ return cast<FunctionTemplateDecl>(
+ Transform.buildSimpleDeductionGuide(ParamTypes));
+}
+
void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc) {
if (CXXRecordDecl *DefRecord =
cast<CXXRecordDecl>(Template->getTemplatedDecl())->getDefinition()) {
- TemplateDecl *DescribedTemplate = DefRecord->getDescribedClassTemplate();
- Template = DescribedTemplate ? DescribedTemplate : Template;
+ if (TemplateDecl *DescribedTemplate = DefRecord->getDescribedClassTemplate())
+ Template = DescribedTemplate;
}
DeclContext *DC = Template->getDeclContext();
@@ -2591,9 +2634,9 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
ArgumentPackSubstitutionIndexRAII SubstIndex(*this, -1);
// Create a template instantiation record to track the "instantiation" of
// constructors into deduction guides.
- // FIXME: Add a kind for this to give more meaningful diagnostics. But can
- // this substitution process actually fail?
- InstantiatingTemplate BuildingDeductionGuides(*this, Loc, Template);
+ InstantiatingTemplate BuildingDeductionGuides(
+ *this, Loc, Template,
+ Sema::InstantiatingTemplate::BuildingDeductionGuidesTag{});
if (BuildingDeductionGuides.isInvalid())
return;
@@ -2601,13 +2644,21 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
// FIXME: Skip constructors for which deduction must necessarily fail (those
// for which some class template parameter without a default argument never
// appears in a deduced context).
+ llvm::SmallPtrSet<NamedDecl *, 8> ProcessedCtors;
bool AddedAny = false;
for (NamedDecl *D : LookupConstructors(Transform.Primary)) {
D = D->getUnderlyingDecl();
if (D->isInvalidDecl() || D->isImplicit())
continue;
+
D = cast<NamedDecl>(D->getCanonicalDecl());
+ // Within C++20 modules, we may have multiple same constructors in
+ // multiple same RecordDecls. And it doesn't make sense to create
+ // duplicated deduction guides for the duplicated constructors.
+ if (ProcessedCtors.count(D))
+ continue;
+
auto *FTD = dyn_cast<FunctionTemplateDecl>(D);
auto *CD =
dyn_cast_or_null<CXXConstructorDecl>(FTD ? FTD->getTemplatedDecl() : D);
@@ -2622,6 +2673,7 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
}))
continue;
+ ProcessedCtors.insert(D);
Transform.transformConstructor(FTD, CD);
AddedAny = true;
}
@@ -2639,7 +2691,7 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
cast<FunctionTemplateDecl>(
Transform.buildSimpleDeductionGuide(Transform.DeducedType))
->getTemplatedDecl())
- ->setIsCopyDeductionCandidate();
+ ->setDeductionCandidateKind(DeductionCandidate::Copy);
}
/// Diagnose the presence of a default template argument on a
@@ -2834,8 +2886,7 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
NewDefaultLoc = NewTypeParm->getDefaultArgumentLoc();
SawDefaultArgument = true;
- if (!OldTypeParm->getOwningModule() ||
- isModuleUnitOfCurrentTU(OldTypeParm->getOwningModule()))
+ if (!OldTypeParm->getOwningModule())
RedundantDefaultArg = true;
else if (!getASTContext().isSameDefaultTemplateArgument(OldTypeParm,
NewTypeParm)) {
@@ -2887,8 +2938,7 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
OldDefaultLoc = OldNonTypeParm->getDefaultArgumentLoc();
NewDefaultLoc = NewNonTypeParm->getDefaultArgumentLoc();
SawDefaultArgument = true;
- if (!OldNonTypeParm->getOwningModule() ||
- isModuleUnitOfCurrentTU(OldNonTypeParm->getOwningModule()))
+ if (!OldNonTypeParm->getOwningModule())
RedundantDefaultArg = true;
else if (!getASTContext().isSameDefaultTemplateArgument(
OldNonTypeParm, NewNonTypeParm)) {
@@ -2939,8 +2989,7 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
OldDefaultLoc = OldTemplateParm->getDefaultArgument().getLocation();
NewDefaultLoc = NewTemplateParm->getDefaultArgument().getLocation();
SawDefaultArgument = true;
- if (!OldTemplateParm->getOwningModule() ||
- isModuleUnitOfCurrentTU(OldTemplateParm->getOwningModule()))
+ if (!OldTemplateParm->getOwningModule())
RedundantDefaultArg = true;
else if (!getASTContext().isSameDefaultTemplateArgument(
OldTemplateParm, NewTemplateParm)) {
@@ -4987,13 +5036,20 @@ Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
return ExprError();
}
- if (ClassTemplateDecl *Temp = R.getAsSingle<ClassTemplateDecl>()) {
- Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_class_template)
- << SS.getScopeRep()
- << NameInfo.getName().getAsString() << SS.getRange();
- Diag(Temp->getLocation(), diag::note_referenced_class_template);
+ auto DiagnoseTypeTemplateDecl = [&](TemplateDecl *Temp,
+ bool isTypeAliasTemplateDecl) {
+ Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_type_template)
+ << SS.getScopeRep() << NameInfo.getName().getAsString() << SS.getRange()
+ << isTypeAliasTemplateDecl;
+ Diag(Temp->getLocation(), diag::note_referenced_type_template) << 0;
return ExprError();
- }
+ };
+
+ if (ClassTemplateDecl *Temp = R.getAsSingle<ClassTemplateDecl>())
+ return DiagnoseTypeTemplateDecl(Temp, false);
+
+ if (TypeAliasTemplateDecl *Temp = R.getAsSingle<TypeAliasTemplateDecl>())
+ return DiagnoseTypeTemplateDecl(Temp, true);
return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/ false, TemplateArgs);
}
@@ -5895,6 +5951,11 @@ bool Sema::CheckTemplateArgumentList(
CTAK_Specified))
return true;
+ CanonicalConverted.back().setIsDefaulted(
+ clang::isSubstitutedDefaultArgument(
+ Context, NewArgs[ArgIdx].getArgument(), *Param,
+ CanonicalConverted, Params->getDepth()));
+
bool PackExpansionIntoNonPack =
NewArgs[ArgIdx].getArgument().isPackExpansion() &&
(!(*Param)->isTemplateParameterPack() || getExpandedPackSize(*Param));
@@ -6070,6 +6131,8 @@ bool Sema::CheckTemplateArgumentList(
CTAK_Specified))
return true;
+ CanonicalConverted.back().setIsDefaulted(true);
+
// Core issue 150 (assumed resolution): if this is a template template
// parameter, keep track of the default template arguments from the
// template definition.
@@ -7512,7 +7575,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
else if (OldValue.isUnsigned())
RequiredBits = OldValue.getActiveBits() + 1;
else
- RequiredBits = OldValue.getMinSignedBits();
+ RequiredBits = OldValue.getSignificantBits();
if (RequiredBits > AllowedBits) {
Diag(Arg->getBeginLoc(), diag::warn_template_arg_too_large)
<< toString(OldValue, 10) << toString(Value, 10) << Param->getType()
@@ -7945,8 +8008,7 @@ Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
static bool MatchTemplateParameterKind(
Sema &S, NamedDecl *New, const NamedDecl *NewInstFrom, NamedDecl *Old,
const NamedDecl *OldInstFrom, bool Complain,
- Sema::TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc,
- bool PartialOrdering) {
+ Sema::TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc) {
// Check the actual kind (type, non-type, template).
if (Old->getKind() != New->getKind()) {
if (Complain) {
@@ -8002,8 +8064,14 @@ static bool MatchTemplateParameterKind(
// to actually compare the arguments.
if (Kind != Sema::TPL_TemplateTemplateArgumentMatch ||
(!OldNTTP->getType()->isDependentType() &&
- !NewNTTP->getType()->isDependentType()))
- if (!S.Context.hasSameType(OldNTTP->getType(), NewNTTP->getType())) {
+ !NewNTTP->getType()->isDependentType())) {
+ // C++20 [temp.over.link]p6:
+ // Two [non-type] template-parameters are equivalent [if] they have
+ // equivalent types ignoring the use of type-constraints for
+ // placeholder types
+ QualType OldType = S.Context.getUnconstrainedType(OldNTTP->getType());
+ QualType NewType = S.Context.getUnconstrainedType(NewNTTP->getType());
+ if (!S.Context.hasSameType(OldType, NewType)) {
if (Complain) {
unsigned NextDiag = diag::err_template_nontype_parm_different_type;
if (TemplateArgLoc.isValid()) {
@@ -8021,6 +8089,7 @@ static bool MatchTemplateParameterKind(
return false;
}
+ }
}
// For template template parameters, check the template parameter types.
// The template parameter lists of template template
@@ -8034,11 +8103,12 @@ static bool MatchTemplateParameterKind(
(Kind == Sema::TPL_TemplateMatch
? Sema::TPL_TemplateTemplateParmMatch
: Kind),
- TemplateArgLoc, PartialOrdering))
+ TemplateArgLoc))
return false;
}
- if (!PartialOrdering && Kind != Sema::TPL_TemplateTemplateArgumentMatch &&
+ if (Kind != Sema::TPL_TemplateParamsEquivalent &&
+ Kind != Sema::TPL_TemplateTemplateArgumentMatch &&
!isa<TemplateTemplateParmDecl>(Old)) {
const Expr *NewC = nullptr, *OldC = nullptr;
@@ -8131,8 +8201,7 @@ void DiagnoseTemplateParameterListArityMismatch(Sema &S,
bool Sema::TemplateParameterListsAreEqual(
const NamedDecl *NewInstFrom, TemplateParameterList *New,
const NamedDecl *OldInstFrom, TemplateParameterList *Old, bool Complain,
- TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc,
- bool PartialOrdering) {
+ TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc) {
if (Old->size() != New->size() && Kind != TPL_TemplateTemplateArgumentMatch) {
if (Complain)
DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
@@ -8164,7 +8233,7 @@ bool Sema::TemplateParameterListsAreEqual(
if (!MatchTemplateParameterKind(*this, *NewParm, NewInstFrom, *OldParm,
OldInstFrom, Complain, Kind,
- TemplateArgLoc, PartialOrdering))
+ TemplateArgLoc))
return false;
++NewParm;
@@ -8181,7 +8250,7 @@ bool Sema::TemplateParameterListsAreEqual(
for (; NewParm != NewParmEnd; ++NewParm) {
if (!MatchTemplateParameterKind(*this, *NewParm, NewInstFrom, *OldParm,
OldInstFrom, Complain, Kind,
- TemplateArgLoc, PartialOrdering))
+ TemplateArgLoc))
return false;
}
}
@@ -8195,7 +8264,8 @@ bool Sema::TemplateParameterListsAreEqual(
return false;
}
- if (!PartialOrdering && Kind != TPL_TemplateTemplateArgumentMatch) {
+ if (Kind != TPL_TemplateTemplateArgumentMatch &&
+ Kind != TPL_TemplateParamsEquivalent) {
const Expr *NewRC = New->getRequiresClause();
const Expr *OldRC = Old->getRequiresClause();
@@ -11307,6 +11377,7 @@ void Sema::MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
// Take tokens to avoid allocations
LPT->Toks.swap(Toks);
LPT->D = FnD;
+ LPT->FPO = getCurFPFeatures();
LateParsedTemplateMap.insert(std::make_pair(FD, std::move(LPT)));
FD->setLateTemplateParsed(true);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
index 1fe2d3fac685..31ea7be2975e 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -10,7 +10,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/TemplateDeduction.h"
#include "TreeTransform.h"
#include "TypeLocBuilder.h"
#include "clang/AST/ASTContext.h"
@@ -37,9 +36,11 @@
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
@@ -211,7 +212,8 @@ static bool isSameDeclaration(Decl *X, Decl *Y) {
static DeducedTemplateArgument
checkDeducedTemplateArguments(ASTContext &Context,
const DeducedTemplateArgument &X,
- const DeducedTemplateArgument &Y) {
+ const DeducedTemplateArgument &Y,
+ bool AggregateCandidateDeduction = false) {
// We have no deduction for one or both of the arguments; they're compatible.
if (X.isNull())
return Y;
@@ -349,20 +351,24 @@ checkDeducedTemplateArguments(ASTContext &Context,
case TemplateArgument::Pack: {
if (Y.getKind() != TemplateArgument::Pack ||
- X.pack_size() != Y.pack_size())
+ (!AggregateCandidateDeduction && X.pack_size() != Y.pack_size()))
return DeducedTemplateArgument();
llvm::SmallVector<TemplateArgument, 8> NewPack;
- for (TemplateArgument::pack_iterator XA = X.pack_begin(),
- XAEnd = X.pack_end(),
- YA = Y.pack_begin();
+ for (TemplateArgument::pack_iterator
+ XA = X.pack_begin(),
+ XAEnd = X.pack_end(), YA = Y.pack_begin(), YAEnd = Y.pack_end();
XA != XAEnd; ++XA, ++YA) {
- TemplateArgument Merged = checkDeducedTemplateArguments(
- Context, DeducedTemplateArgument(*XA, X.wasDeducedFromArrayBound()),
- DeducedTemplateArgument(*YA, Y.wasDeducedFromArrayBound()));
- if (Merged.isNull() && !(XA->isNull() && YA->isNull()))
- return DeducedTemplateArgument();
- NewPack.push_back(Merged);
+ if (YA != YAEnd) {
+ TemplateArgument Merged = checkDeducedTemplateArguments(
+ Context, DeducedTemplateArgument(*XA, X.wasDeducedFromArrayBound()),
+ DeducedTemplateArgument(*YA, Y.wasDeducedFromArrayBound()));
+ if (Merged.isNull() && !(XA->isNull() && YA->isNull()))
+ return DeducedTemplateArgument();
+ NewPack.push_back(Merged);
+ } else {
+ NewPack.push_back(*XA);
+ }
}
return DeducedTemplateArgument(
@@ -693,8 +699,10 @@ public:
/// Prepare to deduce the packs named within Pattern.
PackDeductionScope(Sema &S, TemplateParameterList *TemplateParams,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- TemplateDeductionInfo &Info, TemplateArgument Pattern)
- : S(S), TemplateParams(TemplateParams), Deduced(Deduced), Info(Info) {
+ TemplateDeductionInfo &Info, TemplateArgument Pattern,
+ bool DeducePackIfNotAlreadyDeduced = false)
+ : S(S), TemplateParams(TemplateParams), Deduced(Deduced), Info(Info),
+ DeducePackIfNotAlreadyDeduced(DeducePackIfNotAlreadyDeduced){
unsigned NumNamedPacks = addPacks(Pattern);
finishConstruction(NumNamedPacks);
}
@@ -938,8 +946,13 @@ public:
// Check the new pack matches any previous value.
DeducedTemplateArgument OldPack = *Loc;
- DeducedTemplateArgument Result =
- checkDeducedTemplateArguments(S.Context, OldPack, NewPack);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(
+ S.Context, OldPack, NewPack, DeducePackIfNotAlreadyDeduced);
+
+ Info.AggregateDeductionCandidateHasMismatchedArity =
+ OldPack.getKind() == TemplateArgument::Pack &&
+ NewPack.getKind() == TemplateArgument::Pack &&
+ OldPack.pack_size() != NewPack.pack_size() && !Result.isNull();
// If we deferred a deduction of this pack, check that one now too.
if (!Result.isNull() && !Pack.DeferredDeduction.isNull()) {
@@ -979,6 +992,7 @@ private:
TemplateDeductionInfo &Info;
unsigned PackElements = 0;
bool IsPartiallyExpanded = false;
+ bool DeducePackIfNotAlreadyDeduced = false;
/// The number of expansions, if we have a fully-expanded pack in this scope.
std::optional<unsigned> FixedNumExpansions;
@@ -1613,7 +1627,11 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
llvm_unreachable("Type nodes handled above");
case Type::Auto:
- // FIXME: Implement deduction in dependent case.
+ // C++23 [temp.deduct.funcaddr]/3:
+ // A placeholder type in the return type of a function template is a
+ // non-deduced context.
+ // There's no corresponding wording for [temp.deduct.decl], but we treat
+ // it the same to match other compilers.
if (P->isDependentType())
return Sema::TDK_Success;
[[fallthrough]];
@@ -1700,10 +1718,12 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
if (!IAA)
return Sema::TDK_NonDeducedMismatch;
+ const auto *IAP = S.Context.getAsIncompleteArrayType(P);
+ assert(IAP && "Template parameter not of incomplete array type");
+
return DeduceTemplateArgumentsByTypeMatch(
- S, TemplateParams,
- S.Context.getAsIncompleteArrayType(P)->getElementType(),
- IAA->getElementType(), Info, Deduced, TDF & TDF_IgnoreQualifiers);
+ S, TemplateParams, IAP->getElementType(), IAA->getElementType(), Info,
+ Deduced, TDF & TDF_IgnoreQualifiers);
}
// T [integer-constant]
@@ -2879,7 +2899,7 @@ CheckDeducedArgumentConstraints(Sema &S, TemplateDeclT *Template,
// not class-scope explicit specialization, so replace with Deduced Args
// instead of adding to inner-most.
if (NeedsReplacement)
- MLTAL.replaceInnermostTemplateArguments(CanonicalDeducedArgs);
+ MLTAL.replaceInnermostTemplateArguments(Template, CanonicalDeducedArgs);
if (S.CheckConstraintSatisfaction(Template, AssociatedConstraints, MLTAL,
Info.getLocation(),
@@ -3589,11 +3609,28 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
DeclContext *Owner = FunctionTemplate->getDeclContext();
if (FunctionTemplate->getFriendObjectKind())
Owner = FunctionTemplate->getLexicalDeclContext();
+ FunctionDecl *FD = FunctionTemplate->getTemplatedDecl();
+ // additional check for inline friend,
+ // ```
+ // template <class F1> int foo(F1 X);
+ // template <int A1> struct A {
+ // template <class F1> friend int foo(F1 X) { return A1; }
+ // };
+ // template struct A<1>;
+ // int a = foo(1.0);
+ // ```
+ const FunctionDecl *FDFriend;
+ if (FD->getFriendObjectKind() == Decl::FriendObjectKind::FOK_None &&
+ FD->isDefined(FDFriend, /*CheckForPendingFriendDefinition*/ true) &&
+ FDFriend->getFriendObjectKind() != Decl::FriendObjectKind::FOK_None) {
+ FD = const_cast<FunctionDecl *>(FDFriend);
+ Owner = FD->getLexicalDeclContext();
+ }
MultiLevelTemplateArgumentList SubstArgs(
FunctionTemplate, CanonicalDeducedArgumentList->asArray(),
/*Final=*/false);
Specialization = cast_or_null<FunctionDecl>(
- SubstDecl(FunctionTemplate->getTemplatedDecl(), Owner, SubstArgs));
+ SubstDecl(FD, Owner, SubstArgs));
if (!Specialization || Specialization->isInvalidDecl())
return TDK_SubstitutionFailure;
@@ -3729,7 +3766,8 @@ static QualType GetTypeOfFunction(Sema &S, const OverloadExpr::FindResult &R,
static QualType
ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
Expr *Arg, QualType ParamType,
- bool ParamWasReference) {
+ bool ParamWasReference,
+ TemplateSpecCandidateSet *FailedTSC = nullptr) {
OverloadExpr::FindResult R = OverloadExpr::find(Arg);
@@ -3751,8 +3789,10 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
!ParamType->isMemberFunctionPointerType()) {
if (Ovl->hasExplicitTemplateArgs()) {
// But we can still look for an explicit specialization.
- if (FunctionDecl *ExplicitSpec
- = S.ResolveSingleFunctionTemplateSpecialization(Ovl))
+ if (FunctionDecl *ExplicitSpec =
+ S.ResolveSingleFunctionTemplateSpecialization(
+ Ovl, /*Complain=*/false,
+ /*FoundDeclAccessPair=*/nullptr, FailedTSC))
return GetTypeOfFunction(S, R, ExplicitSpec);
}
@@ -3834,7 +3874,8 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
/// overloaded function set that could not be resolved.
static bool AdjustFunctionParmAndArgTypesForDeduction(
Sema &S, TemplateParameterList *TemplateParams, unsigned FirstInnerIndex,
- QualType &ParamType, QualType &ArgType, Expr *Arg, unsigned &TDF) {
+ QualType &ParamType, QualType &ArgType, Expr *Arg, unsigned &TDF,
+ TemplateSpecCandidateSet *FailedTSC = nullptr) {
// C++0x [temp.deduct.call]p3:
// If P is a cv-qualified type, the top level cv-qualifiers of P's type
// are ignored for type deduction.
@@ -3851,9 +3892,8 @@ static bool AdjustFunctionParmAndArgTypesForDeduction(
// but there are sometimes special circumstances. Typically
// involving a template-id-expr.
if (ArgType == S.Context.OverloadTy) {
- ArgType = ResolveOverloadForDeduction(S, TemplateParams,
- Arg, ParamType,
- ParamRefType != nullptr);
+ ArgType = ResolveOverloadForDeduction(S, TemplateParams, Arg, ParamType,
+ ParamRefType != nullptr, FailedTSC);
if (ArgType.isNull())
return true;
}
@@ -3931,7 +3971,8 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
QualType ParamType, Expr *Arg, TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<Sema::OriginalCallArg> &OriginalCallArgs,
- bool DecomposedParam, unsigned ArgIdx, unsigned TDF);
+ bool DecomposedParam, unsigned ArgIdx, unsigned TDF,
+ TemplateSpecCandidateSet *FailedTSC = nullptr);
/// Attempt template argument deduction from an initializer list
/// deemed to be an argument in a function call.
@@ -4007,14 +4048,16 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
QualType ParamType, Expr *Arg, TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<Sema::OriginalCallArg> &OriginalCallArgs,
- bool DecomposedParam, unsigned ArgIdx, unsigned TDF) {
+ bool DecomposedParam, unsigned ArgIdx, unsigned TDF,
+ TemplateSpecCandidateSet *FailedTSC) {
QualType ArgType = Arg->getType();
QualType OrigParamType = ParamType;
// If P is a reference type [...]
// If P is a cv-qualified type [...]
- if (AdjustFunctionParmAndArgTypesForDeduction(
- S, TemplateParams, FirstInnerIndex, ParamType, ArgType, Arg, TDF))
+ if (AdjustFunctionParmAndArgTypesForDeduction(S, TemplateParams,
+ FirstInnerIndex, ParamType,
+ ArgType, Arg, TDF, FailedTSC))
return Sema::TDK_Success;
// If [...] the argument is a non-empty initializer list [...]
@@ -4062,7 +4105,7 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, TemplateDeductionInfo &Info,
- bool PartialOverloading,
+ bool PartialOverloading, bool AggregateDeductionCandidate,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent) {
if (FunctionTemplate->isInvalidDecl())
return TDK_Invalid;
@@ -4149,9 +4192,12 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
continue;
}
+ bool IsTrailingPack = ParamIdx + 1 == NumParamTypes;
+
QualType ParamPattern = ParamExpansion->getPattern();
PackDeductionScope PackScope(*this, TemplateParams, Deduced, Info,
- ParamPattern);
+ ParamPattern,
+ AggregateDeductionCandidate && IsTrailingPack);
// C++0x [temp.deduct.call]p1:
// For a function parameter pack that occurs at the end of the
@@ -4169,7 +4215,7 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// the length of the explicitly-specified pack if it's expanded by the
// parameter pack and 0 otherwise, and we treat each deduction as a
// non-deduced context.
- if (ParamIdx + 1 == NumParamTypes || PackScope.hasFixedArity()) {
+ if (IsTrailingPack || PackScope.hasFixedArity()) {
for (; ArgIdx < Args.size() && PackScope.hasNextElement();
PackScope.nextPackElement(), ++ArgIdx) {
ParamTypesForArgChecking.push_back(ParamPattern);
@@ -4325,11 +4371,9 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
Deduced.resize(TemplateParams->size());
// If the function has a deduced return type, substitute it for a dependent
- // type so that we treat it as a non-deduced context in what follows. If we
- // are looking up by signature, the signature type should also have a deduced
- // return type, which we instead expect to exactly match.
+ // type so that we treat it as a non-deduced context in what follows.
bool HasDeducedReturnType = false;
- if (getLangOpts().CPlusPlus14 && IsAddressOfFunction &&
+ if (getLangOpts().CPlusPlus14 &&
Function->getReturnType()->getContainedAutoType()) {
FunctionType = SubstAutoTypeDependent(FunctionType);
HasDeducedReturnType = true;
@@ -4357,11 +4401,17 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// If the function has a deduced return type, deduce it now, so we can check
// that the deduced function type matches the requested type.
- if (HasDeducedReturnType &&
+ if (HasDeducedReturnType && IsAddressOfFunction &&
Specialization->getReturnType()->isUndeducedType() &&
DeduceReturnType(Specialization, Info.getLocation(), false))
return TDK_MiscellaneousDeductionFailure;
+ if (IsAddressOfFunction && getLangOpts().CPlusPlus20 &&
+ Specialization->isImmediateEscalating() &&
+ CheckIfFunctionSpecializationIsImmediate(Specialization,
+ Info.getLocation()))
+ return TDK_MiscellaneousDeductionFailure;
+
// If the function has a dependent exception specification, resolve it now,
// so we can check that the exception specification matches.
auto *SpecializationFPT =
@@ -4376,23 +4426,31 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// noreturn can't be dependent, so we don't actually need this for them
// right now.)
QualType SpecializationType = Specialization->getType();
- if (!IsAddressOfFunction)
+ if (!IsAddressOfFunction) {
ArgFunctionType = adjustCCAndNoReturn(ArgFunctionType, SpecializationType,
/*AdjustExceptionSpec*/true);
+ // Revert placeholder types in the return type back to undeduced types so
+ // that the comparison below compares the declared return types.
+ if (HasDeducedReturnType) {
+ SpecializationType = SubstAutoType(SpecializationType, QualType());
+ ArgFunctionType = SubstAutoType(ArgFunctionType, QualType());
+ }
+ }
+
// If the requested function type does not match the actual type of the
// specialization with respect to arguments of compatible pointer to function
// types, template argument deduction fails.
if (!ArgFunctionType.isNull()) {
- if (IsAddressOfFunction &&
- !isSameOrCompatibleFunctionType(
- Context.getCanonicalType(SpecializationType),
- Context.getCanonicalType(ArgFunctionType)))
- return TDK_MiscellaneousDeductionFailure;
-
- if (!IsAddressOfFunction &&
- !Context.hasSameType(SpecializationType, ArgFunctionType))
- return TDK_MiscellaneousDeductionFailure;
+ if (IsAddressOfFunction
+ ? !isSameOrCompatibleFunctionType(
+ Context.getCanonicalType(SpecializationType),
+ Context.getCanonicalType(ArgFunctionType))
+ : !Context.hasSameType(SpecializationType, ArgFunctionType)) {
+ Info.FirstArg = TemplateArgument(SpecializationType);
+ Info.SecondArg = TemplateArgument(ArgFunctionType);
+ return TDK_NonDeducedMismatch;
+ }
}
return TDK_Success;
@@ -4697,11 +4755,11 @@ static bool CheckDeducedPlaceholderConstraints(Sema &S, const AutoType &Type,
/// should be specified in the 'Info' parameter.
/// \param IgnoreConstraints Set if we should not fail if the deduced type does
/// not satisfy the type-constraint in the auto type.
-Sema::TemplateDeductionResult Sema::DeduceAutoType(TypeLoc Type, Expr *Init,
- QualType &Result,
- TemplateDeductionInfo &Info,
- bool DependentDeduction,
- bool IgnoreConstraints) {
+Sema::TemplateDeductionResult
+Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
+ TemplateDeductionInfo &Info, bool DependentDeduction,
+ bool IgnoreConstraints,
+ TemplateSpecCandidateSet *FailedTSC) {
assert(DependentDeduction || Info.getDeducedDepth() == 0);
if (Init->containsErrors())
return TDK_AlreadyDiagnosed;
@@ -4815,7 +4873,8 @@ Sema::TemplateDeductionResult Sema::DeduceAutoType(TypeLoc Type, Expr *Init,
"substituting template parameter for 'auto' failed");
if (auto TDK = DeduceTemplateArgumentsFromCallArgument(
*this, TemplateParamsSt.get(), 0, FuncParam, Init, Info, Deduced,
- OriginalCallArgs, /*Decomposed=*/false, /*ArgIdx=*/0, /*TDF=*/0))
+ OriginalCallArgs, /*Decomposed=*/false, /*ArgIdx=*/0, /*TDF=*/0,
+ FailedTSC))
return DeductionFailed(TDK);
}
@@ -4982,6 +5041,33 @@ bool Sema::DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
return StillUndeduced;
}
+bool Sema::CheckIfFunctionSpecializationIsImmediate(FunctionDecl *FD,
+ SourceLocation Loc) {
+ assert(FD->isImmediateEscalating());
+
+ if (isLambdaConversionOperator(FD)) {
+ CXXRecordDecl *Lambda = cast<CXXMethodDecl>(FD)->getParent();
+ FunctionDecl *CallOp = Lambda->getLambdaCallOperator();
+
+ // For a generic lambda, instantiate the call operator if needed.
+ if (auto *Args = FD->getTemplateSpecializationArgs()) {
+ CallOp = InstantiateFunctionDeclaration(
+ CallOp->getDescribedFunctionTemplate(), Args, Loc);
+ if (!CallOp || CallOp->isInvalidDecl())
+ return true;
+ runWithSufficientStackSpace(
+ Loc, [&] { InstantiateFunctionDefinition(Loc, CallOp); });
+ }
+ return CallOp->isInvalidDecl();
+ }
+
+ if (FD->getTemplateInstantiationPattern()) {
+ runWithSufficientStackSpace(
+ Loc, [&] { InstantiateFunctionDefinition(Loc, FD); });
+ }
+ return false;
+}
+
/// If this is a non-static member function,
static void
AddImplicitObjectParameterType(ASTContext &Context,
@@ -5279,8 +5365,8 @@ FunctionTemplateDecl *Sema::getMoreSpecializedTemplate(
// function parameters that positionally correspond between the two
// templates are not of the same type, neither template is more specialized
// than the other.
- if (!TemplateParameterListsAreEqual(
- TPL1, TPL2, false, Sema::TPL_TemplateMatch, SourceLocation(), true))
+ if (!TemplateParameterListsAreEqual(TPL1, TPL2, false,
+ Sema::TPL_TemplateParamsEquivalent))
return nullptr;
for (unsigned i = 0; i < NumParams1; ++i)
@@ -5637,8 +5723,8 @@ getMoreSpecialized(Sema &S, QualType T1, QualType T2, TemplateLikeDecl *P1,
// function parameters that positionally correspond between the two
// templates are not of the same type, neither template is more specialized
// than the other.
- if (!S.TemplateParameterListsAreEqual(
- TPL1, TPL2, false, Sema::TPL_TemplateMatch, SourceLocation(), true))
+ if (!S.TemplateParameterListsAreEqual(TPL1, TPL2, false,
+ Sema::TPL_TemplateParamsEquivalent))
return nullptr;
if (!TemplateArgumentListAreEqual(S.getASTContext())(P1, P2))
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 2790e78aa53a..8702e2ca3a1b 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -26,6 +26,7 @@
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Sema.h"
@@ -34,6 +35,8 @@
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
+#include "llvm/ADT/ScopeExit.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TimeProfiler.h"
#include <optional>
@@ -131,6 +134,14 @@ HandleDefaultTempArgIntoTempTempParam(const TemplateTemplateParmDecl *TTP,
return Response::Done();
}
+Response HandlePartialClassTemplateSpec(
+ const ClassTemplatePartialSpecializationDecl *PartialClassTemplSpec,
+ MultiLevelTemplateArgumentList &Result, bool SkipForSpecialization) {
+ if (!SkipForSpecialization)
+ Result.addOuterRetainedLevels(PartialClassTemplSpec->getTemplateDepth());
+ return Response::Done();
+}
+
// Add template arguments from a class template instantiation.
Response
HandleClassTemplateSpec(const ClassTemplateSpecializationDecl *ClassTemplSpec,
@@ -153,6 +164,14 @@ HandleClassTemplateSpec(const ClassTemplateSpecializationDecl *ClassTemplSpec,
assert(ClassTemplSpec->getSpecializedTemplate() && "No class template?");
if (ClassTemplSpec->getSpecializedTemplate()->isMemberSpecialization())
return Response::Done();
+
+ // If this was instantiated from a partial template specialization, we need
+ // to get the next level of declaration context from the partial
+ // specialization, as the ClassTemplateSpecializationDecl's
+ // DeclContext/LexicalDeclContext will be for the primary template.
+ if (auto *InstFromPartialTempl = ClassTemplSpec->getSpecializedTemplateOrPartial()
+ .dyn_cast<ClassTemplatePartialSpecializationDecl *>())
+ return Response::ChangeDecl(InstFromPartialTempl->getLexicalDeclContext());
}
return Response::UseNextDecl(ClassTemplSpec);
}
@@ -208,6 +227,21 @@ Response HandleFunction(const FunctionDecl *Function,
return Response::UseNextDecl(Function);
}
+Response HandleFunctionTemplateDecl(const FunctionTemplateDecl *FTD,
+ MultiLevelTemplateArgumentList &Result) {
+ if (!isa<ClassTemplateSpecializationDecl>(FTD->getDeclContext())) {
+ NestedNameSpecifier *NNS = FTD->getTemplatedDecl()->getQualifier();
+ const Type *Ty;
+ const TemplateSpecializationType *TSTy;
+ if (NNS && (Ty = NNS->getAsType()) &&
+ (TSTy = Ty->getAs<TemplateSpecializationType>()))
+ Result.addOuterTemplateArguments(const_cast<FunctionTemplateDecl *>(FTD),
+ TSTy->template_arguments(),
+ /*Final=*/false);
+ }
+ return Response::ChangeDecl(FTD->getLexicalDeclContext());
+}
+
Response HandleRecordDecl(const CXXRecordDecl *Rec,
MultiLevelTemplateArgumentList &Result,
ASTContext &Context,
@@ -218,17 +252,17 @@ Response HandleRecordDecl(const CXXRecordDecl *Rec,
"Outer template not instantiated?");
if (ClassTemplate->isMemberSpecialization())
return Response::Done();
- if (ForConstraintInstantiation) {
- QualType RecordType = Context.getTypeDeclType(Rec);
- QualType Injected = cast<InjectedClassNameType>(RecordType)
- ->getInjectedSpecializationType();
- const auto *InjectedType = cast<TemplateSpecializationType>(Injected);
+ if (ForConstraintInstantiation)
Result.addOuterTemplateArguments(const_cast<CXXRecordDecl *>(Rec),
- InjectedType->template_arguments(),
+ ClassTemplate->getInjectedTemplateArgs(),
/*Final=*/false);
- }
}
+ if (const MemberSpecializationInfo *MSInfo =
+ Rec->getMemberSpecializationInfo())
+ if (MSInfo->getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ return Response::Done();
+
bool IsFriend = Rec->getFriendObjectKind() ||
(Rec->getDescribedClassTemplate() &&
Rec->getDescribedClassTemplate()->getFriendObjectKind());
@@ -294,18 +328,23 @@ MultiLevelTemplateArgumentList Sema::getTemplateInstantiationArgs(
// Accumulate the set of template argument lists in this structure.
MultiLevelTemplateArgumentList Result;
- if (Innermost)
+ using namespace TemplateInstArgsHelpers;
+ const Decl *CurDecl = ND;
+ if (Innermost) {
Result.addOuterTemplateArguments(const_cast<NamedDecl *>(ND),
Innermost->asArray(), Final);
-
- const Decl *CurDecl = ND;
+ CurDecl = Response::UseNextDecl(ND).NextDecl;
+ }
while (!CurDecl->isFileContextDecl()) {
- using namespace TemplateInstArgsHelpers;
Response R;
if (const auto *VarTemplSpec =
dyn_cast<VarTemplateSpecializationDecl>(CurDecl)) {
R = HandleVarTemplateSpec(VarTemplSpec, Result, SkipForSpecialization);
+ } else if (const auto *PartialClassTemplSpec =
+ dyn_cast<ClassTemplatePartialSpecializationDecl>(CurDecl)) {
+ R = HandlePartialClassTemplateSpec(PartialClassTemplSpec, Result,
+ SkipForSpecialization);
} else if (const auto *ClassTemplSpec =
dyn_cast<ClassTemplateSpecializationDecl>(CurDecl)) {
R = HandleClassTemplateSpec(ClassTemplSpec, Result,
@@ -318,6 +357,8 @@ MultiLevelTemplateArgumentList Sema::getTemplateInstantiationArgs(
} else if (const auto *CSD =
dyn_cast<ImplicitConceptSpecializationDecl>(CurDecl)) {
R = HandleImplicitConceptSpecializationDecl(CSD, Result);
+ } else if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(CurDecl)) {
+ R = HandleFunctionTemplateDecl(FTD, Result);
} else if (!isa<DeclContext>(CurDecl)) {
R = Response::DontClearRelativeToPrimaryNextDecl(CurDecl);
if (CurDecl->getDeclContext()->isTranslationUnit()) {
@@ -367,6 +408,8 @@ bool Sema::CodeSynthesisContext::isInstantiationRecord() const {
case InitializingStructuredBinding:
case MarkingClassDllexported:
case BuildingBuiltinDumpStructCall:
+ case LambdaExpressionSubstitution:
+ case BuildingDeductionGuides:
return false;
// This function should never be called when Kind's value is Memoization.
@@ -583,6 +626,13 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
SemaRef, CodeSynthesisContext::ParameterMappingSubstitution,
PointOfInstantiation, InstantiationRange, Template) {}
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Entity,
+ BuildingDeductionGuidesTag, SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef, CodeSynthesisContext::BuildingDeductionGuides,
+ PointOfInstantiation, InstantiationRange, Entity) {}
+
void Sema::pushCodeSynthesisContext(CodeSynthesisContext Ctx) {
Ctx.SavedInNonInstantiationSFINAEContext = InNonInstantiationSFINAEContext;
@@ -925,11 +975,13 @@ void Sema::PrintInstantiationStack() {
<< MD->isExplicitlyDefaulted() << DFK.asSpecialMember()
<< Context.getTagDeclType(MD->getParent());
} else if (DFK.isComparison()) {
+ QualType RecordType = FD->getParamDecl(0)
+ ->getType()
+ .getNonReferenceType()
+ .getUnqualifiedType();
Diags.Report(Active->PointOfInstantiation,
diag::note_comparison_synthesized_at)
- << (int)DFK.asComparison()
- << Context.getTagDeclType(
- cast<CXXRecordDecl>(FD->getLexicalDeclContext()));
+ << (int)DFK.asComparison() << RecordType;
}
break;
}
@@ -961,6 +1013,10 @@ void Sema::PrintInstantiationStack() {
case CodeSynthesisContext::Memoization:
break;
+ case CodeSynthesisContext::LambdaExpressionSubstitution:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_lambda_substitution_here);
+ break;
case CodeSynthesisContext::ConstraintsCheck: {
unsigned DiagID = 0;
if (!Active->Entity) {
@@ -1008,6 +1064,8 @@ void Sema::PrintInstantiationStack() {
diag::note_parameter_mapping_substitution_here)
<< Active->InstantiationRange;
break;
+ case CodeSynthesisContext::BuildingDeductionGuides:
+ llvm_unreachable("unexpected deduction guide in instantiation stack");
}
}
}
@@ -1016,6 +1074,7 @@ std::optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
if (InNonInstantiationSFINAEContext)
return std::optional<TemplateDeductionInfo *>(nullptr);
+ bool SawLambdaSubstitution = false;
for (SmallVectorImpl<CodeSynthesisContext>::const_reverse_iterator
Active = CodeSynthesisContexts.rbegin(),
ActiveEnd = CodeSynthesisContexts.rend();
@@ -1037,6 +1096,15 @@ std::optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
case CodeSynthesisContext::NestedRequirementConstraintsCheck:
// This is a template instantiation, so there is no SFINAE.
return std::nullopt;
+ case CodeSynthesisContext::LambdaExpressionSubstitution:
+ // [temp.deduct]p9
+ // A lambda-expression appearing in a function type or a template
+ // parameter is not considered part of the immediate context for the
+ // purposes of template argument deduction.
+
+ // We need to check parents.
+ SawLambdaSubstitution = true;
+ break;
case CodeSynthesisContext::DefaultTemplateArgumentInstantiation:
case CodeSynthesisContext::PriorTemplateArgumentSubstitution:
@@ -1049,12 +1117,17 @@ std::optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
case CodeSynthesisContext::ExplicitTemplateArgumentSubstitution:
case CodeSynthesisContext::DeducedTemplateArgumentSubstitution:
+ // We're either substituting explicitly-specified template arguments,
+ // deduced template arguments. SFINAE applies unless we are in a lambda
+ // expression, see [temp.deduct]p9.
+ if (SawLambdaSubstitution)
+ return std::nullopt;
+ [[fallthrough]];
case CodeSynthesisContext::ConstraintSubstitution:
case CodeSynthesisContext::RequirementInstantiation:
case CodeSynthesisContext::RequirementParameterInstantiation:
- // We're either substituting explicitly-specified template arguments,
- // deduced template arguments, a constraint expression or a requirement
- // in a requires expression, so SFINAE applies.
+ // SFINAE always applies in a constraint expression or a requirement
+ // in a requires expression.
assert(Active->DeductionInfo && "Missing deduction info pointer");
return Active->DeductionInfo;
@@ -1064,6 +1137,7 @@ std::optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
case CodeSynthesisContext::InitializingStructuredBinding:
case CodeSynthesisContext::MarkingClassDllexported:
case CodeSynthesisContext::BuildingBuiltinDumpStructCall:
+ case CodeSynthesisContext::BuildingDeductionGuides:
// This happens in a context unrelated to template instantiation, so
// there is no SFINAE.
return std::nullopt;
@@ -1231,7 +1305,8 @@ namespace {
// We recreated a local declaration, but not by instantiating it. There
// may be pending dependent diagnostics to produce.
- if (auto *DC = dyn_cast<DeclContext>(Old); DC && DC->isDependentContext())
+ if (auto *DC = dyn_cast<DeclContext>(Old);
+ DC && DC->isDependentContext() && DC->isFunctionOrMethod())
SemaRef.PerformDependentDiagnostics(DC, TemplateArgs);
}
@@ -1271,6 +1346,12 @@ namespace {
bool AllowInjectedClassName = false);
const LoopHintAttr *TransformLoopHintAttr(const LoopHintAttr *LH);
+ const NoInlineAttr *TransformStmtNoInlineAttr(const Stmt *OrigS,
+ const Stmt *InstS,
+ const NoInlineAttr *A);
+ const AlwaysInlineAttr *
+ TransformStmtAlwaysInlineAttr(const Stmt *OrigS, const Stmt *InstS,
+ const AlwaysInlineAttr *A);
ExprResult TransformPredefinedExpr(PredefinedExpr *E);
ExprResult TransformDeclRefExpr(DeclRefExpr *E);
@@ -1336,12 +1417,21 @@ namespace {
ExprResult TransformLambdaExpr(LambdaExpr *E) {
LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
Sema::ConstraintEvalRAII<TemplateInstantiator> RAII(*this);
+
+ Sema::CodeSynthesisContext C;
+ C.Kind = clang::Sema::CodeSynthesisContext::LambdaExpressionSubstitution;
+ C.PointOfInstantiation = E->getBeginLoc();
+ SemaRef.pushCodeSynthesisContext(C);
+ auto PopCtx =
+ llvm::make_scope_exit([this] { SemaRef.popCodeSynthesisContext(); });
+
ExprResult Result = inherited::TransformLambdaExpr(E);
if (Result.isInvalid())
return Result;
CXXMethodDecl *MD = Result.getAs<LambdaExpr>()->getCallOperator();
for (ParmVarDecl *PVD : MD->parameters()) {
+ assert(PVD && "null in a parameter list");
if (!PVD->hasDefaultArg())
continue;
Expr *UninstExpr = PVD->getUninstantiatedDefaultArg();
@@ -1766,6 +1856,20 @@ TemplateInstantiator::TransformLoopHintAttr(const LoopHintAttr *LH) {
return LoopHintAttr::CreateImplicit(getSema().Context, LH->getOption(),
LH->getState(), TransformedExpr, *LH);
}
+const NoInlineAttr *TemplateInstantiator::TransformStmtNoInlineAttr(
+ const Stmt *OrigS, const Stmt *InstS, const NoInlineAttr *A) {
+ if (!A || getSema().CheckNoInlineAttr(OrigS, InstS, *A))
+ return nullptr;
+
+ return A;
+}
+const AlwaysInlineAttr *TemplateInstantiator::TransformStmtAlwaysInlineAttr(
+ const Stmt *OrigS, const Stmt *InstS, const AlwaysInlineAttr *A) {
+ if (!A || getSema().CheckAlwaysInlineAttr(OrigS, InstS, *A))
+ return nullptr;
+
+ return A;
+}
ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
Decl *AssociatedDecl, const NonTypeTemplateParmDecl *parm,
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index bed5237749c5..f78d46f59503 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ScopeInfo.h"
@@ -94,11 +95,14 @@ static void instantiateDependentAlignedAttr(
if (!Result.isInvalid())
S.AddAlignedAttr(New, *Aligned, Result.getAs<Expr>(), IsPackExpansion);
} else {
- TypeSourceInfo *Result = S.SubstType(Aligned->getAlignmentType(),
- TemplateArgs, Aligned->getLocation(),
- DeclarationName());
- if (Result)
- S.AddAlignedAttr(New, *Aligned, Result, IsPackExpansion);
+ if (TypeSourceInfo *Result =
+ S.SubstType(Aligned->getAlignmentType(), TemplateArgs,
+ Aligned->getLocation(), DeclarationName())) {
+ if (!S.CheckAlignasTypeArgument(Aligned->getSpelling(), Result,
+ Aligned->getLocation(),
+ Result->getTypeLoc().getSourceRange()))
+ S.AddAlignedAttr(New, *Aligned, Result, IsPackExpansion);
+ }
}
}
@@ -834,6 +838,22 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
}
}
+/// Update instantiation attributes after template was late parsed.
+///
+/// Some attributes are evaluated based on the body of template. If it is
+/// late parsed, such attributes cannot be evaluated when declaration is
+/// instantiated. This function is used to update instantiation attributes when
+/// template definition is ready.
+void Sema::updateAttrsForLateParsedTemplate(const Decl *Pattern, Decl *Inst) {
+ for (const auto *Attr : Pattern->attrs()) {
+ if (auto *A = dyn_cast<StrictFPAttr>(Attr)) {
+ if (!Inst->hasAttr<StrictFPAttr>())
+ Inst->addAttr(A->clone(getASTContext()));
+ continue;
+ }
+ }
+}
+
/// In the MS ABI, we need to instantiate default arguments of dllexported
/// default constructors along with the constructor definition. This allows IR
/// gen to emit a constructor closure which calls the default constructor with
@@ -1409,11 +1429,14 @@ Decl *TemplateDeclInstantiator::VisitStaticAssertDecl(StaticAssertDecl *D) {
if (InstantiatedAssertExpr.isInvalid())
return nullptr;
- return SemaRef.BuildStaticAssertDeclaration(D->getLocation(),
- InstantiatedAssertExpr.get(),
- D->getMessage(),
- D->getRParenLoc(),
- D->isFailed());
+ ExprResult InstantiatedMessageExpr =
+ SemaRef.SubstExpr(D->getMessage(), TemplateArgs);
+ if (InstantiatedMessageExpr.isInvalid())
+ return nullptr;
+
+ return SemaRef.BuildStaticAssertDeclaration(
+ D->getLocation(), InstantiatedAssertExpr.get(),
+ InstantiatedMessageExpr.get(), D->getRParenLoc(), D->isFailed());
}
Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
@@ -1637,33 +1660,12 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
<< QualifierLoc.getSourceRange();
return nullptr;
}
-
- if (PrevClassTemplate) {
- const ClassTemplateDecl *MostRecentPrevCT =
- PrevClassTemplate->getMostRecentDecl();
- TemplateParameterList *PrevParams =
- MostRecentPrevCT->getTemplateParameters();
-
- // Make sure the parameter lists match.
- if (!SemaRef.TemplateParameterListsAreEqual(
- D->getTemplatedDecl(), InstParams,
- MostRecentPrevCT->getTemplatedDecl(), PrevParams, true,
- Sema::TPL_TemplateMatch))
- return nullptr;
-
- // Do some additional validation, then merge default arguments
- // from the existing declarations.
- if (SemaRef.CheckTemplateParameterList(InstParams, PrevParams,
- Sema::TPC_ClassTemplate))
- return nullptr;
- }
}
CXXRecordDecl *RecordInst = CXXRecordDecl::Create(
SemaRef.Context, Pattern->getTagKind(), DC, Pattern->getBeginLoc(),
Pattern->getLocation(), Pattern->getIdentifier(), PrevDecl,
/*DelayTypeCreation=*/true);
-
if (QualifierLoc)
RecordInst->setQualifierInfo(QualifierLoc);
@@ -1673,16 +1675,38 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
ClassTemplateDecl *Inst
= ClassTemplateDecl::Create(SemaRef.Context, DC, D->getLocation(),
D->getIdentifier(), InstParams, RecordInst);
- assert(!(isFriend && Owner->isDependentContext()));
- Inst->setPreviousDecl(PrevClassTemplate);
-
RecordInst->setDescribedClassTemplate(Inst);
if (isFriend) {
- if (PrevClassTemplate)
+ assert(!Owner->isDependentContext());
+ Inst->setLexicalDeclContext(Owner);
+ RecordInst->setLexicalDeclContext(Owner);
+
+ if (PrevClassTemplate) {
+ Inst->setCommonPtr(PrevClassTemplate->getCommonPtr());
+ RecordInst->setTypeForDecl(
+ PrevClassTemplate->getTemplatedDecl()->getTypeForDecl());
+ const ClassTemplateDecl *MostRecentPrevCT =
+ PrevClassTemplate->getMostRecentDecl();
+ TemplateParameterList *PrevParams =
+ MostRecentPrevCT->getTemplateParameters();
+
+ // Make sure the parameter lists match.
+ if (!SemaRef.TemplateParameterListsAreEqual(
+ RecordInst, InstParams, MostRecentPrevCT->getTemplatedDecl(),
+ PrevParams, true, Sema::TPL_TemplateMatch))
+ return nullptr;
+
+ // Do some additional validation, then merge default arguments
+ // from the existing declarations.
+ if (SemaRef.CheckTemplateParameterList(InstParams, PrevParams,
+ Sema::TPC_ClassTemplate))
+ return nullptr;
+
Inst->setAccess(PrevClassTemplate->getAccess());
- else
+ } else {
Inst->setAccess(D->getAccess());
+ }
Inst->setObjectOfFriendDecl();
// TODO: do we want to track the instantiation progeny of this
@@ -1693,15 +1717,15 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
Inst->setInstantiatedFromMemberTemplate(D);
}
+ Inst->setPreviousDecl(PrevClassTemplate);
+
// Trigger creation of the type for the instantiation.
- SemaRef.Context.getInjectedClassNameType(RecordInst,
- Inst->getInjectedClassNameSpecialization());
+ SemaRef.Context.getInjectedClassNameType(
+ RecordInst, Inst->getInjectedClassNameSpecialization());
// Finish handling of friends.
if (isFriend) {
DC->makeDeclVisibleInContext(Inst);
- Inst->setLexicalDeclContext(Owner);
- RecordInst->setLexicalDeclContext(Owner);
return Inst;
}
@@ -2108,9 +2132,8 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
Function = CXXDeductionGuideDecl::Create(
SemaRef.Context, DC, D->getInnerLocStart(),
InstantiatedExplicitSpecifier, NameInfo, T, TInfo,
- D->getSourceRange().getEnd());
- if (DGuide->isCopyDeductionCandidate())
- cast<CXXDeductionGuideDecl>(Function)->setIsCopyDeductionCandidate();
+ D->getSourceRange().getEnd(), /*Ctor=*/nullptr,
+ DGuide->getDeductionCandidateKind());
Function->setAccess(D->getAccess());
} else {
Function = FunctionDecl::Create(
@@ -2277,7 +2300,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
// Filter out previous declarations that don't match the scope. The only
// effect this has is to remove declarations found in inline namespaces
// for friend declarations with unqualified names.
- if (isFriend && !QualifierLoc && !FunctionTemplate) {
+ if (isFriend && !QualifierLoc) {
SemaRef.FilterLookupForScope(Previous, DC, /*Scope=*/ nullptr,
/*ConsiderLinkage=*/ true,
QualifierLoc.hasQualifier());
@@ -2506,9 +2529,6 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
Constructor->getConstexprKind(), InheritedConstructor(),
TrailingRequiresClause);
Method->setRangeEnd(Constructor->getEndLoc());
- if (Constructor->isDefaultConstructor() ||
- Constructor->isCopyOrMoveConstructor())
- Method->setIneligibleOrNotSelected(true);
} else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(D)) {
Method = CXXDestructorDecl::Create(
SemaRef.Context, Record, StartLoc, NameInfo, T, TInfo,
@@ -2531,8 +2551,6 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
SemaRef.Context, Record, StartLoc, NameInfo, T, TInfo, SC,
D->UsesFPIntrin(), D->isInlineSpecified(), D->getConstexprKind(),
D->getEndLoc(), TrailingRequiresClause);
- if (D->isMoveAssignmentOperator() || D->isCopyAssignmentOperator())
- Method->setIneligibleOrNotSelected(true);
}
if (D->isInlined())
@@ -2735,6 +2753,22 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
if (IsExplicitSpecialization && !isFriend)
SemaRef.CompleteMemberSpecialization(Method, Previous);
+ // If the method is a special member function, we need to mark it as
+ // ineligible so that Owner->addDecl() won't mark the class as non trivial.
+ // At the end of the class instantiation, we calculate eligibility again and
+ // then we adjust trivility if needed.
+ // We need this check to happen only after the method parameters are set,
+ // because being e.g. a copy constructor depends on the instantiated
+ // arguments.
+ if (auto *Constructor = dyn_cast<CXXConstructorDecl>(Method)) {
+ if (Constructor->isDefaultConstructor() ||
+ Constructor->isCopyOrMoveConstructor())
+ Method->setIneligibleOrNotSelected(true);
+ } else if (Method->isCopyAssignmentOperator() ||
+ Method->isMoveAssignmentOperator()) {
+ Method->setIneligibleOrNotSelected(true);
+ }
+
// If there's a function template, let our caller handle it.
if (FunctionTemplate) {
// do nothing
@@ -2992,8 +3026,10 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
if (AutoTypeLoc AutoLoc = DI->getTypeLoc().getContainedAutoTypeLoc())
if (AutoLoc.isConstrained())
+ // Note: We attach the uninstantiated constriant here, so that it can be
+ // instantiated relative to the top level, like all our other constraints.
if (SemaRef.AttachTypeConstraint(
- AutoLoc, Param,
+ AutoLoc, Param, D,
IsExpandedParameterPack
? DI->getTypeLoc().getAs<PackExpansionTypeLoc>()
.getEllipsisLoc()
@@ -4644,11 +4680,7 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
ActiveInstType &ActiveInst = SemaRef.CodeSynthesisContexts.back();
if (ActiveInst.Kind == ActiveInstType::ExplicitTemplateArgumentSubstitution ||
ActiveInst.Kind == ActiveInstType::DeducedTemplateArgumentSubstitution) {
- if (FunctionTemplateDecl *FunTmpl
- = dyn_cast<FunctionTemplateDecl>(ActiveInst.Entity)) {
- assert(FunTmpl->getTemplatedDecl() == Tmpl &&
- "Deduction from the wrong function template?");
- (void) FunTmpl;
+ if (isa<FunctionTemplateDecl>(ActiveInst.Entity)) {
SemaRef.InstantiatingSpecializations.erase(
{ActiveInst.Entity->getCanonicalDecl(), ActiveInst.Kind});
atTemplateEnd(SemaRef.TemplateInstCallbacks, SemaRef, ActiveInst);
@@ -4916,6 +4948,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
"missing LateParsedTemplate");
LateTemplateParser(OpaqueParser, *LPTIter->second);
Pattern = PatternDecl->getBody(PatternDecl);
+ updateAttrsForLateParsedTemplate(PatternDecl, Function);
}
// Note, we should never try to instantiate a deleted function template.
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
index 86268b504cbb..dfcc78dafdc4 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -1220,10 +1220,11 @@ ExprResult Sema::ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
if (!LHS || !RHS) {
Expr *Pack = LHS ? LHS : RHS;
assert(Pack && "fold expression with neither LHS nor RHS");
- DiscardOperands();
- if (!Pack->containsUnexpandedParameterPack())
+ if (!Pack->containsUnexpandedParameterPack()) {
+ DiscardOperands();
return Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
<< Pack->getSourceRange();
+ }
}
BinaryOperatorKind Opc = ConvertTokenKindToBinaryOpcode(Operator);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
index 8cb1ed28fe3e..0aa691d24171 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
@@ -38,6 +38,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include <bitset>
@@ -102,8 +103,10 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
}
}
- S.Diag(loc, diag::warn_type_attribute_wrong_type) << name << WhichType
- << type;
+ S.Diag(loc, attr.isRegularKeywordAttribute()
+ ? diag::err_type_attribute_wrong_type
+ : diag::warn_type_attribute_wrong_type)
+ << name << WhichType << type;
}
// objc_gc applies to Objective-C pointers or, otherwise, to the
@@ -125,6 +128,7 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
case ParsedAttr::AT_VectorCall: \
case ParsedAttr::AT_AArch64VectorPcs: \
case ParsedAttr::AT_AArch64SVEPcs: \
+ case ParsedAttr::AT_ArmStreaming: \
case ParsedAttr::AT_AMDGPUKernelCall: \
case ParsedAttr::AT_MSABI: \
case ParsedAttr::AT_SysVABI: \
@@ -683,7 +687,7 @@ static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
for (ParsedAttr &attr : AttrsCopy) {
// Do not distribute [[]] attributes. They have strict rules for what
// they appertain to.
- if (attr.isStandardAttributeSyntax())
+ if (attr.isStandardAttributeSyntax() || attr.isRegularKeywordAttribute())
continue;
switch (attr.getKind()) {
@@ -946,7 +950,7 @@ static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
// Retrieve the bound.
QualType bound = typeParam->getUnderlyingType();
- const auto *boundObjC = bound->getAs<ObjCObjectPointerType>();
+ const auto *boundObjC = bound->castAs<ObjCObjectPointerType>();
// Determine whether the type argument is substitutable for the bound.
if (typeArgObjC->isObjCIdType()) {
@@ -1498,7 +1502,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
case DeclSpec::TST_int128:
if (!S.Context.getTargetInfo().hasInt128Type() &&
!(S.getLangOpts().SYCLIsDevice || S.getLangOpts().CUDAIsDevice ||
- (S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice)))
+ (S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsTargetDevice)))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "__int128";
if (DS.getTypeSpecSign() == TypeSpecifierSign::Unsigned)
@@ -1511,16 +1515,17 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// do not diagnose _Float16 usage to avoid false alarm.
// ToDo: more precise diagnostics for CUDA.
if (!S.Context.getTargetInfo().hasFloat16Type() && !S.getLangOpts().CUDA &&
- !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
+ !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsTargetDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "_Float16";
Result = Context.Float16Ty;
break;
case DeclSpec::TST_half: Result = Context.HalfTy; break;
case DeclSpec::TST_BFloat16:
- if (!S.Context.getTargetInfo().hasBFloat16Type())
- S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
- << "__bf16";
+ if (!S.Context.getTargetInfo().hasBFloat16Type() &&
+ !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsTargetDevice) &&
+ !S.getLangOpts().SYCLIsDevice)
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported) << "__bf16";
Result = Context.BFloat16Ty;
break;
case DeclSpec::TST_float: Result = Context.FloatTy; break;
@@ -1543,7 +1548,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
case DeclSpec::TST_float128:
if (!S.Context.getTargetInfo().hasFloat128Type() &&
!S.getLangOpts().SYCLIsDevice &&
- !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
+ !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsTargetDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "__float128";
Result = Context.Float128Ty;
@@ -1551,7 +1556,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
case DeclSpec::TST_ibm128:
if (!S.Context.getTargetInfo().hasIbm128Type() &&
!S.getLangOpts().SYCLIsDevice &&
- !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
+ !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsTargetDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported) << "__ibm128";
Result = Context.Ibm128Ty;
break;
@@ -2197,6 +2202,21 @@ QualType Sema::BuildPointerType(QualType T,
if (getLangOpts().OpenCL)
T = deduceOpenCLPointeeAddrSpace(*this, T);
+ // In WebAssembly, pointers to reference types and pointers to tables are
+ // illegal.
+ if (getASTContext().getTargetInfo().getTriple().isWasm()) {
+ if (T.isWebAssemblyReferenceType()) {
+ Diag(Loc, diag::err_wasm_reference_pr) << 0;
+ return QualType();
+ }
+
+ // We need to desugar the type here in case T is a ParenType.
+ if (T->getUnqualifiedDesugaredType()->isWebAssemblyTableType()) {
+ Diag(Loc, diag::err_wasm_table_pr) << 0;
+ return QualType();
+ }
+ }
+
// Build the pointer type.
return Context.getPointerType(T);
}
@@ -2272,6 +2292,17 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
if (getLangOpts().OpenCL)
T = deduceOpenCLPointeeAddrSpace(*this, T);
+ // In WebAssembly, references to reference types and tables are illegal.
+ if (getASTContext().getTargetInfo().getTriple().isWasm() &&
+ T.isWebAssemblyReferenceType()) {
+ Diag(Loc, diag::err_wasm_reference_pr) << 1;
+ return QualType();
+ }
+ if (T->isWebAssemblyTableType()) {
+ Diag(Loc, diag::err_wasm_table_pr) << 1;
+ return QualType();
+ }
+
// Handle restrict on references.
if (LValueRef)
return Context.getLValueReferenceType(T, SpelledAsLValue);
@@ -2475,12 +2506,22 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
} else {
// C99 6.7.5.2p1: If the element type is an incomplete or function type,
// reject it (e.g. void ary[7], struct foo ary[7], void ary[7]())
- if (RequireCompleteSizedType(Loc, T,
+ if (!T.isWebAssemblyReferenceType() &&
+ RequireCompleteSizedType(Loc, T,
diag::err_array_incomplete_or_sizeless_type))
return QualType();
}
- if (T->isSizelessType()) {
+ // Multi-dimensional arrays of WebAssembly references are not allowed.
+ if (Context.getTargetInfo().getTriple().isWasm() && T->isArrayType()) {
+ const auto *ATy = dyn_cast<ArrayType>(T);
+ if (ATy && ATy->getElementType().isWebAssemblyReferenceType()) {
+ Diag(Loc, diag::err_wasm_reftype_multidimensional_array);
+ return QualType();
+ }
+ }
+
+ if (T->isSizelessType() && !T.isWebAssemblyReferenceType()) {
Diag(Loc, diag::err_array_incomplete_or_sizeless_type) << 1 << T;
return QualType();
}
@@ -2599,7 +2640,7 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
<< ArraySize->getSourceRange();
return QualType();
}
- if (ConstVal == 0) {
+ if (ConstVal == 0 && !T.isWebAssemblyReferenceType()) {
// GCC accepts zero sized static arrays. We allow them when
// we're not in a SFINAE context.
Diag(ArraySize->getBeginLoc(),
@@ -2668,8 +2709,8 @@ QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
return QualType();
}
// Only support _BitInt elements with byte-sized power of 2 NumBits.
- if (CurType->isBitIntType()) {
- unsigned NumBits = CurType->getAs<BitIntType>()->getNumBits();
+ if (const auto *BIT = CurType->getAs<BitIntType>()) {
+ unsigned NumBits = BIT->getNumBits();
if (!llvm::isPowerOf2_32(NumBits) || NumBits < 8) {
Diag(AttrLoc, diag::err_attribute_invalid_bitint_vector_type)
<< (NumBits < 8);
@@ -2750,7 +2791,7 @@ QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
// Only support _BitInt elements with byte-sized power of 2 NumBits.
if (T->isBitIntType()) {
- unsigned NumBits = T->getAs<BitIntType>()->getNumBits();
+ unsigned NumBits = T->castAs<BitIntType>()->getNumBits();
if (!llvm::isPowerOf2_32(NumBits) || NumBits < 8) {
Diag(AttrLoc, diag::err_attribute_invalid_bitint_vector_type)
<< (NumBits < 8);
@@ -2988,6 +3029,9 @@ QualType Sema::BuildFunctionType(QualType T,
Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 0 <<
FixItHint::CreateInsertion(Loc, "*");
Invalid = true;
+ } else if (ParamType->isWebAssemblyTableType()) {
+ Diag(Loc, diag::err_wasm_table_as_function_parameter);
+ Invalid = true;
}
// C++2a [dcl.fct]p4:
@@ -3629,7 +3673,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::FunctionalCast:
if (isa<DeducedTemplateSpecializationType>(Deduced))
break;
- if (SemaRef.getLangOpts().CPlusPlus2b && IsCXXAutoType &&
+ if (SemaRef.getLangOpts().CPlusPlus23 && IsCXXAutoType &&
!Auto->isDecltypeAuto())
break; // auto(x)
[[fallthrough]];
@@ -4538,7 +4582,7 @@ static bool hasOuterPointerLikeChunk(const Declarator &D, unsigned endIndex) {
return false;
}
-static bool IsNoDerefableChunk(DeclaratorChunk Chunk) {
+static bool IsNoDerefableChunk(const DeclaratorChunk &Chunk) {
return (Chunk.Kind == DeclaratorChunk::Pointer ||
Chunk.Kind == DeclaratorChunk::Array);
}
@@ -4878,12 +4922,14 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// If we're supposed to infer nullability, do so now.
if (inferNullability && !inferNullabilityInnerOnlyComplete) {
- ParsedAttr::Syntax syntax = inferNullabilityCS
- ? ParsedAttr::AS_ContextSensitiveKeyword
- : ParsedAttr::AS_Keyword;
+ ParsedAttr::Form form =
+ inferNullabilityCS
+ ? ParsedAttr::Form::ContextSensitiveKeyword()
+ : ParsedAttr::Form::Keyword(false /*IsAlignAs*/,
+ false /*IsRegularKeywordAttribute*/);
ParsedAttr *nullabilityAttr = Pool.create(
S.getNullabilityKeyword(*inferNullability), SourceRange(pointerLoc),
- nullptr, SourceLocation(), nullptr, 0, syntax);
+ nullptr, SourceLocation(), nullptr, 0, form);
attrs.addAtEnd(nullabilityAttr);
@@ -4964,6 +5010,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Walk the DeclTypeInfo, building the recursive type as we go.
// DeclTypeInfos are ordered from the identifier out, which is
// opposite of what we want :).
+
+ // Track if the produced type matches the structure of the declarator.
+ // This is used later to decide if we can fill `TypeLoc` from
+ // `DeclaratorChunk`s. E.g. it must be false if Clang recovers from
+ // an error by replacing the type with `int`.
+ bool AreDeclaratorChunksValid = true;
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
unsigned chunkIndex = e - i - 1;
state.setCurrentChunkIndex(chunkIndex);
@@ -5055,6 +5107,19 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
DeclaratorChunk::ArrayTypeInfo &ATI = DeclType.Arr;
Expr *ArraySize = static_cast<Expr*>(ATI.NumElts);
ArrayType::ArraySizeModifier ASM;
+
+ // Microsoft property fields can have multiple sizeless array chunks
+ // (i.e. int x[][][]). Skip all of these except one to avoid creating
+ // bad incomplete array types.
+ if (chunkIndex != 0 && !ArraySize &&
+ D.getDeclSpec().getAttributes().hasMSPropertyAttr()) {
+ // This is a sizeless chunk. If the next is also, skip this one.
+ DeclaratorChunk &NextDeclType = D.getTypeObject(chunkIndex - 1);
+ if (NextDeclType.Kind == DeclaratorChunk::Array &&
+ !NextDeclType.Arr.NumElts)
+ break;
+ }
+
if (ATI.isStar)
ASM = ArrayType::Star;
else if (ATI.hasStatic)
@@ -5096,17 +5161,6 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
D.setInvalidType(true);
}
}
- const AutoType *AT = T->getContainedAutoType();
- // Allow arrays of auto if we are a generic lambda parameter.
- // i.e. [](auto (&array)[5]) { return array[0]; }; OK
- if (AT && D.getContext() != DeclaratorContext::LambdaExprParameter) {
- // We've already diagnosed this for decltype(auto).
- if (!AT->isDecltypeAuto())
- S.Diag(DeclType.Loc, diag::err_illegal_decl_array_of_auto)
- << getPrintableNameForEntity(Name) << T;
- T = QualType();
- break;
- }
// Array parameters can be marked nullable as well, although it's not
// necessary if they're marked 'static'.
@@ -5144,6 +5198,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
: diag::err_deduced_return_type);
T = Context.IntTy;
D.setInvalidType(true);
+ AreDeclaratorChunksValid = false;
} else {
S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
diag::warn_cxx11_compat_deduced_return_type);
@@ -5154,6 +5209,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
S.Diag(D.getBeginLoc(), diag::err_trailing_return_in_parens)
<< T << D.getSourceRange();
D.setInvalidType(true);
+ // FIXME: recover and fill decls in `TypeLoc`s.
+ AreDeclaratorChunksValid = false;
} else if (D.getName().getKind() ==
UnqualifiedIdKind::IK_DeductionGuideName) {
if (T != Context.DependentTy) {
@@ -5161,6 +5218,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
diag::err_deduction_guide_with_complex_decl)
<< D.getSourceRange();
D.setInvalidType(true);
+ // FIXME: recover and fill decls in `TypeLoc`s.
+ AreDeclaratorChunksValid = false;
}
} else if (D.getContext() != DeclaratorContext::LambdaExpr &&
(T.hasQualifiers() || !isa<AutoType>(T) ||
@@ -5171,6 +5230,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
diag::err_trailing_return_without_auto)
<< T << D.getDeclSpec().getSourceRange();
D.setInvalidType(true);
+ // FIXME: recover and fill decls in `TypeLoc`s.
+ AreDeclaratorChunksValid = false;
}
T = S.GetTypeFromParser(FTI.getTrailingReturnType(), &TInfo);
if (T.isNull()) {
@@ -5211,6 +5272,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
S.Diag(DeclType.Loc, diagID) << T->isFunctionType() << T;
T = Context.IntTy;
D.setInvalidType(true);
+ AreDeclaratorChunksValid = false;
}
// Do not allow returning half FP value.
@@ -5277,6 +5339,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
ObjCObjectPointerTypeLoc TLoc = TLB.push<ObjCObjectPointerTypeLoc>(T);
TLoc.setStarLoc(FixitLoc);
TInfo = TLB.getTypeSourceInfo(Context, T);
+ } else {
+ AreDeclaratorChunksValid = false;
}
D.setInvalidType(true);
@@ -5397,6 +5461,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
T = (!LangOpts.requiresStrictPrototypes() && !LangOpts.OpenCL)
? Context.getFunctionNoProtoType(T, EI)
: Context.IntTy;
+ AreDeclaratorChunksValid = false;
break;
}
@@ -5631,9 +5696,13 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (!ClsType.isNull())
T = S.BuildMemberPointerType(T, ClsType, DeclType.Loc,
D.getIdentifier());
+ else
+ AreDeclaratorChunksValid = false;
+
if (T.isNull()) {
T = Context.IntTy;
D.setInvalidType(true);
+ AreDeclaratorChunksValid = false;
} else if (DeclType.Mem.TypeQuals) {
T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Mem.TypeQuals);
}
@@ -5651,6 +5720,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (T.isNull()) {
D.setInvalidType(true);
T = Context.IntTy;
+ AreDeclaratorChunksValid = false;
}
// See if there are any attributes on this declarator chunk.
@@ -5909,9 +5979,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
assert(!T.isNull() && "T must not be null at the end of this function");
- if (D.isInvalidType())
+ if (!AreDeclaratorChunksValid)
return Context.getTrivialTypeSourceInfo(T);
-
return GetTypeSourceInfoForDeclarator(state, T, TInfo);
}
@@ -5976,7 +6045,7 @@ static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
ParsedAttr *attr = D.getAttributePool().create(
&S.Context.Idents.get("objc_ownership"), SourceLocation(),
/*scope*/ nullptr, SourceLocation(),
- /*args*/ &Args, 1, ParsedAttr::AS_GNU);
+ /*args*/ &Args, 1, ParsedAttr::Form::GNU());
chunk.getAttrs().addAtEnd(attr);
// TODO: mark whether we did this inference?
}
@@ -6508,6 +6577,12 @@ GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
}
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ // Microsoft property fields can have multiple sizeless array chunks
+ // (i.e. int x[][][]). Don't create more than one level of incomplete array.
+ if (CurrTL.getTypeLocClass() == TypeLoc::IncompleteArray && e != 1 &&
+ D.getDeclSpec().getAttributes().hasMSPropertyAttr())
+ continue;
+
// An AtomicTypeLoc might be produced by an atomic qualifier in this
// declarator chunk.
if (AtomicTypeLoc ATL = CurrTL.getAs<AtomicTypeLoc>()) {
@@ -7286,12 +7361,12 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
if (Attrs[attr::Ptr32] && Attrs[attr::Ptr64]) {
S.Diag(PAttr.getLoc(), diag::err_attributes_are_not_compatible)
<< "'__ptr32'"
- << "'__ptr64'";
+ << "'__ptr64'" << /*isRegularKeyword=*/0;
return true;
} else if (Attrs[attr::SPtr] && Attrs[attr::UPtr]) {
S.Diag(PAttr.getLoc(), diag::err_attributes_are_not_compatible)
<< "'__sptr'"
- << "'__uptr'";
+ << "'__uptr'" << /*isRegularKeyword=*/0;
return true;
}
@@ -7331,6 +7406,37 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
return false;
}
+static bool HandleWebAssemblyFuncrefAttr(TypeProcessingState &State,
+ QualType &QT, ParsedAttr &PAttr) {
+ assert(PAttr.getKind() == ParsedAttr::AT_WebAssemblyFuncref);
+
+ Sema &S = State.getSema();
+ Attr *A = createSimpleAttr<WebAssemblyFuncrefAttr>(S.Context, PAttr);
+
+ std::bitset<attr::LastAttr> Attrs;
+ attr::Kind NewAttrKind = A->getKind();
+ const auto *AT = dyn_cast<AttributedType>(QT);
+ while (AT) {
+ Attrs[AT->getAttrKind()] = true;
+ AT = dyn_cast<AttributedType>(AT->getModifiedType());
+ }
+
+ // You cannot specify duplicate type attributes, so if the attribute has
+ // already been applied, flag it.
+ if (Attrs[NewAttrKind]) {
+ S.Diag(PAttr.getLoc(), diag::warn_duplicate_attribute_exact) << PAttr;
+ return true;
+ }
+
+ // Add address space to type based on its attributes.
+ LangAS ASIdx = LangAS::wasm_funcref;
+ QualType Pointee = QT->getPointeeType();
+ Pointee = S.Context.getAddrSpaceQualType(
+ S.Context.removeAddrSpaceQualType(Pointee), ASIdx);
+ QT = State.getAttributedType(A, QT, S.Context.getPointerType(Pointee));
+ return false;
+}
+
/// Map a nullability attribute kind to a nullability kind.
static NullabilityKind mapNullabilityAttrKind(ParsedAttr::Kind kind) {
switch (kind) {
@@ -7634,6 +7740,8 @@ static Attr *getCCTypeAttr(ASTContext &Ctx, ParsedAttr &Attr) {
return createSimpleAttr<AArch64VectorPcsAttr>(Ctx, Attr);
case ParsedAttr::AT_AArch64SVEPcs:
return createSimpleAttr<AArch64SVEPcsAttr>(Ctx, Attr);
+ case ParsedAttr::AT_ArmStreaming:
+ return createSimpleAttr<ArmStreamingAttr>(Ctx, Attr);
case ParsedAttr::AT_AMDGPUKernelCall:
return createSimpleAttr<AMDGPUKernelCallAttr>(Ctx, Attr);
case ParsedAttr::AT_Pcs: {
@@ -7781,8 +7889,8 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
CallingConv CC = fn->getCallConv();
if (CC == CC_X86FastCall) {
S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
- << FunctionType::getNameForCallConv(CC)
- << "regparm";
+ << FunctionType::getNameForCallConv(CC) << "regparm"
+ << attr.isRegularKeywordAttribute();
attr.setInvalid();
return true;
}
@@ -7861,8 +7969,9 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
// and the CCs don't match.
if (S.getCallingConvAttributedType(type)) {
S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
- << FunctionType::getNameForCallConv(CC)
- << FunctionType::getNameForCallConv(CCOld);
+ << FunctionType::getNameForCallConv(CC)
+ << FunctionType::getNameForCallConv(CCOld)
+ << attr.isRegularKeywordAttribute();
attr.setInvalid();
return true;
}
@@ -7894,7 +8003,8 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
// Also diagnose fastcall with regparm.
if (CC == CC_X86FastCall && fn->getHasRegParm()) {
S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
- << "regparm" << FunctionType::getNameForCallConv(CC_X86FastCall);
+ << "regparm" << FunctionType::getNameForCallConv(CC_X86FastCall)
+ << attr.isRegularKeywordAttribute();
attr.setInvalid();
return true;
}
@@ -8085,10 +8195,18 @@ static bool verifyValidIntegerConstantExpr(Sema &S, const ParsedAttr &Attr,
/// match one of the standard Neon vector types.
static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S, VectorType::VectorKind VecKind) {
+ bool IsTargetCUDAAndHostARM = false;
+ if (S.getLangOpts().CUDAIsDevice) {
+ const TargetInfo *AuxTI = S.getASTContext().getAuxTargetInfo();
+ IsTargetCUDAAndHostARM =
+ AuxTI && (AuxTI->getTriple().isAArch64() || AuxTI->getTriple().isARM());
+ }
+
// Target must have NEON (or MVE, whose vectors are similar enough
// not to need a separate attribute)
- if (!S.Context.getTargetInfo().hasFeature("neon") &&
- !S.Context.getTargetInfo().hasFeature("mve")) {
+ if (!(S.Context.getTargetInfo().hasFeature("neon") ||
+ S.Context.getTargetInfo().hasFeature("mve") ||
+ IsTargetCUDAAndHostARM)) {
S.Diag(Attr.getLoc(), diag::err_attribute_unsupported)
<< Attr << "'neon' or 'mve'";
Attr.setInvalid();
@@ -8096,8 +8214,8 @@ static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
}
// Check the attribute arguments.
if (Attr.getNumArgs() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr
- << 1;
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr << 1;
Attr.setInvalid();
return;
}
@@ -8107,7 +8225,8 @@ static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
return;
// Only certain element types are supported for Neon vectors.
- if (!isPermittedNeonBaseType(CurType, VecKind, S)) {
+ if (!isPermittedNeonBaseType(CurType, VecKind, S) &&
+ !IsTargetCUDAAndHostARM) {
S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType;
Attr.setInvalid();
return;
@@ -8210,6 +8329,69 @@ static void HandleArmMveStrictPolymorphismAttr(TypeProcessingState &State,
CurType, CurType);
}
+/// HandleRISCVRVVVectorBitsTypeAttr - The "riscv_rvv_vector_bits" attribute is
+/// used to create fixed-length versions of sizeless RVV types such as
+/// vint8m1_t_t.
+static void HandleRISCVRVVVectorBitsTypeAttr(QualType &CurType,
+ ParsedAttr &Attr, Sema &S) {
+ // Target must have vector extension.
+ if (!S.Context.getTargetInfo().hasFeature("zve32x")) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_unsupported)
+ << Attr << "'zve32x'";
+ Attr.setInvalid();
+ return;
+ }
+
+ auto VScale = S.Context.getTargetInfo().getVScaleRange(S.getLangOpts());
+ if (!VScale || !VScale->first || VScale->first != VScale->second) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_riscv_rvv_bits_unsupported)
+ << Attr;
+ Attr.setInvalid();
+ return;
+ }
+
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr << 1;
+ Attr.setInvalid();
+ return;
+ }
+
+ // The vector size must be an integer constant expression.
+ llvm::APSInt RVVVectorSizeInBits(32);
+ if (!verifyValidIntegerConstantExpr(S, Attr, RVVVectorSizeInBits))
+ return;
+
+ // Attribute can only be attached to a single RVV vector type.
+ if (!CurType->isRVVVLSBuiltinType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_rvv_type)
+ << Attr << CurType;
+ Attr.setInvalid();
+ return;
+ }
+
+ unsigned VecSize = static_cast<unsigned>(RVVVectorSizeInBits.getZExtValue());
+
+ ASTContext::BuiltinVectorTypeInfo Info =
+ S.Context.getBuiltinVectorTypeInfo(CurType->castAs<BuiltinType>());
+ unsigned EltSize = S.Context.getTypeSize(Info.ElementType);
+ unsigned MinElts = Info.EC.getKnownMinValue();
+
+ // The attribute vector size must match -mrvv-vector-bits.
+ unsigned ExpectedSize = VScale->first * MinElts * EltSize;
+ if (VecSize != ExpectedSize) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_bad_rvv_vector_size)
+ << VecSize << ExpectedSize;
+ Attr.setInvalid();
+ return;
+ }
+
+ VectorType::VectorKind VecKind = VectorType::RVVFixedLengthDataVector;
+ VecSize /= EltSize;
+ CurType = S.Context.getVectorType(Info.ElementType, VecSize, VecKind);
+}
+
/// Handle OpenCL Access Qualifier Attribute.
static void HandleOpenCLAccessAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S) {
@@ -8348,12 +8530,13 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
if (attr.isInvalid())
continue;
- if (attr.isStandardAttributeSyntax()) {
+ if (attr.isStandardAttributeSyntax() || attr.isRegularKeywordAttribute()) {
// [[gnu::...]] attributes are treated as declaration attributes, so may
// not appertain to a DeclaratorChunk. If we handle them as type
// attributes, accept them in that position and diagnose the GCC
// incompatibility.
if (attr.isGNUScope()) {
+ assert(attr.isStandardAttributeSyntax());
bool IsTypeAttr = attr.isTypeAttr();
if (TAL == TAL_DeclChunk) {
state.getSema().Diag(attr.getLoc(),
@@ -8381,9 +8564,11 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
switch (attr.getKind()) {
default:
// A [[]] attribute on a declarator chunk must appertain to a type.
- if (attr.isStandardAttributeSyntax() && TAL == TAL_DeclChunk) {
+ if ((attr.isStandardAttributeSyntax() ||
+ attr.isRegularKeywordAttribute()) &&
+ TAL == TAL_DeclChunk) {
state.getSema().Diag(attr.getLoc(), diag::err_attribute_not_type_attr)
- << attr;
+ << attr << attr.isRegularKeywordAttribute();
attr.setUsedAsTypeAttr();
}
break;
@@ -8456,6 +8641,10 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
attr.setUsedAsTypeAttr();
break;
}
+ case ParsedAttr::AT_RISCVRVVVectorBits:
+ HandleRISCVRVVVectorBitsTypeAttr(type, attr, state.getSema());
+ attr.setUsedAsTypeAttr();
+ break;
case ParsedAttr::AT_OpenCLAccess:
HandleOpenCLAccessAttr(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
@@ -8488,6 +8677,12 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
attr.setUsedAsTypeAttr();
break;
+ case ParsedAttr::AT_WebAssemblyFuncref: {
+ if (!HandleWebAssemblyFuncrefAttr(state, type, attr))
+ attr.setUsedAsTypeAttr();
+ break;
+ }
+
MS_TYPE_ATTRS_CASELIST:
if (!handleMSPointerTypeQualifierAttr(state, attr, type))
attr.setUsedAsTypeAttr();
@@ -8554,7 +8749,8 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
// Attributes with standard syntax have strict rules for what they
// appertain to and hence should not use the "distribution" logic below.
- if (attr.isStandardAttributeSyntax()) {
+ if (attr.isStandardAttributeSyntax() ||
+ attr.isRegularKeywordAttribute()) {
if (!handleFunctionTypeAttr(state, attr, type)) {
diagnoseBadTypeAttribute(state.getSema(), attr, type);
attr.setInvalid();
@@ -8889,8 +9085,7 @@ static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
? S.ImplicitMSInheritanceAttrLoc
: RD->getSourceRange();
RD->addAttr(MSInheritanceAttr::CreateImplicit(
- S.getASTContext(), BestCase, Loc, AttributeCommonInfo::AS_Microsoft,
- MSInheritanceAttr::Spelling(IM)));
+ S.getASTContext(), BestCase, Loc, MSInheritanceAttr::Spelling(IM)));
S.Consumer.AssignInheritanceModel(RD);
}
}
diff --git a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
index 4244bbc1e4b1..10b3587885e3 100644
--- a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
+++ b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
@@ -19,8 +19,8 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
-#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/OpenMPClause.h"
@@ -31,6 +31,7 @@
#include "clang/Basic/DiagnosticParse.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Sema/Designator.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/ParsedTemplate.h"
@@ -377,22 +378,43 @@ public:
/// By default, this routine transforms a statement by delegating to the
/// appropriate TransformXXXAttr function to transform a specific kind
/// of attribute. Subclasses may override this function to transform
- /// attributed statements using some other mechanism.
+ /// attributed statements/types using some other mechanism.
///
/// \returns the transformed attribute
const Attr *TransformAttr(const Attr *S);
-/// Transform the specified attribute.
-///
-/// Subclasses should override the transformation of attributes with a pragma
-/// spelling to transform expressions stored within the attribute.
-///
-/// \returns the transformed attribute.
-#define ATTR(X)
-#define PRAGMA_SPELLING_ATTR(X) \
+ // Transform the given statement attribute.
+ //
+ // Delegates to the appropriate TransformXXXAttr function to transform a
+ // specific kind of statement attribute. Unlike the non-statement taking
+ // version of this, this implements all attributes, not just pragmas.
+ const Attr *TransformStmtAttr(const Stmt *OrigS, const Stmt *InstS,
+ const Attr *A);
+
+ // Transform the specified attribute.
+ //
+ // Subclasses should override the transformation of attributes with a pragma
+ // spelling to transform expressions stored within the attribute.
+ //
+ // \returns the transformed attribute.
+#define ATTR(X) \
const X##Attr *Transform##X##Attr(const X##Attr *R) { return R; }
#include "clang/Basic/AttrList.inc"
+ // Transform the specified attribute.
+ //
+ // Subclasses should override the transformation of attributes to do
+ // transformation and checking of statement attributes. By default, this
+ // delegates to the non-statement taking version.
+ //
+ // \returns the transformed attribute.
+#define ATTR(X) \
+ const X##Attr *TransformStmt##X##Attr(const Stmt *, const Stmt *, \
+ const X##Attr *A) { \
+ return getDerived().Transform##X##Attr(A); \
+ }
+#include "clang/Basic/AttrList.inc"
+
/// Transform the given expression.
///
/// By default, this routine transforms an expression by delegating to the
@@ -2400,6 +2422,19 @@ public:
return getSema().ActOnOpenMPMessageClause(MS, StartLoc, LParenLoc, EndLoc);
}
+ /// Build a new OpenMP 'doacross' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *
+ RebuildOMPDoacrossClause(OpenMPDoacrossClauseModifier DepType,
+ SourceLocation DepLoc, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VarList, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDoacrossClause(
+ DepType, DepLoc, ColonLoc, VarList, StartLoc, LParenLoc, EndLoc);
+ }
+
/// Rebuild the operand to an Objective-C \@synchronized statement.
///
/// By default, performs semantic analysis to build the new statement.
@@ -2803,6 +2838,21 @@ public:
R.addDecl(FoundDecl);
R.resolveKind();
+ if (getSema().isUnevaluatedContext() && Base->isImplicitCXXThis() &&
+ isa<FieldDecl, IndirectFieldDecl, MSPropertyDecl>(Member)) {
+ if (auto *ThisClass = cast<CXXThisExpr>(Base)
+ ->getType()
+ ->getPointeeType()
+ ->getAsCXXRecordDecl()) {
+ auto *Class = cast<CXXRecordDecl>(Member->getDeclContext());
+ // In unevaluated contexts, an expression supposed to be a member access
+ // might reference a member in an unrelated class.
+ if (!ThisClass->Equals(Class) && !ThisClass->isDerivedFrom(Class))
+ return getSema().BuildDeclRefExpr(Member, Member->getType(),
+ VK_LValue, Member->getLocation());
+ }
+ }
+
return getSema().BuildMemberReferenceExpr(Base, BaseType, OpLoc, isArrow,
SS, TemplateKWLoc,
FirstQualifierInScope,
@@ -2977,7 +3027,7 @@ public:
RParenLoc);
}
- /// Build a new generic selection expression.
+ /// Build a new generic selection expression with an expression predicate.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2988,9 +3038,25 @@ public:
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs) {
return getSema().CreateGenericSelectionExpr(KeyLoc, DefaultLoc, RParenLoc,
+ /*PredicateIsExpr=*/true,
ControllingExpr, Types, Exprs);
}
+ /// Build a new generic selection expression with a type predicate.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildGenericSelectionExpr(SourceLocation KeyLoc,
+ SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ TypeSourceInfo *ControllingType,
+ ArrayRef<TypeSourceInfo *> Types,
+ ArrayRef<Expr *> Exprs) {
+ return getSema().CreateGenericSelectionExpr(KeyLoc, DefaultLoc, RParenLoc,
+ /*PredicateIsExpr=*/false,
+ ControllingType, Types, Exprs);
+ }
+
/// Build a new overloaded operator call expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -3000,10 +3066,11 @@ public:
/// argument-dependent lookup, etc. Subclasses may override this routine to
/// provide different behavior.
ExprResult RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
- SourceLocation OpLoc,
- Expr *Callee,
- Expr *First,
- Expr *Second);
+ SourceLocation OpLoc,
+ SourceLocation CalleeLoc,
+ bool RequiresADL,
+ const UnresolvedSetImpl &Functions,
+ Expr *First, Expr *Second);
/// Build a new C++ "named" cast expression, such as static_cast or
/// reinterpret_cast.
@@ -6988,7 +7055,8 @@ QualType TreeTransform<Derived>::TransformAttributedType(
// type sugar, and therefore cannot be diagnosed in any other way.
if (auto nullability = oldType->getImmediateNullability()) {
if (!modifiedType->canHaveNullability()) {
- SemaRef.Diag(TL.getAttr()->getLocation(),
+ SemaRef.Diag((TL.getAttr() ? TL.getAttr()->getLocation()
+ : TL.getModifiedLoc().getBeginLoc()),
diag::err_nullability_nonpointer)
<< DiagNullabilityKind(*nullability, false) << modifiedType;
return QualType();
@@ -7532,36 +7600,52 @@ const Attr *TreeTransform<Derived>::TransformAttr(const Attr *R) {
return R;
switch (R->getKind()) {
-// Transform attributes with a pragma spelling by calling TransformXXXAttr.
-#define ATTR(X)
-#define PRAGMA_SPELLING_ATTR(X) \
+// Transform attributes by calling TransformXXXAttr.
+#define ATTR(X) \
case attr::X: \
return getDerived().Transform##X##Attr(cast<X##Attr>(R));
#include "clang/Basic/AttrList.inc"
- default:
+ }
+ return R;
+}
+
+template <typename Derived>
+const Attr *TreeTransform<Derived>::TransformStmtAttr(const Stmt *OrigS,
+ const Stmt *InstS,
+ const Attr *R) {
+ if (!R)
return R;
+
+ switch (R->getKind()) {
+// Transform attributes by calling TransformStmtXXXAttr.
+#define ATTR(X) \
+ case attr::X: \
+ return getDerived().TransformStmt##X##Attr(OrigS, InstS, cast<X##Attr>(R));
+#include "clang/Basic/AttrList.inc"
}
+ return TransformAttr(R);
}
template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformAttributedStmt(AttributedStmt *S,
StmtDiscardKind SDK) {
+ StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt(), SDK);
+ if (SubStmt.isInvalid())
+ return StmtError();
+
bool AttrsChanged = false;
SmallVector<const Attr *, 1> Attrs;
// Visit attributes and keep track if any are transformed.
for (const auto *I : S->getAttrs()) {
- const Attr *R = getDerived().TransformAttr(I);
+ const Attr *R =
+ getDerived().TransformStmtAttr(S->getSubStmt(), SubStmt.get(), I);
AttrsChanged |= (I != R);
if (R)
Attrs.push_back(R);
}
- StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt(), SDK);
- if (SubStmt.isInvalid())
- return StmtError();
-
if (SubStmt.get() == S->getSubStmt() && !AttrsChanged)
return S;
@@ -7939,8 +8023,7 @@ TreeTransform<Derived>::TransformMSAsmStmt(MSAsmStmt *S) {
TransformedExprs, S->getEndLoc());
}
-// C++ Coroutines TS
-
+// C++ Coroutines
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
@@ -8048,6 +8131,13 @@ TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
return StmtError();
Builder.Deallocate = DeallocRes.get();
+ if (auto *ResultDecl = S->getResultDecl()) {
+ StmtResult Res = getDerived().TransformStmt(ResultDecl);
+ if (Res.isInvalid())
+ return StmtError();
+ Builder.ResultDecl = Res.get();
+ }
+
if (auto *ReturnStmt = S->getReturnStmt()) {
StmtResult Res = getDerived().TransformStmt(ReturnStmt);
if (Res.isInvalid())
@@ -10650,6 +10740,22 @@ OMPClause *TreeTransform<Derived>::TransformOMPXDynCGroupMemClause(
Size.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPDoacrossClause(OMPDoacrossClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPDoacrossClause(
+ C->getDependenceType(), C->getDependenceLoc(), C->getColonLoc(), Vars,
+ C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+}
+
//===----------------------------------------------------------------------===//
// Expression transformation
//===----------------------------------------------------------------------===//
@@ -10793,9 +10899,14 @@ TreeTransform<Derived>::TransformUserDefinedLiteral(UserDefinedLiteral *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformGenericSelectionExpr(GenericSelectionExpr *E) {
- ExprResult ControllingExpr =
- getDerived().TransformExpr(E->getControllingExpr());
- if (ControllingExpr.isInvalid())
+ ExprResult ControllingExpr;
+ TypeSourceInfo *ControllingType = nullptr;
+ if (E->isExprPredicate())
+ ControllingExpr = getDerived().TransformExpr(E->getControllingExpr());
+ else
+ ControllingType = getDerived().TransformType(E->getControllingType());
+
+ if (ControllingExpr.isInvalid() && !ControllingType)
return ExprError();
SmallVector<Expr *, 4> AssocExprs;
@@ -10818,12 +10929,16 @@ TreeTransform<Derived>::TransformGenericSelectionExpr(GenericSelectionExpr *E) {
AssocExprs.push_back(AssocExpr.get());
}
+ if (!ControllingType)
return getDerived().RebuildGenericSelectionExpr(E->getGenericLoc(),
E->getDefaultLoc(),
E->getRParenLoc(),
ControllingExpr.get(),
AssocTypes,
AssocExprs);
+ return getDerived().RebuildGenericSelectionExpr(
+ E->getGenericLoc(), E->getDefaultLoc(), E->getRParenLoc(),
+ ControllingType, AssocTypes, AssocExprs);
}
template<typename Derived>
@@ -11342,7 +11457,8 @@ TreeTransform<Derived>::TransformBinaryOperator(BinaryOperator *E) {
if (LHS.isInvalid())
return ExprError();
- ExprResult RHS = getDerived().TransformExpr(E->getRHS());
+ ExprResult RHS =
+ getDerived().TransformInitializer(E->getRHS(), /*NotCopyInit=*/false);
if (RHS.isInvalid())
return ExprError();
@@ -11602,13 +11718,12 @@ TreeTransform<Derived>::TransformDesignatedInitExpr(DesignatedInitExpr *E) {
bool ExprChanged = false;
for (const DesignatedInitExpr::Designator &D : E->designators()) {
if (D.isFieldDesignator()) {
- Desig.AddDesignator(Designator::getField(D.getFieldName(),
- D.getDotLoc(),
- D.getFieldLoc()));
- if (D.getField()) {
+ Desig.AddDesignator(Designator::CreateFieldDesignator(
+ D.getFieldName(), D.getDotLoc(), D.getFieldLoc()));
+ if (D.getFieldDecl()) {
FieldDecl *Field = cast_or_null<FieldDecl>(
- getDerived().TransformDecl(D.getFieldLoc(), D.getField()));
- if (Field != D.getField())
+ getDerived().TransformDecl(D.getFieldLoc(), D.getFieldDecl()));
+ if (Field != D.getFieldDecl())
// Rebuild the expression when the transformed FieldDecl is
// different to the already assigned FieldDecl.
ExprChanged = true;
@@ -11627,7 +11742,7 @@ TreeTransform<Derived>::TransformDesignatedInitExpr(DesignatedInitExpr *E) {
return ExprError();
Desig.AddDesignator(
- Designator::getArray(Index.get(), D.getLBracketLoc()));
+ Designator::CreateArrayDesignator(Index.get(), D.getLBracketLoc()));
ExprChanged = ExprChanged || Init.get() != E->getArrayIndex(D);
ArrayExprs.push_back(Index.get());
@@ -11644,10 +11759,8 @@ TreeTransform<Derived>::TransformDesignatedInitExpr(DesignatedInitExpr *E) {
if (End.isInvalid())
return ExprError();
- Desig.AddDesignator(Designator::getArrayRange(Start.get(),
- End.get(),
- D.getLBracketLoc(),
- D.getEllipsisLoc()));
+ Desig.AddDesignator(Designator::CreateArrayRangeDesignator(
+ Start.get(), End.get(), D.getLBracketLoc(), D.getEllipsisLoc()));
ExprChanged = ExprChanged || Start.get() != E->getArrayRangeStart(D) ||
End.get() != E->getArrayRangeEnd(D);
@@ -11879,10 +11992,6 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
llvm_unreachable("not an overloaded operator?");
}
- ExprResult Callee = getDerived().TransformExpr(E->getCallee());
- if (Callee.isInvalid())
- return ExprError();
-
ExprResult First;
if (E->getOperator() == OO_Amp)
First = getDerived().TransformAddressOfOperand(E->getArg(0));
@@ -11893,28 +12002,45 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
ExprResult Second;
if (E->getNumArgs() == 2) {
- Second = getDerived().TransformExpr(E->getArg(1));
+ Second =
+ getDerived().TransformInitializer(E->getArg(1), /*NotCopyInit=*/false);
if (Second.isInvalid())
return ExprError();
}
- if (!getDerived().AlwaysRebuild() &&
- Callee.get() == E->getCallee() &&
- First.get() == E->getArg(0) &&
- (E->getNumArgs() != 2 || Second.get() == E->getArg(1)))
- return SemaRef.MaybeBindToTemporary(E);
-
Sema::FPFeaturesStateRAII FPFeaturesState(getSema());
FPOptionsOverride NewOverrides(E->getFPFeatures());
getSema().CurFPFeatures =
NewOverrides.applyOverrides(getSema().getLangOpts());
getSema().FpPragmaStack.CurrentValue = NewOverrides;
- return getDerived().RebuildCXXOperatorCallExpr(E->getOperator(),
- E->getOperatorLoc(),
- Callee.get(),
- First.get(),
- Second.get());
+ Expr *Callee = E->getCallee();
+ if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Callee)) {
+ LookupResult R(SemaRef, ULE->getName(), ULE->getNameLoc(),
+ Sema::LookupOrdinaryName);
+ if (getDerived().TransformOverloadExprDecls(ULE, ULE->requiresADL(), R))
+ return ExprError();
+
+ return getDerived().RebuildCXXOperatorCallExpr(
+ E->getOperator(), E->getOperatorLoc(), Callee->getBeginLoc(),
+ ULE->requiresADL(), R.asUnresolvedSet(), First.get(), Second.get());
+ }
+
+ UnresolvedSet<1> Functions;
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Callee))
+ Callee = ICE->getSubExprAsWritten();
+ NamedDecl *DR = cast<DeclRefExpr>(Callee)->getDecl();
+ ValueDecl *VD = cast_or_null<ValueDecl>(
+ getDerived().TransformDecl(DR->getLocation(), DR));
+ if (!VD)
+ return ExprError();
+
+ if (!isa<CXXMethodDecl>(VD))
+ Functions.addDecl(VD);
+
+ return getDerived().RebuildCXXOperatorCallExpr(
+ E->getOperator(), E->getOperatorLoc(), Callee->getBeginLoc(),
+ /*RequiresADL=*/false, Functions, First.get(), Second.get());
}
template<typename Derived>
@@ -13233,37 +13359,6 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
LambdaScopeInfo *LSI = getSema().PushLambdaScope();
Sema::FunctionScopeRAII FuncScopeCleanup(getSema());
- // Transform the template parameters, and add them to the current
- // instantiation scope. The null case is handled correctly.
- auto TPL = getDerived().TransformTemplateParameterList(
- E->getTemplateParameterList());
- LSI->GLTemplateParameterList = TPL;
-
- // Transform the type of the original lambda's call operator.
- // The transformation MUST be done in the CurrentInstantiationScope since
- // it introduces a mapping of the original to the newly created
- // transformed parameters.
- TypeSourceInfo *NewCallOpTSI = nullptr;
- {
- TypeSourceInfo *OldCallOpTSI = E->getCallOperator()->getTypeSourceInfo();
- FunctionProtoTypeLoc OldCallOpFPTL =
- OldCallOpTSI->getTypeLoc().getAs<FunctionProtoTypeLoc>();
-
- TypeLocBuilder NewCallOpTLBuilder;
- SmallVector<QualType, 4> ExceptionStorage;
- TreeTransform *This = this; // Work around gcc.gnu.org/PR56135.
- QualType NewCallOpType = TransformFunctionProtoType(
- NewCallOpTLBuilder, OldCallOpFPTL, nullptr, Qualifiers(),
- [&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
- return This->TransformExceptionSpec(OldCallOpFPTL.getBeginLoc(), ESI,
- ExceptionStorage, Changed);
- });
- if (NewCallOpType.isNull())
- return ExprError();
- NewCallOpTSI = NewCallOpTLBuilder.getTypeSourceInfo(getSema().Context,
- NewCallOpType);
- }
-
// Create the local class that will describe the lambda.
// FIXME: DependencyKind below is wrong when substituting inside a templated
@@ -13280,49 +13375,24 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
DependencyKind = CXXRecordDecl::LDK_NeverDependent;
CXXRecordDecl *OldClass = E->getLambdaClass();
- CXXRecordDecl *Class =
- getSema().createLambdaClosureType(E->getIntroducerRange(), NewCallOpTSI,
- DependencyKind, E->getCaptureDefault());
-
+ CXXRecordDecl *Class = getSema().createLambdaClosureType(
+ E->getIntroducerRange(), /*Info=*/nullptr, DependencyKind,
+ E->getCaptureDefault());
getDerived().transformedLocalDecl(OldClass, {Class});
- std::optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling;
- if (getDerived().ReplacingOriginal())
- Mangling = std::make_tuple(OldClass->hasKnownLambdaInternalLinkage(),
- OldClass->getLambdaManglingNumber(),
- OldClass->getDeviceLambdaManglingNumber(),
- OldClass->getLambdaContextDecl());
-
- // Build the call operator.
- CXXMethodDecl *NewCallOperator = getSema().startLambdaDefinition(
- Class, E->getIntroducerRange(), NewCallOpTSI,
- E->getCallOperator()->getEndLoc(),
- NewCallOpTSI->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams(),
- E->getCallOperator()->getConstexprKind(),
- E->getCallOperator()->getStorageClass(),
- E->getCallOperator()->getTrailingRequiresClause());
-
- LSI->CallOperator = NewCallOperator;
-
- getDerived().transformAttrs(E->getCallOperator(), NewCallOperator);
- getDerived().transformedLocalDecl(E->getCallOperator(), {NewCallOperator});
+ CXXMethodDecl *NewCallOperator =
+ getSema().CreateLambdaCallOperator(E->getIntroducerRange(), Class);
+ NewCallOperator->setLexicalDeclContext(getSema().CurContext);
- // Number the lambda for linkage purposes if necessary.
- getSema().handleLambdaNumbering(Class, NewCallOperator, Mangling);
+ // Enter the scope of the lambda.
+ getSema().buildLambdaScope(LSI, NewCallOperator, E->getIntroducerRange(),
+ E->getCaptureDefault(), E->getCaptureDefaultLoc(),
+ E->hasExplicitParameters(), E->isMutable());
// Introduce the context of the call operator.
Sema::ContextRAII SavedContext(getSema(), NewCallOperator,
/*NewThisContext*/false);
- // Enter the scope of the lambda.
- getSema().buildLambdaScope(LSI, NewCallOperator,
- E->getIntroducerRange(),
- E->getCaptureDefault(),
- E->getCaptureDefaultLoc(),
- E->hasExplicitParameters(),
- E->hasExplicitResultType(),
- E->isMutable());
-
bool Invalid = false;
// Transform captures.
@@ -13362,7 +13432,8 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
}
VarDecl *NewVD = getSema().createLambdaInitCaptureVarDecl(
OldVD->getLocation(), InitQualType, NewC.EllipsisLoc,
- OldVD->getIdentifier(), OldVD->getInitStyle(), Init.get());
+ OldVD->getIdentifier(), OldVD->getInitStyle(), Init.get(),
+ getSema().CurContext);
if (!NewVD) {
Invalid = true;
break;
@@ -13442,6 +13513,61 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
}
getSema().finishLambdaExplicitCaptures(LSI);
+ // Transform the template parameters, and add them to the current
+ // instantiation scope. The null case is handled correctly.
+ auto TPL = getDerived().TransformTemplateParameterList(
+ E->getTemplateParameterList());
+ LSI->GLTemplateParameterList = TPL;
+
+ // Transform the type of the original lambda's call operator.
+ // The transformation MUST be done in the CurrentInstantiationScope since
+ // it introduces a mapping of the original to the newly created
+ // transformed parameters.
+ TypeSourceInfo *NewCallOpTSI = nullptr;
+ {
+ TypeSourceInfo *OldCallOpTSI = E->getCallOperator()->getTypeSourceInfo();
+ auto OldCallOpFPTL =
+ OldCallOpTSI->getTypeLoc().getAs<FunctionProtoTypeLoc>();
+
+ TypeLocBuilder NewCallOpTLBuilder;
+ SmallVector<QualType, 4> ExceptionStorage;
+ TreeTransform *This = this; // Work around gcc.gnu.org/PR56135.
+ QualType NewCallOpType = TransformFunctionProtoType(
+ NewCallOpTLBuilder, OldCallOpFPTL, nullptr, Qualifiers(),
+ [&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
+ return This->TransformExceptionSpec(OldCallOpFPTL.getBeginLoc(), ESI,
+ ExceptionStorage, Changed);
+ });
+ if (NewCallOpType.isNull())
+ return ExprError();
+ NewCallOpTSI =
+ NewCallOpTLBuilder.getTypeSourceInfo(getSema().Context, NewCallOpType);
+ }
+
+ getSema().CompleteLambdaCallOperator(
+ NewCallOperator, E->getCallOperator()->getLocation(),
+ E->getCallOperator()->getInnerLocStart(),
+ E->getCallOperator()->getTrailingRequiresClause(), NewCallOpTSI,
+ E->getCallOperator()->getConstexprKind(),
+ E->getCallOperator()->getStorageClass(),
+ NewCallOpTSI->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams(),
+ E->hasExplicitResultType());
+
+ getDerived().transformAttrs(E->getCallOperator(), NewCallOperator);
+ getDerived().transformedLocalDecl(E->getCallOperator(), {NewCallOperator});
+
+ {
+ // Number the lambda for linkage purposes if necessary.
+ Sema::ContextRAII ManglingContext(getSema(), Class->getDeclContext());
+
+ std::optional<CXXRecordDecl::LambdaNumbering> Numbering;
+ if (getDerived().ReplacingOriginal()) {
+ Numbering = OldClass->getLambdaNumbering();
+ }
+
+ getSema().handleLambdaNumbering(Class, NewCallOperator, Numbering);
+ }
+
// FIXME: Sema's lambda-building mechanism expects us to push an expression
// evaluation context even if we're not transforming the function body.
getSema().PushExpressionEvaluationContext(
@@ -14024,13 +14150,17 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
// We've got down to a single element; build a binary operator.
Expr *LHS = LeftFold ? Result.get() : Out.get();
Expr *RHS = LeftFold ? Out.get() : Result.get();
- if (Callee)
+ if (Callee) {
+ UnresolvedSet<16> Functions;
+ Functions.append(Callee->decls_begin(), Callee->decls_end());
Result = getDerived().RebuildCXXOperatorCallExpr(
BinaryOperator::getOverloadedOperator(E->getOperator()),
- E->getEllipsisLoc(), Callee, LHS, RHS);
- else
+ E->getEllipsisLoc(), Callee->getBeginLoc(), Callee->requiresADL(),
+ Functions, LHS, RHS);
+ } else {
Result = getDerived().RebuildBinaryOperator(E->getEllipsisLoc(),
E->getOperator(), LHS, RHS);
+ }
} else
Result = Out;
@@ -14595,7 +14725,12 @@ TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
oldCapture));
assert(blockScope->CaptureMap.count(newCapture));
}
- assert(oldBlock->capturesCXXThis() == blockScope->isCXXThisCaptured());
+
+ // The this pointer may not be captured by the instantiated block, even when
+ // it's captured by the original block, if the expression causing the
+ // capture is in the discarded branch of a constexpr if statement.
+ assert((!blockScope->isCXXThisCaptured() || oldBlock->capturesCXXThis()) &&
+ "this pointer isn't captured in the old block");
}
#endif
@@ -15029,14 +15164,11 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
return Template.get();
}
-template<typename Derived>
-ExprResult
-TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
- SourceLocation OpLoc,
- Expr *OrigCallee,
- Expr *First,
- Expr *Second) {
- Expr *Callee = OrigCallee->IgnoreParenCasts();
+template <typename Derived>
+ExprResult TreeTransform<Derived>::RebuildCXXOperatorCallExpr(
+ OverloadedOperatorKind Op, SourceLocation OpLoc, SourceLocation CalleeLoc,
+ bool RequiresADL, const UnresolvedSetImpl &Functions, Expr *First,
+ Expr *Second) {
bool isPostIncDec = Second && (Op == OO_PlusPlus || Op == OO_MinusMinus);
if (First->getObjectKind() == OK_ObjCProperty) {
@@ -15061,8 +15193,8 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
if (Op == OO_Subscript) {
if (!First->getType()->isOverloadableType() &&
!Second->getType()->isOverloadableType())
- return getSema().CreateBuiltinArraySubscriptExpr(
- First, Callee->getBeginLoc(), Second, OpLoc);
+ return getSema().CreateBuiltinArraySubscriptExpr(First, CalleeLoc, Second,
+ OpLoc);
} else if (Op == OO_Arrow) {
// It is possible that the type refers to a RecoveryExpr created earlier
// in the tree transformation.
@@ -15096,27 +15228,6 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
}
}
- // Compute the transformed set of functions (and function templates) to be
- // used during overload resolution.
- UnresolvedSet<16> Functions;
- bool RequiresADL;
-
- if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Callee)) {
- Functions.append(ULE->decls_begin(), ULE->decls_end());
- // If the overload could not be resolved in the template definition
- // (because we had a dependent argument), ADL is performed as part of
- // template instantiation.
- RequiresADL = ULE->requiresADL();
- } else {
- // If we've resolved this to a particular non-member function, just call
- // that function. If we resolved it to a member function,
- // CreateOverloaded* will find that function for us.
- NamedDecl *ND = cast<DeclRefExpr>(Callee)->getDecl();
- if (!isa<CXXMethodDecl>(ND))
- Functions.addDecl(ND);
- RequiresADL = false;
- }
-
// Add any functions found via argument-dependent lookup.
Expr *Args[2] = { First, Second };
unsigned NumArgs = 1 + (Second != nullptr);
@@ -15129,23 +15240,6 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
RequiresADL);
}
- if (Op == OO_Subscript) {
- SourceLocation LBrace;
- SourceLocation RBrace;
-
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Callee)) {
- DeclarationNameLoc NameLoc = DRE->getNameInfo().getInfo();
- LBrace = NameLoc.getCXXOperatorNameBeginLoc();
- RBrace = NameLoc.getCXXOperatorNameEndLoc();
- } else {
- LBrace = Callee->getBeginLoc();
- RBrace = OpLoc;
- }
-
- return SemaRef.CreateOverloadedArraySubscriptExpr(LBrace, RBrace,
- First, Second);
- }
-
// Create the overloaded operator invocation for binary operators.
BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op);
ExprResult Result = SemaRef.CreateOverloadedBinOp(
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
index 96bc47dcdb4e..72e582107480 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
@@ -250,6 +250,11 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
ID = PREDEF_TYPE_##Id##_ID; \
break;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ ID = PREDEF_TYPE_##Id##_ID; \
+ break;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
case BuiltinType::BuiltinFn:
ID = PREDEF_TYPE_BUILTIN_FN;
break;
@@ -479,8 +484,7 @@ bool serialization::needsAnonymousDeclarationNumber(const NamedDecl *D) {
}
// Otherwise, we only care about anonymous class members / block-scope decls.
- // FIXME: We need to handle lambdas and blocks within inline / templated
- // variables too.
+ // FIXME: We need to handle blocks within inline / templated variables too.
if (D->getDeclName())
return false;
if (!isa<RecordDecl, ObjCInterfaceDecl>(D->getLexicalDeclContext()))
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
index 4d72596b7439..5f756961c6e1 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
@@ -105,7 +105,6 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Bitstream/BitstreamReader.h"
#include "llvm/Support/Casting.h"
@@ -124,6 +123,7 @@
#include "llvm/Support/Timer.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -277,12 +277,17 @@ static bool checkLanguageOptions(const LangOptions &LangOpts,
const LangOptions &ExistingLangOpts,
DiagnosticsEngine *Diags,
bool AllowCompatibleDifferences = true) {
-#define LANGOPT(Name, Bits, Default, Description) \
- if (ExistingLangOpts.Name != LangOpts.Name) { \
- if (Diags) \
- Diags->Report(diag::err_pch_langopt_mismatch) \
- << Description << LangOpts.Name << ExistingLangOpts.Name; \
- return true; \
+#define LANGOPT(Name, Bits, Default, Description) \
+ if (ExistingLangOpts.Name != LangOpts.Name) { \
+ if (Diags) { \
+ if (Bits == 1) \
+ Diags->Report(diag::err_pch_langopt_mismatch) \
+ << Description << LangOpts.Name << ExistingLangOpts.Name; \
+ else \
+ Diags->Report(diag::err_pch_langopt_value_mismatch) \
+ << Description; \
+ } \
+ return true; \
}
#define VALUE_LANGOPT(Name, Bits, Default, Description) \
@@ -654,6 +659,10 @@ static bool checkPreprocessorOptions(
SmallVector<StringRef, 4> ExistingMacroNames;
collectMacroDefinitions(ExistingPPOpts, ExistingMacros, &ExistingMacroNames);
+ // Use a line marker to enter the <command line> file, as the defines and
+ // undefines here will have come from the command line.
+ SuggestedPredefines += "# 1 \"<command line>\" 1\n";
+
for (unsigned I = 0, N = ExistingMacroNames.size(); I != N; ++I) {
// Dig out the macro definition in the existing preprocessor options.
StringRef MacroName = ExistingMacroNames[I];
@@ -713,6 +722,10 @@ static bool checkPreprocessorOptions(
}
return true;
}
+
+ // Leave the <command line> file and return to <built-in>.
+ SuggestedPredefines += "# 1 \"<built-in>\" 2\n";
+
if (Validation == OptionValidateStrictMatches) {
// If strict matches are requested, don't tolerate any extra defines in
// the AST file that are missing on the command line.
@@ -1579,8 +1592,13 @@ bool ASTReader::ReadSLocEntry(int ID) {
auto Buffer = ReadBuffer(SLocEntryCursor, Name);
if (!Buffer)
return true;
- SourceMgr.createFileID(std::move(Buffer), FileCharacter, ID,
- BaseOffset + Offset, IncludeLoc);
+ FileID FID = SourceMgr.createFileID(std::move(Buffer), FileCharacter, ID,
+ BaseOffset + Offset, IncludeLoc);
+ if (Record[3]) {
+ auto &FileInfo =
+ const_cast<SrcMgr::FileInfo &>(SourceMgr.getSLocEntry(FID).getFile());
+ FileInfo.setHasLineDirectives();
+ }
break;
}
@@ -1687,6 +1705,16 @@ Token ASTReader::ReadToken(ModuleFile &F, const RecordDataImpl &Record,
Tok.setAnnotationValue(static_cast<void *>(Info));
break;
}
+ case tok::annot_pragma_pack: {
+ auto *Info = new (PP.getPreprocessorAllocator()) Sema::PragmaPackInfo;
+ Info->Action = static_cast<Sema::PragmaMsStackAction>(Record[Idx++]);
+ auto SlotLabel = ReadString(Record, Idx);
+ Info->SlotLabel =
+ llvm::StringRef(SlotLabel).copy(PP.getPreprocessorAllocator());
+ Info->Alignment = ReadToken(F, Record, Idx);
+ Tok.setAnnotationValue(static_cast<void *>(Info));
+ break;
+ }
// Some annotation tokens do not use the PtrData field.
case tok::annot_pragma_openmp:
case tok::annot_pragma_openmp_end:
@@ -1847,6 +1875,21 @@ ASTReader::getGlobalPreprocessedEntityID(ModuleFile &M,
return LocalID + I->second;
}
+const FileEntry *HeaderFileInfoTrait::getFile(const internal_key_type &Key) {
+ FileManager &FileMgr = Reader.getFileManager();
+ if (!Key.Imported) {
+ if (auto File = FileMgr.getFile(Key.Filename))
+ return *File;
+ return nullptr;
+ }
+
+ std::string Resolved = std::string(Key.Filename);
+ Reader.ResolveImportedPath(M, Resolved);
+ if (auto File = FileMgr.getFile(Resolved))
+ return *File;
+ return nullptr;
+}
+
unsigned HeaderFileInfoTrait::ComputeHash(internal_key_ref ikey) {
return llvm::hash_combine(ikey.Size, ikey.ModTime);
}
@@ -1867,23 +1910,8 @@ bool HeaderFileInfoTrait::EqualKey(internal_key_ref a, internal_key_ref b) {
return true;
// Determine whether the actual files are equivalent.
- FileManager &FileMgr = Reader.getFileManager();
- auto GetFile = [&](const internal_key_type &Key) -> const FileEntry* {
- if (!Key.Imported) {
- if (auto File = FileMgr.getFile(Key.Filename))
- return *File;
- return nullptr;
- }
-
- std::string Resolved = std::string(Key.Filename);
- Reader.ResolveImportedPath(M, Resolved);
- if (auto File = FileMgr.getFile(Resolved))
- return *File;
- return nullptr;
- };
-
- const FileEntry *FEA = GetFile(a);
- const FileEntry *FEB = GetFile(b);
+ const FileEntry *FEA = getFile(a);
+ const FileEntry *FEB = getFile(b);
return FEA && FEA == FEB;
}
@@ -1912,6 +1940,14 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
const unsigned char *End = d + DataLen;
HeaderFileInfo HFI;
unsigned Flags = *d++;
+
+ bool Included = (Flags >> 6) & 0x01;
+ if (Included)
+ if (const FileEntry *FE = getFile(key))
+ // Not using \c Preprocessor::markIncluded(), since that would attempt to
+ // deserialize this header file info again.
+ Reader.getPreprocessor().getIncludedFiles().insert(FE);
+
// FIXME: Refactor with mergeHeaderFileInfo in HeaderSearch.cpp.
HFI.isImport |= (Flags >> 5) & 0x01;
HFI.isPragmaOnce |= (Flags >> 4) & 0x01;
@@ -1945,10 +1981,11 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
std::string Filename = std::string(key.Filename);
if (key.Imported)
Reader.ResolveImportedPath(M, Filename);
- // FIXME: NameAsWritten
- Module::Header H = {std::string(key.Filename), "",
- FileMgr.getOptionalFileRef(Filename)};
- ModMap.addHeader(Mod, H, HeaderRole, /*Imported*/true);
+ if (auto FE = FileMgr.getOptionalFileRef(Filename)) {
+ // FIXME: NameAsWritten
+ Module::Header H = {std::string(key.Filename), "", *FE};
+ ModMap.addHeader(Mod, H, HeaderRole, /*Imported=*/true);
+ }
HFI.isModuleHeader |= ModuleMap::isModular(HeaderRole);
}
@@ -2361,12 +2398,15 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
StringRef Filename = FI.Filename;
uint64_t StoredContentHash = FI.ContentHash;
+ // For standard C++ modules, we don't need to check the inputs.
+ bool SkipChecks = F.StandardCXXModule;
+
OptionalFileEntryRefDegradesToFileEntryPtr File = OptionalFileEntryRef(
expectedToOptional(FileMgr.getFileRef(Filename, /*OpenFile=*/false)));
// For an overridden file, create a virtual file with the stored
// size/timestamp.
- if ((Overridden || Transient) && !File)
+ if ((Overridden || Transient || SkipChecks) && !File)
File = FileMgr.getVirtualFileRef(Filename, StoredSize, StoredTime);
if (!File) {
@@ -2389,7 +2429,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
// PCH.
SourceManager &SM = getSourceManager();
// FIXME: Reject if the overrides are different.
- if ((!Overridden && !Transient) && SM.isFileOverridden(File)) {
+ if ((!Overridden && !Transient) && !SkipChecks && SM.isFileOverridden(File)) {
if (Complain)
Error(diag::err_fe_pch_file_overridden, Filename);
@@ -2448,7 +2488,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
};
bool IsOutOfDate = false;
- auto FileChange = HasInputFileChanged();
+ auto FileChange = SkipChecks ? Change{Change::None} : HasInputFileChanged();
// For an overridden file, there is nothing to validate.
if (!Overridden && FileChange.Kind != Change::None) {
if (Complain && !Diags.isDiagnosticInFlight()) {
@@ -2500,7 +2540,8 @@ void ASTReader::ResolveImportedPath(ModuleFile &M, std::string &Filename) {
}
void ASTReader::ResolveImportedPath(std::string &Filename, StringRef Prefix) {
- if (Filename.empty() || llvm::sys::path::is_absolute(Filename))
+ if (Filename.empty() || llvm::sys::path::is_absolute(Filename) ||
+ Filename == "<built-in>" || Filename == "<command line>")
return;
SmallString<128> Buffer;
@@ -2792,7 +2833,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
return VersionMismatch;
}
- bool hasErrors = Record[6];
+ bool hasErrors = Record[7];
if (hasErrors && !DisableValidation) {
// If requested by the caller and the module hasn't already been read
// or compiled, mark modules on error as out-of-date.
@@ -2816,7 +2857,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
if (F.RelocatablePCH)
F.BaseDirectory = isysroot.empty() ? "/" : isysroot;
- F.HasTimestamps = Record[5];
+ F.StandardCXXModule = Record[5];
+
+ F.HasTimestamps = Record[6];
const std::string &CurBranch = getClangFullRepositoryVersion();
StringRef ASTBranch = Blob;
@@ -2840,6 +2883,8 @@ ASTReader::ReadControlBlock(ModuleFile &F,
while (Idx < N) {
// Read information about the AST file.
ModuleKind ImportedKind = (ModuleKind)Record[Idx++];
+ // Whether we're importing a standard c++ module.
+ bool IsImportingStdCXXModule = Record[Idx++];
// The import location will be the local one for now; we will adjust
// all import locations of module imports after the global source
// location info are setup, in ReadAST.
@@ -2857,18 +2902,25 @@ ASTReader::ReadControlBlock(ModuleFile &F,
// For prebuilt and explicit modules first consult the file map for
// an override. Note that here we don't search prebuilt module
- // directories, only the explicit name to file mappings. Also, we will
- // still verify the size/signature making sure it is essentially the
- // same file but perhaps in a different location.
+ // directories if we're not importing standard c++ module, only the
+ // explicit name to file mappings. Also, we will still verify the
+ // size/signature making sure it is essentially the same file but
+ // perhaps in a different location.
if (ImportedKind == MK_PrebuiltModule || ImportedKind == MK_ExplicitModule)
ImportedFile = PP.getHeaderSearchInfo().getPrebuiltModuleFileName(
- ImportedName, /*FileMapOnly*/ true);
+ ImportedName, /*FileMapOnly*/ !IsImportingStdCXXModule);
+
+ if (ImportedFile.empty()) {
+ // It is deprecated for C++20 Named modules to use the implicitly
+ // paths.
+ if (IsImportingStdCXXModule)
+ Diag(clang::diag::warn_reading_std_cxx_module_by_implicit_paths)
+ << ImportedName;
- if (ImportedFile.empty())
// Use BaseDirectoryAsWritten to ensure we use the same path in the
// ModuleCache as when writing.
ImportedFile = ReadPath(BaseDirectoryAsWritten, Record, Idx);
- else
+ } else
SkipPath(Record, Idx);
// If our client can't cope with us being out of date, we can't cope with
@@ -2938,6 +2990,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
BaseDirectoryAsWritten = Blob;
assert(!F.ModuleName.empty() &&
"MODULE_DIRECTORY found before MODULE_NAME");
+ F.BaseDirectory = std::string(Blob);
+ if (!PP.getPreprocessorOpts().ModulesCheckRelocated)
+ break;
// If we've already loaded a module map file covering this module, we may
// have a better path for it (relative to the current build).
Module *M = PP.getHeaderSearchInfo().lookupModule(
@@ -2959,8 +3014,6 @@ ASTReader::ReadControlBlock(ModuleFile &F,
}
}
F.BaseDirectory = std::string(M->Directory->getName());
- } else {
- F.BaseDirectory = std::string(Blob);
}
break;
}
@@ -2984,22 +3037,6 @@ ASTReader::ReadControlBlock(ModuleFile &F,
}
}
-void ASTReader::readIncludedFiles(ModuleFile &F, StringRef Blob,
- Preprocessor &PP) {
- using namespace llvm::support;
-
- const unsigned char *D = (const unsigned char *)Blob.data();
- unsigned FileCount = endian::readNext<uint32_t, little, unaligned>(D);
-
- for (unsigned I = 0; I < FileCount; ++I) {
- size_t ID = endian::readNext<uint32_t, little, unaligned>(D);
- InputFileInfo IFI = getInputFileInfo(F, ID);
- if (llvm::ErrorOr<const FileEntry *> File =
- PP.getFileManager().getFile(IFI.Filename))
- PP.getIncludedFiles().insert(*File);
- }
-}
-
llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
unsigned ClientLoadCapabilities) {
BitstreamCursor &Stream = F.Stream;
@@ -3718,7 +3755,7 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
unsigned GlobalID = getGlobalSubmoduleID(F, Record[I++]);
SourceLocation Loc = ReadSourceLocation(F, Record, I);
if (GlobalID) {
- ImportedModules.push_back(ImportedSubmodule(GlobalID, Loc));
+ PendingImportedModules.push_back(ImportedSubmodule(GlobalID, Loc));
if (DeserializationListener)
DeserializationListener->ModuleImportRead(GlobalID, Loc);
}
@@ -3751,10 +3788,6 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
break;
}
- case PP_INCLUDED_FILES:
- readIncludedFiles(F, Blob, PP);
- break;
-
case LATE_PARSED_TEMPLATE:
LateParsedTemplates.emplace_back(
std::piecewise_construct, std::forward_as_tuple(&F),
@@ -3958,7 +3991,8 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
// usable header search context.
assert(!F.ModuleName.empty() &&
"MODULE_NAME should come before MODULE_MAP_FILE");
- if (F.Kind == MK_ImplicitModule && ModuleMgr.begin()->Kind != MK_MainFile) {
+ if (PP.getPreprocessorOpts().ModulesCheckRelocated &&
+ F.Kind == MK_ImplicitModule && ModuleMgr.begin()->Kind != MK_MainFile) {
// An implicitly-loaded module file should have its module listed in some
// module map file that we've already loaded.
Module *M =
@@ -4124,7 +4158,7 @@ void ASTReader::makeModuleVisible(Module *Mod,
auto HiddenNames = std::move(*Hidden);
HiddenNamesMap.erase(Hidden);
makeNamesVisible(HiddenNames.second, HiddenNames.first);
- assert(HiddenNamesMap.find(Mod) == HiddenNamesMap.end() &&
+ assert(!HiddenNamesMap.contains(Mod) &&
"making names visible added hidden names");
}
@@ -4342,27 +4376,56 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
if (F.OriginalSourceFileID.isValid())
F.OriginalSourceFileID = TranslateFileID(F, F.OriginalSourceFileID);
- // Preload all the pending interesting identifiers by marking them out of
- // date.
for (auto Offset : F.PreloadIdentifierOffsets) {
const unsigned char *Data = F.IdentifierTableData + Offset;
ASTIdentifierLookupTrait Trait(*this, F);
auto KeyDataLen = Trait.ReadKeyDataLength(Data);
auto Key = Trait.ReadKey(Data, KeyDataLen.first);
- auto &II = PP.getIdentifierTable().getOwn(Key);
- II.setOutOfDate(true);
+
+ IdentifierInfo *II;
+ if (!PP.getLangOpts().CPlusPlus) {
+ // Identifiers present in both the module file and the importing
+ // instance are marked out-of-date so that they can be deserialized
+ // on next use via ASTReader::updateOutOfDateIdentifier().
+ // Identifiers present in the module file but not in the importing
+ // instance are ignored for now, preventing growth of the identifier
+ // table. They will be deserialized on first use via ASTReader::get().
+ auto It = PP.getIdentifierTable().find(Key);
+ if (It == PP.getIdentifierTable().end())
+ continue;
+ II = It->second;
+ } else {
+ // With C++ modules, not many identifiers are considered interesting.
+ // All identifiers in the module file can be placed into the identifier
+ // table of the importing instance and marked as out-of-date. This makes
+ // ASTReader::get() a no-op, and deserialization will take place on
+ // first/next use via ASTReader::updateOutOfDateIdentifier().
+ II = &PP.getIdentifierTable().getOwn(Key);
+ }
+
+ II->setOutOfDate(true);
// Mark this identifier as being from an AST file so that we can track
// whether we need to serialize it.
- markIdentifierFromAST(*this, II);
+ markIdentifierFromAST(*this, *II);
// Associate the ID with the identifier so that the writer can reuse it.
auto ID = Trait.ReadIdentifierID(Data + KeyDataLen.first);
- SetIdentifierInfo(ID, &II);
+ SetIdentifierInfo(ID, II);
}
}
+ // Builtins and library builtins have already been initialized. Mark all
+ // identifiers as out-of-date, so that they are deserialized on first use.
+ if (Type == MK_PCH || Type == MK_Preamble || Type == MK_MainFile)
+ for (auto &Id : PP.getIdentifierTable())
+ Id.second->setOutOfDate(true);
+
+ // Mark selectors as out of date.
+ for (const auto &Sel : SelectorGeneration)
+ SelectorOutOfDate[Sel.first] = true;
+
// Setup the import locations and notify the module manager that we've
// committed to these module files.
for (ImportedModule &M : Loaded) {
@@ -4380,25 +4443,6 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
F.ImportLoc = TranslateSourceLocation(*M.ImportedBy, M.ImportLoc);
}
- if (!PP.getLangOpts().CPlusPlus ||
- (Type != MK_ImplicitModule && Type != MK_ExplicitModule &&
- Type != MK_PrebuiltModule)) {
- // Mark all of the identifiers in the identifier table as being out of date,
- // so that various accessors know to check the loaded modules when the
- // identifier is used.
- //
- // For C++ modules, we don't need information on many identifiers (just
- // those that provide macros or are poisoned), so we mark all of
- // the interesting ones via PreloadIdentifierOffsets.
- for (IdentifierTable::iterator Id = PP.getIdentifierTable().begin(),
- IdEnd = PP.getIdentifierTable().end();
- Id != IdEnd; ++Id)
- Id->second->setOutOfDate(true);
- }
- // Mark selectors as out of date.
- for (auto Sel : SelectorGeneration)
- SelectorOutOfDate[Sel.first] = true;
-
// Resolve any unresolved module exports.
for (unsigned I = 0, N = UnresolvedModuleRefs.size(); I != N; ++I) {
UnresolvedModuleRef &Unresolved = UnresolvedModuleRefs[I];
@@ -4435,8 +4479,8 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
UnresolvedModuleRefs.clear();
if (Imported)
- Imported->append(ImportedModules.begin(),
- ImportedModules.end());
+ Imported->append(PendingImportedModules.begin(),
+ PendingImportedModules.end());
// FIXME: How do we load the 'use'd modules? They may not be submodules.
// Might be unnecessary as use declarations are only used to build the
@@ -4473,18 +4517,16 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
}
}
- if (PP.getHeaderSearchInfo()
- .getHeaderSearchOpts()
- .ModulesValidateOncePerBuildSession) {
+ HeaderSearchOptions &HSOpts = PP.getHeaderSearchInfo().getHeaderSearchOpts();
+ if (HSOpts.ModulesValidateOncePerBuildSession) {
// Now we are certain that the module and all modules it depends on are
- // up to date. Create or update timestamp files for modules that are
- // located in the module cache (not for PCH files that could be anywhere
- // in the filesystem).
+ // up-to-date. For implicitly-built module files, ensure the corresponding
+ // timestamp files are up-to-date in this build session.
for (unsigned I = 0, N = Loaded.size(); I != N; ++I) {
ImportedModule &M = Loaded[I];
- if (M.Mod->Kind == MK_ImplicitModule) {
+ if (M.Mod->Kind == MK_ImplicitModule &&
+ M.Mod->InputFilesValidationTimestamp < HSOpts.BuildSessionTimestamp)
updateModuleTimestamp(*M.Mod);
- }
}
}
@@ -5040,7 +5082,7 @@ void ASTReader::InitializeContext() {
// Re-export any modules that were imported by a non-module AST file.
// FIXME: This does not make macro-only imports visible again.
- for (auto &Import : ImportedModules) {
+ for (auto &Import : PendingImportedModules) {
if (Module *Imported = getSubmodule(Import.ID)) {
makeModuleVisible(Imported, Module::AllVisible,
/*ImportLoc=*/Import.ImportLoc);
@@ -5050,6 +5092,10 @@ void ASTReader::InitializeContext() {
// nullptr here, we do the same later, in UpdateSema().
}
}
+
+ // Hand off these modules to Sema.
+ PendingImportedModulesSema.append(PendingImportedModules);
+ PendingImportedModules.clear();
}
void ASTReader::finalizeForWriting() {
@@ -5409,9 +5455,9 @@ bool ASTReader::readASTFileControlBlock(
unsigned Idx = 0, N = Record.size();
while (Idx < N) {
// Read information about the AST file.
- Idx +=
- 1 + 1 + 1 + 1 +
- ASTFileSignature::size; // Kind, ImportLoc, Size, ModTime, Signature
+
+ // Kind, StandardCXXModule, ImportLoc, Size, ModTime, Signature
+ Idx += 1 + 1 + 1 + 1 + 1 + ASTFileSignature::size;
std::string ModuleName = ReadString(Record, Idx);
std::string Filename = ReadString(Record, Idx);
ResolveImportedPath(Filename, ModuleDir);
@@ -5563,7 +5609,7 @@ llvm::Error ASTReader::ReadSubmoduleBlock(ModuleFile &F,
break;
case SUBMODULE_DEFINITION: {
- if (Record.size() < 12)
+ if (Record.size() < 13)
return llvm::createStringError(std::errc::illegal_byte_sequence,
"malformed module definition");
@@ -5572,6 +5618,7 @@ llvm::Error ASTReader::ReadSubmoduleBlock(ModuleFile &F,
SubmoduleID GlobalID = getGlobalSubmoduleID(F, Record[Idx++]);
SubmoduleID Parent = getGlobalSubmoduleID(F, Record[Idx++]);
Module::ModuleKind Kind = (Module::ModuleKind)Record[Idx++];
+ SourceLocation DefinitionLoc = ReadSourceLocation(F, Record[Idx++]);
bool IsFramework = Record[Idx++];
bool IsExplicit = Record[Idx++];
bool IsSystem = Record[Idx++];
@@ -5592,8 +5639,7 @@ llvm::Error ASTReader::ReadSubmoduleBlock(ModuleFile &F,
ModMap.findOrCreateModule(Name, ParentModule, IsFramework, IsExplicit)
.first;
- // FIXME: set the definition loc for CurrentModule, or call
- // ModMap.setInferredModuleAllowedBy()
+ // FIXME: Call ModMap.setInferredModuleAllowedBy()
SubmoduleID GlobalIndex = GlobalID - NUM_PREDEF_SUBMODULE_IDS;
if (GlobalIndex >= SubmodulesLoaded.size() ||
@@ -5622,6 +5668,7 @@ llvm::Error ASTReader::ReadSubmoduleBlock(ModuleFile &F,
}
CurrentModule->Kind = Kind;
+ CurrentModule->DefinitionLoc = DefinitionLoc;
CurrentModule->Signature = F.Signature;
CurrentModule->IsFromModuleFile = true;
CurrentModule->IsSystem = IsSystem || CurrentModule->IsSystem;
@@ -5634,6 +5681,12 @@ llvm::Error ASTReader::ReadSubmoduleBlock(ModuleFile &F,
if (DeserializationListener)
DeserializationListener->ModuleRead(GlobalID, CurrentModule);
+ // If we're loading a module before we initialize the sema, it implies
+ // we're performing eagerly loading.
+ if (!getSema() && CurrentModule->isModulePurview() &&
+ !getContext().getLangOpts().isCompilingModule())
+ Diag(clang::diag::warn_eagerly_load_for_standard_cplusplus_modules);
+
SubmodulesLoaded[GlobalIndex] = CurrentModule;
// Clear out data that will be replaced by what is in the module file.
@@ -5662,9 +5715,9 @@ llvm::Error ASTReader::ReadSubmoduleBlock(ModuleFile &F,
std::string Filename = std::string(Blob);
ResolveImportedPath(F, Filename);
if (auto Umbrella = PP.getFileManager().getOptionalFileRef(Filename)) {
- if (!CurrentModule->getUmbrellaHeader()) {
+ if (!CurrentModule->getUmbrellaHeaderAsWritten()) {
// FIXME: NameAsWritten
- ModMap.setUmbrellaHeader(CurrentModule, *Umbrella, Blob, "");
+ ModMap.setUmbrellaHeaderAsWritten(CurrentModule, *Umbrella, Blob, "");
}
// Note that it's too late at this point to return out of date if the
// name from the PCM doesn't match up with the one in the module map,
@@ -5699,10 +5752,11 @@ llvm::Error ASTReader::ReadSubmoduleBlock(ModuleFile &F,
// See comments in SUBMODULE_UMBRELLA_HEADER
std::string Dirname = std::string(Blob);
ResolveImportedPath(F, Dirname);
- if (auto Umbrella = PP.getFileManager().getDirectory(Dirname)) {
- if (!CurrentModule->getUmbrellaDir()) {
+ if (auto Umbrella =
+ PP.getFileManager().getOptionalDirectoryRef(Dirname)) {
+ if (!CurrentModule->getUmbrellaDirAsWritten()) {
// FIXME: NameAsWritten
- ModMap.setUmbrellaDir(CurrentModule, *Umbrella, Blob, "");
+ ModMap.setUmbrellaDirAsWritten(CurrentModule, *Umbrella, Blob, "");
}
}
break;
@@ -6930,6 +6984,10 @@ QualType ASTReader::GetType(TypeID ID) {
if (Index < NUM_PREDEF_TYPE_IDS) {
QualType T;
switch ((PredefinedTypeIDs)Index) {
+ case PREDEF_TYPE_LAST_ID:
+ // We should never use this one.
+ llvm_unreachable("Invalid predefined type");
+ break;
case PREDEF_TYPE_NULL_ID:
return QualType();
case PREDEF_TYPE_VOID_ID:
@@ -7178,6 +7236,11 @@ QualType ASTReader::GetType(TypeID ID) {
T = Context.SingletonId; \
break;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) \
+ case PREDEF_TYPE_##Id##_ID: \
+ T = Context.SingletonId; \
+ break;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
}
assert(!T.isNull() && "Unknown predefined type");
@@ -7346,6 +7409,7 @@ ASTReader::GetExternalCXXCtorInitializers(uint64_t Offset) {
return nullptr;
}
ReadingKindTracker ReadingKind(Read_Decl, *this);
+ Deserializing D(this);
Expected<unsigned> MaybeCode = Cursor.ReadCode();
if (!MaybeCode) {
@@ -7380,6 +7444,7 @@ CXXBaseSpecifier *ASTReader::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
return nullptr;
}
ReadingKindTracker ReadingKind(Read_Decl, *this);
+ Deserializing D(this);
Expected<unsigned> MaybeCode = Cursor.ReadCode();
if (!MaybeCode) {
@@ -7647,7 +7712,7 @@ void ASTReader::FindExternalLexicalDecls(
};
if (isa<TranslationUnitDecl>(DC)) {
- for (auto Lexical : TULexicalDecls)
+ for (const auto &Lexical : TULexicalDecls)
Visit(Lexical.first, Lexical.second);
} else {
auto I = LexicalDecls.find(DC);
@@ -8082,13 +8147,14 @@ void ASTReader::UpdateSema() {
}
// For non-modular AST files, restore visiblity of modules.
- for (auto &Import : ImportedModules) {
+ for (auto &Import : PendingImportedModulesSema) {
if (Import.ImportLoc.isInvalid())
continue;
if (Module *Imported = getSubmodule(Import.ID)) {
SemaObj->makeModuleVisible(Imported, Import.ImportLoc);
}
}
+ PendingImportedModulesSema.clear();
}
IdentifierInfo *ASTReader::get(StringRef Name) {
@@ -8531,6 +8597,7 @@ void ASTReader::ReadLateParsedTemplates(
auto LT = std::make_unique<LateParsedTemplate>();
LT->D = GetLocalDecl(*FMod, LateParsed[Idx++]);
+ LT->FPO = FPOptions::getFromOpaqueInt(LateParsed[Idx++]);
ModuleFile *F = getOwningModuleFile(LT->D);
assert(F && "No module");
@@ -8547,6 +8614,17 @@ void ASTReader::ReadLateParsedTemplates(
LateParsedTemplates.clear();
}
+void ASTReader::AssignedLambdaNumbering(const CXXRecordDecl *Lambda) {
+ if (Lambda->getLambdaContextDecl()) {
+ // Keep track of this lambda so it can be merged with another lambda that
+ // is loaded later.
+ LambdaDeclarationsForMerging.insert(
+ {{Lambda->getLambdaContextDecl()->getCanonicalDecl(),
+ Lambda->getLambdaIndexInContext()},
+ const_cast<CXXRecordDecl *>(Lambda)});
+ }
+}
+
void ASTReader::LoadSelector(Selector Sel) {
// It would be complicated to avoid reading the methods anyway. So don't.
ReadMethodPool(Sel);
@@ -9084,7 +9162,7 @@ llvm::APFloat ASTRecordReader::readAPFloat(const llvm::fltSemantics &Sem) {
}
// Read a string
-std::string ASTReader::ReadString(const RecordData &Record, unsigned &Idx) {
+std::string ASTReader::ReadString(const RecordDataImpl &Record, unsigned &Idx) {
unsigned Len = Record[Idx++];
std::string Result(Record.data() + Idx, Record.data() + Idx + Len);
Idx += Len;
@@ -9257,11 +9335,12 @@ void ASTReader::visitTopLevelModuleMaps(
}
void ASTReader::finishPendingActions() {
- while (!PendingIdentifierInfos.empty() || !PendingFunctionTypes.empty() ||
- !PendingIncompleteDeclChains.empty() || !PendingDeclChains.empty() ||
- !PendingMacroIDs.empty() || !PendingDeclContextInfos.empty() ||
- !PendingUpdateRecords.empty() ||
- !PendingObjCExtensionIvarRedeclarations.empty()) {
+ while (
+ !PendingIdentifierInfos.empty() || !PendingDeducedFunctionTypes.empty() ||
+ !PendingDeducedVarTypes.empty() || !PendingIncompleteDeclChains.empty() ||
+ !PendingDeclChains.empty() || !PendingMacroIDs.empty() ||
+ !PendingDeclContextInfos.empty() || !PendingUpdateRecords.empty() ||
+ !PendingObjCExtensionIvarRedeclarations.empty()) {
// If any identifiers with corresponding top-level declarations have
// been loaded, load those declarations now.
using TopLevelDeclsMap =
@@ -9279,9 +9358,9 @@ void ASTReader::finishPendingActions() {
// Load each function type that we deferred loading because it was a
// deduced type that might refer to a local type declared within itself.
- for (unsigned I = 0; I != PendingFunctionTypes.size(); ++I) {
- auto *FD = PendingFunctionTypes[I].first;
- FD->setType(GetType(PendingFunctionTypes[I].second));
+ for (unsigned I = 0; I != PendingDeducedFunctionTypes.size(); ++I) {
+ auto *FD = PendingDeducedFunctionTypes[I].first;
+ FD->setType(GetType(PendingDeducedFunctionTypes[I].second));
// If we gave a function a deduced return type, remember that we need to
// propagate that along the redeclaration chain.
@@ -9290,7 +9369,15 @@ void ASTReader::finishPendingActions() {
PendingDeducedTypeUpdates.insert(
{FD->getCanonicalDecl(), FD->getReturnType()});
}
- PendingFunctionTypes.clear();
+ PendingDeducedFunctionTypes.clear();
+
+ // Load each variable type that we deferred loading because it was a
+ // deduced type that might refer to a local type declared within itself.
+ for (unsigned I = 0; I != PendingDeducedVarTypes.size(); ++I) {
+ auto *VD = PendingDeducedVarTypes[I].first;
+ VD->setType(GetType(PendingDeducedVarTypes[I].second));
+ }
+ PendingDeducedVarTypes.clear();
// For each decl chain that we wanted to complete while deserializing, mark
// it as "still needs to be completed".
@@ -9466,7 +9553,6 @@ void ASTReader::finishPendingActions() {
continue;
// FIXME: Check for =delete/=default?
- // FIXME: Complain about ODR violations here?
const FunctionDecl *Defn = nullptr;
if (!getContext().getLangOpts().Modules || !FD->hasBody(Defn)) {
FD->setLazyBody(PB->second);
@@ -9497,6 +9583,12 @@ void ASTReader::finishPendingActions() {
}
PendingBodies.clear();
+ // Inform any classes that had members added that they now have more members.
+ for (auto [RD, MD] : PendingAddedClassMembers) {
+ RD->addedMember(MD);
+ }
+ PendingAddedClassMembers.clear();
+
// Do some cleanup.
for (auto *ND : PendingMergedDefinitionsToDeduplicate)
getContext().deduplicateMergedDefinitonsFor(ND);
@@ -9673,9 +9765,6 @@ void ASTReader::diagnoseOdrViolations() {
ObjCProtocolOdrMergeFailures.empty())
return;
- // Ensure we don't accidentally recursively enter deserialization while
- // we're producing our diagnostics.
- Deserializing RecursionGuard(this);
ODRDiagsEmitter DiagsEmitter(Diags, getContext(),
getPreprocessor().getLangOpts());
@@ -10275,6 +10364,12 @@ OMPClause *OMPClauseReader::readClause() {
case llvm::omp::OMPC_ompx_dyn_cgroup_mem:
C = new (Context) OMPXDynCGroupMemClause();
break;
+ case llvm::omp::OMPC_doacross: {
+ unsigned NumVars = Record.readInt();
+ unsigned NumLoops = Record.readInt();
+ C = OMPDoacrossClause::CreateEmpty(Context, NumVars, NumLoops);
+ break;
+ }
#define OMP_CLAUSE_NO_CLASS(Enum, Str) \
case llvm::omp::Enum: \
break;
@@ -11351,6 +11446,22 @@ void OMPClauseReader::VisitOMPXDynCGroupMemClause(OMPXDynCGroupMemClause *C) {
C->setLParenLoc(Record.readSourceLocation());
}
+void OMPClauseReader::VisitOMPDoacrossClause(OMPDoacrossClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setDependenceType(
+ static_cast<OpenMPDoacrossClauseModifier>(Record.readInt()));
+ C->setDependenceLoc(Record.readSourceLocation());
+ C->setColonLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I)
+ C->setLoopData(I, Record.readSubExpr());
+}
+
OMPTraitInfo *ASTRecordReader::readOMPTraitInfo() {
OMPTraitInfo &TI = getContext().getNewOMPTraitInfo();
TI.Sets.resize(readUInt32());
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
index 8cb513eff13e..10c92f8d2149 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -14,6 +14,7 @@
#include "ASTCommon.h"
#include "ASTReaderInternals.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTStructuralEquivalence.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/Decl.h"
@@ -88,7 +89,7 @@ namespace clang {
using RecordData = ASTReader::RecordData;
TypeID DeferredTypeID = 0;
- unsigned AnonymousDeclNumber;
+ unsigned AnonymousDeclNumber = 0;
GlobalDeclID NamedDeclForTagDecl = 0;
IdentifierInfo *TypedefNameForLinkage = nullptr;
@@ -157,9 +158,12 @@ namespace clang {
return Record.getSubmodule(readSubmoduleID());
}
- void ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update);
+ void ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update,
+ Decl *LambdaContext = nullptr,
+ unsigned IndexInLambdaContext = 0);
void ReadCXXDefinitionData(struct CXXRecordDecl::DefinitionData &Data,
- const CXXRecordDecl *D);
+ const CXXRecordDecl *D, Decl *LambdaContext,
+ unsigned IndexInLambdaContext);
void MergeDefinitionData(CXXRecordDecl *D,
struct CXXRecordDecl::DefinitionData &&NewDD);
void ReadObjCDefinitionData(struct ObjCInterfaceDecl::DefinitionData &Data);
@@ -376,6 +380,7 @@ namespace clang {
void VisitTemplateParamObjectDecl(TemplateParamObjectDecl *D);
void VisitIndirectFieldDecl(IndirectFieldDecl *FD);
RedeclarableResult VisitVarDeclImpl(VarDecl *D);
+ void ReadVarDeclInit(VarDecl *VD);
void VisitVarDecl(VarDecl *VD) { VisitVarDeclImpl(VD); }
void VisitImplicitParamDecl(ImplicitParamDecl *PD);
void VisitParmVarDecl(ParmVarDecl *PD);
@@ -421,6 +426,9 @@ namespace clang {
template <typename T>
void mergeRedeclarable(Redeclarable<T> *D, RedeclarableResult &Redecl);
+ void mergeLambda(CXXRecordDecl *D, RedeclarableResult &Redecl,
+ Decl *Context, unsigned Number);
+
void mergeRedeclarableTemplate(RedeclarableTemplateDecl *D,
RedeclarableResult &Redecl);
@@ -468,9 +476,8 @@ namespace {
/// Iterator over the redeclarations of a declaration that have already
/// been merged into the same redeclaration chain.
-template<typename DeclT>
-class MergedRedeclIterator {
- DeclT *Start;
+template <typename DeclT> class MergedRedeclIterator {
+ DeclT *Start = nullptr;
DeclT *Canonical = nullptr;
DeclT *Current = nullptr;
@@ -558,11 +565,14 @@ void ASTDeclReader::Visit(Decl *D) {
ID->TypeForDecl = Reader.GetType(DeferredTypeID).getTypePtrOrNull();
} else if (auto *FD = dyn_cast<FunctionDecl>(D)) {
// FunctionDecl's body was written last after all other Stmts/Exprs.
- // We only read it if FD doesn't already have a body (e.g., from another
- // module).
- // FIXME: Can we diagnose ODR violations somehow?
if (Record.readInt())
ReadFunctionDefinition(FD);
+ } else if (auto *VD = dyn_cast<VarDecl>(D)) {
+ ReadVarDeclInit(VD);
+ } else if (auto *FD = dyn_cast<FieldDecl>(D)) {
+ if (FD->hasInClassInitializer() && Record.readInt()) {
+ FD->setLazyInClassInitializer(LazyDeclStmtPtr(GetCurrentCursorOffset()));
+ }
}
}
@@ -860,10 +870,10 @@ void ASTDeclReader::VisitRecordDecl(RecordDecl *RD) {
void ASTDeclReader::VisitValueDecl(ValueDecl *VD) {
VisitNamedDecl(VD);
- // For function declarations, defer reading the type in case the function has
- // a deduced return type that references an entity declared within the
- // function.
- if (isa<FunctionDecl>(VD))
+ // For function or variable declarations, defer reading the type in case the
+ // declaration has a deduced type that references an entity declared within
+ // the function definition or variable initializer.
+ if (isa<FunctionDecl, VarDecl>(VD))
DeferredTypeID = Record.getGlobalTypeID(Record.readInt());
else
VD->setType(Record.readType());
@@ -1025,7 +1035,7 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
// We'll set up the real type in Visit, once we've finished loading the
// function.
FD->setType(FD->getTypeSourceInfo()->getType());
- Reader.PendingFunctionTypes.push_back({FD, DeferredTypeID});
+ Reader.PendingDeducedFunctionTypes.push_back({FD, DeferredTypeID});
} else {
FD->setType(Reader.GetType(DeferredTypeID));
}
@@ -1491,15 +1501,13 @@ void ASTDeclReader::VisitFieldDecl(FieldDecl *FD) {
VisitDeclaratorDecl(FD);
FD->Mutable = Record.readInt();
- if (auto ISK = static_cast<FieldDecl::InitStorageKind>(Record.readInt())) {
- FD->InitStorage.setInt(ISK);
- FD->InitStorage.setPointer(ISK == FieldDecl::ISK_CapturedVLAType
- ? Record.readType().getAsOpaquePtr()
- : Record.readExpr());
- }
-
- if (auto *BW = Record.readExpr())
- FD->setBitWidth(BW);
+ unsigned Bits = Record.readInt();
+ FD->StorageKind = Bits >> 1;
+ if (FD->StorageKind == FieldDecl::ISK_CapturedVLAType)
+ FD->CapturedVLAType =
+ cast<VariableArrayType>(Record.readType().getTypePtr());
+ else if (Bits & 1)
+ FD->setBitWidth(Record.readExpr());
if (!FD->getDeclName()) {
if (auto *Tmpl = readDeclAs<FieldDecl>())
@@ -1570,6 +1578,7 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
VD->VarDeclBits.TSCSpec = Record.readInt();
VD->VarDeclBits.InitStyle = Record.readInt();
VD->VarDeclBits.ARCPseudoStrong = Record.readInt();
+ bool HasDeducedType = false;
if (!isa<ParmVarDecl>(VD)) {
VD->NonParmVarDeclBits.IsThisDeclarationADemotedDefinition =
Record.readInt();
@@ -1584,7 +1593,18 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
VD->NonParmVarDeclBits.PreviousDeclInSameBlockScope = Record.readInt();
VD->NonParmVarDeclBits.ImplicitParamKind = Record.readInt();
VD->NonParmVarDeclBits.EscapingByref = Record.readInt();
+ HasDeducedType = Record.readInt();
}
+
+ // If this variable has a deduced type, defer reading that type until we are
+ // done deserializing this variable, because the type might refer back to the
+ // variable.
+ if (HasDeducedType)
+ Reader.PendingDeducedVarTypes.push_back({VD, DeferredTypeID});
+ else
+ VD->setType(Reader.GetType(DeferredTypeID));
+ DeferredTypeID = 0;
+
auto VarLinkage = Linkage(Record.readInt());
VD->setCachedLinkage(VarLinkage);
@@ -1593,22 +1613,13 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
VD->getLexicalDeclContext()->isFunctionOrMethod())
VD->setLocalExternDecl();
- if (uint64_t Val = Record.readInt()) {
- VD->setInit(Record.readExpr());
- if (Val != 1) {
- EvaluatedStmt *Eval = VD->ensureEvaluatedStmt();
- Eval->HasConstantInitialization = (Val & 2) != 0;
- Eval->HasConstantDestruction = (Val & 4) != 0;
- }
- }
-
- if (VD->hasAttr<BlocksAttr>() && VD->getType()->getAsCXXRecordDecl()) {
+ if (VD->hasAttr<BlocksAttr>()) {
Expr *CopyExpr = Record.readExpr();
if (CopyExpr)
Reader.getContext().setBlockVarCopyInit(VD, CopyExpr, Record.readInt());
}
- if (VD->getStorageDuration() == SD_Static && Record.readInt()) {
+ if (Record.readInt()) {
Reader.DefinitionSource[VD] =
Loc.F->Kind == ModuleKind::MK_MainFile ||
Reader.getContext().getLangOpts().BuildingPCHWithObjectFile;
@@ -1642,6 +1653,25 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
return Redecl;
}
+void ASTDeclReader::ReadVarDeclInit(VarDecl *VD) {
+ if (uint64_t Val = Record.readInt()) {
+ EvaluatedStmt *Eval = VD->ensureEvaluatedStmt();
+ Eval->HasConstantInitialization = (Val & 2) != 0;
+ Eval->HasConstantDestruction = (Val & 4) != 0;
+ Eval->WasEvaluated = (Val & 8) != 0;
+ if (Eval->WasEvaluated) {
+ Eval->Evaluated = Record.readAPValue();
+ if (Eval->Evaluated.needsCleanup())
+ Reader.getContext().addDestruction(&Eval->Evaluated);
+ }
+
+ // Store the offset of the initializer. Don't deserialize it yet: it might
+ // not be needed, and might refer back to the variable, for example if it
+ // contains a lambda.
+ Eval->Value = GetCurrentCursorOffset();
+ }
+}
+
void ASTDeclReader::VisitImplicitParamDecl(ImplicitParamDecl *PD) {
VisitVarDecl(PD);
}
@@ -1893,10 +1923,10 @@ void ASTDeclReader::VisitUnresolvedUsingIfExistsDecl(
}
void ASTDeclReader::ReadCXXDefinitionData(
- struct CXXRecordDecl::DefinitionData &Data, const CXXRecordDecl *D) {
- #define FIELD(Name, Width, Merge) \
- Data.Name = Record.readInt();
- #include "clang/AST/CXXRecordDeclDefinitionBits.def"
+ struct CXXRecordDecl::DefinitionData &Data, const CXXRecordDecl *D,
+ Decl *LambdaContext, unsigned IndexInLambdaContext) {
+#define FIELD(Name, Width, Merge) Data.Name = Record.readInt();
+#include "clang/AST/CXXRecordDeclDefinitionBits.def"
// Note: the caller has deserialized the IsLambda bit already.
Data.ODRHash = Record.readInt();
@@ -1908,21 +1938,26 @@ void ASTDeclReader::ReadCXXDefinitionData(
Reader.getContext().getLangOpts().BuildingPCHWithObjectFile;
}
- Data.NumBases = Record.readInt();
- if (Data.NumBases)
- Data.Bases = ReadGlobalOffset();
- Data.NumVBases = Record.readInt();
- if (Data.NumVBases)
- Data.VBases = ReadGlobalOffset();
-
Record.readUnresolvedSet(Data.Conversions);
Data.ComputedVisibleConversions = Record.readInt();
if (Data.ComputedVisibleConversions)
Record.readUnresolvedSet(Data.VisibleConversions);
assert(Data.Definition && "Data.Definition should be already set!");
- Data.FirstFriend = readDeclID();
- if (Data.IsLambda) {
+ if (!Data.IsLambda) {
+ assert(!LambdaContext && !IndexInLambdaContext &&
+ "given lambda context for non-lambda");
+
+ Data.NumBases = Record.readInt();
+ if (Data.NumBases)
+ Data.Bases = ReadGlobalOffset();
+
+ Data.NumVBases = Record.readInt();
+ if (Data.NumVBases)
+ Data.VBases = ReadGlobalOffset();
+
+ Data.FirstFriend = readDeclID();
+ } else {
using Capture = LambdaCapture;
auto &Lambda = static_cast<CXXRecordDecl::LambdaDefinitionData &>(Data);
@@ -1933,8 +1968,10 @@ void ASTDeclReader::ReadCXXDefinitionData(
Lambda.NumExplicitCaptures = Record.readInt();
Lambda.HasKnownInternalLinkage = Record.readInt();
Lambda.ManglingNumber = Record.readInt();
- D->setDeviceLambdaManglingNumber(Record.readInt());
- Lambda.ContextDecl = readDeclID();
+ if (unsigned DeviceManglingNumber = Record.readInt())
+ Reader.getContext().DeviceLambdaManglingNumbers[D] = DeviceManglingNumber;
+ Lambda.IndexInContext = IndexInLambdaContext;
+ Lambda.ContextDecl = LambdaContext;
Capture *ToCapture = nullptr;
if (Lambda.NumCaptures) {
ToCapture = (Capture *)Reader.getContext().Allocate(sizeof(Capture) *
@@ -1976,7 +2013,7 @@ void ASTDeclReader::MergeDefinitionData(
Reader.PendingDefinitions.erase(MergeDD.Definition);
MergeDD.Definition->setCompleteDefinition(false);
Reader.mergeDefinitionVisibility(DD.Definition, MergeDD.Definition);
- assert(Reader.Lookups.find(MergeDD.Definition) == Reader.Lookups.end() &&
+ assert(!Reader.Lookups.contains(MergeDD.Definition) &&
"already loaded pending lookups for merged definition");
}
@@ -2055,13 +2092,17 @@ void ASTDeclReader::MergeDefinitionData(
{MergeDD.Definition, &MergeDD});
}
-void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update) {
+void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update,
+ Decl *LambdaContext,
+ unsigned IndexInLambdaContext) {
struct CXXRecordDecl::DefinitionData *DD;
ASTContext &C = Reader.getContext();
// Determine whether this is a lambda closure type, so that we can
// allocate the appropriate DefinitionData structure.
bool IsLambda = Record.readInt();
+ assert(!(IsLambda && Update) &&
+ "lambda definition should not be added by update record");
if (IsLambda)
DD = new (C) CXXRecordDecl::LambdaDefinitionData(
D, nullptr, CXXRecordDecl::LDK_Unknown, false, LCD_None);
@@ -2075,7 +2116,7 @@ void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update) {
if (!Canon->DefinitionData)
Canon->DefinitionData = DD;
D->DefinitionData = Canon->DefinitionData;
- ReadCXXDefinitionData(*DD, D);
+ ReadCXXDefinitionData(*DD, D, LambdaContext, IndexInLambdaContext);
// We might already have a different definition for this record. This can
// happen either because we're reading an update record, or because we've
@@ -2102,8 +2143,15 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
ASTContext &C = Reader.getContext();
enum CXXRecKind {
- CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
+ CXXRecNotTemplate = 0,
+ CXXRecTemplate,
+ CXXRecMemberSpecialization,
+ CXXLambda
};
+
+ Decl *LambdaContext = nullptr;
+ unsigned IndexInLambdaContext = 0;
+
switch ((CXXRecKind)Record.readInt()) {
case CXXRecNotTemplate:
// Merged when we merge the folding set entry in the primary template.
@@ -2135,11 +2183,19 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
mergeRedeclarable(D, Redecl);
break;
}
+ case CXXLambda: {
+ LambdaContext = readDecl();
+ if (LambdaContext)
+ IndexInLambdaContext = Record.readInt();
+ mergeLambda(D, Redecl, LambdaContext, IndexInLambdaContext);
+ break;
+ }
}
bool WasDefinition = Record.readInt();
if (WasDefinition)
- ReadCXXRecordDefinition(D, /*Update*/false);
+ ReadCXXRecordDefinition(D, /*Update=*/false, LambdaContext,
+ IndexInLambdaContext);
else
// Propagate DefinitionData pointer from the canonical declaration.
D->DefinitionData = D->getCanonicalDecl()->DefinitionData;
@@ -2162,7 +2218,8 @@ void ASTDeclReader::VisitCXXDeductionGuideDecl(CXXDeductionGuideDecl *D) {
D->setExplicitSpecifier(Record.readExplicitSpec());
D->Ctor = readDeclAs<CXXConstructorDecl>();
VisitFunctionDecl(D);
- D->setIsCopyDeductionCandidate(Record.readInt());
+ D->setDeductionCandidateKind(
+ static_cast<DeductionCandidate>(Record.readInt()));
}
void ASTDeclReader::VisitCXXMethodDecl(CXXMethodDecl *D) {
@@ -2741,6 +2798,41 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase,
mergeRedeclarable(D, Existing, Redecl);
}
+/// Attempt to merge D with a previous declaration of the same lambda, which is
+/// found by its index within its context declaration, if it has one.
+///
+/// We can't look up lambdas in their enclosing lexical or semantic context in
+/// general, because for lambdas in variables, both of those might be a
+/// namespace or the translation unit.
+void ASTDeclReader::mergeLambda(CXXRecordDecl *D, RedeclarableResult &Redecl,
+ Decl *Context, unsigned IndexInContext) {
+ // If we don't have a mangling context, treat this like any other
+ // declaration.
+ if (!Context)
+ return mergeRedeclarable(D, Redecl);
+
+ // If modules are not available, there is no reason to perform this merge.
+ if (!Reader.getContext().getLangOpts().Modules)
+ return;
+
+ // If we're not the canonical declaration, we don't need to merge.
+ if (!D->isFirstDecl())
+ return;
+
+ if (auto *Existing = Redecl.getKnownMergeTarget())
+ // We already know of an existing declaration we should merge with.
+ mergeRedeclarable(D, cast<TagDecl>(Existing), Redecl);
+
+ // Look up this lambda to see if we've seen it before. If so, merge with the
+ // one we already loaded.
+ NamedDecl *&Slot = Reader.LambdaDeclarationsForMerging[{
+ Context->getCanonicalDecl(), IndexInContext}];
+ if (Slot)
+ mergeRedeclarable(D, cast<TagDecl>(Slot), Redecl);
+ else
+ Slot = D;
+}
+
void ASTDeclReader::mergeRedeclarableTemplate(RedeclarableTemplateDecl *D,
RedeclarableResult &Redecl) {
mergeRedeclarable(D, Redecl);
@@ -3006,10 +3098,15 @@ Attr *ASTRecordReader::readAttr() {
unsigned ParsedKind = Record.readInt();
unsigned Syntax = Record.readInt();
unsigned SpellingIndex = Record.readInt();
+ bool IsAlignas = (ParsedKind == AttributeCommonInfo::AT_Aligned &&
+ Syntax == AttributeCommonInfo::AS_Keyword &&
+ SpellingIndex == AlignedAttr::Keyword_alignas);
+ bool IsRegularKeywordAttribute = Record.readBool();
AttributeCommonInfo Info(AttrName, ScopeName, AttrRange, ScopeLoc,
AttributeCommonInfo::Kind(ParsedKind),
- AttributeCommonInfo::Syntax(Syntax), SpellingIndex);
+ {AttributeCommonInfo::Syntax(Syntax), SpellingIndex,
+ IsAlignas, IsRegularKeywordAttribute});
#include "clang/Serialization/AttrPCHRead.inc"
@@ -4181,23 +4278,22 @@ namespace {
// Check for duplicate categories.
if (Cat->getDeclName()) {
ObjCCategoryDecl *&Existing = NameCategoryMap[Cat->getDeclName()];
- if (Existing &&
- Reader.getOwningModuleFile(Existing)
- != Reader.getOwningModuleFile(Cat)) {
- // FIXME: We should not warn for duplicates in diamond:
- //
- // MT //
- // / \ //
- // ML MR //
- // \ / //
- // MB //
- //
- // If there are duplicates in ML/MR, there will be warning when
- // creating MB *and* when importing MB. We should not warn when
- // importing.
- Reader.Diag(Cat->getLocation(), diag::warn_dup_category_def)
- << Interface->getDeclName() << Cat->getDeclName();
- Reader.Diag(Existing->getLocation(), diag::note_previous_definition);
+ if (Existing && Reader.getOwningModuleFile(Existing) !=
+ Reader.getOwningModuleFile(Cat)) {
+ llvm::DenseSet<std::pair<Decl *, Decl *>> NonEquivalentDecls;
+ StructuralEquivalenceContext Ctx(
+ Cat->getASTContext(), Existing->getASTContext(),
+ NonEquivalentDecls, StructuralEquivalenceKind::Default,
+ /*StrictTypeSpelling =*/false,
+ /*Complain =*/false,
+ /*ErrorOnTagTypeMismatch =*/true);
+ if (!Ctx.IsEquivalent(Cat, Existing)) {
+ // Warn only if the categories with the same name are different.
+ Reader.Diag(Cat->getLocation(), diag::warn_dup_category_def)
+ << Interface->getDeclName() << Cat->getDeclName();
+ Reader.Diag(Existing->getLocation(),
+ diag::note_previous_definition);
+ }
} else if (!Existing) {
// Record this category.
Existing = Cat;
@@ -4306,13 +4402,9 @@ void ASTDeclReader::UpdateDecl(Decl *D,
switch ((DeclUpdateKind)Record.readInt()) {
case UPD_CXX_ADDED_IMPLICIT_MEMBER: {
auto *RD = cast<CXXRecordDecl>(D);
- // FIXME: If we also have an update record for instantiating the
- // definition of D, we need that to happen before we get here.
Decl *MD = Record.readDecl();
assert(MD && "couldn't read decl from update record");
- // FIXME: We should call addHiddenDecl instead, to add the member
- // to its DeclContext.
- RD->addedMember(MD);
+ Reader.PendingAddedClassMembers.push_back({RD, MD});
break;
}
@@ -4340,15 +4432,7 @@ void ASTDeclReader::UpdateDecl(Decl *D,
auto *VD = cast<VarDecl>(D);
VD->NonParmVarDeclBits.IsInline = Record.readInt();
VD->NonParmVarDeclBits.IsInlineSpecified = Record.readInt();
- uint64_t Val = Record.readInt();
- if (Val && !VD->getInit()) {
- VD->setInit(Record.readExpr());
- if (Val != 1) {
- EvaluatedStmt *Eval = VD->ensureEvaluatedStmt();
- Eval->HasConstantInitialization = (Val & 2) != 0;
- Eval->HasConstantDestruction = (Val & 4) != 0;
- }
- }
+ ReadVarDeclInit(VD);
break;
}
@@ -4391,7 +4475,7 @@ void ASTDeclReader::UpdateDecl(Decl *D,
// Only apply the update if the field still has an uninstantiated
// default member initializer.
- if (FD->hasInClassInitializer() && !FD->getInClassInitializer()) {
+ if (FD->hasInClassInitializer() && !FD->hasNonNullInClassInitializer()) {
if (DefaultInit)
FD->setInClassInitializer(DefaultInit);
else
@@ -4545,9 +4629,8 @@ void ASTDeclReader::UpdateDecl(Decl *D,
break;
case UPD_DECL_MARKED_OPENMP_THREADPRIVATE:
- D->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
- Reader.getContext(), readSourceRange(),
- AttributeCommonInfo::AS_Pragma));
+ D->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(Reader.getContext(),
+ readSourceRange()));
break;
case UPD_DECL_MARKED_OPENMP_ALLOCATE: {
@@ -4557,8 +4640,7 @@ void ASTDeclReader::UpdateDecl(Decl *D,
Expr *Alignment = Record.readExpr();
SourceRange SR = readSourceRange();
D->addAttr(OMPAllocateDeclAttr::CreateImplicit(
- Reader.getContext(), AllocatorKind, Allocator, Alignment, SR,
- AttributeCommonInfo::AS_Pragma));
+ Reader.getContext(), AllocatorKind, Allocator, Alignment, SR));
break;
}
@@ -4579,7 +4661,7 @@ void ASTDeclReader::UpdateDecl(Decl *D,
unsigned Level = Record.readInt();
D->addAttr(OMPDeclareTargetDeclAttr::CreateImplicit(
Reader.getContext(), MapType, DevType, IndirectE, Indirect, Level,
- readSourceRange(), AttributeCommonInfo::AS_Pragma));
+ readSourceRange()));
break;
}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h b/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h
index 4a4cfcce156d..b906cc6c58a2 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h
@@ -276,6 +276,9 @@ public:
static internal_key_type ReadKey(const unsigned char *d, unsigned);
data_type ReadData(internal_key_ref,const unsigned char *d, unsigned DataLen);
+
+private:
+ const FileEntry *getFile(const internal_key_type &Key);
};
/// The on-disk hash table used for known header files.
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
index 46d653c7f940..96307c35ad32 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -115,10 +115,6 @@ namespace clang {
TemplateArgumentLoc *ArgsLocArray,
unsigned NumTemplateArgs);
- /// Read and initialize a ExplicitTemplateArgumentList structure.
- void ReadExplicitTemplateArgumentList(ASTTemplateArgumentListInfo &ArgList,
- unsigned NumTemplateArgs);
-
void VisitStmt(Stmt *S);
#define STMT(Type, Base) \
void Visit##Type(Type *);
@@ -228,7 +224,7 @@ void ASTStmtReader::VisitIfStmt(IfStmt *S) {
if (HasElse)
S->setElse(Record.readSubStmt());
if (HasVar)
- S->setConditionVariable(Record.getContext(), readDeclAs<VarDecl>());
+ S->setConditionVariableDeclStmt(cast<DeclStmt>(Record.readSubStmt()));
if (HasInit)
S->setInit(Record.readSubStmt());
@@ -253,7 +249,7 @@ void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
if (HasInit)
S->setInit(Record.readSubStmt());
if (HasVar)
- S->setConditionVariable(Record.getContext(), readDeclAs<VarDecl>());
+ S->setConditionVariableDeclStmt(cast<DeclStmt>(Record.readSubStmt()));
S->setSwitchLoc(readSourceLocation());
S->setLParenLoc(readSourceLocation());
@@ -279,7 +275,7 @@ void ASTStmtReader::VisitWhileStmt(WhileStmt *S) {
S->setCond(Record.readSubExpr());
S->setBody(Record.readSubStmt());
if (HasVar)
- S->setConditionVariable(Record.getContext(), readDeclAs<VarDecl>());
+ S->setConditionVariableDeclStmt(cast<DeclStmt>(Record.readSubStmt()));
S->setWhileLoc(readSourceLocation());
S->setLParenLoc(readSourceLocation());
@@ -299,7 +295,7 @@ void ASTStmtReader::VisitForStmt(ForStmt *S) {
VisitStmt(S);
S->setInit(Record.readSubStmt());
S->setCond(Record.readSubExpr());
- S->setConditionVariable(Record.getContext(), readDeclAs<VarDecl>());
+ S->setConditionVariableDeclStmt(cast_or_null<DeclStmt>(Record.readSubStmt()));
S->setInc(Record.readSubExpr());
S->setBody(Record.readSubStmt());
S->setForLoc(readSourceLocation());
@@ -400,8 +396,10 @@ void ASTStmtReader::VisitGCCAsmStmt(GCCAsmStmt *S) {
Clobbers.push_back(cast_or_null<StringLiteral>(Record.readSubStmt()));
// Labels
- for (unsigned I = 0, N = NumLabels; I != N; ++I)
+ for (unsigned I = 0, N = NumLabels; I != N; ++I) {
+ Names.push_back(Record.readIdentifier());
Exprs.push_back(Record.readSubStmt());
+ }
S->setOutputsAndInputsAndClobbers(Record.getContext(),
Names.data(), Constraints.data(),
@@ -582,6 +580,7 @@ void ASTStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
bool HasFunctionName = Record.readInt();
E->PredefinedExprBits.HasFunctionName = HasFunctionName;
E->PredefinedExprBits.Kind = Record.readInt();
+ E->PredefinedExprBits.IsTransparent = Record.readInt();
E->setLocation(readSourceLocation());
if (HasFunctionName)
E->setFunctionName(cast<StringLiteral>(Record.readSubExpr()));
@@ -596,6 +595,7 @@ void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
E->DeclRefExprBits.HadMultipleCandidates = Record.readInt();
E->DeclRefExprBits.RefersToEnclosingVariableOrCapture = Record.readInt();
E->DeclRefExprBits.NonOdrUseReason = Record.readInt();
+ E->DeclRefExprBits.IsImmediateEscalating = Record.readInt();
unsigned NumTemplateArgs = 0;
if (E->hasTemplateKWAndArgsInfo())
NumTemplateArgs = Record.readInt();
@@ -1216,9 +1216,9 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
auto *Field = readDeclAs<FieldDecl>();
SourceLocation DotLoc = readSourceLocation();
SourceLocation FieldLoc = readSourceLocation();
- Designators.push_back(Designator(Field->getIdentifier(), DotLoc,
- FieldLoc));
- Designators.back().setField(Field);
+ Designators.push_back(Designator::CreateFieldDesignator(
+ Field->getIdentifier(), DotLoc, FieldLoc));
+ Designators.back().setFieldDecl(Field);
break;
}
@@ -1226,7 +1226,8 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
const IdentifierInfo *Name = Record.readIdentifier();
SourceLocation DotLoc = readSourceLocation();
SourceLocation FieldLoc = readSourceLocation();
- Designators.push_back(Designator(Name, DotLoc, FieldLoc));
+ Designators.push_back(Designator::CreateFieldDesignator(Name, DotLoc,
+ FieldLoc));
break;
}
@@ -1234,7 +1235,9 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
unsigned Index = Record.readInt();
SourceLocation LBracketLoc = readSourceLocation();
SourceLocation RBracketLoc = readSourceLocation();
- Designators.push_back(Designator(Index, LBracketLoc, RBracketLoc));
+ Designators.push_back(Designator::CreateArrayDesignator(Index,
+ LBracketLoc,
+ RBracketLoc));
break;
}
@@ -1243,8 +1246,8 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
SourceLocation LBracketLoc = readSourceLocation();
SourceLocation EllipsisLoc = readSourceLocation();
SourceLocation RBracketLoc = readSourceLocation();
- Designators.push_back(Designator(Index, LBracketLoc, EllipsisLoc,
- RBracketLoc));
+ Designators.push_back(Designator::CreateArrayRangeDesignator(
+ Index, LBracketLoc, EllipsisLoc, RBracketLoc));
break;
}
}
@@ -1354,6 +1357,7 @@ void ASTStmtReader::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
unsigned NumAssocs = Record.readInt();
assert(NumAssocs == E->getNumAssocs() && "Wrong NumAssocs!");
+ E->IsExprPredicate = Record.readInt();
E->ResultIndex = Record.readInt();
E->GenericSelectionExprBits.GenericLoc = readSourceLocation();
E->DefaultLoc = readSourceLocation();
@@ -1700,6 +1704,7 @@ void ASTStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) {
E->CXXConstructExprBits.StdInitListInitialization = Record.readInt();
E->CXXConstructExprBits.ZeroInitialization = Record.readInt();
E->CXXConstructExprBits.ConstructionKind = Record.readInt();
+ E->CXXConstructExprBits.IsImmediateEscalating = Record.readInt();
E->CXXConstructExprBits.Loc = readSourceLocation();
E->Constructor = readDeclAs<CXXConstructorDecl>();
E->ParenOrBraceRange = readSourceRange();
@@ -2003,9 +2008,10 @@ ASTStmtReader::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
Record.skipInts(1);
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
E->setArg(I, Record.readSubExpr());
- E->TSI = readTypeSourceInfo();
+ E->TypeAndInitForm.setPointer(readTypeSourceInfo());
E->setLParenLoc(readSourceLocation());
E->setRParenLoc(readSourceLocation());
+ E->TypeAndInitForm.setInt(Record.readInt());
}
void ASTStmtReader::VisitOverloadExpr(OverloadExpr *E) {
@@ -2805,7 +2811,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case STMT_REF_PTR:
IsStmtReference = true;
- assert(StmtEntries.find(Record[0]) != StmtEntries.end() &&
+ assert(StmtEntries.contains(Record[0]) &&
"No stmt was recorded for this offset reference!");
S = StmtEntries[Record.readInt()];
break;
@@ -2929,12 +2935,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_DECL_REF:
S = DeclRefExpr::CreateEmpty(
- Context,
- /*HasQualifier=*/Record[ASTStmtReader::NumExprFields],
- /*HasFoundDecl=*/Record[ASTStmtReader::NumExprFields + 1],
- /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields + 2],
- /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields + 2] ?
- Record[ASTStmtReader::NumExprFields + 6] : 0);
+ Context,
+ /*HasQualifier=*/Record[ASTStmtReader::NumExprFields],
+ /*HasFoundDecl=*/Record[ASTStmtReader::NumExprFields + 1],
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields + 2],
+ /*NumTemplateArgs=*/
+ Record[ASTStmtReader::NumExprFields + 2]
+ ? Record[ASTStmtReader::NumExprFields + 7]
+ : 0);
break;
case EXPR_INTEGER_LITERAL:
@@ -3519,7 +3527,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
CollapsedNum, Empty);
break;
}
-
+
case STMT_OMP_MASKED_TASKLOOP_DIRECTIVE: {
unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
index bdf11001473e..26279d399b53 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
@@ -185,7 +185,7 @@ std::set<const FileEntry *> GetAffectingModuleMaps(const Preprocessor &PP,
if (!HFI || (HFI->isModuleHeader && !HFI->isCompilingModuleHeader))
continue;
- for (const auto &KH : HS.findAllModulesForHeader(File)) {
+ for (const auto &KH : HS.findResolvedModulesForHeader(File)) {
if (!KH.getModule())
continue;
ModulesToProcess.push_back(KH.getModule());
@@ -200,7 +200,9 @@ std::set<const FileEntry *> GetAffectingModuleMaps(const Preprocessor &PP,
CB(F);
FileID FID = SourceMgr.translateFile(F);
SourceLocation Loc = SourceMgr.getIncludeLoc(FID);
- while (Loc.isValid()) {
+ // The include location of inferred module maps can point into the header
+ // file that triggered the inferring. Cut off the walk if that's the case.
+ while (Loc.isValid() && isModuleMap(SourceMgr.getFileCharacteristic(Loc))) {
FID = SourceMgr.getFileID(Loc);
CB(*SourceMgr.getFileEntryRefForID(FID));
Loc = SourceMgr.getIncludeLoc(FID);
@@ -209,11 +211,18 @@ std::set<const FileEntry *> GetAffectingModuleMaps(const Preprocessor &PP,
auto ProcessModuleOnce = [&](const Module *M) {
for (const Module *Mod = M; Mod; Mod = Mod->Parent)
- if (ProcessedModules.insert(Mod).second)
+ if (ProcessedModules.insert(Mod).second) {
+ auto Insert = [&](FileEntryRef F) { ModuleMaps.insert(F); };
+ // The containing module map is affecting, because it's being pointed
+ // into by Module::DefinitionLoc.
+ if (auto ModuleMapFile = MM.getContainingModuleMapFile(Mod))
+ ForIncludeChain(*ModuleMapFile, Insert);
+ // For inferred modules, the module map that allowed inferring is not in
+ // the include chain of the virtual containing module map file. It did
+ // affect the compilation, though.
if (auto ModuleMapFile = MM.getModuleMapFileForUniquing(Mod))
- ForIncludeChain(*ModuleMapFile, [&](FileEntryRef F) {
- ModuleMaps.insert(F);
- });
+ ForIncludeChain(*ModuleMapFile, Insert);
+ }
};
for (const Module *CurrentModule : ModulesToProcess) {
@@ -866,7 +875,6 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(CUDA_PRAGMA_FORCE_HOST_DEVICE_DEPTH);
RECORD(PP_CONDITIONAL_STACK);
RECORD(DECLS_TO_CHECK_FOR_DEFERRED_DIAGS);
- RECORD(PP_INCLUDED_FILES);
RECORD(PP_ASSUME_NONNULL_LOC);
// SourceManager Block.
@@ -1243,6 +1251,8 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang maj.
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang min.
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable
+ // Standard C++ module
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Timestamps
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Errors
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // SVN branch/tag
@@ -1250,15 +1260,15 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
assert((!WritingModule || isysroot.empty()) &&
"writing module as a relocatable PCH?");
{
- RecordData::value_type Record[] = {
- METADATA,
- VERSION_MAJOR,
- VERSION_MINOR,
- CLANG_VERSION_MAJOR,
- CLANG_VERSION_MINOR,
- !isysroot.empty(),
- IncludeTimestamps,
- ASTHasCompilerErrors};
+ RecordData::value_type Record[] = {METADATA,
+ VERSION_MAJOR,
+ VERSION_MINOR,
+ CLANG_VERSION_MAJOR,
+ CLANG_VERSION_MINOR,
+ !isysroot.empty(),
+ isWritingStdCXXNamedModules(),
+ IncludeTimestamps,
+ ASTHasCompilerErrors};
Stream.EmitRecordWithBlob(MetadataAbbrevCode, Record,
getClangFullRepositoryVersion());
}
@@ -1277,8 +1287,9 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
SmallString<128> BaseDir;
if (PP.getHeaderSearchInfo().getHeaderSearchOpts().ModuleFileHomeIsCwd) {
// Use the current working directory as the base path for all inputs.
- auto *CWD =
- Context.getSourceManager().getFileManager().getDirectory(".").get();
+ auto CWD =
+ Context.getSourceManager().getFileManager().getOptionalDirectoryRef(
+ ".");
BaseDir.assign(CWD->getName());
} else {
BaseDir.assign(WritingModule->Directory->getName());
@@ -1288,11 +1299,11 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
// If the home of the module is the current working directory, then we
// want to pick up the cwd of the build process loading the module, not
// our cwd, when we load this module.
- if (!(PP.getHeaderSearchInfo()
+ if (!PP.getHeaderSearchInfo().getHeaderSearchOpts().ModuleFileHomeIsCwd &&
+ (!PP.getHeaderSearchInfo()
.getHeaderSearchOpts()
.ModuleMapFileHomeIsCwd ||
- PP.getHeaderSearchInfo().getHeaderSearchOpts().ModuleFileHomeIsCwd) ||
- WritingModule->Directory->getName() != StringRef(".")) {
+ WritingModule->Directory->getName() != StringRef("."))) {
// Module directory.
auto Abbrev = std::make_shared<BitCodeAbbrev>();
Abbrev->Add(BitCodeAbbrevOp(MODULE_DIRECTORY));
@@ -1349,6 +1360,7 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
continue;
Record.push_back((unsigned)M.Kind); // FIXME: Stable encoding
+ Record.push_back(M.StandardCXXModule);
AddSourceLocation(M.ImportLoc, Record);
// If we have calculated signature, there is no need to store
@@ -1759,6 +1771,7 @@ namespace {
struct data_type {
const HeaderFileInfo &HFI;
+ bool AlreadyIncluded;
ArrayRef<ModuleMap::KnownHeader> KnownHeaders;
UnresolvedModule Unresolved;
};
@@ -1804,7 +1817,8 @@ namespace {
endian::Writer LE(Out, little);
uint64_t Start = Out.tell(); (void)Start;
- unsigned char Flags = (Data.HFI.isImport << 5)
+ unsigned char Flags = (Data.AlreadyIncluded << 6)
+ | (Data.HFI.isImport << 5)
| (Data.HFI.isPragmaOnce << 4)
| (Data.HFI.DirInfo << 1)
| Data.HFI.IndexHeaderMapHeader;
@@ -1884,7 +1898,7 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
// If the file didn't exist, we can still create a module if we were given
// enough information in the module map.
- for (auto U : M->MissingHeaders) {
+ for (const auto &U : M->MissingHeaders) {
// Check that we were given enough information to build a module
// without this file existing on disk.
if (!U.Size || (!U.ModTime && IncludeTimestamps)) {
@@ -1903,18 +1917,16 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
SavedStrings.push_back(FilenameDup.data());
HeaderFileInfoTrait::key_type Key = {
- FilenameDup, *U.Size, IncludeTimestamps ? *U.ModTime : 0
- };
+ FilenameDup, *U.Size, IncludeTimestamps ? *U.ModTime : 0};
HeaderFileInfoTrait::data_type Data = {
- Empty, {}, {M, ModuleMap::headerKindToRole(U.Kind)}
- };
+ Empty, false, {}, {M, ModuleMap::headerKindToRole(U.Kind)}};
// FIXME: Deal with cases where there are multiple unresolved header
// directives in different submodules for the same header.
Generator.insert(Key, Data, GeneratorTrait);
++NumHeaderSearchEntries;
}
-
- Worklist.append(M->submodule_begin(), M->submodule_end());
+ auto SubmodulesRange = M->submodules();
+ Worklist.append(SubmodulesRange.begin(), SubmodulesRange.end());
}
}
@@ -1950,11 +1962,13 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
SavedStrings.push_back(Filename.data());
}
+ bool Included = PP->alreadyIncluded(File);
+
HeaderFileInfoTrait::key_type Key = {
Filename, File->getSize(), getTimestampForOutput(File)
};
HeaderFileInfoTrait::data_type Data = {
- *HFI, HS.getModuleMap().findResolvedModulesForHeader(File), {}
+ *HFI, Included, HS.getModuleMap().findResolvedModulesForHeader(File), {}
};
Generator.insert(Key, Data, GeneratorTrait);
++NumHeaderSearchEntries;
@@ -2260,29 +2274,6 @@ static bool shouldIgnoreMacro(MacroDirective *MD, bool IsModule,
return false;
}
-void ASTWriter::writeIncludedFiles(raw_ostream &Out, const Preprocessor &PP) {
- using namespace llvm::support;
-
- const Preprocessor::IncludedFilesSet &IncludedFiles = PP.getIncludedFiles();
-
- std::vector<uint32_t> IncludedInputFileIDs;
- IncludedInputFileIDs.reserve(IncludedFiles.size());
-
- for (const FileEntry *File : IncludedFiles) {
- auto InputFileIt = InputFileIDs.find(File);
- if (InputFileIt == InputFileIDs.end())
- continue;
- IncludedInputFileIDs.push_back(InputFileIt->second);
- }
-
- llvm::sort(IncludedInputFileIDs);
-
- endian::Writer LE(Out, little);
- LE.write<uint32_t>(IncludedInputFileIDs.size());
- for (uint32_t ID : IncludedInputFileIDs)
- LE.write<uint32_t>(ID);
-}
-
/// Writes the block containing the serialized form of the
/// preprocessor.
void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
@@ -2531,20 +2522,6 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
MacroOffsetsBase - ASTBlockStartOffset};
Stream.EmitRecordWithBlob(MacroOffsetAbbrev, Record, bytes(MacroOffsets));
}
-
- {
- auto Abbrev = std::make_shared<BitCodeAbbrev>();
- Abbrev->Add(BitCodeAbbrevOp(PP_INCLUDED_FILES));
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
- unsigned IncludedFilesAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
-
- SmallString<2048> Buffer;
- raw_svector_ostream Out(Buffer);
- writeIncludedFiles(Out, PP);
- RecordData::value_type Record[] = {PP_INCLUDED_FILES};
- Stream.EmitRecordWithBlob(IncludedFilesAbbrev, Record, Buffer.data(),
- Buffer.size());
- }
}
void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec,
@@ -2701,9 +2678,8 @@ unsigned ASTWriter::getSubmoduleID(Module *Mod) {
/// given module).
static unsigned getNumberOfModules(Module *Mod) {
unsigned ChildModules = 0;
- for (auto Sub = Mod->submodule_begin(), SubEnd = Mod->submodule_end();
- Sub != SubEnd; ++Sub)
- ChildModules += getNumberOfModules(*Sub);
+ for (auto *Submodule : Mod->submodules())
+ ChildModules += getNumberOfModules(Submodule);
return ChildModules + 1;
}
@@ -2719,7 +2695,8 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_DEFINITION));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ID
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Parent
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Kind
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // Kind
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Definition location
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFramework
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsExplicit
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsSystem
@@ -2820,12 +2797,16 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
ParentID = SubmoduleIDs[Mod->Parent];
}
+ uint64_t DefinitionLoc =
+ SourceLocationEncoding::encode(getAdjustedLocation(Mod->DefinitionLoc));
+
// Emit the definition of the block.
{
RecordData::value_type Record[] = {SUBMODULE_DEFINITION,
ID,
ParentID,
(RecordData::value_type)Mod->Kind,
+ DefinitionLoc,
Mod->IsFramework,
Mod->IsExplicit,
Mod->IsSystem,
@@ -2845,14 +2826,16 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
}
// Emit the umbrella header, if there is one.
- if (auto UmbrellaHeader = Mod->getUmbrellaHeader()) {
+ if (std::optional<Module::Header> UmbrellaHeader =
+ Mod->getUmbrellaHeaderAsWritten()) {
RecordData::value_type Record[] = {SUBMODULE_UMBRELLA_HEADER};
Stream.EmitRecordWithBlob(UmbrellaAbbrev, Record,
- UmbrellaHeader.NameAsWritten);
- } else if (auto UmbrellaDir = Mod->getUmbrellaDir()) {
+ UmbrellaHeader->NameAsWritten);
+ } else if (std::optional<Module::DirectoryName> UmbrellaDir =
+ Mod->getUmbrellaDirAsWritten()) {
RecordData::value_type Record[] = {SUBMODULE_UMBRELLA_DIR};
Stream.EmitRecordWithBlob(UmbrellaDirAbbrev, Record,
- UmbrellaDir.NameAsWritten);
+ UmbrellaDir->NameAsWritten);
}
// Emit the headers.
@@ -2876,10 +2859,9 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
// Emit the top headers.
{
- auto TopHeaders = Mod->getTopHeaders(PP->getFileManager());
RecordData::value_type Record[] = {SUBMODULE_TOPHEADER};
- for (auto *H : TopHeaders) {
- SmallString<128> HeaderName(H->getName());
+ for (FileEntryRef H : Mod->getTopHeaders(PP->getFileManager())) {
+ SmallString<128> HeaderName(H.getName());
PreparePathForOutput(HeaderName);
Stream.EmitRecordWithBlob(TopHeaderAbbrev, Record, HeaderName);
}
@@ -2995,20 +2977,41 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
assert(Flags == EncodeDiagStateFlags(State) &&
"diag state flags vary in single AST file");
+ // If we ever serialize non-pragma mappings outside the initial state, the
+ // code below will need to consider more than getDefaultMapping.
+ assert(!IncludeNonPragmaStates ||
+ State == Diag.DiagStatesByLoc.FirstDiagState);
+
unsigned &DiagStateID = DiagStateIDMap[State];
Record.push_back(DiagStateID);
if (DiagStateID == 0) {
DiagStateID = ++CurrID;
+ SmallVector<std::pair<unsigned, DiagnosticMapping>> Mappings;
// Add a placeholder for the number of mappings.
auto SizeIdx = Record.size();
Record.emplace_back();
for (const auto &I : *State) {
- if (I.second.isPragma() || IncludeNonPragmaStates) {
- Record.push_back(I.first);
- Record.push_back(I.second.serialize());
- }
+ // Maybe skip non-pragmas.
+ if (!I.second.isPragma() && !IncludeNonPragmaStates)
+ continue;
+ // Skip default mappings. We have a mapping for every diagnostic ever
+ // emitted, regardless of whether it was customized.
+ if (!I.second.isPragma() &&
+ I.second == DiagnosticIDs::getDefaultMapping(I.first))
+ continue;
+ Mappings.push_back(I);
+ }
+
+ // Sort by diag::kind for deterministic output.
+ llvm::sort(Mappings, [](const auto &LHS, const auto &RHS) {
+ return LHS.first < RHS.first;
+ });
+
+ for (const auto &I : Mappings) {
+ Record.push_back(I.first);
+ Record.push_back(I.second.serialize());
}
// Update the placeholder.
Record[SizeIdx] = (Record.size() - SizeIdx) / 2;
@@ -3175,6 +3178,13 @@ void ASTWriter::WriteComments() {
auto _ = llvm::make_scope_exit([this] { Stream.ExitBlock(); });
if (!PP->getPreprocessorOpts().WriteCommentListToPCH)
return;
+
+ // Don't write comments to BMI to reduce the size of BMI.
+ // If language services (e.g., clangd) want such abilities,
+ // we can offer a special option then.
+ if (isWritingStdCXXNamedModules())
+ return;
+
RecordData Record;
for (const auto &FO : Context->Comments.OrderedComments) {
for (const auto &OC : FO.second) {
@@ -3543,26 +3553,24 @@ public:
// the mapping from persistent IDs to strings.
Writer.SetIdentifierOffset(II, Out.tell());
+ auto MacroOffset = Writer.getMacroDirectivesOffset(II);
+
// Emit the offset of the key/data length information to the interesting
// identifiers table if necessary.
- if (InterestingIdentifierOffsets && isInterestingIdentifier(II))
+ if (InterestingIdentifierOffsets &&
+ isInterestingIdentifier(II, MacroOffset))
InterestingIdentifierOffsets->push_back(Out.tell());
unsigned KeyLen = II->getLength() + 1;
unsigned DataLen = 4; // 4 bytes for the persistent ID << 1
- auto MacroOffset = Writer.getMacroDirectivesOffset(II);
if (isInterestingIdentifier(II, MacroOffset)) {
DataLen += 2; // 2 bytes for builtin ID
DataLen += 2; // 2 bytes for flags
if (MacroOffset)
DataLen += 4; // MacroDirectives offset.
- if (NeedDecls) {
- for (IdentifierResolver::iterator D = IdResolver.begin(II),
- DEnd = IdResolver.end();
- D != DEnd; ++D)
- DataLen += 4;
- }
+ if (NeedDecls)
+ DataLen += std::distance(IdResolver.begin(II), IdResolver.end()) * 4;
}
return emitULEBKeyDataLength(KeyLen, DataLen, Out);
}
@@ -3607,8 +3615,7 @@ public:
// "stat"), but the ASTReader adds declarations to the end of the list
// (so we need to see the struct "stat" before the function "stat").
// Only emit declarations that aren't from a chained PCH, though.
- SmallVector<NamedDecl *, 16> Decls(IdResolver.begin(II),
- IdResolver.end());
+ SmallVector<NamedDecl *, 16> Decls(IdResolver.decls(II));
for (NamedDecl *D : llvm::reverse(Decls))
LE.write<uint32_t>(
Writer.getDeclID(getDeclForLocalLookup(PP.getLangOpts(), D)));
@@ -3634,9 +3641,8 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
// strings.
{
llvm::OnDiskChainedHashTableGenerator<ASTIdentifierTableTrait> Generator;
- ASTIdentifierTableTrait Trait(
- *this, PP, IdResolver, IsModule,
- (getLangOpts().CPlusPlus && IsModule) ? &InterestingIdents : nullptr);
+ ASTIdentifierTableTrait Trait(*this, PP, IdResolver, IsModule,
+ IsModule ? &InterestingIdents : nullptr);
// Look for any identifiers that were named while processing the
// headers, but are otherwise not needed. We add these to the hash
@@ -3645,13 +3651,13 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
// file.
SmallVector<const IdentifierInfo *, 128> IIs;
for (const auto &ID : PP.getIdentifierTable())
- IIs.push_back(ID.second);
- // Sort the identifiers lexicographically before getting them references so
+ if (Trait.isInterestingNonMacroIdentifier(ID.second))
+ IIs.push_back(ID.second);
+ // Sort the identifiers lexicographically before getting the references so
// that their order is stable.
llvm::sort(IIs, llvm::deref<std::less<>>());
for (const IdentifierInfo *II : IIs)
- if (Trait.isInterestingNonMacroIdentifier(II))
- getIdentifierRef(II);
+ getIdentifierRef(II);
// Create the on-disk hash table representation. We only store offsets
// for identifiers that appear here for the first time.
@@ -4252,6 +4258,7 @@ void ASTWriter::WriteLateParsedTemplates(Sema &SemaRef) {
LateParsedTemplate &LPT = *LPTMapEntry.second;
AddDeclRef(FD, Record);
AddDeclRef(LPT.D, Record);
+ Record.push_back(LPT.FPO.getAsOpaqueInt());
Record.push_back(LPT.Toks.size());
for (const auto &Tok : LPT.Toks) {
@@ -4382,6 +4389,7 @@ void ASTRecordWriter::AddAttr(const Attr *A) {
Record.push_back(A->getParsedKind());
Record.push_back(A->getSyntax());
Record.push_back(A->getAttributeSpellingListIndexRaw());
+ Record.push_back(A->isRegularKeywordAttribute());
#include "clang/Serialization/AttrPCHWrite.inc"
}
@@ -4412,6 +4420,14 @@ void ASTWriter::AddToken(const Token &Tok, RecordDataImpl &Record) {
AddToken(T, Record);
break;
}
+ case tok::annot_pragma_pack: {
+ auto *Info =
+ static_cast<Sema::PragmaPackInfo *>(Tok.getAnnotationValue());
+ Record.push_back(static_cast<unsigned>(Info->Action));
+ AddString(Info->SlotLabel, Record);
+ AddToken(Info->Alignment, Record);
+ break;
+ }
// Some annotation tokens do not use the PtrData field.
case tok::annot_pragma_openmp:
case tok::annot_pragma_openmp_end:
@@ -4436,6 +4452,11 @@ void ASTWriter::AddString(StringRef Str, RecordDataImpl &Record) {
bool ASTWriter::PreparePathForOutput(SmallVectorImpl<char> &Path) {
assert(Context && "should have context when outputting path");
+ // Leave special file names as they are.
+ StringRef PathStr(Path.data(), Path.size());
+ if (PathStr == "<built-in>" || PathStr == "<command line>")
+ return false;
+
bool Changed =
cleanPathForOutput(Context->getSourceManager().getFileManager(), Path);
@@ -4873,13 +4894,9 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
}
// Sort the identifiers to visit based on their name.
llvm::sort(IIs, llvm::deref<std::less<>>());
- for (const IdentifierInfo *II : IIs) {
- for (IdentifierResolver::iterator D = SemaRef.IdResolver.begin(II),
- DEnd = SemaRef.IdResolver.end();
- D != DEnd; ++D) {
- GetDeclRef(*D);
- }
- }
+ for (const IdentifierInfo *II : IIs)
+ for (const Decl *D : SemaRef.IdResolver.decls(II))
+ GetDeclRef(D);
}
// For method pool in the module, if it contains an entry for a selector,
@@ -5101,7 +5118,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
};
llvm::SmallVector<ModuleInfo, 64> Imports;
for (const auto *I : Context.local_imports()) {
- assert(SubmoduleIDs.find(I->getImportedModule()) != SubmoduleIDs.end());
+ assert(SubmoduleIDs.contains(I->getImportedModule()));
Imports.push_back(ModuleInfo(SubmoduleIDs[I->getImportedModule()],
I->getImportedModule()));
}
@@ -5167,6 +5184,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
const Decl *D = DeclUpdate.first;
bool HasUpdatedBody = false;
+ bool HasAddedVarDefinition = false;
RecordData RecordData;
ASTRecordWriter Record(*this, RecordData);
for (auto &Update : DeclUpdate.second) {
@@ -5176,6 +5194,8 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
// to skip over the lazy body to reach statements for other records.
if (Kind == UPD_CXX_ADDED_FUNCTION_DEFINITION)
HasUpdatedBody = true;
+ else if (Kind == UPD_CXX_ADDED_VAR_DEFINITION)
+ HasAddedVarDefinition = true;
else
Record.push_back(Kind);
@@ -5188,6 +5208,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
break;
case UPD_CXX_ADDED_FUNCTION_DEFINITION:
+ case UPD_CXX_ADDED_VAR_DEFINITION:
break;
case UPD_CXX_POINT_OF_INSTANTIATION:
@@ -5195,14 +5216,6 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
Record.AddSourceLocation(Update.getLoc());
break;
- case UPD_CXX_ADDED_VAR_DEFINITION: {
- const VarDecl *VD = cast<VarDecl>(D);
- Record.push_back(VD->isInline());
- Record.push_back(VD->isInlineSpecified());
- Record.AddVarDeclInit(VD);
- break;
- }
-
case UPD_CXX_INSTANTIATED_DEFAULT_ARGUMENT:
Record.AddStmt(const_cast<Expr *>(
cast<ParmVarDecl>(Update.getDecl())->getDefaultArg()));
@@ -5314,12 +5327,20 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
}
}
+ // Add a trailing update record, if any. These must go last because we
+ // lazily load their attached statement.
if (HasUpdatedBody) {
const auto *Def = cast<FunctionDecl>(D);
Record.push_back(UPD_CXX_ADDED_FUNCTION_DEFINITION);
Record.push_back(Def->isInlined());
Record.AddSourceLocation(Def->getInnerLocStart());
Record.AddFunctionDefinition(Def);
+ } else if (HasAddedVarDefinition) {
+ const auto *VD = cast<VarDecl>(D);
+ Record.push_back(UPD_CXX_ADDED_VAR_DEFINITION);
+ Record.push_back(VD->isInline());
+ Record.push_back(VD->isInlineSpecified());
+ Record.AddVarDeclInit(VD);
}
OffsetsRecord.push_back(GetDeclRef(D));
@@ -5449,7 +5470,7 @@ MacroID ASTWriter::getMacroID(MacroInfo *MI) {
if (!MI || MI->isBuiltinMacro())
return 0;
- assert(MacroIDs.find(MI) != MacroIDs.end() && "Macro not emitted!");
+ assert(MacroIDs.contains(MI) && "Macro not emitted!");
return MacroIDs[MI];
}
@@ -5624,7 +5645,7 @@ DeclID ASTWriter::getDeclID(const Decl *D) {
if (D->isFromASTFile())
return D->getGlobalID();
- assert(DeclIDs.find(D) != DeclIDs.end() && "Declaration not emitted!");
+ assert(DeclIDs.contains(D) && "Declaration not emitted!");
return DeclIDs[D];
}
@@ -5909,6 +5930,7 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
// getODRHash will compute the ODRHash if it has not been previously computed.
Record->push_back(D->getODRHash());
+
bool ModulesDebugInfo =
Writer->Context->getLangOpts().ModulesDebugInfo && !D->isDependentType();
Record->push_back(ModulesDebugInfo);
@@ -5917,24 +5939,24 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
// IsLambda bit is already saved.
- Record->push_back(Data.NumBases);
- if (Data.NumBases > 0)
- AddCXXBaseSpecifiers(Data.bases());
-
- // FIXME: Make VBases lazily computed when needed to avoid storing them.
- Record->push_back(Data.NumVBases);
- if (Data.NumVBases > 0)
- AddCXXBaseSpecifiers(Data.vbases());
-
AddUnresolvedSet(Data.Conversions.get(*Writer->Context));
Record->push_back(Data.ComputedVisibleConversions);
if (Data.ComputedVisibleConversions)
AddUnresolvedSet(Data.VisibleConversions.get(*Writer->Context));
// Data.Definition is the owning decl, no need to write it.
- AddDeclRef(D->getFirstFriend());
- // Add lambda-specific data.
- if (Data.IsLambda) {
+ if (!Data.IsLambda) {
+ Record->push_back(Data.NumBases);
+ if (Data.NumBases > 0)
+ AddCXXBaseSpecifiers(Data.bases());
+
+ // FIXME: Make VBases lazily computed when needed to avoid storing them.
+ Record->push_back(Data.NumVBases);
+ if (Data.NumVBases > 0)
+ AddCXXBaseSpecifiers(Data.vbases());
+
+ AddDeclRef(D->getFirstFriend());
+ } else {
auto &Lambda = D->getLambdaData();
Record->push_back(Lambda.DependencyKind);
Record->push_back(Lambda.IsGenericLambda);
@@ -5944,7 +5966,8 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
Record->push_back(Lambda.HasKnownInternalLinkage);
Record->push_back(Lambda.ManglingNumber);
Record->push_back(D->getDeviceLambdaManglingNumber());
- AddDeclRef(D->getLambdaContextDecl());
+ // The lambda context declaration and index within the context are provided
+ // separately, so that they can be used for merging.
AddTypeSourceInfo(Lambda.MethodTyInfo);
for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
const LambdaCapture &Capture = Lambda.Captures.front()[I];
@@ -5976,13 +5999,20 @@ void ASTRecordWriter::AddVarDeclInit(const VarDecl *VD) {
return;
}
- unsigned Val = 1;
+ uint64_t Val = 1;
if (EvaluatedStmt *ES = VD->getEvaluatedStmt()) {
Val |= (ES->HasConstantInitialization ? 2 : 0);
Val |= (ES->HasConstantDestruction ? 4 : 0);
- // FIXME: Also emit the constant initializer value.
+ APValue *Evaluated = VD->getEvaluatedValue();
+ // If the evaluted result is constant, emit it.
+ if (Evaluated && (Evaluated->isInt() || Evaluated->isFloat()))
+ Val |= 8;
}
push_back(Val);
+ if (Val & 8) {
+ AddAPValue(*VD->getEvaluatedValue());
+ }
+
writeStmtRef(Init);
}
@@ -6049,12 +6079,12 @@ void ASTWriter::SelectorRead(SelectorID ID, Selector S) {
void ASTWriter::MacroDefinitionRead(serialization::PreprocessedEntityID ID,
MacroDefinitionRecord *MD) {
- assert(MacroDefinitions.find(MD) == MacroDefinitions.end());
+ assert(!MacroDefinitions.contains(MD));
MacroDefinitions[MD] = ID;
}
void ASTWriter::ModuleRead(serialization::SubmoduleID ID, Module *Mod) {
- assert(SubmoduleIDs.find(Mod) == SubmoduleIDs.end());
+ assert(!SubmoduleIDs.contains(Mod));
SubmoduleIDs[Mod] = ID;
}
@@ -7128,6 +7158,19 @@ void OMPClauseWriter::VisitOMPXDynCGroupMemClause(OMPXDynCGroupMemClause *C) {
Record.AddSourceLocation(C->getLParenLoc());
}
+void OMPClauseWriter::VisitOMPDoacrossClause(OMPDoacrossClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.push_back(C->getNumLoops());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.push_back(C->getDependenceType());
+ Record.AddSourceLocation(C->getDependenceLoc());
+ Record.AddSourceLocation(C->getColonLoc());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+ for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I)
+ Record.AddStmt(C->getLoopData(I));
+}
+
void ASTRecordWriter::writeOMPTraitInfo(const OMPTraitInfo *TI) {
writeUInt32(TI->Sets.size());
for (const auto &Set : TI->Sets) {
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
index a58e0d796b31..59dbc36d24e8 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -278,7 +278,7 @@ void ASTDeclWriter::Visit(Decl *D) {
// Source locations require array (variable-length) abbreviations. The
// abbreviation infrastructure requires that arrays are encoded last, so
// we handle it here in the case of those classes derived from DeclaratorDecl
- if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
+ if (auto *DD = dyn_cast<DeclaratorDecl>(D)) {
if (auto *TInfo = DD->getTypeSourceInfo())
Record.AddTypeLoc(TInfo->getTypeLoc());
}
@@ -286,16 +286,38 @@ void ASTDeclWriter::Visit(Decl *D) {
// Handle FunctionDecl's body here and write it after all other Stmts/Exprs
// have been written. We want it last because we will not read it back when
// retrieving it from the AST, we'll just lazily set the offset.
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
Record.push_back(FD->doesThisDeclarationHaveABody());
if (FD->doesThisDeclarationHaveABody())
Record.AddFunctionDefinition(FD);
}
+ // Similar to FunctionDecls, handle VarDecl's initializer here and write it
+ // after all other Stmts/Exprs. We will not read the initializer until after
+ // we have finished recursive deserialization, because it can recursively
+ // refer back to the variable.
+ if (auto *VD = dyn_cast<VarDecl>(D)) {
+ Record.AddVarDeclInit(VD);
+ }
+
+ // And similarly for FieldDecls. We already serialized whether there is a
+ // default member initializer.
+ if (auto *FD = dyn_cast<FieldDecl>(D)) {
+ if (FD->hasInClassInitializer()) {
+ if (Expr *Init = FD->getInClassInitializer()) {
+ Record.push_back(1);
+ Record.AddStmt(Init);
+ } else {
+ Record.push_back(0);
+ // Initializer has not been instantiated yet.
+ }
+ }
+ }
+
// If this declaration is also a DeclContext, write blocks for the
// declarations that lexically stored inside its context and those
// declarations that are visible from its context.
- if (DeclContext *DC = dyn_cast<DeclContext>(D))
+ if (auto *DC = dyn_cast<DeclContext>(D))
VisitDeclContext(DC);
}
@@ -411,6 +433,10 @@ void ASTDeclWriter::VisitTypeAliasDecl(TypeAliasDecl *D) {
}
void ASTDeclWriter::VisitTagDecl(TagDecl *D) {
+ static_assert(DeclContext::NumTagDeclBits == 10,
+ "You need to update the serializer after you change the "
+ "TagDeclBits");
+
VisitRedeclarable(D);
VisitTypeDecl(D);
Record.push_back(D->getIdentifierNamespace());
@@ -435,6 +461,10 @@ void ASTDeclWriter::VisitTagDecl(TagDecl *D) {
}
void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
+ static_assert(DeclContext::NumEnumDeclBits == 20,
+ "You need to update the serializer after you change the "
+ "EnumDeclBits");
+
VisitTagDecl(D);
Record.AddTypeSourceInfo(D->getIntegerTypeSourceInfo());
if (!D->getIntegerTypeSourceInfo())
@@ -478,6 +508,10 @@ void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
}
void ASTDeclWriter::VisitRecordDecl(RecordDecl *D) {
+ static_assert(DeclContext::NumRecordDeclBits == 41,
+ "You need to update the serializer after you change the "
+ "RecordDeclBits");
+
VisitTagDecl(D);
Record.push_back(D->hasFlexibleArrayMember());
Record.push_back(D->isAnonymousStructOrUnion());
@@ -546,6 +580,10 @@ void ASTDeclWriter::VisitDeclaratorDecl(DeclaratorDecl *D) {
}
void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
+ static_assert(DeclContext::NumFunctionDeclBits == 30,
+ "You need to update the serializer after you change the "
+ "FunctionDeclBits");
+
VisitRedeclarable(D);
Record.push_back(D->getTemplatedKind());
@@ -692,11 +730,15 @@ void ASTDeclWriter::VisitCXXDeductionGuideDecl(CXXDeductionGuideDecl *D) {
addExplicitSpecifier(D->getExplicitSpecifier(), Record);
Record.AddDeclRef(D->Ctor);
VisitFunctionDecl(D);
- Record.push_back(D->isCopyDeductionCandidate());
+ Record.push_back(static_cast<unsigned char>(D->getDeductionCandidateKind()));
Code = serialization::DECL_CXX_DEDUCTION_GUIDE;
}
void ASTDeclWriter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ static_assert(DeclContext::NumObjCMethodDeclBits == 24,
+ "You need to update the serializer after you change the "
+ "ObjCMethodDeclBits");
+
VisitNamedDecl(D);
// FIXME: convert to LazyStmtPtr?
// Unlike C/C++, method bodies will never be in header files.
@@ -755,6 +797,10 @@ void ASTDeclWriter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
}
void ASTDeclWriter::VisitObjCContainerDecl(ObjCContainerDecl *D) {
+ static_assert(DeclContext::NumObjCContainerDeclBits == 51,
+ "You need to update the serializer after you change the "
+ "ObjCContainerDeclBits");
+
VisitNamedDecl(D);
Record.AddSourceLocation(D->getAtStartLoc());
Record.AddSourceRange(D->getAtEndRange());
@@ -935,14 +981,11 @@ void ASTDeclWriter::VisitFieldDecl(FieldDecl *D) {
VisitDeclaratorDecl(D);
Record.push_back(D->isMutable());
- FieldDecl::InitStorageKind ISK = D->InitStorage.getInt();
- Record.push_back(ISK);
- if (ISK == FieldDecl::ISK_CapturedVLAType)
+ Record.push_back((D->StorageKind << 1) | D->BitField);
+ if (D->StorageKind == FieldDecl::ISK_CapturedVLAType)
Record.AddTypeRef(QualType(D->getCapturedVLAType(), 0));
- else if (ISK)
- Record.AddStmt(D->getInClassInitializer());
-
- Record.AddStmt(D->getBitWidth());
+ else if (D->BitField)
+ Record.AddStmt(D->getBitWidth());
if (!D->getDeclName())
Record.AddDeclRef(Context.getInstantiatedFromUnnamedFieldDecl(D));
@@ -1013,6 +1056,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
Record.push_back(D->getTSCSpec());
Record.push_back(D->getInitStyle());
Record.push_back(D->isARCPseudoStrong());
+ bool HasDeducedType = false;
if (!isa<ParmVarDecl>(D)) {
Record.push_back(D->isThisDeclarationADemotedDefinition());
Record.push_back(D->isExceptionVariable());
@@ -1029,36 +1073,34 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
else
Record.push_back(0);
Record.push_back(D->isEscapingByref());
+ HasDeducedType = D->getType()->getContainedDeducedType();
+ Record.push_back(HasDeducedType);
}
Record.push_back(D->getLinkageInternal());
- Record.AddVarDeclInit(D);
-
- if (D->hasAttr<BlocksAttr>() && D->getType()->getAsCXXRecordDecl()) {
+ if (D->hasAttr<BlocksAttr>()) {
BlockVarCopyInit Init = Writer.Context->getBlockVarCopyInit(D);
Record.AddStmt(Init.getCopyExpr());
if (Init.getCopyExpr())
Record.push_back(Init.canThrow());
}
- if (D->getStorageDuration() == SD_Static) {
- bool ModulesCodegen = false;
- if (Writer.WritingModule &&
- !D->getDescribedVarTemplate() && !D->getMemberSpecializationInfo()) {
- // When building a C++20 module interface unit or a partition unit, a
- // strong definition in the module interface is provided by the
- // compilation of that unit, not by its users. (Inline variables are still
- // emitted in module users.)
- ModulesCodegen =
- (Writer.WritingModule->isInterfaceOrPartition() ||
- (D->hasAttr<DLLExportAttr>() &&
- Writer.Context->getLangOpts().BuildingPCHWithObjectFile)) &&
- Writer.Context->GetGVALinkageForVariable(D) == GVA_StrongExternal;
- }
- Record.push_back(ModulesCodegen);
- if (ModulesCodegen)
- Writer.ModularCodegenDecls.push_back(Writer.GetDeclRef(D));
+ bool ModulesCodegen = false;
+ if (Writer.WritingModule && D->getStorageDuration() == SD_Static &&
+ !D->getDescribedVarTemplate()) {
+ // When building a C++20 module interface unit or a partition unit, a
+ // strong definition in the module interface is provided by the
+ // compilation of that unit, not by its users. (Inline variables are still
+ // emitted in module users.)
+ ModulesCodegen =
+ (Writer.WritingModule->isInterfaceOrPartition() ||
+ (D->hasAttr<DLLExportAttr>() &&
+ Writer.Context->getLangOpts().BuildingPCHWithObjectFile)) &&
+ Writer.Context->GetGVALinkageForVariable(D) >= GVA_StrongExternal;
}
+ Record.push_back(ModulesCodegen);
+ if (ModulesCodegen)
+ Writer.ModularCodegenDecls.push_back(Writer.GetDeclRef(D));
enum {
VarNotTemplate = 0, VarTemplate, StaticDataMemberSpecialization
@@ -1094,9 +1136,10 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
!D->isConstexpr() &&
!D->isInitCapture() &&
!D->isPreviousDeclInSameBlockScope() &&
- !(D->hasAttr<BlocksAttr>() && D->getType()->getAsCXXRecordDecl()) &&
!D->isEscapingByref() &&
+ !HasDeducedType &&
D->getStorageDuration() != SD_Static &&
+ !D->getDescribedVarTemplate() &&
!D->getMemberSpecializationInfo())
AbbrevToUse = Writer.getDeclVarAbbrev();
@@ -1241,6 +1284,10 @@ void ASTDeclWriter::VisitCapturedDecl(CapturedDecl *CD) {
}
void ASTDeclWriter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ static_assert(DeclContext::NumLinkageSpecDeclBits == 4,
+ "You need to update the serializer after you change the"
+ "LinkageSpecDeclBits");
+
VisitDecl(D);
Record.push_back(D->getLanguage());
Record.AddSourceLocation(D->getExternLoc());
@@ -1385,7 +1432,10 @@ void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
VisitRecordDecl(D);
enum {
- CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
+ CXXRecNotTemplate = 0,
+ CXXRecTemplate,
+ CXXRecMemberSpecialization,
+ CXXLambda
};
if (ClassTemplateDecl *TemplD = D->getDescribedClassTemplate()) {
Record.push_back(CXXRecTemplate);
@@ -1396,6 +1446,15 @@ void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
Record.AddDeclRef(MSInfo->getInstantiatedFrom());
Record.push_back(MSInfo->getTemplateSpecializationKind());
Record.AddSourceLocation(MSInfo->getPointOfInstantiation());
+ } else if (D->isLambda()) {
+ // For a lambda, we need some information early for merging.
+ Record.push_back(CXXLambda);
+ if (auto *Context = D->getLambdaContextDecl()) {
+ Record.AddDeclRef(Context);
+ Record.push_back(D->getLambdaIndexInContext());
+ } else {
+ Record.push_back(0);
+ }
} else {
Record.push_back(CXXRecNotTemplate);
}
@@ -1436,6 +1495,10 @@ void ASTDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) {
}
void ASTDeclWriter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ static_assert(DeclContext::NumCXXConstructorDeclBits == 21,
+ "You need to update the serializer after you change the "
+ "CXXConstructorDeclBits");
+
Record.push_back(D->getTrailingAllocKind());
addExplicitSpecifier(D->getExplicitSpecifier(), Record);
if (auto Inherited = D->getInheritedConstructor()) {
@@ -1812,6 +1875,10 @@ void ASTDeclWriter::VisitStaticAssertDecl(StaticAssertDecl *D) {
/// Emit the DeclContext part of a declaration context decl.
void ASTDeclWriter::VisitDeclContext(DeclContext *DC) {
+ static_assert(DeclContext::NumDeclContextBits == 13,
+ "You need to update the serializer after you change the "
+ "DeclContextBits");
+
Record.AddOffset(Writer.WriteDeclContextLexicalBlock(Context, DC));
Record.AddOffset(Writer.WriteDeclContextVisibleBlock(Context, DC));
}
@@ -1922,6 +1989,10 @@ void ASTDeclWriter::VisitOMPRequiresDecl(OMPRequiresDecl *D) {
}
void ASTDeclWriter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
+ static_assert(DeclContext::NumOMPDeclareReductionDeclBits == 2,
+ "You need to update the serializer after you change the "
+ "NumOMPDeclareReductionDeclBits");
+
VisitValueDecl(D);
Record.AddSourceLocation(D->getBeginLoc());
Record.AddStmt(D->getCombinerIn());
@@ -1984,7 +2055,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TSIType
// FieldDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isMutable
- Abv->Add(BitCodeAbbrevOp(0)); // InitStyle
+ Abv->Add(BitCodeAbbrevOp(0)); // StorageKind
// Type Source Info
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
@@ -2173,8 +2244,8 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // InitStyle
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isARCPseudoStrong
Abv->Add(BitCodeAbbrevOp(0)); // Linkage
- Abv->Add(BitCodeAbbrevOp(0)); // HasInit
- Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulesCodegen
+ Abv->Add(BitCodeAbbrevOp(0)); // VarKind (local enum)
// ParmVarDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsObjCMethodParameter
Abv->Add(BitCodeAbbrevOp(0)); // ScopeDepth
@@ -2261,9 +2332,10 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // isPrevDeclInSameScope
Abv->Add(BitCodeAbbrevOp(0)); // ImplicitParamKind
Abv->Add(BitCodeAbbrevOp(0)); // EscapingByref
+ Abv->Add(BitCodeAbbrevOp(0)); // HasDeducedType
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Linkage
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // HasConstant*
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // VarKind (local enum)
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulesCodeGen
+ Abv->Add(BitCodeAbbrevOp(0)); // VarKind (local enum)
// Type Source Info
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
@@ -2353,6 +2425,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //HadMultipleCandidates
Abv->Add(BitCodeAbbrevOp(0)); // RefersToEnclosingVariableOrCapture
Abv->Add(BitCodeAbbrevOp(0)); // NonOdrUseReason
+ Abv->Add(BitCodeAbbrevOp(0)); // IsImmediateEscalating
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclRef
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
DeclRefExprAbbrev = Stream.EmitAbbrev(std::move(Abv));
@@ -2428,7 +2501,20 @@ void ASTWriter::WriteDeclAbbrevs() {
/// relatively painless since they would presumably only do it for top-level
/// decls.
static bool isRequiredDecl(const Decl *D, ASTContext &Context,
- bool WritingModule) {
+ Module *WritingModule) {
+ // Named modules have different semantics than header modules. Every named
+ // module units owns a translation unit. So the importer of named modules
+ // doesn't need to deserilize everything ahead of time.
+ if (WritingModule && WritingModule->isModulePurview()) {
+ // The PragmaCommentDecl and PragmaDetectMismatchDecl are MSVC's extension.
+ // And the behavior of MSVC for such cases will leak this to the module
+ // users. Given pragma is not a standard thing, the compiler has the space
+ // to do their own decision. Let's follow MSVC here.
+ if (isa<PragmaCommentDecl, PragmaDetectMismatchDecl>(D))
+ return true;
+ return false;
+ }
+
// An ObjCMethodDecl is never considered as "required" because its
// implementation container always is.
@@ -2510,7 +2596,7 @@ void ASTRecordWriter::AddFunctionDefinition(const FunctionDecl *FD) {
// compilation of that unit, not by its users. (Inline functions are still
// emitted in module users.)
Linkage = Writer->Context->GetGVALinkageForFunction(FD);
- ModulesCodegen = *Linkage == GVA_StrongExternal;
+ ModulesCodegen = *Linkage >= GVA_StrongExternal;
}
if (Writer->Context->getLangOpts().ModulesCodegen ||
(FD->hasAttr<DLLExportAttr>() &&
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
index b35a7cee5af2..896e24c8a13d 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -42,6 +42,7 @@ namespace clang {
Code(serialization::STMT_NULL_PTR), AbbrevToUse(0) {}
ASTStmtWriter(const ASTStmtWriter&) = delete;
+ ASTStmtWriter &operator=(const ASTStmtWriter &) = delete;
uint64_t Emit() {
assert(Code != serialization::STMT_NULL_PTR &&
@@ -150,7 +151,7 @@ void ASTStmtWriter::VisitIfStmt(IfStmt *S) {
if (HasElse)
Record.AddStmt(S->getElse());
if (HasVar)
- Record.AddDeclRef(S->getConditionVariable());
+ Record.AddStmt(S->getConditionVariableDeclStmt());
if (HasInit)
Record.AddStmt(S->getInit());
@@ -177,7 +178,7 @@ void ASTStmtWriter::VisitSwitchStmt(SwitchStmt *S) {
if (HasInit)
Record.AddStmt(S->getInit());
if (HasVar)
- Record.AddDeclRef(S->getConditionVariable());
+ Record.AddStmt(S->getConditionVariableDeclStmt());
Record.AddSourceLocation(S->getSwitchLoc());
Record.AddSourceLocation(S->getLParenLoc());
@@ -198,7 +199,7 @@ void ASTStmtWriter::VisitWhileStmt(WhileStmt *S) {
Record.AddStmt(S->getCond());
Record.AddStmt(S->getBody());
if (HasVar)
- Record.AddDeclRef(S->getConditionVariable());
+ Record.AddStmt(S->getConditionVariableDeclStmt());
Record.AddSourceLocation(S->getWhileLoc());
Record.AddSourceLocation(S->getLParenLoc());
@@ -220,7 +221,7 @@ void ASTStmtWriter::VisitForStmt(ForStmt *S) {
VisitStmt(S);
Record.AddStmt(S->getInit());
Record.AddStmt(S->getCond());
- Record.AddDeclRef(S->getConditionVariable());
+ Record.AddStmt(S->getConditionVariableDeclStmt());
Record.AddStmt(S->getInc());
Record.AddStmt(S->getBody());
Record.AddSourceLocation(S->getForLoc());
@@ -316,7 +317,10 @@ void ASTStmtWriter::VisitGCCAsmStmt(GCCAsmStmt *S) {
Record.AddStmt(S->getClobberStringLiteral(I));
// Labels
- for (auto *E : S->labels()) Record.AddStmt(E);
+ for (unsigned I = 0, N = S->getNumLabels(); I != N; ++I) {
+ Record.AddIdentifierRef(S->getLabelIdentifier(I));
+ Record.AddStmt(S->getLabelExpr(I));
+ }
Code = serialization::STMT_GCCASM;
}
@@ -593,6 +597,7 @@ void ASTStmtWriter::VisitPredefinedExpr(PredefinedExpr *E) {
bool HasFunctionName = E->getFunctionName() != nullptr;
Record.push_back(HasFunctionName);
Record.push_back(E->getIdentKind()); // FIXME: stable encoding
+ Record.push_back(E->isTransparent());
Record.AddSourceLocation(E->getLocation());
if (HasFunctionName)
Record.AddStmt(E->getFunctionName());
@@ -608,6 +613,7 @@ void ASTStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
Record.push_back(E->hadMultipleCandidates());
Record.push_back(E->refersToEnclosingVariableOrCapture());
Record.push_back(E->isNonOdrUse());
+ Record.push_back(E->isImmediateEscalating());
if (E->hasTemplateKWAndArgsInfo()) {
unsigned NumTemplateArgs = E->getNumTemplateArgs();
@@ -619,7 +625,8 @@ void ASTStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
if ((!E->hasTemplateKWAndArgsInfo()) && (!E->hasQualifier()) &&
(E->getDecl() == E->getFoundDecl()) &&
nk == DeclarationName::Identifier &&
- !E->refersToEnclosingVariableOrCapture() && !E->isNonOdrUse()) {
+ !E->refersToEnclosingVariableOrCapture() && !E->isNonOdrUse() &&
+ !E->isImmediateEscalating()) {
AbbrevToUse = Writer.getDeclRefExprAbbrev();
}
@@ -1087,7 +1094,7 @@ void ASTStmtWriter::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
Record.push_back(E->usesGNUSyntax());
for (const DesignatedInitExpr::Designator &D : E->designators()) {
if (D.isFieldDesignator()) {
- if (FieldDecl *Field = D.getField()) {
+ if (FieldDecl *Field = D.getFieldDecl()) {
Record.push_back(serialization::DESIG_FIELD_DECL);
Record.AddDeclRef(Field);
} else {
@@ -1098,13 +1105,13 @@ void ASTStmtWriter::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
Record.AddSourceLocation(D.getFieldLoc());
} else if (D.isArrayDesignator()) {
Record.push_back(serialization::DESIG_ARRAY);
- Record.push_back(D.getFirstExprIndex());
+ Record.push_back(D.getArrayIndex());
Record.AddSourceLocation(D.getLBracketLoc());
Record.AddSourceLocation(D.getRBracketLoc());
} else {
assert(D.isArrayRangeDesignator() && "Unknown designator");
Record.push_back(serialization::DESIG_ARRAY_RANGE);
- Record.push_back(D.getFirstExprIndex());
+ Record.push_back(D.getArrayIndex());
Record.AddSourceLocation(D.getLBracketLoc());
Record.AddSourceLocation(D.getEllipsisLoc());
Record.AddSourceLocation(D.getRBracketLoc());
@@ -1224,6 +1231,7 @@ void ASTStmtWriter::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumAssocs());
+ Record.push_back(E->isExprPredicate());
Record.push_back(E->ResultIndex);
Record.AddSourceLocation(E->getGenericLoc());
Record.AddSourceLocation(E->getDefaultLoc());
@@ -1595,6 +1603,7 @@ void ASTStmtWriter::VisitCXXConstructExpr(CXXConstructExpr *E) {
Record.push_back(E->isStdInitListInitialization());
Record.push_back(E->requiresZeroInitialization());
Record.push_back(E->getConstructionKind()); // FIXME: stable encoding
+ Record.push_back(E->isImmediateEscalating());
Record.AddSourceLocation(E->getLocation());
Record.AddDeclRef(E->getConstructor());
Record.AddSourceRange(E->getParenOrBraceRange());
@@ -1921,6 +1930,7 @@ ASTStmtWriter::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
Record.AddTypeSourceInfo(E->getTypeSourceInfo());
Record.AddSourceLocation(E->getLParenLoc());
Record.AddSourceLocation(E->getRParenLoc());
+ Record.push_back(E->isListInitialization());
Code = serialization::EXPR_CXX_UNRESOLVED_CONSTRUCT;
}
@@ -2698,16 +2708,14 @@ void ASTStmtWriter::VisitOMPTargetParallelGenericLoopDirective(
//===----------------------------------------------------------------------===//
unsigned ASTWriter::RecordSwitchCaseID(SwitchCase *S) {
- assert(SwitchCaseIDs.find(S) == SwitchCaseIDs.end() &&
- "SwitchCase recorded twice");
+ assert(!SwitchCaseIDs.contains(S) && "SwitchCase recorded twice");
unsigned NextID = SwitchCaseIDs.size();
SwitchCaseIDs[S] = NextID;
return NextID;
}
unsigned ASTWriter::getSwitchCaseID(SwitchCase *S) {
- assert(SwitchCaseIDs.find(S) != SwitchCaseIDs.end() &&
- "SwitchCase hasn't been seen yet");
+ assert(SwitchCaseIDs.contains(S) && "SwitchCase hasn't been seen yet");
return SwitchCaseIDs[S];
}
diff --git a/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp b/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
index b2283c2b3987..d57f4cec2eab 100644
--- a/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
@@ -25,12 +25,12 @@
#include "llvm/Bitstream/BitstreamWriter.h"
#include "llvm/Support/DJB.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/LockFileManager.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
+#include "llvm/Support/raw_ostream.h"
#include <cstdio>
using namespace clang;
using namespace serialization;
@@ -281,15 +281,6 @@ GlobalModuleIndex::readIndex(StringRef Path) {
llvm::Error::success());
}
-void
-GlobalModuleIndex::getKnownModules(SmallVectorImpl<ModuleFile *> &ModuleFiles) {
- ModuleFiles.clear();
- for (unsigned I = 0, N = Modules.size(); I != N; ++I) {
- if (ModuleFile *MF = Modules[I].File)
- ModuleFiles.push_back(MF);
- }
-}
-
void GlobalModuleIndex::getModuleDependencies(
ModuleFile *File,
SmallVectorImpl<ModuleFile *> &Dependencies) {
@@ -634,6 +625,9 @@ llvm::Error GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
// Skip the imported kind
++Idx;
+ // Skip if it is standard C++ module
+ ++Idx;
+
// Skip the import location
++Idx;
@@ -913,8 +907,10 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
"failed writing index");
}
- return llvm::writeFileAtomically((IndexPath + "-%%%%%%%%").str(), IndexPath,
- OutputBuffer);
+ return llvm::writeToOutput(IndexPath, [&OutputBuffer](llvm::raw_ostream &OS) {
+ OS << OutputBuffer;
+ return llvm::Error::success();
+ });
}
namespace {
diff --git a/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp b/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp
index 81dd54692d77..5b099b740e1d 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp
@@ -59,11 +59,7 @@ ModuleFile *ModuleManager::lookupByModuleName(StringRef Name) const {
}
ModuleFile *ModuleManager::lookup(const FileEntry *File) const {
- auto Known = Modules.find(File);
- if (Known == Modules.end())
- return nullptr;
-
- return Known->second;
+ return Modules.lookup(File);
}
std::unique_ptr<llvm::MemoryBuffer>
diff --git a/contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp b/contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp
index d4990fce2d99..56ca3394385b 100644
--- a/contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp
@@ -57,6 +57,11 @@ std::unique_ptr<ASTConsumer> RawPCHContainerWriter::CreatePCHContainerGenerator(
return std::make_unique<RawPCHContainerGenerator>(std::move(OS), Buffer);
}
+ArrayRef<llvm::StringRef> RawPCHContainerReader::getFormats() const {
+ static StringRef Raw("raw");
+ return ArrayRef(Raw);
+}
+
StringRef
RawPCHContainerReader::ExtractPCH(llvm::MemoryBufferRef Buffer) const {
return Buffer.getBuffer();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
index 45783729e142..a54f1b1e71d4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -7,14 +7,15 @@
//===----------------------------------------------------------------------===//
// This file reports various statistics about analyzer visitation.
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
@@ -52,9 +53,8 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
const Decl *D = LC->getDecl();
// Iterate over the exploded graph.
- for (ExplodedGraph::node_iterator I = G.nodes_begin();
- I != G.nodes_end(); ++I) {
- const ProgramPoint &P = I->getLocation();
+ for (const ExplodedNode &N : G.nodes()) {
+ const ProgramPoint &P = N.getLocation();
// Only check the coverage in the top level function (optimization).
if (D != P.getLocationContext()->getDecl())
@@ -115,11 +115,8 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
output.str(), PathDiagnosticLocation(D, SM));
// Emit warning for each block we bailed out on.
- typedef CoreEngine::BlocksExhausted::const_iterator ExhaustedIterator;
const CoreEngine &CE = Eng.getCoreEngine();
- for (ExhaustedIterator I = CE.blocks_exhausted_begin(),
- E = CE.blocks_exhausted_end(); I != E; ++I) {
- const BlockEdge &BE = I->first;
+ for (const BlockEdge &BE : make_first_range(CE.exhausted_blocks())) {
const CFGBlock *Exit = BE.getDst();
if (Exit->empty())
continue;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 986b0add93df..269277aaf357 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -33,14 +33,19 @@ namespace {
class ArrayBoundCheckerV2 :
public Checker<check::Location> {
mutable std::unique_ptr<BuiltinBug> BT;
+ mutable std::unique_ptr<BugType> TaintBT;
- enum OOB_Kind { OOB_Precedes, OOB_Excedes, OOB_Tainted };
+ enum OOB_Kind { OOB_Precedes, OOB_Excedes };
- void reportOOB(CheckerContext &C, ProgramStateRef errorState, OOB_Kind kind,
- std::unique_ptr<BugReporterVisitor> Visitor = nullptr) const;
+ void reportOOB(CheckerContext &C, ProgramStateRef errorState,
+ OOB_Kind kind) const;
+ void reportTaintOOB(CheckerContext &C, ProgramStateRef errorState,
+ SVal TaintedSVal) const;
+
+ static bool isFromCtypeMacro(const Stmt *S, ASTContext &AC);
public:
- void checkLocation(SVal l, bool isLoad, const Stmt*S,
+ void checkLocation(SVal l, bool isLoad, const Stmt *S,
CheckerContext &C) const;
};
@@ -48,40 +53,29 @@ public:
class RegionRawOffsetV2 {
private:
const SubRegion *baseRegion;
- SVal byteOffset;
-
- RegionRawOffsetV2()
- : baseRegion(nullptr), byteOffset(UnknownVal()) {}
+ NonLoc byteOffset;
public:
- RegionRawOffsetV2(const SubRegion* base, SVal offset)
- : baseRegion(base), byteOffset(offset) {}
+ RegionRawOffsetV2(const SubRegion *base, NonLoc offset)
+ : baseRegion(base), byteOffset(offset) { assert(base); }
- NonLoc getByteOffset() const { return byteOffset.castAs<NonLoc>(); }
+ NonLoc getByteOffset() const { return byteOffset; }
const SubRegion *getRegion() const { return baseRegion; }
- static RegionRawOffsetV2 computeOffset(ProgramStateRef state,
- SValBuilder &svalBuilder,
- SVal location);
+ static std::optional<RegionRawOffsetV2>
+ computeOffset(ProgramStateRef State, SValBuilder &SVB, SVal Location);
void dump() const;
void dumpToStream(raw_ostream &os) const;
};
}
-static SVal computeExtentBegin(SValBuilder &svalBuilder,
- const MemRegion *region) {
- const MemSpaceRegion *SR = region->getMemorySpace();
- if (SR->getKind() == MemRegion::UnknownSpaceRegionKind)
- return UnknownVal();
- else
- return svalBuilder.makeZeroArrayIndex();
-}
-
// TODO: once the constraint manager is smart enough to handle non simplified
// symbolic expressions remove this function. Note that this can not be used in
// the constraint manager as is, since this does not handle overflows. It is
// safe to assume, however, that memory offsets will not overflow.
+// NOTE: callers of this function need to be aware of the effects of overflows
+// and signed<->unsigned conversions!
static std::pair<NonLoc, nonloc::ConcreteInt>
getSimplifiedOffsets(NonLoc offset, nonloc::ConcreteInt extent,
SValBuilder &svalBuilder) {
@@ -114,6 +108,38 @@ getSimplifiedOffsets(NonLoc offset, nonloc::ConcreteInt extent,
return std::pair<NonLoc, nonloc::ConcreteInt>(offset, extent);
}
+// Evaluate the comparison Value < Threshold with the help of the custom
+// simplification algorithm defined for this checker. Return a pair of states,
+// where the first one corresponds to "value below threshold" and the second
+// corresponds to "value at or above threshold". Returns {nullptr, nullptr} in
+// the case when the evaluation fails.
+static std::pair<ProgramStateRef, ProgramStateRef>
+compareValueToThreshold(ProgramStateRef State, NonLoc Value, NonLoc Threshold,
+ SValBuilder &SVB) {
+ if (auto ConcreteThreshold = Threshold.getAs<nonloc::ConcreteInt>()) {
+ std::tie(Value, Threshold) = getSimplifiedOffsets(Value, *ConcreteThreshold, SVB);
+ }
+ if (auto ConcreteThreshold = Threshold.getAs<nonloc::ConcreteInt>()) {
+ QualType T = Value.getType(SVB.getContext());
+ if (T->isUnsignedIntegerType() && ConcreteThreshold->getValue().isNegative()) {
+ // In this case we reduced the bound check to a comparison of the form
+ // (symbol or value with unsigned type) < (negative number)
+ // which is always false. We are handling these cases separately because
+ // evalBinOpNN can perform a signed->unsigned conversion that turns the
+ // negative number into a huge positive value and leads to wildly
+ // inaccurate conclusions.
+ return {nullptr, State};
+ }
+ }
+ auto BelowThreshold =
+ SVB.evalBinOpNN(State, BO_LT, Value, Threshold, SVB.getConditionType()).getAs<NonLoc>();
+
+ if (BelowThreshold)
+ return State->assume(*BelowThreshold);
+
+ return {nullptr, nullptr};
+}
+
void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
const Stmt* LoadS,
CheckerContext &checkerContext) const {
@@ -127,109 +153,101 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
// memory access is within the extent of the base region. Since we
// have some flexibility in defining the base region, we can achieve
// various levels of conservatism in our buffer overflow checking.
- ProgramStateRef state = checkerContext.getState();
- SValBuilder &svalBuilder = checkerContext.getSValBuilder();
- const RegionRawOffsetV2 &rawOffset =
- RegionRawOffsetV2::computeOffset(state, svalBuilder, location);
-
- if (!rawOffset.getRegion())
+ // The header ctype.h (from e.g. glibc) implements the isXXXXX() macros as
+ // #define isXXXXX(arg) (LOOKUP_TABLE[arg] & BITMASK_FOR_XXXXX)
+ // and incomplete analysis of these leads to false positives. As even
+ // accurate reports would be confusing for the users, just disable reports
+ // from these macros:
+ if (isFromCtypeMacro(LoadS, checkerContext.getASTContext()))
return;
- NonLoc rawOffsetVal = rawOffset.getByteOffset();
-
- // CHECK LOWER BOUND: Is byteOffset < extent begin?
- // If so, we are doing a load/store
- // before the first valid offset in the memory region.
+ ProgramStateRef state = checkerContext.getState();
- SVal extentBegin = computeExtentBegin(svalBuilder, rawOffset.getRegion());
+ SValBuilder &svalBuilder = checkerContext.getSValBuilder();
+ const std::optional<RegionRawOffsetV2> &RawOffset =
+ RegionRawOffsetV2::computeOffset(state, svalBuilder, location);
- if (std::optional<NonLoc> NV = extentBegin.getAs<NonLoc>()) {
- if (auto ConcreteNV = NV->getAs<nonloc::ConcreteInt>()) {
- std::pair<NonLoc, nonloc::ConcreteInt> simplifiedOffsets =
- getSimplifiedOffsets(rawOffset.getByteOffset(), *ConcreteNV,
- svalBuilder);
- rawOffsetVal = simplifiedOffsets.first;
- *NV = simplifiedOffsets.second;
- }
+ if (!RawOffset)
+ return;
- SVal lowerBound = svalBuilder.evalBinOpNN(state, BO_LT, rawOffsetVal, *NV,
- svalBuilder.getConditionType());
+ NonLoc ByteOffset = RawOffset->getByteOffset();
- std::optional<NonLoc> lowerBoundToCheck = lowerBound.getAs<NonLoc>();
- if (!lowerBoundToCheck)
- return;
+ // CHECK LOWER BOUND
+ const MemSpaceRegion *SR = RawOffset->getRegion()->getMemorySpace();
+ if (!llvm::isa<UnknownSpaceRegion>(SR)) {
+ // A pointer to UnknownSpaceRegion may point to the middle of
+ // an allocated region.
- ProgramStateRef state_precedesLowerBound, state_withinLowerBound;
- std::tie(state_precedesLowerBound, state_withinLowerBound) =
- state->assume(*lowerBoundToCheck);
+ auto [state_precedesLowerBound, state_withinLowerBound] =
+ compareValueToThreshold(state, ByteOffset,
+ svalBuilder.makeZeroArrayIndex(), svalBuilder);
- // Are we constrained enough to definitely precede the lower bound?
if (state_precedesLowerBound && !state_withinLowerBound) {
+ // We know that the index definitely precedes the lower bound.
reportOOB(checkerContext, state_precedesLowerBound, OOB_Precedes);
return;
}
- // Otherwise, assume the constraint of the lower bound.
- assert(state_withinLowerBound);
- state = state_withinLowerBound;
+ if (state_withinLowerBound)
+ state = state_withinLowerBound;
}
- do {
- // CHECK UPPER BOUND: Is byteOffset >= size(baseRegion)? If so,
- // we are doing a load/store after the last valid offset.
- const MemRegion *MR = rawOffset.getRegion();
- DefinedOrUnknownSVal Size = getDynamicExtent(state, MR, svalBuilder);
- if (!isa<NonLoc>(Size))
- break;
-
- if (auto ConcreteSize = Size.getAs<nonloc::ConcreteInt>()) {
- std::pair<NonLoc, nonloc::ConcreteInt> simplifiedOffsets =
- getSimplifiedOffsets(rawOffset.getByteOffset(), *ConcreteSize,
- svalBuilder);
- rawOffsetVal = simplifiedOffsets.first;
- Size = simplifiedOffsets.second;
- }
-
- SVal upperbound = svalBuilder.evalBinOpNN(state, BO_GE, rawOffsetVal,
- Size.castAs<NonLoc>(),
- svalBuilder.getConditionType());
-
- std::optional<NonLoc> upperboundToCheck = upperbound.getAs<NonLoc>();
- if (!upperboundToCheck)
- break;
-
- ProgramStateRef state_exceedsUpperBound, state_withinUpperBound;
- std::tie(state_exceedsUpperBound, state_withinUpperBound) =
- state->assume(*upperboundToCheck);
-
- // If we are under constrained and the index variables are tainted, report.
- if (state_exceedsUpperBound && state_withinUpperBound) {
- SVal ByteOffset = rawOffset.getByteOffset();
+ // CHECK UPPER BOUND
+ DefinedOrUnknownSVal Size =
+ getDynamicExtent(state, RawOffset->getRegion(), svalBuilder);
+ if (auto KnownSize = Size.getAs<NonLoc>()) {
+ auto [state_withinUpperBound, state_exceedsUpperBound] =
+ compareValueToThreshold(state, ByteOffset, *KnownSize, svalBuilder);
+
+ if (state_exceedsUpperBound) {
+ if (!state_withinUpperBound) {
+ // We know that the index definitely exceeds the upper bound.
+ reportOOB(checkerContext, state_exceedsUpperBound, OOB_Excedes);
+ return;
+ }
if (isTainted(state, ByteOffset)) {
- reportOOB(checkerContext, state_exceedsUpperBound, OOB_Tainted,
- std::make_unique<TaintBugVisitor>(ByteOffset));
+ // Both cases are possible, but the index is tainted, so report.
+ reportTaintOOB(checkerContext, state_exceedsUpperBound, ByteOffset);
return;
}
- } else if (state_exceedsUpperBound) {
- // If we are constrained enough to definitely exceed the upper bound,
- // report.
- assert(!state_withinUpperBound);
- reportOOB(checkerContext, state_exceedsUpperBound, OOB_Excedes);
- return;
}
- assert(state_withinUpperBound);
- state = state_withinUpperBound;
+ if (state_withinUpperBound)
+ state = state_withinUpperBound;
}
- while (false);
checkerContext.addTransition(state);
}
-void ArrayBoundCheckerV2::reportOOB(
- CheckerContext &checkerContext, ProgramStateRef errorState, OOB_Kind kind,
- std::unique_ptr<BugReporterVisitor> Visitor) const {
+void ArrayBoundCheckerV2::reportTaintOOB(CheckerContext &checkerContext,
+ ProgramStateRef errorState,
+ SVal TaintedSVal) const {
+ ExplodedNode *errorNode = checkerContext.generateErrorNode(errorState);
+ if (!errorNode)
+ return;
+
+ if (!TaintBT)
+ TaintBT.reset(
+ new BugType(this, "Out-of-bound access", categories::TaintedData));
+
+ SmallString<256> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "Out of bound memory access (index is tainted)";
+ auto BR =
+ std::make_unique<PathSensitiveBugReport>(*TaintBT, os.str(), errorNode);
+
+ // Track back the propagation of taintedness.
+ for (SymbolRef Sym : getTaintedSymbols(errorState, TaintedSVal)) {
+ BR->markInteresting(Sym);
+ }
+
+ checkerContext.emitReport(std::move(BR));
+}
+
+void ArrayBoundCheckerV2::reportOOB(CheckerContext &checkerContext,
+ ProgramStateRef errorState,
+ OOB_Kind kind) const {
ExplodedNode *errorNode = checkerContext.generateErrorNode(errorState);
if (!errorNode)
@@ -251,16 +269,30 @@ void ArrayBoundCheckerV2::reportOOB(
case OOB_Excedes:
os << "(access exceeds upper limit of memory block)";
break;
- case OOB_Tainted:
- os << "(index is tainted)";
- break;
}
-
auto BR = std::make_unique<PathSensitiveBugReport>(*BT, os.str(), errorNode);
- BR->addVisitor(std::move(Visitor));
checkerContext.emitReport(std::move(BR));
}
+bool ArrayBoundCheckerV2::isFromCtypeMacro(const Stmt *S, ASTContext &ACtx) {
+ SourceLocation Loc = S->getBeginLoc();
+ if (!Loc.isMacroID())
+ return false;
+
+ StringRef MacroName = Lexer::getImmediateMacroName(
+ Loc, ACtx.getSourceManager(), ACtx.getLangOpts());
+
+ if (MacroName.size() < 7 || MacroName[0] != 'i' || MacroName[1] != 's')
+ return false;
+
+ return ((MacroName == "isalnum") || (MacroName == "isalpha") ||
+ (MacroName == "isblank") || (MacroName == "isdigit") ||
+ (MacroName == "isgraph") || (MacroName == "islower") ||
+ (MacroName == "isnctrl") || (MacroName == "isprint") ||
+ (MacroName == "ispunct") || (MacroName == "isspace") ||
+ (MacroName == "isupper") || (MacroName == "isxdigit"));
+}
+
#ifndef NDEBUG
LLVM_DUMP_METHOD void RegionRawOffsetV2::dump() const {
dumpToStream(llvm::errs());
@@ -271,85 +303,55 @@ void RegionRawOffsetV2::dumpToStream(raw_ostream &os) const {
}
#endif
-// Lazily computes a value to be used by 'computeOffset'. If 'val'
-// is unknown or undefined, we lazily substitute '0'. Otherwise,
-// return 'val'.
-static inline SVal getValue(SVal val, SValBuilder &svalBuilder) {
- return val.isUndef() ? svalBuilder.makeZeroArrayIndex() : val;
-}
-
-// Scale a base value by a scaling factor, and return the scaled
-// value as an SVal. Used by 'computeOffset'.
-static inline SVal scaleValue(ProgramStateRef state,
- NonLoc baseVal, CharUnits scaling,
- SValBuilder &sb) {
- return sb.evalBinOpNN(state, BO_Mul, baseVal,
- sb.makeArrayIndex(scaling.getQuantity()),
- sb.getArrayIndexType());
-}
-
-// Add an SVal to another, treating unknown and undefined values as
-// summing to UnknownVal. Used by 'computeOffset'.
-static SVal addValue(ProgramStateRef state, SVal x, SVal y,
- SValBuilder &svalBuilder) {
- // We treat UnknownVals and UndefinedVals the same here because we
- // only care about computing offsets.
- if (x.isUnknownOrUndef() || y.isUnknownOrUndef())
- return UnknownVal();
-
- return svalBuilder.evalBinOpNN(state, BO_Add, x.castAs<NonLoc>(),
- y.castAs<NonLoc>(),
- svalBuilder.getArrayIndexType());
-}
-
-/// Compute a raw byte offset from a base region. Used for array bounds
-/// checking.
-RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(ProgramStateRef state,
- SValBuilder &svalBuilder,
- SVal location)
-{
- const MemRegion *region = location.getAsRegion();
- SVal offset = UndefinedVal();
-
- while (region) {
- switch (region->getKind()) {
- default: {
- if (const SubRegion *subReg = dyn_cast<SubRegion>(region)) {
- offset = getValue(offset, svalBuilder);
- if (!offset.isUnknownOrUndef())
- return RegionRawOffsetV2(subReg, offset);
- }
- return RegionRawOffsetV2();
- }
- case MemRegion::ElementRegionKind: {
- const ElementRegion *elemReg = cast<ElementRegion>(region);
- SVal index = elemReg->getIndex();
- if (!isa<NonLoc>(index))
- return RegionRawOffsetV2();
- QualType elemType = elemReg->getElementType();
+/// For a given Location that can be represented as a symbolic expression
+/// Arr[Idx] (or perhaps Arr[Idx1][Idx2] etc.), return the parent memory block
+/// Arr and the distance of Location from the beginning of Arr (expressed in a
+/// NonLoc that specifies the number of CharUnits). Returns nullopt when these
+/// cannot be determined.
+std::optional<RegionRawOffsetV2>
+RegionRawOffsetV2::computeOffset(ProgramStateRef State, SValBuilder &SVB,
+ SVal Location) {
+ QualType T = SVB.getArrayIndexType();
+ auto Calc = [&SVB, State, T](BinaryOperatorKind Op, NonLoc LHS, NonLoc RHS) {
+ // We will use this utility to add and multiply values.
+ return SVB.evalBinOpNN(State, Op, LHS, RHS, T).getAs<NonLoc>();
+ };
+
+ const MemRegion *Region = Location.getAsRegion();
+ NonLoc Offset = SVB.makeZeroArrayIndex();
+
+ while (Region) {
+ if (const auto *ERegion = dyn_cast<ElementRegion>(Region)) {
+ if (const auto Index = ERegion->getIndex().getAs<NonLoc>()) {
+ QualType ElemType = ERegion->getElementType();
// If the element is an incomplete type, go no further.
- ASTContext &astContext = svalBuilder.getContext();
- if (elemType->isIncompleteType())
- return RegionRawOffsetV2();
-
- // Update the offset.
- offset = addValue(state,
- getValue(offset, svalBuilder),
- scaleValue(state,
- index.castAs<NonLoc>(),
- astContext.getTypeSizeInChars(elemType),
- svalBuilder),
- svalBuilder);
-
- if (offset.isUnknownOrUndef())
- return RegionRawOffsetV2();
-
- region = elemReg->getSuperRegion();
- continue;
+ if (ElemType->isIncompleteType())
+ return std::nullopt;
+
+ // Perform Offset += Index * sizeof(ElemType); then continue the offset
+ // calculations with SuperRegion:
+ NonLoc Size = SVB.makeArrayIndex(
+ SVB.getContext().getTypeSizeInChars(ElemType).getQuantity());
+ if (auto Delta = Calc(BO_Mul, *Index, Size)) {
+ if (auto NewOffset = Calc(BO_Add, Offset, *Delta)) {
+ Offset = *NewOffset;
+ Region = ERegion->getSuperRegion();
+ continue;
+ }
+ }
}
+ } else if (const auto *SRegion = dyn_cast<SubRegion>(Region)) {
+ // NOTE: The dyn_cast<>() is expected to succeed, it'd be very surprising
+ // to see a MemSpaceRegion at this point.
+ // FIXME: We may return with {<Region>, 0} even if we didn't handle any
+ // ElementRegion layers. I think that this behavior was introduced
+ // accidentally by 8a4c760c204546aba566e302f299f7ed2e00e287 in 2011, so
+ // it may be useful to review it in the future.
+ return RegionRawOffsetV2(SRegion, Offset);
}
+ return std::nullopt;
}
- return RegionRawOffsetV2();
+ return std::nullopt;
}
void ento::registerArrayBoundCheckerV2(CheckerManager &mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 44166aaf5b85..4a5b8913c22f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -30,6 +30,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/raw_ostream.h"
@@ -95,57 +96,65 @@ static FoundationClass findKnownClass(const ObjCInterfaceDecl *ID,
//===----------------------------------------------------------------------===//
namespace {
- class NilArgChecker : public Checker<check::PreObjCMessage,
- check::PostStmt<ObjCDictionaryLiteral>,
- check::PostStmt<ObjCArrayLiteral> > {
- mutable std::unique_ptr<APIMisuse> BT;
-
- mutable llvm::SmallDenseMap<Selector, unsigned, 16> StringSelectors;
- mutable Selector ArrayWithObjectSel;
- mutable Selector AddObjectSel;
- mutable Selector InsertObjectAtIndexSel;
- mutable Selector ReplaceObjectAtIndexWithObjectSel;
- mutable Selector SetObjectAtIndexedSubscriptSel;
- mutable Selector ArrayByAddingObjectSel;
- mutable Selector DictionaryWithObjectForKeySel;
- mutable Selector SetObjectForKeySel;
- mutable Selector SetObjectForKeyedSubscriptSel;
- mutable Selector RemoveObjectForKeySel;
-
- void warnIfNilExpr(const Expr *E,
- const char *Msg,
- CheckerContext &C) const;
-
- void warnIfNilArg(CheckerContext &C,
- const ObjCMethodCall &msg, unsigned Arg,
- FoundationClass Class,
- bool CanBeSubscript = false) const;
-
- void generateBugReport(ExplodedNode *N,
- StringRef Msg,
- SourceRange Range,
- const Expr *Expr,
- CheckerContext &C) const;
-
- public:
- void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
- void checkPostStmt(const ObjCDictionaryLiteral *DL,
- CheckerContext &C) const;
- void checkPostStmt(const ObjCArrayLiteral *AL,
- CheckerContext &C) const;
- };
+class NilArgChecker : public Checker<check::PreObjCMessage,
+ check::PostStmt<ObjCDictionaryLiteral>,
+ check::PostStmt<ObjCArrayLiteral>,
+ EventDispatcher<ImplicitNullDerefEvent>> {
+ mutable std::unique_ptr<APIMisuse> BT;
+
+ mutable llvm::SmallDenseMap<Selector, unsigned, 16> StringSelectors;
+ mutable Selector ArrayWithObjectSel;
+ mutable Selector AddObjectSel;
+ mutable Selector InsertObjectAtIndexSel;
+ mutable Selector ReplaceObjectAtIndexWithObjectSel;
+ mutable Selector SetObjectAtIndexedSubscriptSel;
+ mutable Selector ArrayByAddingObjectSel;
+ mutable Selector DictionaryWithObjectForKeySel;
+ mutable Selector SetObjectForKeySel;
+ mutable Selector SetObjectForKeyedSubscriptSel;
+ mutable Selector RemoveObjectForKeySel;
+
+ void warnIfNilExpr(const Expr *E, const char *Msg, CheckerContext &C) const;
+
+ void warnIfNilArg(CheckerContext &C, const ObjCMethodCall &msg, unsigned Arg,
+ FoundationClass Class, bool CanBeSubscript = false) const;
+
+ void generateBugReport(ExplodedNode *N, StringRef Msg, SourceRange Range,
+ const Expr *Expr, CheckerContext &C) const;
+
+public:
+ void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+ void checkPostStmt(const ObjCDictionaryLiteral *DL, CheckerContext &C) const;
+ void checkPostStmt(const ObjCArrayLiteral *AL, CheckerContext &C) const;
+};
} // end anonymous namespace
void NilArgChecker::warnIfNilExpr(const Expr *E,
const char *Msg,
CheckerContext &C) const {
- ProgramStateRef State = C.getState();
- if (State->isNull(C.getSVal(E)).isConstrainedTrue()) {
+ auto Location = C.getSVal(E).getAs<Loc>();
+ if (!Location)
+ return;
+ auto [NonNull, Null] = C.getState()->assume(*Location);
+
+ // If it's known to be null.
+ if (!NonNull && Null) {
if (ExplodedNode *N = C.generateErrorNode()) {
generateBugReport(N, Msg, E->getSourceRange(), E, C);
+ return;
}
}
+
+  // If it might be null, assume that it cannot be null after this operation.
+ if (Null) {
+ // One needs to make sure the pointer is non-null to be used here.
+ if (ExplodedNode *N = C.generateSink(Null, C.getPredecessor())) {
+ dispatchEvent({*Location, /*IsLoad=*/false, N, &C.getBugReporter(),
+ /*IsDirectDereference=*/false});
+ }
+ C.addTransition(NonNull);
+ }
}
void NilArgChecker::warnIfNilArg(CheckerContext &C,
@@ -965,9 +974,8 @@ static bool alreadyExecutedAtLeastOneLoopIteration(const ExplodedNode *N,
}
// Keep looking for a block edge.
- for (ExplodedNode::const_pred_iterator I = N->pred_begin(),
- E = N->pred_end(); I != E; ++I) {
- if (alreadyExecutedAtLeastOneLoopIteration(*I, FCS))
+ for (const ExplodedNode *N : N->preds()) {
+ if (alreadyExecutedAtLeastOneLoopIteration(N, FCS))
return true;
}
@@ -1096,11 +1104,7 @@ ObjCLoopChecker::checkPointerEscape(ProgramStateRef State,
SymbolRef ImmutableReceiver = getMethodReceiverIfKnownImmutable(Call);
// Remove the invalidated symbols from the collection count map.
- for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
- E = Escaped.end();
- I != E; ++I) {
- SymbolRef Sym = *I;
-
+ for (SymbolRef Sym : Escaped) {
// Don't invalidate this symbol's count if we know the method being called
// is declared on an immutable class. This isn't completely correct if the
// receiver is also passed as an argument, but in most uses of NSArray,
@@ -1122,9 +1126,7 @@ void ObjCLoopChecker::checkDeadSymbols(SymbolReaper &SymReaper,
// Remove the dead symbols from the collection count map.
ContainerCountMapTy Tracked = State->get<ContainerCountMap>();
- for (ContainerCountMapTy::iterator I = Tracked.begin(),
- E = Tracked.end(); I != E; ++I) {
- SymbolRef Sym = I->first;
+ for (SymbolRef Sym : llvm::make_first_range(Tracked)) {
if (SymReaper.isDead(Sym)) {
State = State->remove<ContainerCountMap>(Sym);
State = State->remove<ContainerNonEmptyMap>(Sym);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 12b948a65261..387edd8c3b18 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "InterCheckerAPI.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -35,24 +36,12 @@ using namespace std::placeholders;
namespace {
struct AnyArgExpr {
- // FIXME: Remove constructor in C++17 to turn it into an aggregate.
- AnyArgExpr(const Expr *Expression, unsigned ArgumentIndex)
- : Expression{Expression}, ArgumentIndex{ArgumentIndex} {}
const Expr *Expression;
unsigned ArgumentIndex;
};
-
-struct SourceArgExpr : AnyArgExpr {
- using AnyArgExpr::AnyArgExpr; // FIXME: Remove using in C++17.
-};
-
-struct DestinationArgExpr : AnyArgExpr {
- using AnyArgExpr::AnyArgExpr; // FIXME: Same.
-};
-
-struct SizeArgExpr : AnyArgExpr {
- using AnyArgExpr::AnyArgExpr; // FIXME: Same.
-};
+struct SourceArgExpr : AnyArgExpr {};
+struct DestinationArgExpr : AnyArgExpr {};
+struct SizeArgExpr : AnyArgExpr {};
using ErrorMessage = SmallString<128>;
enum class AccessKind { write, read };
@@ -95,7 +84,7 @@ class CStringChecker : public Checker< eval::Call,
mutable std::unique_ptr<BugType> BT_Null, BT_Bounds, BT_Overlap,
BT_NotCString, BT_AdditionOverflow, BT_UninitRead;
- mutable const char *CurrentFunctionDescription;
+ mutable const char *CurrentFunctionDescription = nullptr;
public:
/// The filter is used to filter out the diagnostics which are not enabled by
@@ -175,6 +164,8 @@ public:
std::bind(&CStringChecker::evalMemcmp, _1, _2, _3, CK_Regular)},
{{CDF_MaybeBuiltin, {"bzero"}, 2}, &CStringChecker::evalBzero},
{{CDF_MaybeBuiltin, {"explicit_bzero"}, 2}, &CStringChecker::evalBzero},
+ {{CDF_MaybeBuiltin, {"sprintf"}, 2}, &CStringChecker::evalSprintf},
+ {{CDF_MaybeBuiltin, {"snprintf"}, 2}, &CStringChecker::evalSnprintf},
};
// These require a bit of special handling.
@@ -228,6 +219,11 @@ public:
void evalMemset(CheckerContext &C, const CallExpr *CE) const;
void evalBzero(CheckerContext &C, const CallExpr *CE) const;
+ void evalSprintf(CheckerContext &C, const CallExpr *CE) const;
+ void evalSnprintf(CheckerContext &C, const CallExpr *CE) const;
+ void evalSprintfCommon(CheckerContext &C, const CallExpr *CE, bool IsBounded,
+ bool IsBuiltin) const;
+
// Utility methods
std::pair<ProgramStateRef , ProgramStateRef >
static assumeZero(CheckerContext &C,
@@ -252,11 +248,34 @@ public:
const Expr *expr,
SVal val) const;
- static ProgramStateRef InvalidateBuffer(CheckerContext &C,
- ProgramStateRef state,
- const Expr *Ex, SVal V,
- bool IsSourceBuffer,
- const Expr *Size);
+ /// Invalidate the destination buffer determined by characters copied.
+ static ProgramStateRef
+ invalidateDestinationBufferBySize(CheckerContext &C, ProgramStateRef S,
+ const Expr *BufE, SVal BufV, SVal SizeV,
+ QualType SizeTy);
+
+ /// Operation never overflows, do not invalidate the super region.
+ static ProgramStateRef invalidateDestinationBufferNeverOverflows(
+ CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV);
+
+ /// We do not know whether the operation can overflow (e.g. size is unknown),
+ /// invalidate the super region and escape related pointers.
+ static ProgramStateRef invalidateDestinationBufferAlwaysEscapeSuperRegion(
+ CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV);
+
+ /// Invalidate the source buffer for escaping pointers.
+ static ProgramStateRef invalidateSourceBuffer(CheckerContext &C,
+ ProgramStateRef S,
+ const Expr *BufE, SVal BufV);
+
+  /// @param InvalidationTraitOperations Determine how to invalidate the
+ /// MemRegion by setting the invalidation traits. Return true to cause pointer
+ /// escape, or false otherwise.
+ static ProgramStateRef invalidateBufferAux(
+ CheckerContext &C, ProgramStateRef State, const Expr *Ex, SVal V,
+ llvm::function_ref<bool(RegionAndSymbolInvalidationTraits &,
+ const MemRegion *)>
+ InvalidationTraitOperations);
static bool SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
const MemRegion *MR);
@@ -302,10 +321,9 @@ public:
// Return true if the destination buffer of the copy function may be in bound.
// Expects SVal of Size to be positive and unsigned.
// Expects SVal of FirstBuf to be a FieldRegion.
- static bool IsFirstBufInBound(CheckerContext &C,
- ProgramStateRef state,
- const Expr *FirstBuf,
- const Expr *Size);
+ static bool isFirstBufInBound(CheckerContext &C, ProgramStateRef State,
+ SVal BufVal, QualType BufTy, SVal LengthVal,
+ QualType LengthTy);
};
} //end anonymous namespace
@@ -959,43 +977,40 @@ const StringLiteral *CStringChecker::getCStringLiteral(CheckerContext &C,
return strRegion->getStringLiteral();
}
-bool CStringChecker::IsFirstBufInBound(CheckerContext &C,
- ProgramStateRef state,
- const Expr *FirstBuf,
- const Expr *Size) {
+bool CStringChecker::isFirstBufInBound(CheckerContext &C, ProgramStateRef State,
+ SVal BufVal, QualType BufTy,
+ SVal LengthVal, QualType LengthTy) {
// If we do not know that the buffer is long enough we return 'true'.
// Otherwise the parent region of this field region would also get
// invalidated, which would lead to warnings based on an unknown state.
+ if (LengthVal.isUnknown())
+ return false;
+
// Originally copied from CheckBufferAccess and CheckLocation.
- SValBuilder &svalBuilder = C.getSValBuilder();
- ASTContext &Ctx = svalBuilder.getContext();
- const LocationContext *LCtx = C.getLocationContext();
+ SValBuilder &SB = C.getSValBuilder();
+ ASTContext &Ctx = C.getASTContext();
- QualType sizeTy = Size->getType();
QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
- SVal BufVal = state->getSVal(FirstBuf, LCtx);
- SVal LengthVal = state->getSVal(Size, LCtx);
std::optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
if (!Length)
return true; // cf top comment.
// Compute the offset of the last element to be accessed: size-1.
- NonLoc One = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
- SVal Offset = svalBuilder.evalBinOpNN(state, BO_Sub, *Length, One, sizeTy);
+ NonLoc One = SB.makeIntVal(1, LengthTy).castAs<NonLoc>();
+ SVal Offset = SB.evalBinOpNN(State, BO_Sub, *Length, One, LengthTy);
if (Offset.isUnknown())
return true; // cf top comment
NonLoc LastOffset = Offset.castAs<NonLoc>();
// Check that the first buffer is sufficiently long.
- SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
+ SVal BufStart = SB.evalCast(BufVal, PtrTy, BufTy);
std::optional<Loc> BufLoc = BufStart.getAs<Loc>();
if (!BufLoc)
return true; // cf top comment.
- SVal BufEnd =
- svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc, LastOffset, PtrTy);
+ SVal BufEnd = SB.evalBinOpLN(State, BO_Add, *BufLoc, LastOffset, PtrTy);
// Check for out of bound array element access.
const MemRegion *R = BufEnd.getAsRegion();
@@ -1009,28 +1024,90 @@ bool CStringChecker::IsFirstBufInBound(CheckerContext &C,
// FIXME: Does this crash when a non-standard definition
// of a library function is encountered?
assert(ER->getValueType() == C.getASTContext().CharTy &&
- "IsFirstBufInBound should only be called with char* ElementRegions");
+ "isFirstBufInBound should only be called with char* ElementRegions");
// Get the size of the array.
const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
- DefinedOrUnknownSVal SizeDV = getDynamicExtent(state, superReg, svalBuilder);
+ DefinedOrUnknownSVal SizeDV = getDynamicExtent(State, superReg, SB);
// Get the index of the accessed element.
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
- ProgramStateRef StInBound = state->assumeInBound(Idx, SizeDV, true);
+ ProgramStateRef StInBound = State->assumeInBound(Idx, SizeDV, true);
return static_cast<bool>(StInBound);
}
-ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
- ProgramStateRef state,
- const Expr *E, SVal V,
- bool IsSourceBuffer,
- const Expr *Size) {
+ProgramStateRef CStringChecker::invalidateDestinationBufferBySize(
+ CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV,
+ SVal SizeV, QualType SizeTy) {
+ auto InvalidationTraitOperations =
+ [&C, S, BufTy = BufE->getType(), BufV, SizeV,
+ SizeTy](RegionAndSymbolInvalidationTraits &ITraits, const MemRegion *R) {
+ // If destination buffer is a field region and access is in bound, do
+ // not invalidate its super region.
+ if (MemRegion::FieldRegionKind == R->getKind() &&
+ isFirstBufInBound(C, S, BufV, BufTy, SizeV, SizeTy)) {
+ ITraits.setTrait(
+ R,
+ RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
+ }
+ return false;
+ };
+
+ return invalidateBufferAux(C, S, BufE, BufV, InvalidationTraitOperations);
+}
+
+ProgramStateRef
+CStringChecker::invalidateDestinationBufferAlwaysEscapeSuperRegion(
+ CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV) {
+ auto InvalidationTraitOperations = [](RegionAndSymbolInvalidationTraits &,
+ const MemRegion *R) {
+ return isa<FieldRegion>(R);
+ };
+
+ return invalidateBufferAux(C, S, BufE, BufV, InvalidationTraitOperations);
+}
+
+ProgramStateRef CStringChecker::invalidateDestinationBufferNeverOverflows(
+ CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV) {
+ auto InvalidationTraitOperations =
+ [](RegionAndSymbolInvalidationTraits &ITraits, const MemRegion *R) {
+ if (MemRegion::FieldRegionKind == R->getKind())
+ ITraits.setTrait(
+ R,
+ RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
+ return false;
+ };
+
+ return invalidateBufferAux(C, S, BufE, BufV, InvalidationTraitOperations);
+}
+
+ProgramStateRef CStringChecker::invalidateSourceBuffer(CheckerContext &C,
+ ProgramStateRef S,
+ const Expr *BufE,
+ SVal BufV) {
+ auto InvalidationTraitOperations =
+ [](RegionAndSymbolInvalidationTraits &ITraits, const MemRegion *R) {
+ ITraits.setTrait(
+ R->getBaseRegion(),
+ RegionAndSymbolInvalidationTraits::TK_PreserveContents);
+ ITraits.setTrait(R,
+ RegionAndSymbolInvalidationTraits::TK_SuppressEscape);
+ return true;
+ };
+
+ return invalidateBufferAux(C, S, BufE, BufV, InvalidationTraitOperations);
+}
+
+ProgramStateRef CStringChecker::invalidateBufferAux(
+ CheckerContext &C, ProgramStateRef State, const Expr *E, SVal V,
+ llvm::function_ref<bool(RegionAndSymbolInvalidationTraits &,
+ const MemRegion *)>
+ InvalidationTraitOperations) {
std::optional<Loc> L = V.getAs<Loc>();
if (!L)
- return state;
+ return State;
// FIXME: This is a simplified version of what's in CFRefCount.cpp -- it makes
// some assumptions about the value that CFRefCount can't. Even so, it should
@@ -1047,29 +1124,10 @@ ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
// Invalidate this region.
const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
-
- bool CausesPointerEscape = false;
RegionAndSymbolInvalidationTraits ITraits;
- // Invalidate and escape only indirect regions accessible through the source
- // buffer.
- if (IsSourceBuffer) {
- ITraits.setTrait(R->getBaseRegion(),
- RegionAndSymbolInvalidationTraits::TK_PreserveContents);
- ITraits.setTrait(R, RegionAndSymbolInvalidationTraits::TK_SuppressEscape);
- CausesPointerEscape = true;
- } else {
- const MemRegion::Kind& K = R->getKind();
- if (K == MemRegion::FieldRegionKind)
- if (Size && IsFirstBufInBound(C, state, E, Size)) {
- // If destination buffer is a field region and access is in bound,
- // do not invalidate its super region.
- ITraits.setTrait(
- R,
- RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
- }
- }
+ bool CausesPointerEscape = InvalidationTraitOperations(ITraits, R);
- return state->invalidateRegions(R, E, C.blockCount(), LCtx,
+ return State->invalidateRegions(R, E, C.blockCount(), LCtx,
CausesPointerEscape, nullptr, nullptr,
&ITraits);
}
@@ -1077,7 +1135,7 @@ ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
// If we have a non-region value by chance, just remove the binding.
// FIXME: is this necessary or correct? This handles the non-Region
// cases. Is it ever valid to store to these?
- return state->killBinding(*L);
+ return State->killBinding(*L);
}
bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
@@ -1174,8 +1232,8 @@ bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
} else {
// If the destination buffer's extent is not equal to the value of
// third argument, just invalidate buffer.
- State = InvalidateBuffer(C, State, DstBuffer, MemVal,
- /*IsSourceBuffer*/ false, Size);
+ State = invalidateDestinationBufferBySize(C, State, DstBuffer, MemVal,
+ SizeVal, Size->getType());
}
if (StateNullChar && !StateNonNullChar) {
@@ -1200,8 +1258,8 @@ bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
} else {
// If the offset is not zero and char value is not concrete, we can do
// nothing but invalidate the buffer.
- State = InvalidateBuffer(C, State, DstBuffer, MemVal,
- /*IsSourceBuffer*/ false, Size);
+ State = invalidateDestinationBufferBySize(C, State, DstBuffer, MemVal,
+ SizeVal, Size->getType());
}
return true;
}
@@ -1297,15 +1355,14 @@ void CStringChecker::evalCopyCommon(CheckerContext &C, const CallExpr *CE,
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// copied region, but that's still an improvement over blank invalidation.
- state =
- InvalidateBuffer(C, state, Dest.Expression, C.getSVal(Dest.Expression),
- /*IsSourceBuffer*/ false, Size.Expression);
+ state = invalidateDestinationBufferBySize(
+ C, state, Dest.Expression, C.getSVal(Dest.Expression), sizeVal,
+ Size.Expression->getType());
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
- state = InvalidateBuffer(C, state, Source.Expression,
- C.getSVal(Source.Expression),
- /*IsSourceBuffer*/ true, nullptr);
+ state = invalidateSourceBuffer(C, state, Source.Expression,
+ C.getSVal(Source.Expression));
C.addTransition(state);
}
@@ -1315,9 +1372,9 @@ void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE,
CharKind CK) const {
// void *memcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is the address of the destination buffer.
- DestinationArgExpr Dest = {CE->getArg(0), 0};
- SourceArgExpr Src = {CE->getArg(1), 1};
- SizeArgExpr Size = {CE->getArg(2), 2};
+ DestinationArgExpr Dest = {{CE->getArg(0), 0}};
+ SourceArgExpr Src = {{CE->getArg(1), 1}};
+ SizeArgExpr Size = {{CE->getArg(2), 2}};
ProgramStateRef State = C.getState();
@@ -1330,9 +1387,9 @@ void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE,
CharKind CK) const {
// void *mempcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is a pointer to the byte following the last written byte.
- DestinationArgExpr Dest = {CE->getArg(0), 0};
- SourceArgExpr Src = {CE->getArg(1), 1};
- SizeArgExpr Size = {CE->getArg(2), 2};
+ DestinationArgExpr Dest = {{CE->getArg(0), 0}};
+ SourceArgExpr Src = {{CE->getArg(1), 1}};
+ SizeArgExpr Size = {{CE->getArg(2), 2}};
constexpr bool IsRestricted = true;
constexpr bool IsMempcpy = true;
@@ -1344,9 +1401,9 @@ void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE,
CharKind CK) const {
// void *memmove(void *dst, const void *src, size_t n);
// The return value is the address of the destination buffer.
- DestinationArgExpr Dest = {CE->getArg(0), 0};
- SourceArgExpr Src = {CE->getArg(1), 1};
- SizeArgExpr Size = {CE->getArg(2), 2};
+ DestinationArgExpr Dest = {{CE->getArg(0), 0}};
+ SourceArgExpr Src = {{CE->getArg(1), 1}};
+ SizeArgExpr Size = {{CE->getArg(2), 2}};
constexpr bool IsRestricted = false;
constexpr bool IsMempcpy = false;
@@ -1356,9 +1413,9 @@ void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE,
void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
// void bcopy(const void *src, void *dst, size_t n);
- SourceArgExpr Src(CE->getArg(0), 0);
- DestinationArgExpr Dest = {CE->getArg(1), 1};
- SizeArgExpr Size = {CE->getArg(2), 2};
+ SourceArgExpr Src{{CE->getArg(0), 0}};
+ DestinationArgExpr Dest = {{CE->getArg(1), 1}};
+ SizeArgExpr Size = {{CE->getArg(2), 2}};
constexpr bool IsRestricted = false;
constexpr bool IsMempcpy = false;
@@ -1373,7 +1430,7 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE,
AnyArgExpr Left = {CE->getArg(0), 0};
AnyArgExpr Right = {CE->getArg(1), 1};
- SizeArgExpr Size = {CE->getArg(2), 2};
+ SizeArgExpr Size = {{CE->getArg(2), 2}};
ProgramStateRef State = C.getState();
SValBuilder &Builder = C.getSValBuilder();
@@ -1641,14 +1698,14 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
const LocationContext *LCtx = C.getLocationContext();
// Check that the destination is non-null.
- DestinationArgExpr Dst = {CE->getArg(0), 0};
+ DestinationArgExpr Dst = {{CE->getArg(0), 0}};
SVal DstVal = state->getSVal(Dst.Expression, LCtx);
state = checkNonNull(C, state, Dst, DstVal);
if (!state)
return;
// Check that the source is non-null.
- SourceArgExpr srcExpr = {CE->getArg(1), 1};
+ SourceArgExpr srcExpr = {{CE->getArg(1), 1}};
SVal srcVal = state->getSVal(srcExpr.Expression, LCtx);
state = checkNonNull(C, state, srcExpr, srcVal);
if (!state)
@@ -1679,10 +1736,11 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// FIXME: Why do we choose the srcExpr if the access has no size?
// Note that the 3rd argument of the call would be the size parameter.
- SizeArgExpr SrcExprAsSizeDummy = {srcExpr.Expression, srcExpr.ArgumentIndex};
+ SizeArgExpr SrcExprAsSizeDummy = {
+ {srcExpr.Expression, srcExpr.ArgumentIndex}};
state = CheckOverlap(
C, state,
- (IsBounded ? SizeArgExpr{CE->getArg(2), 2} : SrcExprAsSizeDummy), Dst,
+ (IsBounded ? SizeArgExpr{{CE->getArg(2), 2}} : SrcExprAsSizeDummy), Dst,
srcExpr);
if (!state)
@@ -1691,7 +1749,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// If the function is strncpy, strncat, etc... it is bounded.
if (IsBounded) {
// Get the max number of characters to copy.
- SizeArgExpr lenExpr = {CE->getArg(2), 2};
+ SizeArgExpr lenExpr = {{CE->getArg(2), 2}};
SVal lenVal = state->getSVal(lenExpr.Expression, LCtx);
// Protect against misdeclared strncpy().
@@ -1977,13 +2035,13 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// string, but that's still an improvement over blank invalidation.
- state = InvalidateBuffer(C, state, Dst.Expression, *dstRegVal,
- /*IsSourceBuffer*/ false, nullptr);
+ state = invalidateDestinationBufferBySize(C, state, Dst.Expression,
+ *dstRegVal, amountCopied,
+ C.getASTContext().getSizeType());
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
- state = InvalidateBuffer(C, state, srcExpr.Expression, srcVal,
- /*IsSourceBuffer*/ true, nullptr);
+ state = invalidateSourceBuffer(C, state, srcExpr.Expression, srcVal);
// Set the C string length of the destination, if we know it.
if (IsBounded && (appendK == ConcatFnKind::none)) {
@@ -2165,7 +2223,7 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
// char *strsep(char **stringp, const char *delim);
// Verify whether the search string parameter matches the return type.
- SourceArgExpr SearchStrPtr = {CE->getArg(0), 0};
+ SourceArgExpr SearchStrPtr = {{CE->getArg(0), 0}};
QualType CharPtrTy = SearchStrPtr.Expression->getType()->getPointeeType();
if (CharPtrTy.isNull() ||
@@ -2198,8 +2256,9 @@ void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
// Invalidate the search string, representing the change of one delimiter
// character to NUL.
- State = InvalidateBuffer(C, State, SearchStrPtr.Expression, Result,
- /*IsSourceBuffer*/ false, nullptr);
+ // As the replacement never overflows, do not invalidate its super region.
+ State = invalidateDestinationBufferNeverOverflows(
+ C, State, SearchStrPtr.Expression, Result);
// Overwrite the search string pointer. The new value is either an address
// further along in the same string, or NULL if there are no more tokens.
@@ -2248,8 +2307,10 @@ void CStringChecker::evalStdCopyCommon(CheckerContext &C,
// Invalidate the destination buffer
const Expr *Dst = CE->getArg(2);
SVal DstVal = State->getSVal(Dst, LCtx);
- State = InvalidateBuffer(C, State, Dst, DstVal, /*IsSource=*/false,
- /*Size=*/nullptr);
+ // FIXME: As we do not know how many items are copied, we also invalidate the
+ // super region containing the target location.
+ State =
+ invalidateDestinationBufferAlwaysEscapeSuperRegion(C, State, Dst, DstVal);
SValBuilder &SVB = C.getSValBuilder();
@@ -2263,9 +2324,9 @@ void CStringChecker::evalMemset(CheckerContext &C, const CallExpr *CE) const {
// void *memset(void *s, int c, size_t n);
CurrentFunctionDescription = "memory set function";
- DestinationArgExpr Buffer = {CE->getArg(0), 0};
+ DestinationArgExpr Buffer = {{CE->getArg(0), 0}};
AnyArgExpr CharE = {CE->getArg(1), 1};
- SizeArgExpr Size = {CE->getArg(2), 2};
+ SizeArgExpr Size = {{CE->getArg(2), 2}};
ProgramStateRef State = C.getState();
@@ -2312,8 +2373,8 @@ void CStringChecker::evalMemset(CheckerContext &C, const CallExpr *CE) const {
void CStringChecker::evalBzero(CheckerContext &C, const CallExpr *CE) const {
CurrentFunctionDescription = "memory clearance function";
- DestinationArgExpr Buffer = {CE->getArg(0), 0};
- SizeArgExpr Size = {CE->getArg(1), 1};
+ DestinationArgExpr Buffer = {{CE->getArg(0), 0}};
+ SizeArgExpr Size = {{CE->getArg(1), 1}};
SVal Zero = C.getSValBuilder().makeZeroVal(C.getASTContext().IntTy);
ProgramStateRef State = C.getState();
@@ -2352,6 +2413,52 @@ void CStringChecker::evalBzero(CheckerContext &C, const CallExpr *CE) const {
C.addTransition(State);
}
+void CStringChecker::evalSprintf(CheckerContext &C, const CallExpr *CE) const {
+ CurrentFunctionDescription = "'sprintf'";
+ bool IsBI = CE->getBuiltinCallee() == Builtin::BI__builtin___sprintf_chk;
+ evalSprintfCommon(C, CE, /* IsBounded */ false, IsBI);
+}
+
+void CStringChecker::evalSnprintf(CheckerContext &C, const CallExpr *CE) const {
+ CurrentFunctionDescription = "'snprintf'";
+ bool IsBI = CE->getBuiltinCallee() == Builtin::BI__builtin___snprintf_chk;
+ evalSprintfCommon(C, CE, /* IsBounded */ true, IsBI);
+}
+
+void CStringChecker::evalSprintfCommon(CheckerContext &C, const CallExpr *CE,
+ bool IsBounded, bool IsBuiltin) const {
+ ProgramStateRef State = C.getState();
+ DestinationArgExpr Dest = {{CE->getArg(0), 0}};
+
+ const auto NumParams = CE->getCalleeDecl()->getAsFunction()->getNumParams();
+ assert(CE->getNumArgs() >= NumParams);
+
+ const auto AllArguments =
+ llvm::make_range(CE->getArgs(), CE->getArgs() + CE->getNumArgs());
+ const auto VariadicArguments = drop_begin(enumerate(AllArguments), NumParams);
+
+ for (const auto &[ArgIdx, ArgExpr] : VariadicArguments) {
+ // We consider only string buffers
+ if (const QualType type = ArgExpr->getType();
+ !type->isAnyPointerType() ||
+ !type->getPointeeType()->isAnyCharacterType())
+ continue;
+ SourceArgExpr Source = {{ArgExpr, unsigned(ArgIdx)}};
+
+ // Ensure the buffers do not overlap.
+ SizeArgExpr SrcExprAsSizeDummy = {
+ {Source.Expression, Source.ArgumentIndex}};
+ State = CheckOverlap(
+ C, State,
+ (IsBounded ? SizeArgExpr{{CE->getArg(1), 1}} : SrcExprAsSizeDummy),
+ Dest, Source);
+ if (!State)
+ return;
+ }
+
+ C.addTransition(State);
+}
+
//===----------------------------------------------------------------------===//
// The driver method, and other Checker callbacks.
//===----------------------------------------------------------------------===//
@@ -2458,9 +2565,7 @@ CStringChecker::checkRegionChanges(ProgramStateRef state,
llvm::SmallPtrSet<const MemRegion *, 32> SuperRegions;
// First build sets for the changed regions and their super-regions.
- for (ArrayRef<const MemRegion *>::iterator
- I = Regions.begin(), E = Regions.end(); I != E; ++I) {
- const MemRegion *MR = *I;
+ for (const MemRegion *MR : Regions) {
Invalidated.insert(MR);
SuperRegions.insert(MR);
@@ -2473,10 +2578,7 @@ CStringChecker::checkRegionChanges(ProgramStateRef state,
CStringLengthTy::Factory &F = state->get_context<CStringLength>();
// Then loop over the entries in the current state.
- for (CStringLengthTy::iterator I = Entries.begin(),
- E = Entries.end(); I != E; ++I) {
- const MemRegion *MR = I.getKey();
-
+ for (const MemRegion *MR : llvm::make_first_range(Entries)) {
// Is this entry for a super-region of a changed region?
if (SuperRegions.count(MR)) {
Entries = F.remove(Entries, MR);
@@ -2502,13 +2604,9 @@ void CStringChecker::checkLiveSymbols(ProgramStateRef state,
// Mark all symbols in our string length map as valid.
CStringLengthTy Entries = state->get<CStringLength>();
- for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
- I != E; ++I) {
- SVal Len = I.getData();
-
- for (SymExpr::symbol_iterator si = Len.symbol_begin(),
- se = Len.symbol_end(); si != se; ++si)
- SR.markInUse(*si);
+ for (SVal Len : llvm::make_second_range(Entries)) {
+ for (SymbolRef Sym : Len.symbols())
+ SR.markInUse(Sym);
}
}
@@ -2520,12 +2618,10 @@ void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
return;
CStringLengthTy::Factory &F = state->get_context<CStringLength>();
- for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
- I != E; ++I) {
- SVal Len = I.getData();
+ for (auto [Reg, Len] : Entries) {
if (SymbolRef Sym = Len.getAsSymbol()) {
if (SR.isDead(Sym))
- Entries = F.remove(Entries, I.getKey());
+ Entries = F.remove(Entries, Reg);
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 3fcf6f435a43..bd6655cc1e3f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -321,7 +321,9 @@ ObjCDeallocChecker::getInstanceSymbolFromIvarSymbol(SymbolRef IvarSym) const {
if (!IvarRegion)
return nullptr;
- return IvarRegion->getSymbolicBase()->getSymbol();
+ const SymbolicRegion *SR = IvarRegion->getSymbolicBase();
+ assert(SR && "Symbolic base should not be nullptr");
+ return SR->getSymbol();
}
/// If we are in -dealloc or -dealloc is on the stack, handle the call if it is
@@ -752,7 +754,7 @@ bool ObjCDeallocChecker::diagnoseMistakenDealloc(SymbolRef DeallocedValue,
ObjCDeallocChecker::ObjCDeallocChecker()
: NSObjectII(nullptr), SenTestCaseII(nullptr), XCTestCaseII(nullptr),
- CIFilterII(nullptr) {
+ Block_releaseII(nullptr), CIFilterII(nullptr) {
MissingReleaseBugType.reset(
new BugType(this, "Missing ivar release (leak)",
@@ -818,8 +820,8 @@ const ObjCPropertyDecl *ObjCDeallocChecker::findShadowedPropertyDecl(
IdentifierInfo *ID = PropDecl->getIdentifier();
DeclContext::lookup_result R = CatDecl->getClassInterface()->lookup(ID);
- for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E; ++I) {
- auto *ShadowedPropDecl = dyn_cast<ObjCPropertyDecl>(*I);
+ for (const NamedDecl *D : R) {
+ auto *ShadowedPropDecl = dyn_cast<ObjCPropertyDecl>(D);
if (!ShadowedPropDecl)
continue;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
index 67962f75f9bf..65a2ec4076fd 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
@@ -227,7 +227,7 @@ void ContainerModeling::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
// Cleanup
auto State = C.getState();
-
+
auto ContMap = State->get<ContainerMap>();
for (const auto &Cont : ContMap) {
if (!SR.isLiveRegion(Cont.first)) {
@@ -749,14 +749,14 @@ bool isBeginCall(const FunctionDecl *Func) {
const auto *IdInfo = Func->getIdentifier();
if (!IdInfo)
return false;
- return IdInfo->getName().endswith_insensitive("begin");
+ return IdInfo->getName().ends_with_insensitive("begin");
}
bool isEndCall(const FunctionDecl *Func) {
const auto *IdInfo = Func->getIdentifier();
if (!IdInfo)
return false;
- return IdInfo->getName().endswith_insensitive("end");
+ return IdInfo->getName().ends_with_insensitive("end");
}
const CXXRecordDecl *getCXXRecordDecl(ProgramStateRef State,
@@ -1021,7 +1021,7 @@ SymbolRef rebaseSymbol(ProgramStateRef State, SValBuilder &SVB,
SymbolRef NewSym) {
auto &SymMgr = SVB.getSymbolManager();
auto Diff = SVB.evalBinOpNN(State, BO_Sub, nonloc::SymbolVal(OrigExpr),
- nonloc::SymbolVal(OldExpr),
+ nonloc::SymbolVal(OldExpr),
SymMgr.getType(OrigExpr));
const auto DiffInt = Diff.getAs<nonloc::ConcreteInt>();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index 01b662064d7b..5f44c9476928 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -93,9 +93,9 @@ void ReachableCode::computeReachableBlocks() {
if (isReachable)
continue;
isReachable = true;
- for (CFGBlock::const_succ_iterator i = block->succ_begin(),
- e = block->succ_end(); i != e; ++i)
- if (const CFGBlock *succ = *i)
+
+ for (const CFGBlock *succ : block->succs())
+ if (succ)
worklist.push_back(succ);
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index 7841fd82e370..2fe91467c8c5 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -271,9 +271,8 @@ public:
const Table &Config = mgr.options.Config;
SmallVector<const Table::MapEntryTy *, 32> Keys;
- for (Table::const_iterator I = Config.begin(), E = Config.end(); I != E;
- ++I)
- Keys.push_back(&*I);
+ for (const auto &Entry : Config)
+ Keys.push_back(&Entry);
llvm::array_pod_sort(Keys.begin(), Keys.end(), compareEntry);
llvm::errs() << "[config]\n";
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index cc01e97d3fa2..5331d9574743 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -25,9 +25,13 @@ using namespace taint;
namespace {
class DivZeroChecker : public Checker< check::PreStmt<BinaryOperator> > {
- mutable std::unique_ptr<BuiltinBug> BT;
- void reportBug(const char *Msg, ProgramStateRef StateZero, CheckerContext &C,
- std::unique_ptr<BugReporterVisitor> Visitor = nullptr) const;
+ mutable std::unique_ptr<BugType> BT;
+ mutable std::unique_ptr<BugType> TaintBT;
+ void reportBug(StringRef Msg, ProgramStateRef StateZero,
+ CheckerContext &C) const;
+ void reportTaintBug(StringRef Msg, ProgramStateRef StateZero,
+ CheckerContext &C,
+ llvm::ArrayRef<SymbolRef> TaintedSyms) const;
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
@@ -41,20 +45,34 @@ static const Expr *getDenomExpr(const ExplodedNode *N) {
return nullptr;
}
-void DivZeroChecker::reportBug(
- const char *Msg, ProgramStateRef StateZero, CheckerContext &C,
- std::unique_ptr<BugReporterVisitor> Visitor) const {
+void DivZeroChecker::reportBug(StringRef Msg, ProgramStateRef StateZero,
+ CheckerContext &C) const {
if (ExplodedNode *N = C.generateErrorNode(StateZero)) {
if (!BT)
- BT.reset(new BuiltinBug(this, "Division by zero"));
+ BT.reset(new BugType(this, "Division by zero", categories::LogicError));
auto R = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
- R->addVisitor(std::move(Visitor));
bugreporter::trackExpressionValue(N, getDenomExpr(N), *R);
C.emitReport(std::move(R));
}
}
+void DivZeroChecker::reportTaintBug(
+ StringRef Msg, ProgramStateRef StateZero, CheckerContext &C,
+ llvm::ArrayRef<SymbolRef> TaintedSyms) const {
+ if (ExplodedNode *N = C.generateErrorNode(StateZero)) {
+ if (!TaintBT)
+ TaintBT.reset(
+ new BugType(this, "Division by zero", categories::TaintedData));
+
+ auto R = std::make_unique<PathSensitiveBugReport>(*TaintBT, Msg, N);
+ bugreporter::trackExpressionValue(N, getDenomExpr(N), *R);
+ for (auto Sym : TaintedSyms)
+ R->markInteresting(Sym);
+ C.emitReport(std::move(R));
+ }
+}
+
void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
CheckerContext &C) const {
BinaryOperator::Opcode Op = B->getOpcode();
@@ -86,11 +104,13 @@ void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
return;
}
- bool TaintedD = isTainted(C.getState(), *DV);
- if ((stateNotZero && stateZero && TaintedD)) {
- reportBug("Division by a tainted value, possibly zero", stateZero, C,
- std::make_unique<taint::TaintBugVisitor>(*DV));
- return;
+ if ((stateNotZero && stateZero)) {
+ std::vector<SymbolRef> taintedSyms = getTaintedSymbols(C.getState(), *DV);
+ if (!taintedSyms.empty()) {
+ reportTaintBug("Division by a tainted value, possibly zero", stateZero, C,
+ taintedSyms);
+ return;
+ }
}
// If we get here, then the denom should not be zero. We abandon the implicit
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index 6f26842e62c7..1f3e9e00d3e6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -31,6 +31,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/STLExtras.h"
#include <optional>
using namespace clang;
@@ -233,11 +234,9 @@ void DynamicTypePropagation::checkDeadSymbols(SymbolReaper &SR,
MostSpecializedTypeArgsMapTy TyArgMap =
State->get<MostSpecializedTypeArgsMap>();
- for (MostSpecializedTypeArgsMapTy::iterator I = TyArgMap.begin(),
- E = TyArgMap.end();
- I != E; ++I) {
- if (SR.isDead(I->first)) {
- State = State->remove<MostSpecializedTypeArgsMap>(I->first);
+ for (SymbolRef Sym : llvm::make_first_range(TyArgMap)) {
+ if (SR.isDead(Sym)) {
+ State = State->remove<MostSpecializedTypeArgsMap>(Sym);
}
}
@@ -742,8 +741,6 @@ findMethodDecl(const ObjCMessageExpr *MessageExpr,
const ObjCMethodDecl *Method = nullptr;
QualType ReceiverType = MessageExpr->getReceiverType();
- const auto *ReceiverObjectPtrType =
- ReceiverType->getAs<ObjCObjectPointerType>();
// Do this "devirtualization" on instance and class methods only. Trust the
// static type on super and super class calls.
@@ -753,7 +750,8 @@ findMethodDecl(const ObjCMessageExpr *MessageExpr,
// type, look up the method in the tracked type, not in the receiver type.
// This way we preserve more information.
if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType() ||
- ASTCtxt.canAssignObjCInterfaces(ReceiverObjectPtrType, TrackedType)) {
+ ASTCtxt.canAssignObjCInterfaces(
+ ReceiverType->castAs<ObjCObjectPointerType>(), TrackedType)) {
const ObjCInterfaceDecl *InterfaceDecl = TrackedType->getInterfaceDecl();
// The method might not be found.
Selector Sel = MessageExpr->getSelector();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp
index 51f39c606d5c..be2fa91b994a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp
@@ -28,6 +28,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/FormatVariadic.h"
#include <optional>
using namespace clang;
@@ -269,12 +270,6 @@ bool isErrno(const Decl *D) {
return false;
}
-const char *describeErrnoCheckState(ErrnoCheckState CS) {
- assert(CS == errno_modeling::MustNotBeChecked &&
- "Errno description not applicable.");
- return "may be undefined after the call and should not be used";
-}
-
const NoteTag *getErrnoNoteTag(CheckerContext &C, const std::string &Message) {
return C.getNoteTag([Message](PathSensitiveBugReport &BR) -> std::string {
const MemRegion *ErrnoR = BR.getErrorNode()->getState()->get<ErrnoRegion>();
@@ -319,18 +314,14 @@ ProgramStateRef setErrnoStdMustBeChecked(ProgramStateRef State,
const NoteTag *getNoteTagForStdSuccess(CheckerContext &C, llvm::StringRef Fn) {
return getErrnoNoteTag(
- C, (Twine("Assuming that function '") + Twine(Fn) +
- Twine("' is successful, in this case the value 'errno' ") +
- Twine(describeErrnoCheckState(MustNotBeChecked)))
- .str());
+ C, llvm::formatv(
+ "'errno' may be undefined after successful call to '{0}'", Fn));
}
const NoteTag *getNoteTagForStdMustBeChecked(CheckerContext &C,
llvm::StringRef Fn) {
return getErrnoNoteTag(
- C, (Twine("Function '") + Twine(Fn) +
- Twine("' indicates failure only by setting of 'errno'"))
- .str());
+ C, llvm::formatv("'{0}' indicates failure only by setting 'errno'", Fn));
}
} // namespace errno_modeling
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h
index 2ca3979944e3..0707fd16d6e6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h
@@ -78,14 +78,6 @@ ProgramStateRef clearErrnoState(ProgramStateRef State);
/// declaration.
bool isErrno(const Decl *D);
-/// Produce a textual description about how \c errno is allowed to be used
-/// (in a \c ErrnoCheckState).
-/// The returned string is insertable into a longer warning message in the form
-/// "the value 'errno' <...>".
-/// Currently only the \c errno_modeling::MustNotBeChecked state is supported,
-/// others are not used by the clients.
-const char *describeErrnoCheckState(ErrnoCheckState CS);
-
/// Create a NoteTag that displays the message if the 'errno' memory region is
/// marked as interesting, and resets the interestingness.
const NoteTag *getErrnoNoteTag(CheckerContext &C, const std::string &Message);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 355e9c2238a4..15ba29050e90 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -393,8 +393,7 @@ void ExprInspectionChecker::checkDeadSymbols(SymbolReaper &SymReaper,
ProgramStateRef State = C.getState();
const MarkedSymbolsTy &Syms = State->get<MarkedSymbols>();
ExplodedNode *N = C.getPredecessor();
- for (auto I = Syms.begin(), E = Syms.end(); I != E; ++I) {
- SymbolRef Sym = *I;
+ for (SymbolRef Sym : Syms) {
if (!SymReaper.isDead(Sym))
continue;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
index 65ff1be8ec05..38b4caa12aef 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
@@ -686,11 +686,10 @@ void FuchsiaHandleChecker::printState(raw_ostream &Out, ProgramStateRef State,
if (!StateMap.isEmpty()) {
Out << Sep << "FuchsiaHandleChecker :" << NL;
- for (HStateMapTy::iterator I = StateMap.begin(), E = StateMap.end(); I != E;
- ++I) {
- I.getKey()->dumpToStream(Out);
+ for (const auto &[Sym, HandleState] : StateMap) {
+ Sym->dumpToStream(Out);
Out << " : ";
- I.getData().dump(Out);
+ HandleState.dump(Out);
Out << NL;
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index f6e2f59d5697..3dcb45c0b110 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -26,12 +26,14 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/YAMLTraits.h"
#include <limits>
#include <memory>
#include <optional>
#include <utility>
+#include <vector>
#define DEBUG_TYPE "taint-checker"
@@ -114,47 +116,110 @@ bool isStdin(SVal Val, const ASTContext &ACtx) {
return false;
}
-SVal getPointeeOf(const CheckerContext &C, Loc LValue) {
- const QualType ArgTy = LValue.getType(C.getASTContext());
+SVal getPointeeOf(ProgramStateRef State, Loc LValue) {
+ const QualType ArgTy = LValue.getType(State->getStateManager().getContext());
if (!ArgTy->isPointerType() || !ArgTy->getPointeeType()->isVoidType())
- return C.getState()->getSVal(LValue);
+ return State->getSVal(LValue);
// Do not dereference void pointers. Treat them as byte pointers instead.
// FIXME: we might want to consider more than just the first byte.
- return C.getState()->getSVal(LValue, C.getASTContext().CharTy);
+ return State->getSVal(LValue, State->getStateManager().getContext().CharTy);
}
/// Given a pointer/reference argument, return the value it refers to.
-std::optional<SVal> getPointeeOf(const CheckerContext &C, SVal Arg) {
+std::optional<SVal> getPointeeOf(ProgramStateRef State, SVal Arg) {
if (auto LValue = Arg.getAs<Loc>())
- return getPointeeOf(C, *LValue);
+ return getPointeeOf(State, *LValue);
return std::nullopt;
}
/// Given a pointer, return the SVal of its pointee or if it is tainted,
/// otherwise return the pointer's SVal if tainted.
/// Also considers stdin as a taint source.
-std::optional<SVal> getTaintedPointeeOrPointer(const CheckerContext &C,
+std::optional<SVal> getTaintedPointeeOrPointer(ProgramStateRef State,
SVal Arg) {
- const ProgramStateRef State = C.getState();
-
- if (auto Pointee = getPointeeOf(C, Arg))
+ if (auto Pointee = getPointeeOf(State, Arg))
if (isTainted(State, *Pointee)) // FIXME: isTainted(...) ? Pointee : None;
return Pointee;
if (isTainted(State, Arg))
return Arg;
+ return std::nullopt;
+}
- // FIXME: This should be done by the isTainted() API.
- if (isStdin(Arg, C.getASTContext()))
- return Arg;
+bool isTaintedOrPointsToTainted(ProgramStateRef State, SVal ExprSVal) {
+ return getTaintedPointeeOrPointer(State, ExprSVal).has_value();
+}
- return std::nullopt;
+/// Helps in printing taint diagnostics.
+/// Marks the incoming parameters of a function interesting (to be printed)
+/// when the return value, or the outgoing parameters are tainted.
+const NoteTag *taintOriginTrackerTag(CheckerContext &C,
+ std::vector<SymbolRef> TaintedSymbols,
+ std::vector<ArgIdxTy> TaintedArgs,
+ const LocationContext *CallLocation) {
+ return C.getNoteTag([TaintedSymbols = std::move(TaintedSymbols),
+ TaintedArgs = std::move(TaintedArgs), CallLocation](
+ PathSensitiveBugReport &BR) -> std::string {
+ SmallString<256> Msg;
+ // We give diagnostics only for taint related reports
+ if (!BR.isInteresting(CallLocation) ||
+ BR.getBugType().getCategory() != categories::TaintedData) {
+ return "";
+ }
+ if (TaintedSymbols.empty())
+ return "Taint originated here";
+
+ for (auto Sym : TaintedSymbols) {
+ BR.markInteresting(Sym);
+ }
+ LLVM_DEBUG(for (auto Arg
+ : TaintedArgs) {
+ llvm::dbgs() << "Taint Propagated from argument " << Arg + 1 << "\n";
+ });
+ return "";
+ });
}
-bool isTaintedOrPointsToTainted(const Expr *E, const ProgramStateRef &State,
- CheckerContext &C) {
- return getTaintedPointeeOrPointer(C, C.getSVal(E)).has_value();
+/// Helps in printing taint diagnostics.
+/// Marks the function interesting (to be printed)
+/// when the return value, or the outgoing parameters are tainted.
+const NoteTag *taintPropagationExplainerTag(
+ CheckerContext &C, std::vector<SymbolRef> TaintedSymbols,
+ std::vector<ArgIdxTy> TaintedArgs, const LocationContext *CallLocation) {
+ assert(TaintedSymbols.size() == TaintedArgs.size());
+ return C.getNoteTag([TaintedSymbols = std::move(TaintedSymbols),
+ TaintedArgs = std::move(TaintedArgs), CallLocation](
+ PathSensitiveBugReport &BR) -> std::string {
+ SmallString<256> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+ // We give diagnostics only for taint related reports
+ if (TaintedSymbols.empty() ||
+ BR.getBugType().getCategory() != categories::TaintedData) {
+ return "";
+ }
+ int nofTaintedArgs = 0;
+ for (auto [Idx, Sym] : llvm::enumerate(TaintedSymbols)) {
+ if (BR.isInteresting(Sym)) {
+ BR.markInteresting(CallLocation);
+ if (TaintedArgs[Idx] != ReturnValueIndex) {
+ LLVM_DEBUG(llvm::dbgs() << "Taint Propagated to argument "
+ << TaintedArgs[Idx] + 1 << "\n");
+ if (nofTaintedArgs == 0)
+ Out << "Taint propagated to the ";
+ else
+ Out << ", ";
+ Out << TaintedArgs[Idx] + 1
+ << llvm::getOrdinalSuffix(TaintedArgs[Idx] + 1) << " argument";
+ nofTaintedArgs++;
+ } else {
+ LLVM_DEBUG(llvm::dbgs() << "Taint Propagated to return value.\n");
+ Out << "Taint propagated to the return value";
+ }
+ }
+ }
+ return std::string(Out.str());
+ });
}
/// ArgSet is used to describe arguments relevant for taint detection or
@@ -193,7 +258,7 @@ class GenericTaintRule {
ArgSet SinkArgs;
/// Arguments which should be sanitized on function return.
ArgSet FilterArgs;
- /// Arguments which can participate in taint propagationa. If any of the
+ /// Arguments which can participate in taint propagation. If any of the
/// arguments in PropSrcArgs is tainted, all arguments in PropDstArgs should
/// be tainted.
ArgSet PropSrcArgs;
@@ -343,7 +408,7 @@ public:
CheckerContext &C) const;
private:
- const BugType BT{this, "Use of Untrusted Data", "Untrusted Data"};
+ const BugType BT{this, "Use of Untrusted Data", categories::TaintedData};
bool checkUncontrolledFormatString(const CallEvent &Call,
CheckerContext &C) const;
@@ -351,7 +416,7 @@ private:
void taintUnsafeSocketProtocol(const CallEvent &Call,
CheckerContext &C) const;
- /// Default taint rules are initilized with the help of a CheckerContext to
+ /// Default taint rules are initalized with the help of a CheckerContext to
/// access the names of built-in functions like memcpy.
void initTaintRules(CheckerContext &C) const;
@@ -788,22 +853,39 @@ void GenericTaintChecker::checkPostCall(const CallEvent &Call,
llvm::dbgs() << "> actually wants to taint arg index: " << I << '\n';
});
+ const NoteTag *InjectionTag = nullptr;
+ std::vector<SymbolRef> TaintedSymbols;
+ std::vector<ArgIdxTy> TaintedIndexes;
for (ArgIdxTy ArgNum : *TaintArgs) {
// Special handling for the tainted return value.
if (ArgNum == ReturnValueIndex) {
State = addTaint(State, Call.getReturnValue());
+ std::vector<SymbolRef> TaintedSyms =
+ getTaintedSymbols(State, Call.getReturnValue());
+ if (!TaintedSyms.empty()) {
+ TaintedSymbols.push_back(TaintedSyms[0]);
+ TaintedIndexes.push_back(ArgNum);
+ }
continue;
}
-
// The arguments are pointer arguments. The data they are pointing at is
// tainted after the call.
- if (auto V = getPointeeOf(C, Call.getArgSVal(ArgNum)))
+ if (auto V = getPointeeOf(State, Call.getArgSVal(ArgNum))) {
State = addTaint(State, *V);
+ std::vector<SymbolRef> TaintedSyms = getTaintedSymbols(State, *V);
+ if (!TaintedSyms.empty()) {
+ TaintedSymbols.push_back(TaintedSyms[0]);
+ TaintedIndexes.push_back(ArgNum);
+ }
+ }
}
-
+ // Create a NoteTag callback, which prints to the user where the taintedness
+ // was propagated to.
+ InjectionTag = taintPropagationExplainerTag(C, TaintedSymbols, TaintedIndexes,
+ Call.getCalleeStackFrame(0));
// Clear up the taint info from the state.
State = State->remove<TaintArgsOnPostVisit>(CurrentFrame);
- C.addTransition(State);
+ C.addTransition(State, InjectionTag);
}
void GenericTaintChecker::printState(raw_ostream &Out, ProgramStateRef State,
@@ -826,29 +908,51 @@ void GenericTaintRule::process(const GenericTaintChecker &Checker,
/// Check for taint sinks.
ForEachCallArg([this, &Checker, &C, &State](ArgIdxTy I, const Expr *E, SVal) {
- if (SinkArgs.contains(I) && isTaintedOrPointsToTainted(E, State, C))
+ // Add taintedness to stdin parameters
+ if (isStdin(C.getSVal(E), C.getASTContext())) {
+ State = addTaint(State, C.getSVal(E));
+ }
+ if (SinkArgs.contains(I) && isTaintedOrPointsToTainted(State, C.getSVal(E)))
Checker.generateReportIfTainted(E, SinkMsg.value_or(MsgCustomSink), C);
});
/// Check for taint filters.
- ForEachCallArg([this, &C, &State](ArgIdxTy I, const Expr *E, SVal S) {
+ ForEachCallArg([this, &State](ArgIdxTy I, const Expr *E, SVal S) {
if (FilterArgs.contains(I)) {
State = removeTaint(State, S);
- if (auto P = getPointeeOf(C, S))
+ if (auto P = getPointeeOf(State, S))
State = removeTaint(State, *P);
}
});
/// Check for taint propagation sources.
- /// A rule is relevant if PropSrcArgs is empty, or if any of its signified
+ /// A rule will make the destination variables tainted if PropSrcArgs
+ /// is empty (taints the destination
+ /// arguments unconditionally), or if any of its signified
/// args are tainted in context of the current CallEvent.
bool IsMatching = PropSrcArgs.isEmpty();
- ForEachCallArg(
- [this, &C, &IsMatching, &State](ArgIdxTy I, const Expr *E, SVal) {
- IsMatching = IsMatching || (PropSrcArgs.contains(I) &&
- isTaintedOrPointsToTainted(E, State, C));
- });
+ std::vector<SymbolRef> TaintedSymbols;
+ std::vector<ArgIdxTy> TaintedIndexes;
+ ForEachCallArg([this, &C, &IsMatching, &State, &TaintedSymbols,
+ &TaintedIndexes](ArgIdxTy I, const Expr *E, SVal) {
+ std::optional<SVal> TaintedSVal =
+ getTaintedPointeeOrPointer(State, C.getSVal(E));
+ IsMatching =
+ IsMatching || (PropSrcArgs.contains(I) && TaintedSVal.has_value());
+
+ // We track back tainted arguments except for stdin
+ if (TaintedSVal && !isStdin(*TaintedSVal, C.getASTContext())) {
+ std::vector<SymbolRef> TaintedArgSyms =
+ getTaintedSymbols(State, *TaintedSVal);
+ if (!TaintedArgSyms.empty()) {
+ llvm::append_range(TaintedSymbols, TaintedArgSyms);
+ TaintedIndexes.push_back(I);
+ }
+ }
+ });
+ // Early return for propagation rules which dont match.
+ // Matching propagations, Sinks and Filters will pass this point.
if (!IsMatching)
return;
@@ -875,10 +979,13 @@ void GenericTaintRule::process(const GenericTaintChecker &Checker,
Result = F.add(Result, I);
}
+ // Taint property gets lost if the variable is passed as a
+ // non-const pointer or reference to a function which is
+ // not inlined. For matching rules we want to preserve the taintedness.
// TODO: We should traverse all reachable memory regions via the
// escaping parameter. Instead of doing that we simply mark only the
// referred memory region as tainted.
- if (WouldEscape(V, E->getType())) {
+ if (WouldEscape(V, E->getType()) && getTaintedPointeeOrPointer(State, V)) {
LLVM_DEBUG(if (!Result.contains(I)) {
llvm::dbgs() << "PreCall<";
Call.dump(llvm::dbgs());
@@ -890,7 +997,10 @@ void GenericTaintRule::process(const GenericTaintChecker &Checker,
if (!Result.isEmpty())
State = State->set<TaintArgsOnPostVisit>(C.getStackFrame(), Result);
- C.addTransition(State);
+ const NoteTag *InjectionTag = taintOriginTrackerTag(
+ C, std::move(TaintedSymbols), std::move(TaintedIndexes),
+ Call.getCalleeStackFrame(0));
+ C.addTransition(State, InjectionTag);
}
bool GenericTaintRule::UntrustedEnv(CheckerContext &C) {
@@ -902,7 +1012,8 @@ bool GenericTaintRule::UntrustedEnv(CheckerContext &C) {
bool GenericTaintChecker::generateReportIfTainted(const Expr *E, StringRef Msg,
CheckerContext &C) const {
assert(E);
- std::optional<SVal> TaintedSVal{getTaintedPointeeOrPointer(C, C.getSVal(E))};
+ std::optional<SVal> TaintedSVal =
+ getTaintedPointeeOrPointer(C.getState(), C.getSVal(E));
if (!TaintedSVal)
return false;
@@ -911,7 +1022,10 @@ bool GenericTaintChecker::generateReportIfTainted(const Expr *E, StringRef Msg,
if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
auto report = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
report->addRange(E->getSourceRange());
- report->addVisitor(std::make_unique<TaintBugVisitor>(*TaintedSVal));
+ for (auto TaintedSym : getTaintedSymbols(C.getState(), *TaintedSVal)) {
+ report->markInteresting(TaintedSym);
+ }
+
C.emitReport(std::move(report));
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
index 38ed9e702db4..90047a2899a7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
@@ -29,8 +29,8 @@ bool isIterator(const CXXRecordDecl *CRD) {
return false;
const auto Name = CRD->getName();
- if (!(Name.endswith_insensitive("iterator") ||
- Name.endswith_insensitive("iter") || Name.endswith_insensitive("it")))
+ if (!(Name.ends_with_insensitive("iterator") ||
+ Name.ends_with_insensitive("iter") || Name.ends_with_insensitive("it")))
return false;
bool HasCopyCtor = false, HasCopyAssign = true, HasDtor = false,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
index 80431e65519e..2d51a000ece3 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
@@ -72,6 +72,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
+#include "llvm/ADT/STLExtras.h"
#include "Iterator.h"
@@ -303,21 +304,18 @@ void IteratorModeling::checkLiveSymbols(ProgramStateRef State,
SymbolReaper &SR) const {
// Keep symbolic expressions of iterator positions alive
auto RegionMap = State->get<IteratorRegionMap>();
- for (const auto &Reg : RegionMap) {
- const auto Offset = Reg.second.getOffset();
- for (auto i = Offset->symbol_begin(); i != Offset->symbol_end(); ++i)
- if (isa<SymbolData>(*i))
- SR.markLive(*i);
+ for (const IteratorPosition &Pos : llvm::make_second_range(RegionMap)) {
+ for (SymbolRef Sym : Pos.getOffset()->symbols())
+ if (isa<SymbolData>(Sym))
+ SR.markLive(Sym);
}
auto SymbolMap = State->get<IteratorSymbolMap>();
- for (const auto &Sym : SymbolMap) {
- const auto Offset = Sym.second.getOffset();
- for (auto i = Offset->symbol_begin(); i != Offset->symbol_end(); ++i)
- if (isa<SymbolData>(*i))
- SR.markLive(*i);
+ for (const IteratorPosition &Pos : llvm::make_second_range(SymbolMap)) {
+ for (SymbolRef Sym : Pos.getOffset()->symbols())
+ if (isa<SymbolData>(Sym))
+ SR.markLive(Sym);
}
-
}
void IteratorModeling::checkDeadSymbols(SymbolReaper &SR,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
index b3f2d7f4d268..3496af731aa6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -27,14 +27,15 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallString.h"
@@ -80,9 +81,8 @@ class IvarInvalidationCheckerImpl {
bool hasMethod(const ObjCMethodDecl *MD) {
if (IsInvalidated)
return true;
- for (MethodSet::iterator I = InvalidationMethods.begin(),
- E = InvalidationMethods.end(); I != E; ++I) {
- if (*I == MD) {
+ for (const ObjCMethodDecl *Curr : InvalidationMethods) {
+ if (Curr == MD) {
IsInvalidated = true;
return true;
}
@@ -318,9 +318,7 @@ const ObjCIvarDecl *IvarInvalidationCheckerImpl::findPropertyBackingIvar(
// Lookup IVars named "_PropName"or "PropName" among the tracked Ivars.
StringRef PropName = Prop->getIdentifier()->getName();
- for (IvarSet::const_iterator I = TrackedIvars.begin(),
- E = TrackedIvars.end(); I != E; ++I) {
- const ObjCIvarDecl *Iv = I->first;
+ for (const ObjCIvarDecl *Iv : llvm::make_first_range(TrackedIvars)) {
StringRef IvarName = Iv->getName();
if (IvarName == PropName)
@@ -381,9 +379,7 @@ visit(const ObjCImplementationDecl *ImplD) const {
ObjCInterfaceDecl::PropertyMap PropMap;
InterfaceD->collectPropertiesToImplement(PropMap);
- for (ObjCInterfaceDecl::PropertyMap::iterator
- I = PropMap.begin(), E = PropMap.end(); I != E; ++I) {
- const ObjCPropertyDecl *PD = I->second;
+ for (const ObjCPropertyDecl *PD : llvm::make_second_range(PropMap)) {
if (PD->isClassProperty())
continue;
@@ -422,11 +418,7 @@ visit(const ObjCImplementationDecl *ImplD) const {
// Remove ivars invalidated by the partial invalidation methods. They do not
// need to be invalidated in the regular invalidation methods.
bool AtImplementationContainsAtLeastOnePartialInvalidationMethod = false;
- for (MethodSet::iterator
- I = PartialInfo.InvalidationMethods.begin(),
- E = PartialInfo.InvalidationMethods.end(); I != E; ++I) {
- const ObjCMethodDecl *InterfD = *I;
-
+ for (const ObjCMethodDecl *InterfD : PartialInfo.InvalidationMethods) {
// Get the corresponding method in the @implementation.
const ObjCMethodDecl *D = ImplD->getMethod(InterfD->getSelector(),
InterfD->isInstanceMethod());
@@ -475,10 +467,7 @@ visit(const ObjCImplementationDecl *ImplD) const {
// Check that all ivars are invalidated by the invalidation methods.
bool AtImplementationContainsAtLeastOneInvalidationMethod = false;
- for (MethodSet::iterator I = Info.InvalidationMethods.begin(),
- E = Info.InvalidationMethods.end(); I != E; ++I) {
- const ObjCMethodDecl *InterfD = *I;
-
+ for (const ObjCMethodDecl *InterfD : Info.InvalidationMethods) {
// Get the corresponding method in the @implementation.
const ObjCMethodDecl *D = ImplD->getMethod(InterfD->getSelector(),
InterfD->isInstanceMethod());
@@ -501,9 +490,8 @@ visit(const ObjCImplementationDecl *ImplD) const {
continue;
// Warn on the ivars that were not invalidated by the method.
- for (IvarSet::const_iterator
- I = IvarsI.begin(), E = IvarsI.end(); I != E; ++I)
- reportIvarNeedsInvalidation(I->first, IvarToPopertyMap, D);
+ for (const ObjCIvarDecl *Ivar : llvm::make_first_range(IvarsI))
+ reportIvarNeedsInvalidation(Ivar, IvarToPopertyMap, D);
}
}
@@ -512,9 +500,8 @@ visit(const ObjCImplementationDecl *ImplD) const {
if (AtImplementationContainsAtLeastOnePartialInvalidationMethod) {
// Warn on the ivars that were not invalidated by the prrtial
// invalidation methods.
- for (IvarSet::const_iterator
- I = Ivars.begin(), E = Ivars.end(); I != E; ++I)
- reportIvarNeedsInvalidation(I->first, IvarToPopertyMap, nullptr);
+ for (const ObjCIvarDecl *Ivar : llvm::make_first_range(Ivars))
+ reportIvarNeedsInvalidation(Ivar, IvarToPopertyMap, nullptr);
} else {
// Otherwise, no invalidation methods were implemented.
reportNoInvalidationMethod(Filter.checkName_InstanceVariableInvalidation,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
index bca10ec96cea..b77e9bf09a33 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -14,13 +14,13 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Lex/Lexer.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -28,6 +28,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Unicode.h"
#include <optional>
@@ -847,10 +848,9 @@ void NonLocalizedStringChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
if (argumentNumber < 0) { // There was no match in UIMethods
if (const Decl *D = msg.getDecl()) {
if (const ObjCMethodDecl *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
- auto formals = OMD->parameters();
- for (unsigned i = 0, ei = formals.size(); i != ei; ++i) {
- if (isAnnotatedAsTakingLocalized(formals[i])) {
- argumentNumber = i;
+ for (auto [Idx, FormalParam] : llvm::enumerate(OMD->parameters())) {
+ if (isAnnotatedAsTakingLocalized(FormalParam)) {
+ argumentNumber = Idx;
break;
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index c1b85ace3e2d..771c0a5fbb8d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -19,6 +19,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
#include <optional>
@@ -530,9 +531,9 @@ ProgramStateRef MacOSKeychainAPIChecker::evalAssume(ProgramStateRef State,
}
if (ReturnSymbol)
- for (auto I = AMap.begin(), E = AMap.end(); I != E; ++I) {
- if (ReturnSymbol == I->second.Region)
- State = State->remove<AllocatedData>(I->first);
+ for (auto [Sym, AllocState] : AMap) {
+ if (ReturnSymbol == AllocState.Region)
+ State = State->remove<AllocatedData>(Sym);
}
return State;
@@ -547,18 +548,18 @@ void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
bool Changed = false;
AllocationPairVec Errors;
- for (auto I = AMap.begin(), E = AMap.end(); I != E; ++I) {
- if (!SR.isDead(I->first))
+ for (const auto &[Sym, AllocState] : AMap) {
+ if (!SR.isDead(Sym))
continue;
Changed = true;
- State = State->remove<AllocatedData>(I->first);
+ State = State->remove<AllocatedData>(Sym);
// If the allocated symbol is null do not report.
ConstraintManager &CMgr = State->getConstraintManager();
- ConditionTruthVal AllocFailed = CMgr.isNull(State, I.getKey());
+ ConditionTruthVal AllocFailed = CMgr.isNull(State, Sym);
if (AllocFailed.isConstrainedTrue())
continue;
- Errors.push_back(std::make_pair(I->first, &I->second));
+ Errors.push_back(std::make_pair(Sym, &AllocState));
}
if (!Changed) {
// Generate the new, cleaned up state.
@@ -656,8 +657,8 @@ void MacOSKeychainAPIChecker::printState(raw_ostream &Out,
if (!AMap.isEmpty()) {
Out << Sep << "KeychainAPIChecker :" << NL;
- for (auto I = AMap.begin(), E = AMap.end(); I != E; ++I) {
- I.getKey()->dumpToStream(Out);
+ for (SymbolRef Sym : llvm::make_first_range(AMap)) {
+ Sym->dumpToStream(Out);
}
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index f05cd9227b65..d2b564d022b5 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -1688,9 +1688,9 @@ MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallEvent &Call,
if (Att->getModule()->getName() != "malloc")
return nullptr;
- OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
- if (I != E) {
- return MallocMemAux(C, Call, Call.getArgExpr(I->getASTIndex()),
+ if (!Att->args().empty()) {
+ return MallocMemAux(C, Call,
+ Call.getArgExpr(Att->args_begin()->getASTIndex()),
UndefinedVal(), State, AF_Malloc);
}
return MallocMemAux(C, Call, UnknownVal(), UndefinedVal(), State, AF_Malloc);
@@ -2798,12 +2798,12 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
RegionStateTy RS = OldRS;
SmallVector<SymbolRef, 2> Errors;
- for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
- if (SymReaper.isDead(I->first)) {
- if (I->second.isAllocated() || I->second.isAllocatedOfSizeZero())
- Errors.push_back(I->first);
+ for (auto [Sym, State] : RS) {
+ if (SymReaper.isDead(Sym)) {
+ if (State.isAllocated() || State.isAllocatedOfSizeZero())
+ Errors.push_back(Sym);
// Remove the dead symbol from the map.
- RS = F.remove(RS, I->first);
+ RS = F.remove(RS, Sym);
}
}
@@ -2818,19 +2818,17 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
// Cleanup the Realloc Pairs Map.
ReallocPairsTy RP = state->get<ReallocPairs>();
- for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
- if (SymReaper.isDead(I->first) ||
- SymReaper.isDead(I->second.ReallocatedSym)) {
- state = state->remove<ReallocPairs>(I->first);
+ for (auto [Sym, ReallocPair] : RP) {
+ if (SymReaper.isDead(Sym) || SymReaper.isDead(ReallocPair.ReallocatedSym)) {
+ state = state->remove<ReallocPairs>(Sym);
}
}
// Cleanup the FreeReturnValue Map.
FreeReturnValueTy FR = state->get<FreeReturnValue>();
- for (FreeReturnValueTy::iterator I = FR.begin(), E = FR.end(); I != E; ++I) {
- if (SymReaper.isDead(I->first) ||
- SymReaper.isDead(I->second)) {
- state = state->remove<FreeReturnValue>(I->first);
+ for (auto [Sym, RetSym] : FR) {
+ if (SymReaper.isDead(Sym) || SymReaper.isDead(RetSym)) {
+ state = state->remove<FreeReturnValue>(Sym);
}
}
@@ -2840,9 +2838,8 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
static CheckerProgramPointTag Tag("MallocChecker", "DeadSymbolsLeak");
N = C.generateNonFatalErrorNode(C.getState(), &Tag);
if (N) {
- for (SmallVectorImpl<SymbolRef>::iterator
- I = Errors.begin(), E = Errors.end(); I != E; ++I) {
- HandleLeak(*I, N, C);
+ for (SymbolRef Sym : Errors) {
+ HandleLeak(Sym, N, C);
}
}
}
@@ -2965,18 +2962,16 @@ void MallocChecker::checkPostStmt(const BlockExpr *BE,
const BlockDataRegion *R =
cast<BlockDataRegion>(C.getSVal(BE).getAsRegion());
- BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
- E = R->referenced_vars_end();
-
- if (I == E)
+ auto ReferencedVars = R->referenced_vars();
+ if (ReferencedVars.empty())
return;
SmallVector<const MemRegion*, 10> Regions;
const LocationContext *LC = C.getLocationContext();
MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
- for ( ; I != E; ++I) {
- const VarRegion *VR = I.getCapturedRegion();
+ for (const auto &Var : ReferencedVars) {
+ const VarRegion *VR = Var.getCapturedRegion();
if (VR->getSuperRegion() == R) {
VR = MemMgr.getVarRegion(VR->getDecl(), LC);
}
@@ -3072,28 +3067,28 @@ ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state,
SVal Cond,
bool Assumption) const {
RegionStateTy RS = state->get<RegionState>();
- for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
+ for (SymbolRef Sym : llvm::make_first_range(RS)) {
// If the symbol is assumed to be NULL, remove it from consideration.
ConstraintManager &CMgr = state->getConstraintManager();
- ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+ ConditionTruthVal AllocFailed = CMgr.isNull(state, Sym);
if (AllocFailed.isConstrainedTrue())
- state = state->remove<RegionState>(I.getKey());
+ state = state->remove<RegionState>(Sym);
}
// Realloc returns 0 when reallocation fails, which means that we should
// restore the state of the pointer being reallocated.
ReallocPairsTy RP = state->get<ReallocPairs>();
- for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
+ for (auto [Sym, ReallocPair] : RP) {
// If the symbol is assumed to be NULL, remove it from consideration.
ConstraintManager &CMgr = state->getConstraintManager();
- ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+ ConditionTruthVal AllocFailed = CMgr.isNull(state, Sym);
if (!AllocFailed.isConstrainedTrue())
continue;
- SymbolRef ReallocSym = I.getData().ReallocatedSym;
+ SymbolRef ReallocSym = ReallocPair.ReallocatedSym;
if (const RefState *RS = state->get<RegionState>(ReallocSym)) {
if (RS->isReleased()) {
- switch (I.getData().Kind) {
+ switch (ReallocPair.Kind) {
case OAR_ToBeFreedAfterFailure:
state = state->set<RegionState>(ReallocSym,
RefState::getAllocated(RS->getAllocationFamily(), RS->getStmt()));
@@ -3102,11 +3097,11 @@ ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state,
state = state->remove<RegionState>(ReallocSym);
break;
default:
- assert(I.getData().Kind == OAR_FreeOnFailure);
+ assert(ReallocPair.Kind == OAR_FreeOnFailure);
}
}
}
- state = state->remove<ReallocPairs>(I.getKey());
+ state = state->remove<ReallocPairs>(Sym);
}
return state;
@@ -3259,6 +3254,11 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
return true;
}
+ if (FName == "singleShotImpl" &&
+ FD->getQualifiedNameAsString() == "QTimer::singleShotImpl") {
+ return true;
+ }
+
// Handle cases where we know a buffer's /address/ can escape.
// Note that the above checks handle some special cases where we know that
// even though the address escapes, it's still our responsibility to free the
@@ -3307,11 +3307,7 @@ ProgramStateRef MallocChecker::checkPointerEscapeAux(
return State;
}
- for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
- E = Escaped.end();
- I != E; ++I) {
- SymbolRef sym = *I;
-
+ for (SymbolRef sym : Escaped) {
if (EscapingSymbol && EscapingSymbol != sym)
continue;
@@ -3446,7 +3442,8 @@ PathDiagnosticPieceRef MallocBugVisitor::VisitNode(const ExplodedNode *N,
OS << OpCallE->getDirectCallee()->getDeclName();
} else if (const auto *CallE = dyn_cast<CallExpr>(S)) {
auto &CEMgr = BRC.getStateManager().getCallEventManager();
- CallEventRef<> Call = CEMgr.getSimpleCall(CallE, state, CurrentLC);
+ CallEventRef<> Call =
+ CEMgr.getSimpleCall(CallE, state, CurrentLC, {nullptr, 0});
if (const auto *D = dyn_cast_or_null<NamedDecl>(Call->getDecl()))
OS << D->getDeclName();
else
@@ -3558,17 +3555,17 @@ void MallocChecker::printState(raw_ostream &Out, ProgramStateRef State,
if (!RS.isEmpty()) {
Out << Sep << "MallocChecker :" << NL;
- for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
- const RefState *RefS = State->get<RegionState>(I.getKey());
+ for (auto [Sym, Data] : RS) {
+ const RefState *RefS = State->get<RegionState>(Sym);
AllocationFamily Family = RefS->getAllocationFamily();
std::optional<MallocChecker::CheckKind> CheckKind =
getCheckIfTracked(Family);
if (!CheckKind)
CheckKind = getCheckIfTracked(Family, true);
- I.getKey()->dumpToStream(Out);
+ Sym->dumpToStream(Out);
Out << " : ";
- I.getData().dump(Out);
+ Data.dump(Out);
if (CheckKind)
Out << " (" << CheckNames[*CheckKind].getName() << ")";
Out << NL;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
index 5266df2ae6a6..3c8b38973c6b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
@@ -280,17 +280,13 @@ void MallocOverflowSecurityChecker::OutputPossibleOverflows(
c.Visit(mgr.getAnalysisDeclContext(D)->getBody());
// Output warnings for all overflows that are left.
- for (CheckOverflowOps::theVecType::iterator
- i = PossibleMallocOverflows.begin(),
- e = PossibleMallocOverflows.end();
- i != e;
- ++i) {
+ for (const MallocOverflowCheck &Check : PossibleMallocOverflows) {
BR.EmitBasicReport(
D, this, "malloc() size overflow", categories::UnixAPI,
"the computation of the size of the memory allocation may overflow",
- PathDiagnosticLocation::createOperatorLoc(i->mulop,
+ PathDiagnosticLocation::createOperatorLoc(Check.mulop,
BR.getSourceManager()),
- i->mulop->getSourceRange());
+ Check.mulop->getSourceRange());
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index 58ba3dac69ab..9e81a6bd19fc 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -12,14 +12,15 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeLoc.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -182,22 +183,20 @@ public:
AnalysisDeclContext *ADC = mgr.getAnalysisDeclContext(D);
CastedAllocFinder Finder(&BR.getContext());
Finder.Visit(D->getBody());
- for (CastedAllocFinder::CallVec::iterator i = Finder.Calls.begin(),
- e = Finder.Calls.end(); i != e; ++i) {
- QualType CastedType = i->CastedExpr->getType();
+ for (const auto &CallRec : Finder.Calls) {
+ QualType CastedType = CallRec.CastedExpr->getType();
if (!CastedType->isPointerType())
continue;
QualType PointeeType = CastedType->getPointeeType();
if (PointeeType->isVoidType())
continue;
- for (CallExpr::const_arg_iterator ai = i->AllocCall->arg_begin(),
- ae = i->AllocCall->arg_end(); ai != ae; ++ai) {
- if (!(*ai)->getType()->isIntegralOrUnscopedEnumerationType())
+ for (const Expr *Arg : CallRec.AllocCall->arguments()) {
+ if (!Arg->getType()->isIntegralOrUnscopedEnumerationType())
continue;
SizeofFinder SFinder;
- SFinder.Visit(*ai);
+ SFinder.Visit(Arg);
if (SFinder.Sizeofs.size() != 1)
continue;
@@ -212,18 +211,18 @@ public:
continue;
const TypeSourceInfo *TSI = nullptr;
- if (i->CastedExprParent.is<const VarDecl *>()) {
- TSI =
- i->CastedExprParent.get<const VarDecl *>()->getTypeSourceInfo();
+ if (CallRec.CastedExprParent.is<const VarDecl *>()) {
+ TSI = CallRec.CastedExprParent.get<const VarDecl *>()
+ ->getTypeSourceInfo();
} else {
- TSI = i->ExplicitCastType;
+ TSI = CallRec.ExplicitCastType;
}
SmallString<64> buf;
llvm::raw_svector_ostream OS(buf);
OS << "Result of ";
- const FunctionDecl *Callee = i->AllocCall->getDirectCallee();
+ const FunctionDecl *Callee = CallRec.AllocCall->getDirectCallee();
if (Callee && Callee->getIdentifier())
OS << '\'' << Callee->getIdentifier()->getName() << '\'';
else
@@ -232,14 +231,13 @@ public:
<< "', which is incompatible with "
<< "sizeof operand type '" << SizeofType << "'";
SmallVector<SourceRange, 4> Ranges;
- Ranges.push_back(i->AllocCall->getCallee()->getSourceRange());
+ Ranges.push_back(CallRec.AllocCall->getCallee()->getSourceRange());
Ranges.push_back(SFinder.Sizeofs[0]->getSourceRange());
if (TSI)
Ranges.push_back(TSI->getTypeLoc().getSourceRange());
- PathDiagnosticLocation L =
- PathDiagnosticLocation::createBegin(i->AllocCall->getCallee(),
- BR.getSourceManager(), ADC);
+ PathDiagnosticLocation L = PathDiagnosticLocation::createBegin(
+ CallRec.AllocCall->getCallee(), BR.getSourceManager(), ADC);
BR.EmitBasicReport(D, this, "Allocator sizeof operand mismatch",
categories::UnixAPI, OS.str(), L, Ranges);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
index c8ddf3b2c14f..5240352a9bd2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
@@ -184,7 +184,7 @@ private:
bool Found;
};
- AggressivenessKind Aggressiveness;
+ AggressivenessKind Aggressiveness = AK_KnownsAndLocals;
public:
void setAggressiveness(StringRef Str, CheckerManager &Mgr) {
@@ -213,8 +213,9 @@ private:
// Returns the exploded node against which the report was emitted.
// The caller *must* add any further transitions against this node.
- ExplodedNode *reportBug(const MemRegion *Region, const CXXRecordDecl *RD,
- CheckerContext &C, MisuseKind MK) const;
+ // Returns nullptr and does not report if such node already exists.
+ ExplodedNode *tryToReportBug(const MemRegion *Region, const CXXRecordDecl *RD,
+ CheckerContext &C, MisuseKind MK) const;
bool isInMoveSafeContext(const LocationContext *LC) const;
bool isStateResetMethod(const CXXMethodDecl *MethodDec) const;
@@ -377,19 +378,20 @@ void MoveChecker::modelUse(ProgramStateRef State, const MemRegion *Region,
return;
}
- ExplodedNode *N = reportBug(Region, RD, C, MK);
+ ExplodedNode *N = tryToReportBug(Region, RD, C, MK);
// If the program has already crashed on this path, don't bother.
- if (N->isSink())
+ if (!N || N->isSink())
return;
State = State->set<TrackedRegionMap>(Region, RegionState::getReported());
C.addTransition(State, N);
}
-ExplodedNode *MoveChecker::reportBug(const MemRegion *Region,
- const CXXRecordDecl *RD, CheckerContext &C,
- MisuseKind MK) const {
+ExplodedNode *MoveChecker::tryToReportBug(const MemRegion *Region,
+ const CXXRecordDecl *RD,
+ CheckerContext &C,
+ MisuseKind MK) const {
if (ExplodedNode *N = misuseCausesCrash(MK) ? C.generateErrorNode()
: C.generateNonFatalErrorNode()) {
// Uniqueing report to the same object.
@@ -552,8 +554,9 @@ MoveChecker::classifyObject(const MemRegion *MR,
// For the purposes of this checker, we classify move-safe STL types
// as not-"STL" types, because that's how the checker treats them.
MR = unwrapRValueReferenceIndirection(MR);
- bool IsLocal = isa_and_nonnull<VarRegion>(MR) &&
- isa<StackSpaceRegion>(MR->getMemorySpace());
+ bool IsLocal =
+ isa_and_nonnull<VarRegion, CXXLifetimeExtendedObjectRegion>(MR) &&
+ isa<StackSpaceRegion>(MR->getMemorySpace());
if (!RD || !RD->getDeclContext()->isStdNamespace())
return { IsLocal, SK_NonStd };
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
index be17e401fb53..bb01a3b77617 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -80,7 +80,7 @@ void ento::registerNSAutoreleasePoolChecker(CheckerManager &mgr) {
mgr.registerChecker<NSAutoreleasePoolChecker>();
}
-bool ento::shouldRegisterNSAutoreleasePoolChecker(const CheckerManager &mgr) {
+bool ento::shouldRegisterNSAutoreleasePoolChecker(const CheckerManager &mgr) {
const LangOptions &LO = mgr.getLangOpts();
return LO.getGC() != LangOptions::NonGC;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index da8529f4ea81..906f4e85a8e5 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -26,13 +26,15 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/Analysis/AnyCall.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Path.h"
@@ -81,7 +83,8 @@ class NullabilityChecker
: public Checker<check::Bind, check::PreCall, check::PreStmt<ReturnStmt>,
check::PostCall, check::PostStmt<ExplicitCastExpr>,
check::PostObjCMessage, check::DeadSymbols, eval::Assume,
- check::Location, check::Event<ImplicitNullDerefEvent>> {
+ check::Location, check::Event<ImplicitNullDerefEvent>,
+ check::BeginFunction> {
public:
// If true, the checker will not diagnose nullabilility issues for calls
@@ -102,6 +105,7 @@ public:
void checkEvent(ImplicitNullDerefEvent Event) const;
void checkLocation(SVal Location, bool IsLoad, const Stmt *S,
CheckerContext &C) const;
+ void checkBeginFunction(CheckerContext &Ctx) const;
ProgramStateRef evalAssume(ProgramStateRef State, SVal Cond,
bool Assumption) const;
@@ -306,6 +310,10 @@ static NullConstraint getNullConstraint(DefinedOrUnknownSVal Val,
return NullConstraint::Unknown;
}
+static bool isValidPointerType(QualType T) {
+ return T->isAnyPointerType() || T->isBlockPointerType();
+}
+
const SymbolicRegion *
NullabilityChecker::getTrackRegion(SVal Val, bool CheckSuperRegion) const {
if (!NeedTracking)
@@ -491,25 +499,21 @@ void NullabilityChecker::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
NullabilityMapTy Nullabilities = State->get<NullabilityMap>();
- for (NullabilityMapTy::iterator I = Nullabilities.begin(),
- E = Nullabilities.end();
- I != E; ++I) {
- const auto *Region = I->first->getAs<SymbolicRegion>();
+ for (const MemRegion *Reg : llvm::make_first_range(Nullabilities)) {
+ const auto *Region = Reg->getAs<SymbolicRegion>();
assert(Region && "Non-symbolic region is tracked.");
if (SR.isDead(Region->getSymbol())) {
- State = State->remove<NullabilityMap>(I->first);
+ State = State->remove<NullabilityMap>(Reg);
}
}
// When an object goes out of scope, we can free the history associated
// with any property accesses on that object
PropertyAccessesMapTy PropertyAccesses = State->get<PropertyAccessesMap>();
- for (PropertyAccessesMapTy::iterator I = PropertyAccesses.begin(),
- E = PropertyAccesses.end();
- I != E; ++I) {
- const MemRegion *ReceiverRegion = I->first.first;
+ for (ObjectPropPair PropKey : llvm::make_first_range(PropertyAccesses)) {
+ const MemRegion *ReceiverRegion = PropKey.first;
if (!SR.isLiveRegion(ReceiverRegion)) {
- State = State->remove<PropertyAccessesMap>(I->first);
+ State = State->remove<PropertyAccessesMap>(PropKey);
}
}
@@ -559,6 +563,37 @@ void NullabilityChecker::checkEvent(ImplicitNullDerefEvent Event) const {
}
}
+void NullabilityChecker::checkBeginFunction(CheckerContext &C) const {
+ if (!C.inTopFrame())
+ return;
+
+ const LocationContext *LCtx = C.getLocationContext();
+ auto AbstractCall = AnyCall::forDecl(LCtx->getDecl());
+ if (!AbstractCall || AbstractCall->parameters().empty())
+ return;
+
+ ProgramStateRef State = C.getState();
+ for (const ParmVarDecl *Param : AbstractCall->parameters()) {
+ if (!isValidPointerType(Param->getType()))
+ continue;
+
+ Nullability RequiredNullability =
+ getNullabilityAnnotation(Param->getType());
+ if (RequiredNullability != Nullability::Nullable)
+ continue;
+
+ const VarRegion *ParamRegion = State->getRegion(Param, LCtx);
+ const MemRegion *ParamPointeeRegion =
+ State->getSVal(ParamRegion).getAsRegion();
+ if (!ParamPointeeRegion)
+ continue;
+
+ State = State->set<NullabilityMap>(ParamPointeeRegion,
+ NullabilityState(RequiredNullability));
+ }
+ C.addTransition(State);
+}
+
// Whenever we see a load from a typed memory region that's been annotated as
// 'nonnull', we want to trust the user on that and assume that it is is indeed
// non-null.
@@ -621,7 +656,7 @@ void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
if (!RetExpr)
return;
- if (!RetExpr->getType()->isAnyPointerType())
+ if (!isValidPointerType(RetExpr->getType()))
return;
ProgramStateRef State = C.getState();
@@ -754,7 +789,7 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
if (!ArgSVal)
continue;
- if (!Param->getType()->isAnyPointerType() &&
+ if (!isValidPointerType(Param->getType()) &&
!Param->getType()->isReferenceType())
continue;
@@ -763,7 +798,7 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
Nullability RequiredNullability =
getNullabilityAnnotation(Param->getType());
Nullability ArgExprTypeLevelNullability =
- getNullabilityAnnotation(ArgExpr->getType());
+ getNullabilityAnnotation(lookThroughImplicitCasts(ArgExpr)->getType());
unsigned ParamIdx = Param->getFunctionScopeIndex() + 1;
@@ -841,7 +876,7 @@ void NullabilityChecker::checkPostCall(const CallEvent &Call,
if (!FuncType)
return;
QualType ReturnType = FuncType->getReturnType();
- if (!ReturnType->isAnyPointerType())
+ if (!isValidPointerType(ReturnType))
return;
ProgramStateRef State = C.getState();
if (State->get<InvariantViolated>())
@@ -907,18 +942,16 @@ static Nullability getReceiverNullability(const ObjCMethodCall &M,
ProgramStateRef NullabilityChecker::evalAssume(ProgramStateRef State, SVal Cond,
bool Assumption) const {
PropertyAccessesMapTy PropertyAccesses = State->get<PropertyAccessesMap>();
- for (PropertyAccessesMapTy::iterator I = PropertyAccesses.begin(),
- E = PropertyAccesses.end();
- I != E; ++I) {
- if (!I->second.isConstrainedNonnull) {
- ConditionTruthVal IsNonNull = State->isNonNull(I->second.Value);
+ for (auto [PropKey, PropVal] : PropertyAccesses) {
+ if (!PropVal.isConstrainedNonnull) {
+ ConditionTruthVal IsNonNull = State->isNonNull(PropVal.Value);
if (IsNonNull.isConstrainedTrue()) {
- ConstrainedPropertyVal Replacement = I->second;
+ ConstrainedPropertyVal Replacement = PropVal;
Replacement.isConstrainedNonnull = true;
- State = State->set<PropertyAccessesMap>(I->first, Replacement);
+ State = State->set<PropertyAccessesMap>(PropKey, Replacement);
} else if (IsNonNull.isConstrainedFalse()) {
// Space optimization: no point in tracking constrained-null cases
- State = State->remove<PropertyAccessesMap>(I->first);
+ State = State->remove<PropertyAccessesMap>(PropKey);
}
}
}
@@ -935,7 +968,7 @@ void NullabilityChecker::checkPostObjCMessage(const ObjCMethodCall &M,
if (!Decl)
return;
QualType RetType = Decl->getReturnType();
- if (!RetType->isAnyPointerType())
+ if (!isValidPointerType(RetType))
return;
ProgramStateRef State = C.getState();
@@ -1089,9 +1122,9 @@ void NullabilityChecker::checkPostStmt(const ExplicitCastExpr *CE,
CheckerContext &C) const {
QualType OriginType = CE->getSubExpr()->getType();
QualType DestType = CE->getType();
- if (!OriginType->isAnyPointerType())
+ if (!isValidPointerType(OriginType))
return;
- if (!DestType->isAnyPointerType())
+ if (!isValidPointerType(DestType))
return;
ProgramStateRef State = C.getState();
@@ -1215,7 +1248,7 @@ void NullabilityChecker::checkBind(SVal L, SVal V, const Stmt *S,
return;
QualType LocType = TVR->getValueType();
- if (!LocType->isAnyPointerType())
+ if (!isValidPointerType(LocType))
return;
ProgramStateRef State = C.getState();
@@ -1337,9 +1370,9 @@ void NullabilityChecker::printState(raw_ostream &Out, ProgramStateRef State,
if (!State->get<InvariantViolated>())
Out << Sep << NL;
- for (NullabilityMapTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- Out << I->first << " : ";
- I->second.print(Out);
+ for (auto [Region, State] : B) {
+ Out << Region << " : ";
+ State.print(Out);
Out << NL;
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
index 35a600f2d7b8..fbbc32a40e89 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -103,9 +103,7 @@ void ObjCSuperCallChecker::fillSelectors(ASTContext &Ctx,
llvm::SmallPtrSet<Selector, 16> &ClassSelectors =
SelectorsForClass[ClassName];
// Fill the Selectors SmallSet with all selectors we want to check.
- for (ArrayRef<SelectorDescriptor>::iterator I = Sel.begin(), E = Sel.end();
- I != E; ++I) {
- SelectorDescriptor Descriptor = *I;
+ for (SelectorDescriptor Descriptor : Sel) {
assert(Descriptor.ArgumentCount <= 1); // No multi-argument selectors yet.
// Get the selector.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index 9f1a6e416dc6..d88d6a94a30f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -362,18 +362,17 @@ void ObjCSelfInitChecker::printState(raw_ostream &Out, ProgramStateRef State,
}
Out << NL;
- for (SelfFlagTy::iterator I = FlagMap.begin(), E = FlagMap.end();
- I != E; ++I) {
- Out << I->first << " : ";
+ for (auto [Sym, Flag] : FlagMap) {
+ Out << Sym << " : ";
- if (I->second == SelfFlag_None)
+ if (Flag == SelfFlag_None)
Out << "none";
- if (I->second & SelfFlag_Self)
+ if (Flag & SelfFlag_Self)
Out << "self variable";
- if (I->second & SelfFlag_InitRes) {
- if (I->second != SelfFlag_InitRes)
+ if (Flag & SelfFlag_InitRes) {
+ if (Flag != SelfFlag_InitRes)
Out << " | ";
Out << "result of init method";
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
index c9828c36a06a..1c2d84254d46 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -12,16 +12,17 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
-#include "clang/Analysis/PathDiagnostic.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/STLExtras.h"
using namespace clang;
using namespace ento;
@@ -48,9 +49,7 @@ static void Scan(IvarUsageMap& M, const Stmt *S) {
}
if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(S))
- for (PseudoObjectExpr::const_semantics_iterator
- i = POE->semantics_begin(), e = POE->semantics_end(); i != e; ++i) {
- const Expr *sub = *i;
+ for (const Expr *sub : POE->semantics()) {
if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(sub))
sub = OVE->getSourceExpr();
Scan(M, sub);
@@ -134,8 +133,8 @@ static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
// Any potentially unused ivars?
bool hasUnused = false;
- for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
- if (I->second == Unused) {
+ for (IVarState State : llvm::make_second_range(M))
+ if (State == Unused) {
hasUnused = true;
break;
}
@@ -152,16 +151,16 @@ static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
Scan(M, D->getDeclContext(), SM.getFileID(D->getLocation()), SM);
// Find ivars that are unused.
- for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
- if (I->second == Unused) {
+ for (auto [Ivar, State] : M)
+ if (State == Unused) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
- os << "Instance variable '" << *I->first << "' in class '" << *ID
+ os << "Instance variable '" << *Ivar << "' in class '" << *ID
<< "' is never used by the methods in its @implementation "
"(although it may be used by category methods).";
PathDiagnosticLocation L =
- PathDiagnosticLocation::create(I->first, BR.getSourceManager());
+ PathDiagnosticLocation::create(Ivar, BR.getSourceManager());
BR.EmitBasicReport(D, Checker, "Unused instance variable", "Optimization",
os.str(), L);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index 27fd40a441fa..bd6e1ec3a8fc 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -273,7 +273,7 @@ public:
SmallVector<const FieldDecl *, 20> OptimalFieldsOrder;
while (!Fields.empty()) {
unsigned TrailingZeros =
- llvm::countTrailingZeros((unsigned long long)NewOffset.getQuantity());
+ llvm::countr_zero((unsigned long long)NewOffset.getQuantity());
// If NewOffset is zero, then countTrailingZeros will be 64. Shifting
// 64 will overflow our unsigned long long. Shifting 63 will turn
// our long long (and CharUnits internal type) negative. So shift 62.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index d3e2849a0ce6..27364eb72523 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -79,10 +79,9 @@ void PointerArithChecker::checkDeadSymbols(SymbolReaper &SR,
// see http://reviews.llvm.org/D14203 for further information.
/*ProgramStateRef State = C.getState();
RegionStateTy RegionStates = State->get<RegionState>();
- for (RegionStateTy::iterator I = RegionStates.begin(), E = RegionStates.end();
- I != E; ++I) {
- if (!SR.isLiveRegion(I->first))
- State = State->remove<RegionState>(I->first);
+ for (const MemRegion *Reg: llvm::make_first_range(RegionStates)) {
+ if (!SR.isLiveRegion(Reg))
+ State = State->remove<RegionState>(Reg);
}
C.addTransition(State);*/
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index 929bd6bc3eb3..fa8572cf85ed 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -291,6 +291,7 @@ ProgramStateRef PthreadLockChecker::resolvePossiblyDestroyedMutex(
// Existence in DestroyRetVal ensures existence in LockMap.
// Existence in Destroyed also ensures that the lock state for lockR is either
// UntouchedAndPossiblyDestroyed or UnlockedAndPossiblyDestroyed.
+ assert(lstate);
assert(lstate->isUntouchedAndPossiblyDestroyed() ||
lstate->isUnlockedAndPossiblyDestroyed());
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
index 01c71d91d1a1..7e74b418b335 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
@@ -155,10 +155,8 @@ void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
ProgramStateRef state = C.getState();
auto *R = cast<BlockDataRegion>(C.getSVal(BE).getAsRegion());
- BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
- E = R->referenced_vars_end();
-
- if (I == E)
+ auto ReferencedVars = R->referenced_vars();
+ if (ReferencedVars.empty())
return;
// FIXME: For now we invalidate the tracking of all symbols passed to blocks
@@ -168,8 +166,8 @@ void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
const LocationContext *LC = C.getLocationContext();
MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
- for ( ; I != E; ++I) {
- const VarRegion *VR = I.getCapturedRegion();
+ for (auto Var : ReferencedVars) {
+ const VarRegion *VR = Var.getCapturedRegion();
if (VR->getSuperRegion() == R) {
VR = MemMgr.getVarRegion(VR->getDecl(), LC);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
index e11e509f159d..379163e12787 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -234,8 +234,8 @@ static void generateDiagnosticsForCallLike(ProgramStateRef CurrSt,
os << "Operator 'new'";
} else {
assert(isa<ObjCMessageExpr>(S));
- CallEventRef<ObjCMethodCall> Call =
- Mgr.getObjCMethodCall(cast<ObjCMessageExpr>(S), CurrSt, LCtx);
+ CallEventRef<ObjCMethodCall> Call = Mgr.getObjCMethodCall(
+ cast<ObjCMessageExpr>(S), CurrSt, LCtx, {nullptr, 0});
switch (Call->getMessageKind()) {
case OCM_Message:
@@ -250,7 +250,7 @@ static void generateDiagnosticsForCallLike(ProgramStateRef CurrSt,
}
}
- std::optional<CallEventRef<>> CE = Mgr.getCall(S, CurrSt, LCtx);
+ std::optional<CallEventRef<>> CE = Mgr.getCall(S, CurrSt, LCtx, {nullptr, 0});
auto Idx = findArgIdxOfSymbol(CurrSt, LCtx, Sym, CE);
// If index is not found, we assume that the symbol was returned.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
index e9d5d306cc06..788f2875863c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
@@ -61,7 +61,7 @@ class STLAlgorithmModeling : public Checker<eval::Call> {
public:
STLAlgorithmModeling() = default;
- bool AggressiveStdFindModeling;
+ bool AggressiveStdFindModeling = false;
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
}; //
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
index 9251c895614c..32d95e944195 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -163,13 +163,11 @@ void SimpleStreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
ProgramStateRef State = C.getState();
SymbolVector LeakedStreams;
StreamMapTy TrackedStreams = State->get<StreamMap>();
- for (StreamMapTy::iterator I = TrackedStreams.begin(),
- E = TrackedStreams.end(); I != E; ++I) {
- SymbolRef Sym = I->first;
+ for (auto [Sym, StreamStatus] : TrackedStreams) {
bool IsSymDead = SymReaper.isDead(Sym);
// Collect leaked symbols.
- if (isLeaked(Sym, I->second, IsSymDead, State))
+ if (isLeaked(Sym, StreamStatus, IsSymDead, State))
LeakedStreams.push_back(Sym);
// Remove the dead symbol from the streams map.
@@ -241,11 +239,7 @@ SimpleStreamChecker::checkPointerEscape(ProgramStateRef State,
return State;
}
- for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
- E = Escaped.end();
- I != E; ++I) {
- SymbolRef Sym = *I;
-
+ for (SymbolRef Sym : Escaped) {
// The symbol escaped. Optimistically, assume that the corresponding file
// handle will be closed somewhere else.
State = State->remove<StreamMap>(Sym);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
index 5689a63f8dd8..a20d24db158f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
@@ -31,6 +31,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/ErrorHandling.h"
#include <optional>
@@ -588,10 +589,9 @@ void SmartPtrModeling::checkLiveSymbols(ProgramStateRef State,
SymbolReaper &SR) const {
// Marking tracked symbols alive
TrackedRegionMapTy TrackedRegions = State->get<TrackedRegionMap>();
- for (auto I = TrackedRegions.begin(), E = TrackedRegions.end(); I != E; ++I) {
- SVal Val = I->second;
- for (auto si = Val.symbol_begin(), se = Val.symbol_end(); si != se; ++si) {
- SR.markLive(*si);
+ for (SVal Val : llvm::make_second_range(TrackedRegions)) {
+ for (SymbolRef Sym : Val.symbols()) {
+ SR.markLive(Sym);
}
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index c4b7411e9401..abf9914f2ca4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -29,7 +29,7 @@ namespace {
class StackAddrEscapeChecker
: public Checker<check::PreCall, check::PreStmt<ReturnStmt>,
check::EndFunction> {
- mutable IdentifierInfo *dispatch_semaphore_tII;
+ mutable IdentifierInfo *dispatch_semaphore_tII = nullptr;
mutable std::unique_ptr<BuiltinBug> BT_stackleak;
mutable std::unique_ptr<BuiltinBug> BT_returnstack;
mutable std::unique_ptr<BuiltinBug> BT_capturedstackasync;
@@ -96,6 +96,14 @@ SourceRange StackAddrEscapeChecker::genName(raw_ostream &os, const MemRegion *R,
os << "stack memory associated with local variable '" << VR->getString()
<< '\'';
range = VR->getDecl()->getSourceRange();
+ } else if (const auto *LER = dyn_cast<CXXLifetimeExtendedObjectRegion>(R)) {
+ QualType Ty = LER->getValueType().getLocalUnqualifiedType();
+ os << "stack memory associated with temporary object of type '";
+ Ty.print(os, Ctx.getPrintingPolicy());
+ os << "' lifetime extended by local variable";
+ if (const IdentifierInfo *ID = LER->getExtendingDecl()->getIdentifier())
+ os << " '" << ID->getName() << '\'';
+ range = LER->getExpr()->getSourceRange();
} else if (const auto *TOR = dyn_cast<CXXTempObjectRegion>(R)) {
QualType Ty = TOR->getValueType().getLocalUnqualifiedType();
os << "stack memory associated with temporary object of type '";
@@ -130,10 +138,8 @@ SmallVector<const MemRegion *, 4>
StackAddrEscapeChecker::getCapturedStackRegions(const BlockDataRegion &B,
CheckerContext &C) {
SmallVector<const MemRegion *, 4> Regions;
- BlockDataRegion::referenced_vars_iterator I = B.referenced_vars_begin();
- BlockDataRegion::referenced_vars_iterator E = B.referenced_vars_end();
- for (; I != E; ++I) {
- SVal Val = C.getState()->getSVal(I.getCapturedRegion());
+ for (auto Var : B.referenced_vars()) {
+ SVal Val = C.getState()->getSVal(Var.getCapturedRegion());
const MemRegion *Region = Val.getAsRegion();
if (Region && isa<StackSpaceRegion>(Region->getMemorySpace()))
Regions.push_back(Region);
@@ -376,7 +382,7 @@ void StackAddrEscapeChecker::checkEndFunction(const ReturnStmt *RS,
llvm::raw_svector_ostream Out(Buf);
const SourceRange Range = genName(Out, Referred, Ctx.getASTContext());
- if (isa<CXXTempObjectRegion>(Referrer)) {
+ if (isa<CXXTempObjectRegion, CXXLifetimeExtendedObjectRegion>(Referrer)) {
Out << " is still referred to by a temporary object on the stack "
<< CommonSuffix;
auto Report =
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index 49b3db560843..d18e6f63df44 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -49,8 +49,10 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/FormatVariadic.h"
#include <optional>
#include <string>
@@ -65,58 +67,165 @@ class StdLibraryFunctionsChecker
class Summary;
/// Specify how much the analyzer engine should entrust modeling this function
- /// to us. If he doesn't, he performs additional invalidations.
- enum InvalidationKind { NoEvalCall, EvalCallAsPure };
+ /// to us.
+ enum InvalidationKind {
+ /// No \c eval::Call for the function, it can be modeled elsewhere.
+ /// This checker checks only pre and post conditions.
+ NoEvalCall,
+ /// The function is modeled completely in this checker.
+ EvalCallAsPure
+ };
+
+ /// Given a range, should the argument stay inside or outside this range?
+ enum RangeKind { OutOfRange, WithinRange };
+
+ static RangeKind negateKind(RangeKind K) {
+ switch (K) {
+ case OutOfRange:
+ return WithinRange;
+ case WithinRange:
+ return OutOfRange;
+ }
+ llvm_unreachable("Unknown range kind");
+ }
- // The universal integral type to use in value range descriptions.
- // Unsigned to make sure overflows are well-defined.
+ /// The universal integral type to use in value range descriptions.
+ /// Unsigned to make sure overflows are well-defined.
typedef uint64_t RangeInt;
- /// Normally, describes a single range constraint, eg. {{0, 1}, {3, 4}} is
- /// a non-negative integer, which less than 5 and not equal to 2. For
- /// `ComparesToArgument', holds information about how exactly to compare to
- /// the argument.
+ /// Describes a single range constraint. Eg. {{0, 1}, {3, 4}} is
+ /// a non-negative integer, which less than 5 and not equal to 2.
typedef std::vector<std::pair<RangeInt, RangeInt>> IntRangeVector;
/// A reference to an argument or return value by its number.
/// ArgNo in CallExpr and CallEvent is defined as Unsigned, but
/// obviously uint32_t should be enough for all practical purposes.
typedef uint32_t ArgNo;
+ /// Special argument number for specifying the return value.
static const ArgNo Ret;
- /// Returns the string representation of an argument index.
+ /// Get a string representation of an argument index.
/// E.g.: (1) -> '1st arg', (2) - > '2nd arg'
- static SmallString<8> getArgDesc(ArgNo);
+ static void printArgDesc(ArgNo, llvm::raw_ostream &Out);
+ /// Print value X of the argument in form " (which is X)",
+ /// if the value is a fixed known value, otherwise print nothing.
+ /// This is used as simple explanation of values if possible.
+ static void printArgValueInfo(ArgNo ArgN, ProgramStateRef State,
+ const CallEvent &Call, llvm::raw_ostream &Out);
+ /// Append textual description of a numeric range [RMin,RMax] to
+ /// \p Out.
+ static void appendInsideRangeDesc(llvm::APSInt RMin, llvm::APSInt RMax,
+ QualType ArgT, BasicValueFactory &BVF,
+ llvm::raw_ostream &Out);
+ /// Append textual description of a numeric range out of [RMin,RMax] to
+ /// \p Out.
+ static void appendOutOfRangeDesc(llvm::APSInt RMin, llvm::APSInt RMax,
+ QualType ArgT, BasicValueFactory &BVF,
+ llvm::raw_ostream &Out);
class ValueConstraint;
- // Pointer to the ValueConstraint. We need a copyable, polymorphic and
- // default initialize able type (vector needs that). A raw pointer was good,
- // however, we cannot default initialize that. unique_ptr makes the Summary
- // class non-copyable, therefore not an option. Releasing the copyability
- // requirement would render the initialization of the Summary map infeasible.
+ /// Pointer to the ValueConstraint. We need a copyable, polymorphic and
+ /// default initializable type (vector needs that). A raw pointer was good,
+ /// however, we cannot default initialize that. unique_ptr makes the Summary
+ /// class non-copyable, therefore not an option. Releasing the copyability
+ /// requirement would render the initialization of the Summary map infeasible.
+ /// Mind that a pointer to a new value constraint is created when the negate
+ /// function is used.
using ValueConstraintPtr = std::shared_ptr<ValueConstraint>;
/// Polymorphic base class that represents a constraint on a given argument
/// (or return value) of a function. Derived classes implement different kind
/// of constraints, e.g range constraints or correlation between two
/// arguments.
+ /// These are used as argument constraints (preconditions) of functions, in
+ /// which case a bug report may be emitted if the constraint is not satisfied.
+ /// Another use is as conditions for summary cases, to create different
+ /// classes of behavior for a function. In this case no description of the
+ /// constraint is needed because the summary cases have an own (not generated)
+ /// description string.
class ValueConstraint {
public:
ValueConstraint(ArgNo ArgN) : ArgN(ArgN) {}
virtual ~ValueConstraint() {}
+
/// Apply the effects of the constraint on the given program state. If null
/// is returned then the constraint is not feasible.
virtual ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
const Summary &Summary,
CheckerContext &C) const = 0;
+
+ /// Represents that in which context do we require a description of the
+ /// constraint.
+ enum DescriptionKind {
+ /// Describe a constraint that was violated.
+ /// Description should start with something like "should be".
+ Violation,
+ /// Describe a constraint that was assumed to be true.
+ /// This can be used when a precondition is satisfied, or when a summary
+ /// case is applied.
+ /// Description should start with something like "is".
+ Assumption
+ };
+
+ /// Give a description that explains the constraint to the user. Used when
+ /// a bug is reported or when the constraint is applied and displayed as a
+ /// note. The description should not mention the argument (getArgNo).
+ /// See StdLibraryFunctionsChecker::reportBug about how this function is
+ /// used (this function is used not only there).
+ virtual void describe(DescriptionKind DK, const CallEvent &Call,
+ ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const {
+ // There are some descendant classes that are not used as argument
+ // constraints, e.g. ComparisonConstraint. In that case we can safely
+ // ignore the implementation of this function.
+ llvm_unreachable(
+ "Description not implemented for summary case constraints");
+ }
+
+ /// Give a description that explains the actual argument value (where the
+ /// current ValueConstraint applies to) to the user. This function should be
+ /// called only when the current constraint is satisfied by the argument.
+ /// It should produce a more precise description than the constraint itself.
+ /// The actual value of the argument and the program state can be used to
+ /// make the description more precise. In the most simple case, if the
+ /// argument has a fixed known value this value can be printed into \p Out,
+ /// this is done by default.
+ /// The function should return true if a description was printed to \p Out,
+ /// otherwise false.
+ /// See StdLibraryFunctionsChecker::reportBug about how this function is
+ /// used.
+ virtual bool describeArgumentValue(const CallEvent &Call,
+ ProgramStateRef State,
+ const Summary &Summary,
+ llvm::raw_ostream &Out) const {
+ if (auto N = getArgSVal(Call, getArgNo()).getAs<NonLoc>()) {
+ if (const llvm::APSInt *Int = N->getAsInteger()) {
+ Out << *Int;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /// Return those arguments that should be tracked when we report a bug about
+ /// argument constraint violation. By default it is the argument that is
+ /// constrained, however, in some special cases we need to track other
+ /// arguments as well. E.g. a buffer size might be encoded in another
+ /// argument.
+ /// The "return value" argument number can not occur as returned value.
+ virtual std::vector<ArgNo> getArgsToTrack() const { return {ArgN}; }
+
+ /// Get a constraint that represents exactly the opposite of the current.
virtual ValueConstraintPtr negate() const {
llvm_unreachable("Not implemented");
};
- // Check whether the constraint is malformed or not. It is malformed if the
- // specified argument has a mismatch with the given FunctionDecl (e.g. the
- // arg number is out-of-range of the function's argument list).
+ /// Check whether the constraint is malformed or not. It is malformed if the
+ /// specified argument has a mismatch with the given FunctionDecl (e.g. the
+ /// arg number is out-of-range of the function's argument list).
+ /// This condition can indicate if a probably wrong or unexpected function
+ /// was found where the constraint is to be applied.
bool checkValidity(const FunctionDecl *FD) const {
const bool ValidArg = ArgN == Ret || ArgN < FD->getNumParams();
assert(ValidArg && "Arg out of range!");
@@ -125,104 +234,75 @@ class StdLibraryFunctionsChecker
// Subclasses may further refine the validation.
return checkSpecificValidity(FD);
}
- ArgNo getArgNo() const { return ArgN; }
-
- // Return those arguments that should be tracked when we report a bug. By
- // default it is the argument that is constrained, however, in some special
- // cases we need to track other arguments as well. E.g. a buffer size might
- // be encoded in another argument.
- virtual std::vector<ArgNo> getArgsToTrack() const { return {ArgN}; }
-
- virtual StringRef getName() const = 0;
- // Represents that in which context do we require a description of the
- // constraint.
- enum class DescriptionKind {
- // The constraint is violated.
- Violation,
- // We assume that the constraint is satisfied.
- Assumption
- };
-
- // Give a description that explains the constraint to the user. Used when
- // the bug is reported.
- virtual std::string describe(DescriptionKind DK, ProgramStateRef State,
- const Summary &Summary) const {
- // There are some descendant classes that are not used as argument
- // constraints, e.g. ComparisonConstraint. In that case we can safely
- // ignore the implementation of this function.
- llvm_unreachable("Not implemented");
- }
+ /// Return the argument number (may be placeholder for "return value").
+ ArgNo getArgNo() const { return ArgN; }
protected:
- ArgNo ArgN; // Argument to which we apply the constraint.
-
- /// Do polymorphic validation check on the constraint.
+ /// Argument to which to apply the constraint. It can be a real argument of
+ /// the function to check, or a special value to indicate the return value
+ /// of the function.
+ /// Every constraint is assigned to one main argument, even if other
+ /// arguments are involved.
+ ArgNo ArgN;
+
+ /// Do constraint-specific validation check.
virtual bool checkSpecificValidity(const FunctionDecl *FD) const {
return true;
}
};
- /// Given a range, should the argument stay inside or outside this range?
- enum RangeKind { OutOfRange, WithinRange };
-
- /// Encapsulates a range on a single symbol.
+ /// Check if a single argument falls into a specific "range".
+ /// A range is formed as a set of intervals.
+ /// E.g. \code {['A', 'Z'], ['a', 'z'], ['_', '_']} \endcode
+ /// The intervals are closed intervals that contain one or more values.
+ ///
+ /// The default constructed RangeConstraint has an empty range, applying
+ /// such constraint does not involve any assumptions, thus the State remains
+ /// unchanged. This is meaningful, if the range is dependent on a looked up
+ /// type (e.g. [0, Socklen_tMax]). If the type is not found, then the range
+ /// is default initialized to be empty.
class RangeConstraint : public ValueConstraint {
+ /// The constraint can be specified by allowing or disallowing the range.
+ /// WithinRange indicates allowing the range, OutOfRange indicates
+ /// disallowing it (allowing the complementary range).
RangeKind Kind;
- // A range is formed as a set of intervals (sub-ranges).
- // E.g. {['A', 'Z'], ['a', 'z']}
- //
- // The default constructed RangeConstraint has an empty range set, applying
- // such constraint does not involve any assumptions, thus the State remains
- // unchanged. This is meaningful, if the range is dependent on a looked up
- // type (e.g. [0, Socklen_tMax]). If the type is not found, then the range
- // is default initialized to be empty.
+
+ /// A set of intervals.
IntRangeVector Ranges;
- public:
- StringRef getName() const override { return "Range"; }
- RangeConstraint(ArgNo ArgN, RangeKind Kind, const IntRangeVector &Ranges)
- : ValueConstraint(ArgN), Kind(Kind), Ranges(Ranges) {}
+ /// A textual description of this constraint for the specific case where the
+ /// constraint is used. If empty a generated description will be used that
+ /// is built from the range of the constraint.
+ StringRef Description;
- std::string describe(DescriptionKind DK, ProgramStateRef State,
- const Summary &Summary) const override;
+ public:
+ RangeConstraint(ArgNo ArgN, RangeKind Kind, const IntRangeVector &Ranges,
+ StringRef Desc = "")
+ : ValueConstraint(ArgN), Kind(Kind), Ranges(Ranges), Description(Desc) {
+ }
const IntRangeVector &getRanges() const { return Ranges; }
- private:
- ProgramStateRef applyAsOutOfRange(ProgramStateRef State,
- const CallEvent &Call,
- const Summary &Summary) const;
- ProgramStateRef applyAsWithinRange(ProgramStateRef State,
- const CallEvent &Call,
- const Summary &Summary) const;
-
- public:
ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
const Summary &Summary,
- CheckerContext &C) const override {
- switch (Kind) {
- case OutOfRange:
- return applyAsOutOfRange(State, Call, Summary);
- case WithinRange:
- return applyAsWithinRange(State, Call, Summary);
- }
- llvm_unreachable("Unknown range kind!");
- }
+ CheckerContext &C) const override;
+
+ void describe(DescriptionKind DK, const CallEvent &Call,
+ ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
+
+ bool describeArgumentValue(const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
ValueConstraintPtr negate() const override {
RangeConstraint Tmp(*this);
- switch (Kind) {
- case OutOfRange:
- Tmp.Kind = WithinRange;
- break;
- case WithinRange:
- Tmp.Kind = OutOfRange;
- break;
- }
+ Tmp.Kind = negateKind(Kind);
return std::make_shared<RangeConstraint>(Tmp);
}
+ protected:
bool checkSpecificValidity(const FunctionDecl *FD) const override {
const bool ValidArg =
getArgType(FD, ArgN)->isIntegralType(FD->getASTContext());
@@ -230,14 +310,52 @@ class StdLibraryFunctionsChecker
"This constraint should be applied on an integral type");
return ValidArg;
}
+
+ private:
+ /// A callback function that is used when iterating over the range
+ /// intervals. It gets the begin and end (inclusive) of one interval.
+ /// This is used to make any kind of task possible that needs an iteration
+ /// over the intervals.
+ using RangeApplyFunction =
+ std::function<bool(const llvm::APSInt &Min, const llvm::APSInt &Max)>;
+
+ /// Call a function on the intervals of the range.
+ /// The function is called with all intervals in the range.
+ void applyOnWithinRange(BasicValueFactory &BVF, QualType ArgT,
+ const RangeApplyFunction &F) const;
+ /// Call a function on all intervals in the complementary range.
+ /// The function is called with all intervals that fall out of the range.
+ /// E.g. consider an interval list [A, B] and [C, D]
+ /// \code
+ /// -------+--------+------------------+------------+----------->
+ /// A B C D
+ /// \endcode
+ /// We get the ranges [-inf, A - 1], [D + 1, +inf], [B + 1, C - 1].
+ /// The \p ArgT is used to determine the min and max of the type that is
+ /// used as "-inf" and "+inf".
+ void applyOnOutOfRange(BasicValueFactory &BVF, QualType ArgT,
+ const RangeApplyFunction &F) const;
+ /// Call a function on the intervals of the range or the complementary
+ /// range.
+ void applyOnRange(RangeKind Kind, BasicValueFactory &BVF, QualType ArgT,
+ const RangeApplyFunction &F) const {
+ switch (Kind) {
+ case OutOfRange:
+ applyOnOutOfRange(BVF, ArgT, F);
+ break;
+ case WithinRange:
+ applyOnWithinRange(BVF, ArgT, F);
+ break;
+ };
+ }
};
+ /// Check relation of an argument to another.
class ComparisonConstraint : public ValueConstraint {
BinaryOperator::Opcode Opcode;
ArgNo OtherArgN;
public:
- StringRef getName() const override { return "Comparison"; };
ComparisonConstraint(ArgNo ArgN, BinaryOperator::Opcode Opcode,
ArgNo OtherArgN)
: ValueConstraint(ArgN), Opcode(Opcode), OtherArgN(OtherArgN) {}
@@ -248,6 +366,7 @@ class StdLibraryFunctionsChecker
CheckerContext &C) const override;
};
+ /// Check null or non-null-ness of an argument that is of pointer type.
class NotNullConstraint : public ValueConstraint {
using ValueConstraint::ValueConstraint;
// This variable has a role when we negate the constraint.
@@ -256,22 +375,18 @@ class StdLibraryFunctionsChecker
public:
NotNullConstraint(ArgNo ArgN, bool CannotBeNull = true)
: ValueConstraint(ArgN), CannotBeNull(CannotBeNull) {}
- std::string describe(DescriptionKind DK, ProgramStateRef State,
- const Summary &Summary) const override;
- StringRef getName() const override { return "NonNull"; }
+
ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
const Summary &Summary,
- CheckerContext &C) const override {
- SVal V = getArgSVal(Call, getArgNo());
- if (V.isUndef())
- return State;
+ CheckerContext &C) const override;
- DefinedOrUnknownSVal L = V.castAs<DefinedOrUnknownSVal>();
- if (!isa<Loc>(L))
- return State;
+ void describe(DescriptionKind DK, const CallEvent &Call,
+ ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
- return State->assume(L, CannotBeNull);
- }
+ bool describeArgumentValue(const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
ValueConstraintPtr negate() const override {
NotNullConstraint Tmp(*this);
@@ -279,6 +394,54 @@ class StdLibraryFunctionsChecker
return std::make_shared<NotNullConstraint>(Tmp);
}
+ protected:
+ bool checkSpecificValidity(const FunctionDecl *FD) const override {
+ const bool ValidArg = getArgType(FD, ArgN)->isPointerType();
+ assert(ValidArg &&
+ "This constraint should be applied only on a pointer type");
+ return ValidArg;
+ }
+ };
+
+ /// Check null or non-null-ness of an argument that is of pointer type.
+ /// The argument is meant to be a buffer that has a size constraint, and it
+ /// is allowed to have a NULL value if the size is 0. The size can depend on
+ /// 1 or 2 additional arguments, if one of these is 0 the buffer is allowed to
+ /// be NULL. This is useful for functions like `fread` which have this special
+ /// property.
+ class NotNullBufferConstraint : public ValueConstraint {
+ using ValueConstraint::ValueConstraint;
+ ArgNo SizeArg1N;
+ std::optional<ArgNo> SizeArg2N;
+ // This variable has a role when we negate the constraint.
+ bool CannotBeNull = true;
+
+ public:
+ NotNullBufferConstraint(ArgNo ArgN, ArgNo SizeArg1N,
+ std::optional<ArgNo> SizeArg2N,
+ bool CannotBeNull = true)
+ : ValueConstraint(ArgN), SizeArg1N(SizeArg1N), SizeArg2N(SizeArg2N),
+ CannotBeNull(CannotBeNull) {}
+
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override;
+
+ void describe(DescriptionKind DK, const CallEvent &Call,
+ ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
+
+ bool describeArgumentValue(const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
+
+ ValueConstraintPtr negate() const override {
+ NotNullBufferConstraint Tmp(*this);
+ Tmp.CannotBeNull = !this->CannotBeNull;
+ return std::make_shared<NotNullBufferConstraint>(Tmp);
+ }
+
+ protected:
bool checkSpecificValidity(const FunctionDecl *FD) const override {
const bool ValidArg = getArgType(FD, ArgN)->isPointerType();
assert(ValidArg &&
@@ -310,7 +473,6 @@ class StdLibraryFunctionsChecker
BinaryOperator::Opcode Op = BO_LE;
public:
- StringRef getName() const override { return "BufferSize"; }
BufferSizeConstraint(ArgNo Buffer, llvm::APSInt BufMinSize)
: ValueConstraint(Buffer), ConcreteSize(BufMinSize) {}
BufferSizeConstraint(ArgNo Buffer, ArgNo BufSize)
@@ -319,6 +481,18 @@ class StdLibraryFunctionsChecker
: ValueConstraint(Buffer), SizeArgN(BufSize),
SizeMultiplierArgN(BufSizeMultiplier) {}
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override;
+
+ void describe(DescriptionKind DK, const CallEvent &Call,
+ ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
+
+ bool describeArgumentValue(const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
+
std::vector<ArgNo> getArgsToTrack() const override {
std::vector<ArgNo> Result{ArgN};
if (SizeArgN)
@@ -328,57 +502,13 @@ class StdLibraryFunctionsChecker
return Result;
}
- std::string describe(DescriptionKind DK, ProgramStateRef State,
- const Summary &Summary) const override;
-
- ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
- const Summary &Summary,
- CheckerContext &C) const override {
- SValBuilder &SvalBuilder = C.getSValBuilder();
- // The buffer argument.
- SVal BufV = getArgSVal(Call, getArgNo());
-
- // Get the size constraint.
- const SVal SizeV = [this, &State, &Call, &Summary, &SvalBuilder]() {
- if (ConcreteSize) {
- return SVal(SvalBuilder.makeIntVal(*ConcreteSize));
- }
- assert(SizeArgN && "The constraint must be either a concrete value or "
- "encoded in an argument.");
- // The size argument.
- SVal SizeV = getArgSVal(Call, *SizeArgN);
- // Multiply with another argument if given.
- if (SizeMultiplierArgN) {
- SVal SizeMulV = getArgSVal(Call, *SizeMultiplierArgN);
- SizeV = SvalBuilder.evalBinOp(State, BO_Mul, SizeV, SizeMulV,
- Summary.getArgType(*SizeArgN));
- }
- return SizeV;
- }();
-
- // The dynamic size of the buffer argument, got from the analyzer engine.
- SVal BufDynSize = getDynamicExtentWithOffset(State, BufV);
-
- SVal Feasible = SvalBuilder.evalBinOp(State, Op, SizeV, BufDynSize,
- SvalBuilder.getContext().BoolTy);
- if (auto F = Feasible.getAs<DefinedOrUnknownSVal>())
- return State->assume(*F, true);
-
- // We can get here only if the size argument or the dynamic size is
- // undefined. But the dynamic size should never be undefined, only
- // unknown. So, here, the size of the argument is undefined, i.e. we
- // cannot apply the constraint. Actually, other checkers like
- // CallAndMessage should catch this situation earlier, because we call a
- // function with an uninitialized argument.
- llvm_unreachable("Size argument or the dynamic size is Undefined");
- }
-
ValueConstraintPtr negate() const override {
BufferSizeConstraint Tmp(*this);
Tmp.Op = BinaryOperator::negateComparisonOp(Op);
return std::make_shared<BufferSizeConstraint>(Tmp);
}
+ protected:
bool checkSpecificValidity(const FunctionDecl *FD) const override {
const bool ValidArg = getArgType(FD, ArgN)->isPointerType();
assert(ValidArg &&
@@ -698,19 +828,19 @@ class StdLibraryFunctionsChecker
static SVal getArgSVal(const CallEvent &Call, ArgNo ArgN) {
return ArgN == Ret ? Call.getReturnValue() : Call.getArgSVal(ArgN);
}
+ static std::string getFunctionName(const CallEvent &Call) {
+ assert(Call.getDecl() &&
+ "Call was found by a summary, should have declaration");
+ return cast<NamedDecl>(Call.getDecl())->getNameAsString();
+ }
public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
- enum CheckKind {
- CK_StdCLibraryFunctionArgsChecker,
- CK_StdCLibraryFunctionsTesterChecker,
- CK_NumCheckKinds
- };
- bool ChecksEnabled[CK_NumCheckKinds] = {false};
- CheckerNameRef CheckNames[CK_NumCheckKinds];
+ CheckerNameRef CheckName;
+ bool AddTestFunctions = false;
bool DisplayLoadedSummaries = false;
bool ModelPOSIX = false;
@@ -725,32 +855,37 @@ private:
void initFunctionSummaries(CheckerContext &C) const;
void reportBug(const CallEvent &Call, ExplodedNode *N,
- const ValueConstraint *VC, const Summary &Summary,
- CheckerContext &C) const {
- if (!ChecksEnabled[CK_StdCLibraryFunctionArgsChecker])
- return;
- std::string Msg =
- (Twine("Function argument constraint is not satisfied, constraint: ") +
- VC->getName().data())
- .str();
+ const ValueConstraint *VC, const ValueConstraint *NegatedVC,
+ const Summary &Summary, CheckerContext &C) const {
+ assert(Call.getDecl() &&
+ "Function found in summary must have a declaration available");
+ SmallString<256> Msg;
+ llvm::raw_svector_ostream MsgOs(Msg);
+
+ MsgOs << "The ";
+ printArgDesc(VC->getArgNo(), MsgOs);
+ MsgOs << " to '" << getFunctionName(Call) << "' ";
+ bool ValuesPrinted =
+ NegatedVC->describeArgumentValue(Call, N->getState(), Summary, MsgOs);
+ if (ValuesPrinted)
+ MsgOs << " but ";
+ else
+ MsgOs << "is out of the accepted range; It ";
+ VC->describe(ValueConstraint::Violation, Call, C.getState(), Summary,
+ MsgOs);
+ Msg[0] = toupper(Msg[0]);
if (!BT_InvalidArg)
BT_InvalidArg = std::make_unique<BugType>(
- CheckNames[CK_StdCLibraryFunctionArgsChecker],
- "Unsatisfied argument constraints", categories::LogicError);
+ CheckName, "Function call with invalid argument",
+ categories::LogicError);
auto R = std::make_unique<PathSensitiveBugReport>(*BT_InvalidArg, Msg, N);
- for (ArgNo ArgN : VC->getArgsToTrack())
+ for (ArgNo ArgN : VC->getArgsToTrack()) {
bugreporter::trackExpressionValue(N, Call.getArgExpr(ArgN), *R);
-
- // Highlight the range of the argument that was violated.
- R->addRange(Call.getArgSourceRange(VC->getArgNo()));
-
- // Describe the argument constraint violation in a note.
- std::string Descr = VC->describe(
- ValueConstraint::DescriptionKind::Violation, C.getState(), Summary);
- // Capitalize the first letter b/c we want a full sentence.
- Descr[0] = toupper(Descr[0]);
- R->addNote(Descr, R->getLocation(), Call.getArgSourceRange(VC->getArgNo()));
+ R->markInteresting(Call.getArgSVal(ArgN));
+ // All tracked arguments are important, highlight them.
+ R->addRange(Call.getArgSourceRange(ArgN));
+ }
C.emitReport(std::move(R));
}
@@ -772,177 +907,229 @@ int StdLibraryFunctionsChecker::ErrnoConstraintBase::Tag = 0;
const StdLibraryFunctionsChecker::ArgNo StdLibraryFunctionsChecker::Ret =
std::numeric_limits<ArgNo>::max();
-} // end of anonymous namespace
-
static BasicValueFactory &getBVF(ProgramStateRef State) {
ProgramStateManager &Mgr = State->getStateManager();
SValBuilder &SVB = Mgr.getSValBuilder();
return SVB.getBasicValueFactory();
}
-std::string StdLibraryFunctionsChecker::NotNullConstraint::describe(
- DescriptionKind DK, ProgramStateRef State, const Summary &Summary) const {
- SmallString<48> Result;
- const auto Violation = ValueConstraint::DescriptionKind::Violation;
- Result += "the ";
- Result += getArgDesc(ArgN);
- Result += DK == Violation ? " should not be NULL" : " is not NULL";
- return Result.c_str();
-}
+} // end of anonymous namespace
-std::string StdLibraryFunctionsChecker::RangeConstraint::describe(
- DescriptionKind DK, ProgramStateRef State, const Summary &Summary) const {
+void StdLibraryFunctionsChecker::printArgDesc(
+ StdLibraryFunctionsChecker::ArgNo ArgN, llvm::raw_ostream &Out) {
+ Out << std::to_string(ArgN + 1);
+ Out << llvm::getOrdinalSuffix(ArgN + 1);
+ Out << " argument";
+}
- BasicValueFactory &BVF = getBVF(State);
+void StdLibraryFunctionsChecker::printArgValueInfo(ArgNo ArgN,
+ ProgramStateRef State,
+ const CallEvent &Call,
+ llvm::raw_ostream &Out) {
+ if (const llvm::APSInt *Val =
+ State->getStateManager().getSValBuilder().getKnownValue(
+ State, getArgSVal(Call, ArgN)))
+ Out << " (which is " << *Val << ")";
+}
- QualType T = Summary.getArgType(getArgNo());
- SmallString<48> Result;
- const auto Violation = ValueConstraint::DescriptionKind::Violation;
- Result += "the ";
- Result += getArgDesc(ArgN);
- Result += DK == Violation ? " should be " : " is ";
-
- // Range kind as a string.
- Kind == OutOfRange ? Result += "out of" : Result += "within";
-
- // Get the range values as a string.
- Result += " the range ";
- if (Ranges.size() > 1)
- Result += "[";
- unsigned I = Ranges.size();
- for (const std::pair<RangeInt, RangeInt> &R : Ranges) {
- Result += "[";
- const llvm::APSInt &Min = BVF.getValue(R.first, T);
- const llvm::APSInt &Max = BVF.getValue(R.second, T);
- Min.toString(Result);
- Result += ", ";
- Max.toString(Result);
- Result += "]";
- if (--I > 0)
- Result += ", ";
+void StdLibraryFunctionsChecker::appendInsideRangeDesc(llvm::APSInt RMin,
+ llvm::APSInt RMax,
+ QualType ArgT,
+ BasicValueFactory &BVF,
+ llvm::raw_ostream &Out) {
+ if (RMin.isZero() && RMax.isZero())
+ Out << "zero";
+ else if (RMin == RMax)
+ Out << RMin;
+ else if (RMin == BVF.getMinValue(ArgT)) {
+ if (RMax == -1)
+ Out << "< 0";
+ else
+ Out << "<= " << RMax;
+ } else if (RMax == BVF.getMaxValue(ArgT)) {
+ if (RMin.isOne())
+ Out << "> 0";
+ else
+ Out << ">= " << RMin;
+ } else if (RMin.isNegative() == RMax.isNegative() &&
+ RMin.getLimitedValue() == RMax.getLimitedValue() - 1) {
+ Out << RMin << " or " << RMax;
+ } else {
+ Out << "between " << RMin << " and " << RMax;
}
- if (Ranges.size() > 1)
- Result += "]";
-
- return Result.c_str();
}
-SmallString<8>
-StdLibraryFunctionsChecker::getArgDesc(StdLibraryFunctionsChecker::ArgNo ArgN) {
- SmallString<8> Result;
- Result += std::to_string(ArgN + 1);
- Result += llvm::getOrdinalSuffix(ArgN + 1);
- Result += " argument";
- return Result;
+void StdLibraryFunctionsChecker::appendOutOfRangeDesc(llvm::APSInt RMin,
+ llvm::APSInt RMax,
+ QualType ArgT,
+ BasicValueFactory &BVF,
+ llvm::raw_ostream &Out) {
+ if (RMin.isZero() && RMax.isZero())
+ Out << "nonzero";
+ else if (RMin == RMax) {
+ Out << "not equal to " << RMin;
+ } else if (RMin == BVF.getMinValue(ArgT)) {
+ if (RMax == -1)
+ Out << ">= 0";
+ else
+ Out << "> " << RMax;
+ } else if (RMax == BVF.getMaxValue(ArgT)) {
+ if (RMin.isOne())
+ Out << "<= 0";
+ else
+ Out << "< " << RMin;
+ } else if (RMin.isNegative() == RMax.isNegative() &&
+ RMin.getLimitedValue() == RMax.getLimitedValue() - 1) {
+ Out << "not " << RMin << " and not " << RMax;
+ } else {
+ Out << "not between " << RMin << " and " << RMax;
+ }
}
-std::string StdLibraryFunctionsChecker::BufferSizeConstraint::describe(
- DescriptionKind DK, ProgramStateRef State, const Summary &Summary) const {
- SmallString<96> Result;
- const auto Violation = ValueConstraint::DescriptionKind::Violation;
- Result += "the size of the ";
- Result += getArgDesc(ArgN);
- Result += DK == Violation ? " should be " : " is ";
- Result += "equal to or greater than the value of ";
- if (ConcreteSize) {
- ConcreteSize->toString(Result);
- } else if (SizeArgN) {
- Result += "the ";
- Result += getArgDesc(*SizeArgN);
- if (SizeMultiplierArgN) {
- Result += " times the ";
- Result += getArgDesc(*SizeMultiplierArgN);
- }
+void StdLibraryFunctionsChecker::RangeConstraint::applyOnWithinRange(
+ BasicValueFactory &BVF, QualType ArgT, const RangeApplyFunction &F) const {
+ if (Ranges.empty())
+ return;
+
+ for (auto [Start, End] : getRanges()) {
+ const llvm::APSInt &Min = BVF.getValue(Start, ArgT);
+ const llvm::APSInt &Max = BVF.getValue(End, ArgT);
+ assert(Min <= Max);
+ if (!F(Min, Max))
+ return;
}
- return Result.c_str();
}
-ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::applyAsOutOfRange(
- ProgramStateRef State, const CallEvent &Call,
- const Summary &Summary) const {
+void StdLibraryFunctionsChecker::RangeConstraint::applyOnOutOfRange(
+ BasicValueFactory &BVF, QualType ArgT, const RangeApplyFunction &F) const {
if (Ranges.empty())
- return State;
+ return;
- ProgramStateManager &Mgr = State->getStateManager();
- SValBuilder &SVB = Mgr.getSValBuilder();
- BasicValueFactory &BVF = SVB.getBasicValueFactory();
- ConstraintManager &CM = Mgr.getConstraintManager();
- QualType T = Summary.getArgType(getArgNo());
+ const IntRangeVector &R = getRanges();
+ size_t E = R.size();
+
+ const llvm::APSInt &MinusInf = BVF.getMinValue(ArgT);
+ const llvm::APSInt &PlusInf = BVF.getMaxValue(ArgT);
+
+ const llvm::APSInt &RangeLeft = BVF.getValue(R[0].first - 1ULL, ArgT);
+ const llvm::APSInt &RangeRight = BVF.getValue(R[E - 1].second + 1ULL, ArgT);
+
+ // Iterate over the "holes" between intervals.
+ for (size_t I = 1; I != E; ++I) {
+ const llvm::APSInt &Min = BVF.getValue(R[I - 1].second + 1ULL, ArgT);
+ const llvm::APSInt &Max = BVF.getValue(R[I].first - 1ULL, ArgT);
+ if (Min <= Max) {
+ if (!F(Min, Max))
+ return;
+ }
+ }
+ // Check the interval [T_MIN, min(R) - 1].
+ if (RangeLeft != PlusInf) {
+ assert(MinusInf <= RangeLeft);
+ if (!F(MinusInf, RangeLeft))
+ return;
+ }
+ // Check the interval [max(R) + 1, T_MAX],
+ if (RangeRight != MinusInf) {
+ assert(RangeRight <= PlusInf);
+ if (!F(RangeRight, PlusInf))
+ return;
+ }
+}
+
+ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::apply(
+ ProgramStateRef State, const CallEvent &Call, const Summary &Summary,
+ CheckerContext &C) const {
+ ConstraintManager &CM = C.getConstraintManager();
SVal V = getArgSVal(Call, getArgNo());
+ QualType T = Summary.getArgType(getArgNo());
if (auto N = V.getAs<NonLoc>()) {
- const IntRangeVector &R = getRanges();
- size_t E = R.size();
- for (size_t I = 0; I != E; ++I) {
- const llvm::APSInt &Min = BVF.getValue(R[I].first, T);
- const llvm::APSInt &Max = BVF.getValue(R[I].second, T);
- assert(Min <= Max);
+ auto ExcludeRangeFromArg = [&](const llvm::APSInt &Min,
+ const llvm::APSInt &Max) {
State = CM.assumeInclusiveRange(State, *N, Min, Max, false);
- if (!State)
- break;
- }
+ return static_cast<bool>(State);
+ };
+ // "OutOfRange R" is handled by excluding all ranges in R.
+ // "WithinRange R" is treated as "OutOfRange [T_MIN, T_MAX] \ R".
+ applyOnRange(negateKind(Kind), C.getSValBuilder().getBasicValueFactory(), T,
+ ExcludeRangeFromArg);
}
return State;
}
-ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::applyAsWithinRange(
- ProgramStateRef State, const CallEvent &Call,
- const Summary &Summary) const {
- if (Ranges.empty())
- return State;
+void StdLibraryFunctionsChecker::RangeConstraint::describe(
+ DescriptionKind DK, const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary, llvm::raw_ostream &Out) const {
+
+ BasicValueFactory &BVF = getBVF(State);
+ QualType T = Summary.getArgType(getArgNo());
+
+ Out << ((DK == Violation) ? "should be " : "is ");
+ if (!Description.empty()) {
+ Out << Description;
+ } else {
+ unsigned I = Ranges.size();
+ if (Kind == WithinRange) {
+ for (const std::pair<RangeInt, RangeInt> &R : Ranges) {
+ appendInsideRangeDesc(BVF.getValue(R.first, T),
+ BVF.getValue(R.second, T), T, BVF, Out);
+ if (--I > 0)
+ Out << " or ";
+ }
+ } else {
+ for (const std::pair<RangeInt, RangeInt> &R : Ranges) {
+ appendOutOfRangeDesc(BVF.getValue(R.first, T),
+ BVF.getValue(R.second, T), T, BVF, Out);
+ if (--I > 0)
+ Out << " and ";
+ }
+ }
+ }
+}
+
+bool StdLibraryFunctionsChecker::RangeConstraint::describeArgumentValue(
+ const CallEvent &Call, ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const {
+ unsigned int NRanges = 0;
+ bool HaveAllRanges = true;
ProgramStateManager &Mgr = State->getStateManager();
- SValBuilder &SVB = Mgr.getSValBuilder();
- BasicValueFactory &BVF = SVB.getBasicValueFactory();
+ BasicValueFactory &BVF = Mgr.getSValBuilder().getBasicValueFactory();
ConstraintManager &CM = Mgr.getConstraintManager();
- QualType T = Summary.getArgType(getArgNo());
SVal V = getArgSVal(Call, getArgNo());
- // "WithinRange R" is treated as "outside [T_MIN, T_MAX] \ R".
- // We cut off [T_MIN, min(R) - 1] and [max(R) + 1, T_MAX] if necessary,
- // and then cut away all holes in R one by one.
- //
- // E.g. consider a range list R as [A, B] and [C, D]
- // -------+--------+------------------+------------+----------->
- // A B C D
- // Then we assume that the value is not in [-inf, A - 1],
- // then not in [D + 1, +inf], then not in [B + 1, C - 1]
if (auto N = V.getAs<NonLoc>()) {
- const IntRangeVector &R = getRanges();
- size_t E = R.size();
-
- const llvm::APSInt &MinusInf = BVF.getMinValue(T);
- const llvm::APSInt &PlusInf = BVF.getMaxValue(T);
-
- const llvm::APSInt &Left = BVF.getValue(R[0].first - 1ULL, T);
- if (Left != PlusInf) {
- assert(MinusInf <= Left);
- State = CM.assumeInclusiveRange(State, *N, MinusInf, Left, false);
- if (!State)
- return nullptr;
- }
-
- const llvm::APSInt &Right = BVF.getValue(R[E - 1].second + 1ULL, T);
- if (Right != MinusInf) {
- assert(Right <= PlusInf);
- State = CM.assumeInclusiveRange(State, *N, Right, PlusInf, false);
- if (!State)
- return nullptr;
+ if (const llvm::APSInt *Int = N->getAsInteger()) {
+ Out << "is ";
+ Out << *Int;
+ return true;
}
-
- for (size_t I = 1; I != E; ++I) {
- const llvm::APSInt &Min = BVF.getValue(R[I - 1].second + 1ULL, T);
- const llvm::APSInt &Max = BVF.getValue(R[I].first - 1ULL, T);
- if (Min <= Max) {
- State = CM.assumeInclusiveRange(State, *N, Min, Max, false);
- if (!State)
- return nullptr;
+ QualType T = Summary.getArgType(getArgNo());
+ SmallString<128> MoreInfo;
+ llvm::raw_svector_ostream MoreInfoOs(MoreInfo);
+ auto ApplyF = [&](const llvm::APSInt &Min, const llvm::APSInt &Max) {
+ if (CM.assumeInclusiveRange(State, *N, Min, Max, true)) {
+ if (NRanges > 0)
+ MoreInfoOs << " or ";
+ appendInsideRangeDesc(Min, Max, T, BVF, MoreInfoOs);
+ ++NRanges;
+ } else {
+ HaveAllRanges = false;
}
+ return true;
+ };
+
+ applyOnRange(Kind, BVF, T, ApplyF);
+ assert(NRanges > 0);
+ if (!HaveAllRanges || NRanges == 1) {
+ Out << "is ";
+ Out << MoreInfo;
+ return true;
}
}
-
- return State;
+ return false;
}
ProgramStateRef StdLibraryFunctionsChecker::ComparisonConstraint::apply(
@@ -967,6 +1154,162 @@ ProgramStateRef StdLibraryFunctionsChecker::ComparisonConstraint::apply(
return State;
}
+ProgramStateRef StdLibraryFunctionsChecker::NotNullConstraint::apply(
+ ProgramStateRef State, const CallEvent &Call, const Summary &Summary,
+ CheckerContext &C) const {
+ SVal V = getArgSVal(Call, getArgNo());
+ if (V.isUndef())
+ return State;
+
+ DefinedOrUnknownSVal L = V.castAs<DefinedOrUnknownSVal>();
+ if (!isa<Loc>(L))
+ return State;
+
+ return State->assume(L, CannotBeNull);
+}
+
+void StdLibraryFunctionsChecker::NotNullConstraint::describe(
+ DescriptionKind DK, const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary, llvm::raw_ostream &Out) const {
+ assert(CannotBeNull &&
+ "Describe should not be used when the value must be NULL");
+ if (DK == Violation)
+ Out << "should not be NULL";
+ else
+ Out << "is not NULL";
+}
+
+bool StdLibraryFunctionsChecker::NotNullConstraint::describeArgumentValue(
+ const CallEvent &Call, ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const {
+ assert(!CannotBeNull && "This function is used when the value is NULL");
+ Out << "is NULL";
+ return true;
+}
+
+ProgramStateRef StdLibraryFunctionsChecker::NotNullBufferConstraint::apply(
+ ProgramStateRef State, const CallEvent &Call, const Summary &Summary,
+ CheckerContext &C) const {
+ SVal V = getArgSVal(Call, getArgNo());
+ if (V.isUndef())
+ return State;
+ DefinedOrUnknownSVal L = V.castAs<DefinedOrUnknownSVal>();
+ if (!isa<Loc>(L))
+ return State;
+
+ std::optional<DefinedOrUnknownSVal> SizeArg1 =
+ getArgSVal(Call, SizeArg1N).getAs<DefinedOrUnknownSVal>();
+ std::optional<DefinedOrUnknownSVal> SizeArg2;
+ if (SizeArg2N)
+ SizeArg2 = getArgSVal(Call, *SizeArg2N).getAs<DefinedOrUnknownSVal>();
+
+ auto IsArgZero = [State](std::optional<DefinedOrUnknownSVal> Val) {
+ if (!Val)
+ return false;
+ auto [IsNonNull, IsNull] = State->assume(*Val);
+ return IsNull && !IsNonNull;
+ };
+
+ if (IsArgZero(SizeArg1) || IsArgZero(SizeArg2))
+ return State;
+
+ return State->assume(L, CannotBeNull);
+}
+
+void StdLibraryFunctionsChecker::NotNullBufferConstraint::describe(
+ DescriptionKind DK, const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary, llvm::raw_ostream &Out) const {
+ assert(CannotBeNull &&
+ "Describe should not be used when the value must be NULL");
+ if (DK == Violation)
+ Out << "should not be NULL";
+ else
+ Out << "is not NULL";
+}
+
+bool StdLibraryFunctionsChecker::NotNullBufferConstraint::describeArgumentValue(
+ const CallEvent &Call, ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const {
+ assert(!CannotBeNull && "This function is used when the value is NULL");
+ Out << "is NULL";
+ return true;
+}
+
+ProgramStateRef StdLibraryFunctionsChecker::BufferSizeConstraint::apply(
+ ProgramStateRef State, const CallEvent &Call, const Summary &Summary,
+ CheckerContext &C) const {
+ SValBuilder &SvalBuilder = C.getSValBuilder();
+ // The buffer argument.
+ SVal BufV = getArgSVal(Call, getArgNo());
+
+ // Get the size constraint.
+ const SVal SizeV = [this, &State, &Call, &Summary, &SvalBuilder]() {
+ if (ConcreteSize) {
+ return SVal(SvalBuilder.makeIntVal(*ConcreteSize));
+ }
+ assert(SizeArgN && "The constraint must be either a concrete value or "
+ "encoded in an argument.");
+ // The size argument.
+ SVal SizeV = getArgSVal(Call, *SizeArgN);
+ // Multiply with another argument if given.
+ if (SizeMultiplierArgN) {
+ SVal SizeMulV = getArgSVal(Call, *SizeMultiplierArgN);
+ SizeV = SvalBuilder.evalBinOp(State, BO_Mul, SizeV, SizeMulV,
+ Summary.getArgType(*SizeArgN));
+ }
+ return SizeV;
+ }();
+
+ // The dynamic size of the buffer argument, got from the analyzer engine.
+ SVal BufDynSize = getDynamicExtentWithOffset(State, BufV);
+
+ SVal Feasible = SvalBuilder.evalBinOp(State, Op, SizeV, BufDynSize,
+ SvalBuilder.getContext().BoolTy);
+ if (auto F = Feasible.getAs<DefinedOrUnknownSVal>())
+ return State->assume(*F, true);
+
+ // We can get here only if the size argument or the dynamic size is
+ // undefined. But the dynamic size should never be undefined, only
+ // unknown. So, here, the size of the argument is undefined, i.e. we
+ // cannot apply the constraint. Actually, other checkers like
+ // CallAndMessage should catch this situation earlier, because we call a
+ // function with an uninitialized argument.
+ llvm_unreachable("Size argument or the dynamic size is Undefined");
+}
+
+void StdLibraryFunctionsChecker::BufferSizeConstraint::describe(
+ DescriptionKind DK, const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary, llvm::raw_ostream &Out) const {
+ Out << ((DK == Violation) ? "should be " : "is ");
+ Out << "a buffer with size equal to or greater than ";
+ if (ConcreteSize) {
+ Out << *ConcreteSize;
+ } else if (SizeArgN) {
+ Out << "the value of the ";
+ printArgDesc(*SizeArgN, Out);
+ printArgValueInfo(*SizeArgN, State, Call, Out);
+ if (SizeMultiplierArgN) {
+ Out << " times the ";
+ printArgDesc(*SizeMultiplierArgN, Out);
+ printArgValueInfo(*SizeMultiplierArgN, State, Call, Out);
+ }
+ }
+}
+
+bool StdLibraryFunctionsChecker::BufferSizeConstraint::describeArgumentValue(
+ const CallEvent &Call, ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const {
+ SVal BufV = getArgSVal(Call, getArgNo());
+ SVal BufDynSize = getDynamicExtentWithOffset(State, BufV);
+ if (const llvm::APSInt *Val =
+ State->getStateManager().getSValBuilder().getKnownValue(State,
+ BufDynSize)) {
+ Out << "is a buffer with size " << *Val;
+ return true;
+ }
+ return false;
+}
+
void StdLibraryFunctionsChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
std::optional<Summary> FoundSummary = findFunctionSummary(Call, C);
@@ -979,13 +1322,15 @@ void StdLibraryFunctionsChecker::checkPreCall(const CallEvent &Call,
ProgramStateRef NewState = State;
ExplodedNode *NewNode = C.getPredecessor();
for (const ValueConstraintPtr &Constraint : Summary.getArgConstraints()) {
+ ValueConstraintPtr NegatedConstraint = Constraint->negate();
ProgramStateRef SuccessSt = Constraint->apply(NewState, Call, Summary, C);
ProgramStateRef FailureSt =
- Constraint->negate()->apply(NewState, Call, Summary, C);
+ NegatedConstraint->apply(NewState, Call, Summary, C);
// The argument constraint is not satisfied.
if (FailureSt && !SuccessSt) {
- if (ExplodedNode *N = C.generateErrorNode(NewState, NewNode))
- reportBug(Call, N, Constraint.get(), Summary, C);
+ if (ExplodedNode *N = C.generateErrorNode(State, NewNode))
+ reportBug(Call, N, Constraint.get(), NegatedConstraint.get(), Summary,
+ C);
break;
}
// We will apply the constraint even if we cannot reason about the
@@ -995,10 +1340,15 @@ void StdLibraryFunctionsChecker::checkPreCall(const CallEvent &Call,
assert(SuccessSt);
NewState = SuccessSt;
if (NewState != State) {
- SmallString<64> Msg;
- Msg += "Assuming ";
- Msg += Constraint->describe(ValueConstraint::DescriptionKind::Assumption,
- NewState, Summary);
+ SmallString<128> Msg;
+ llvm::raw_svector_ostream Os(Msg);
+ Os << "Assuming that the ";
+ printArgDesc(Constraint->getArgNo(), Os);
+ Os << " to '";
+ Os << getFunctionName(Call);
+ Os << "' ";
+ Constraint->describe(ValueConstraint::Assumption, Call, NewState, Summary,
+ Os);
const auto ArgSVal = Call.getArgSVal(Constraint->getArgNo());
NewNode = C.addTransition(
NewState, NewNode,
@@ -1020,7 +1370,7 @@ void StdLibraryFunctionsChecker::checkPostCall(const CallEvent &Call,
// Now apply the constraints.
const Summary &Summary = *FoundSummary;
ProgramStateRef State = C.getState();
- const ExplodedNode *Node = C.getPredecessor();
+ ExplodedNode *Node = C.getPredecessor();
// Apply case/branch specifications.
for (const SummaryCase &Case : Summary.getCases()) {
@@ -1034,35 +1384,64 @@ void StdLibraryFunctionsChecker::checkPostCall(const CallEvent &Call,
if (NewState)
NewState = Case.getErrnoConstraint().apply(NewState, Call, Summary, C);
- if (NewState && NewState != State) {
- if (Case.getNote().empty()) {
- const NoteTag *NT = nullptr;
- if (const auto *D = dyn_cast_or_null<FunctionDecl>(Call.getDecl()))
- NT = Case.getErrnoConstraint().describe(C, D->getNameAsString());
- C.addTransition(NewState, NT);
- } else {
- StringRef Note = Case.getNote();
+ if (!NewState)
+ continue;
+
+ // It is possible that NewState == State is true.
+ // It can occur if another checker has applied the state before us.
+ // Still add these note tags, the other checker should add only its
+ // specialized note tags. These general note tags are handled always by
+ // StdLibraryFunctionsChecker.
+ ExplodedNode *Pred = Node;
+ if (!Case.getNote().empty()) {
+ const SVal RV = Call.getReturnValue();
+ // If there is a description for this execution branch (summary case),
+ // use it as a note tag.
+ std::string Note =
+ llvm::formatv(Case.getNote().str().c_str(),
+ cast<NamedDecl>(Call.getDecl())->getDeclName());
+ if (Summary.getInvalidationKd() == EvalCallAsPure) {
const NoteTag *Tag = C.getNoteTag(
- // Sorry couldn't help myself.
- [Node, Note]() -> std::string {
- // Don't emit "Assuming..." note when we ended up
- // knowing in advance which branch is taken.
- return (Node->succ_size() > 1) ? Note.str() : "";
- },
- /*IsPrunable=*/true);
- C.addTransition(NewState, Tag);
+ [Node, Note, RV](PathSensitiveBugReport &BR) -> std::string {
+ // Try to omit the note if we know in advance which branch is
+ // taken (this means, only one branch exists).
+ // This check is performed inside the lambda, after other
+ // (or this) checkers had a chance to add other successors.
+ // Dereferencing the saved node object is valid because it's part
+ // of a bug report call sequence.
+ // FIXME: This check is not exact. We may be here after a state
+ // split that was performed by another checker (and can not find
+ // the successors). This is why this check is only used in the
+ // EvalCallAsPure case.
+ if (BR.isInteresting(RV) && Node->succ_size() > 1)
+ return Note;
+ return "";
+ });
+ Pred = C.addTransition(NewState, Pred, Tag);
+ } else {
+ const NoteTag *Tag =
+ C.getNoteTag([Note, RV](PathSensitiveBugReport &BR) -> std::string {
+ if (BR.isInteresting(RV))
+ return Note;
+ return "";
+ });
+ Pred = C.addTransition(NewState, Pred, Tag);
}
- } else if (NewState == State) {
- // It is possible that the function was evaluated in a checker callback
- // where the state constraints are already applied, then no change happens
- // here to the state (if the ErrnoConstraint did not change it either).
- // If the evaluated function requires a NoteTag for errno change, it is
- // added here.
- if (const auto *D = dyn_cast_or_null<FunctionDecl>(Call.getDecl()))
- if (const NoteTag *NT =
- Case.getErrnoConstraint().describe(C, D->getNameAsString()))
- C.addTransition(NewState, NT);
+ if (!Pred)
+ continue;
}
+
+ // If we can get a note tag for the errno change, add this additionally to
+ // the previous. This note is only about value of 'errno' and is displayed
+ // if 'errno' is interesting.
+ if (const auto *D = dyn_cast<FunctionDecl>(Call.getDecl()))
+ if (const NoteTag *NT =
+ Case.getErrnoConstraint().describe(C, D->getNameAsString()))
+ Pred = C.addTransition(NewState, Pred, NT);
+
+ // Add the transition if no note tag could be added.
+ if (Pred == Node && NewState != State)
+ C.addTransition(NewState);
}
}
@@ -1122,12 +1501,11 @@ bool StdLibraryFunctionsChecker::Signature::matches(
}
// Check the argument types.
- for (size_t I = 0, E = ArgTys.size(); I != E; ++I) {
- QualType ArgTy = ArgTys[I];
+ for (auto [Idx, ArgTy] : llvm::enumerate(ArgTys)) {
if (isIrrelevant(ArgTy))
continue;
QualType FDArgTy =
- RemoveRestrict(FD->getParamDecl(I)->getType().getCanonicalType());
+ RemoveRestrict(FD->getParamDecl(Idx)->getType().getCanonicalType());
if (ArgTy != FDArgTy)
return false;
}
@@ -1162,10 +1540,12 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
CheckerContext &C) const {
if (SummariesInitialized)
return;
+ SummariesInitialized = true;
SValBuilder &SVB = C.getSValBuilder();
BasicValueFactory &BVF = SVB.getBasicValueFactory();
const ASTContext &ACtx = BVF.getContext();
+ Preprocessor &PP = C.getPreprocessor();
// Helper class to lookup a type by its name.
class LookupType {
@@ -1301,14 +1681,11 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
const RangeInt UCharRangeMax =
std::min(BVF.getMaxValue(ACtx.UnsignedCharTy).getLimitedValue(), IntMax);
- // The platform dependent value of EOF.
- // Try our best to parse this from the Preprocessor, otherwise fallback to -1.
- const auto EOFv = [&C]() -> RangeInt {
- if (const std::optional<int> OptInt =
- tryExpandAsInteger("EOF", C.getPreprocessor()))
- return *OptInt;
- return -1;
- }();
+ // Get platform dependent values of some macros.
+ // Try our best to parse this from the Preprocessor, otherwise fallback to a
+ // default value (what is found in a library header).
+ const auto EOFv = tryExpandAsInteger("EOF", PP).value_or(-1);
+ const auto AT_FDCWDv = tryExpandAsInteger("AT_FDCWD", PP).value_or(-100);
// Auxiliary class to aid adding summaries to the summary map.
struct AddToFunctionSummaryMap {
@@ -1358,9 +1735,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
} addToFunctionSummaryMap(ACtx, FunctionSummaryMap, DisplayLoadedSummaries);
// Below are helpers functions to create the summaries.
- auto ArgumentCondition = [](ArgNo ArgN, RangeKind Kind,
- IntRangeVector Ranges) {
- return std::make_shared<RangeConstraint>(ArgN, Kind, Ranges);
+ auto ArgumentCondition = [](ArgNo ArgN, RangeKind Kind, IntRangeVector Ranges,
+ StringRef Desc = "") {
+ return std::make_shared<RangeConstraint>(ArgN, Kind, Ranges, Desc);
};
auto BufferSize = [](auto... Args) {
return std::make_shared<BufferSizeConstraint>(Args...);
@@ -1399,6 +1776,10 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
auto IsNull = [&](ArgNo ArgN) {
return std::make_shared<NotNullConstraint>(ArgN, false);
};
+ auto NotNullBuffer = [&](ArgNo ArgN, ArgNo SizeArg1N, ArgNo SizeArg2N) {
+ return std::make_shared<NotNullBufferConstraint>(ArgN, SizeArg1N,
+ SizeArg2N);
+ };
std::optional<QualType> FileTy = lookupTy("FILE");
std::optional<QualType> FilePtrTy = getPointerTy(FileTy);
@@ -1409,6 +1790,10 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
std::optional<QualType> ConstFPosTPtrTy = getPointerTy(getConstTy(FPosTTy));
std::optional<QualType> FPosTPtrRestrictTy = getRestrictTy(FPosTPtrTy);
+ constexpr llvm::StringLiteral GenericSuccessMsg(
+ "Assuming that '{0}' is successful");
+ constexpr llvm::StringLiteral GenericFailureMsg("Assuming that '{0}' fails");
+
// We are finally ready to define specifications for all supported functions.
//
// Argument ranges should always cover all variants. If return value
@@ -1443,8 +1828,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
{{'0', '9'}, {'A', 'Z'}, {'a', 'z'}, {128, UCharRangeMax}}),
ReturnValueCondition(WithinRange, SingleValue(0))},
ErrnoIrrelevant, "Assuming the character is non-alphanumeric")
- .ArgConstraint(ArgumentCondition(
- 0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
+ .ArgConstraint(ArgumentCondition(0U, WithinRange,
+ {{EOFv, EOFv}, {0, UCharRangeMax}},
+ "an unsigned char value or EOF")));
addToFunctionSummaryMap(
"isalpha", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
@@ -1603,18 +1989,21 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"toupper", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
- .ArgConstraint(ArgumentCondition(
- 0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
+ .ArgConstraint(ArgumentCondition(0U, WithinRange,
+ {{EOFv, EOFv}, {0, UCharRangeMax}},
+ "an unsigned char value or EOF")));
addToFunctionSummaryMap(
"tolower", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
- .ArgConstraint(ArgumentCondition(
- 0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
+ .ArgConstraint(ArgumentCondition(0U, WithinRange,
+ {{EOFv, EOFv}, {0, UCharRangeMax}},
+ "an unsigned char value or EOF")));
addToFunctionSummaryMap(
"toascii", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
- .ArgConstraint(ArgumentCondition(
- 0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
+ .ArgConstraint(ArgumentCondition(0U, WithinRange,
+ {{EOFv, EOFv}, {0, UCharRangeMax}},
+ "an unsigned char value or EOF")));
// The getc() family of functions that returns either a char or an EOF.
addToFunctionSummaryMap(
@@ -1637,19 +2026,16 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
ArgumentCondition(2U, WithinRange, Range(1, SizeMax)),
ReturnValueCondition(BO_LT, ArgNo(2)),
ReturnValueCondition(WithinRange, Range(0, SizeMax))},
- ErrnoNEZeroIrrelevant)
+ ErrnoNEZeroIrrelevant, GenericFailureMsg)
.Case({ArgumentCondition(1U, WithinRange, Range(1, SizeMax)),
ReturnValueCondition(BO_EQ, ArgNo(2)),
ReturnValueCondition(WithinRange, Range(0, SizeMax))},
- ErrnoMustNotBeChecked)
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
.Case({ArgumentCondition(1U, WithinRange, SingleValue(0)),
ReturnValueCondition(WithinRange, SingleValue(0))},
- ErrnoMustNotBeChecked)
- .ArgConstraint(NotNull(ArgNo(0)))
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .ArgConstraint(NotNullBuffer(ArgNo(0), ArgNo(1), ArgNo(2)))
.ArgConstraint(NotNull(ArgNo(3)))
- // FIXME: It should be allowed to have a null buffer if any of
- // args 1 or 2 are zero. Remove NotNull check of arg 0, add a check
- // for non-null buffer if non-zero size to BufferSizeConstraint?
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1),
/*BufSizeMultiplier=*/ArgNo(2)));
@@ -1749,23 +2135,29 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
ConstraintSet{ReturnValueCondition(WithinRange, Range(-1, IntMax))};
const auto &ReturnsValidFileDescriptor = ReturnsNonnegative;
+ auto ValidFileDescriptorOrAtFdcwd = [&](ArgNo ArgN) {
+ return std::make_shared<RangeConstraint>(
+ ArgN, WithinRange, Range({AT_FDCWDv, AT_FDCWDv}, {0, IntMax}),
+ "a valid file descriptor or AT_FDCWD");
+ };
+
// FILE *fopen(const char *restrict pathname, const char *restrict mode);
addToFunctionSummaryMap(
"fopen",
Signature(ArgTypes{ConstCharPtrRestrictTy, ConstCharPtrRestrictTy},
RetType{FilePtrTy}),
Summary(NoEvalCall)
- .Case({NotNull(Ret)}, ErrnoMustNotBeChecked)
- .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant)
+ .Case({NotNull(Ret)}, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
// FILE *tmpfile(void);
- addToFunctionSummaryMap("tmpfile",
- Signature(ArgTypes{}, RetType{FilePtrTy}),
- Summary(NoEvalCall)
- .Case({NotNull(Ret)}, ErrnoMustNotBeChecked)
- .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant));
+ addToFunctionSummaryMap(
+ "tmpfile", Signature(ArgTypes{}, RetType{FilePtrTy}),
+ Summary(NoEvalCall)
+ .Case({NotNull(Ret)}, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant, GenericFailureMsg));
// FILE *freopen(const char *restrict pathname, const char *restrict mode,
// FILE *restrict stream);
@@ -1776,8 +2168,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
RetType{FilePtrTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(BO_EQ, ArgNo(2))},
- ErrnoMustNotBeChecked)
- .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant)
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(NotNull(ArgNo(2))));
@@ -1785,9 +2177,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"fclose", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
.Case({ReturnValueCondition(WithinRange, SingleValue(EOFv))},
- ErrnoNEZeroIrrelevant)
+ ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int fseek(FILE *stream, long offset, int whence);
@@ -1797,8 +2189,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"fseek", Signature(ArgTypes{FilePtrTy, LongTy, IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(ArgumentCondition(2, WithinRange, {{0, 2}})));
@@ -1811,8 +2203,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{FilePtrRestrictTy, FPosTPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoUnchanged)
- .Case(ReturnsNonZero, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoUnchanged, GenericSuccessMsg)
+ .Case(ReturnsNonZero, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1824,8 +2216,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"fsetpos",
Signature(ArgTypes{FilePtrTy, ConstFPosTPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoUnchanged)
- .Case(ReturnsNonZero, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoUnchanged, GenericSuccessMsg)
+ .Case(ReturnsNonZero, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1837,16 +2229,17 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"ftell", Signature(ArgTypes{FilePtrTy}, RetType{LongTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(WithinRange, Range(1, LongMax))},
- ErrnoUnchanged)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ ErrnoUnchanged, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int fileno(FILE *stream);
addToFunctionSummaryMap(
"fileno", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// void rewind(FILE *stream);
@@ -1884,12 +2277,32 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.ArgConstraint(ArgumentCondition(
0, WithinRange, Range(0, LongMax))));
+ // int open(const char *path, int oflag, ...);
+ addToFunctionSummaryMap(
+ "open", Signature(ArgTypes{ConstCharPtrTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int openat(int fd, const char *path, int oflag, ...);
+ addToFunctionSummaryMap(
+ "openat",
+ Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
// int access(const char *pathname, int amode);
addToFunctionSummaryMap(
"access", Signature(ArgTypes{ConstCharPtrTy, IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int faccessat(int dirfd, const char *pathname, int mode, int flags);
@@ -1898,16 +2311,18 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
// int dup(int fildes);
addToFunctionSummaryMap(
"dup", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -1915,20 +2330,21 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"dup2", Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(
ArgumentCondition(1, WithinRange, Range(0, IntMax))));
// int fdatasync(int fildes);
- addToFunctionSummaryMap("fdatasync",
- Signature(ArgTypes{IntTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(ArgumentCondition(
- 0, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap(
+ "fdatasync", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// int fnmatch(const char *pattern, const char *string, int flags);
addToFunctionSummaryMap(
@@ -1940,12 +2356,13 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.ArgConstraint(NotNull(ArgNo(1))));
// int fsync(int fildes);
- addToFunctionSummaryMap("fsync", Signature(ArgTypes{IntTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(ArgumentCondition(
- 0, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap(
+ "fsync", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
std::optional<QualType> Off_tTy = lookupTy("off_t");
@@ -1954,8 +2371,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"truncate",
Signature(ArgTypes{ConstCharPtrTy, Off_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int symlink(const char *oldpath, const char *newpath);
@@ -1963,8 +2380,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"symlink",
Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1974,18 +2391,18 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstCharPtrTy, IntTy, ConstCharPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(ArgumentCondition(1, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(1)))
.ArgConstraint(NotNull(ArgNo(2))));
// int lockf(int fd, int cmd, off_t len);
addToFunctionSummaryMap(
"lockf", Signature(ArgTypes{IntTy, IntTy, Off_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -1995,8 +2412,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"creat", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// unsigned int sleep(unsigned int seconds);
@@ -2013,8 +2431,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"dirfd", Signature(ArgTypes{DirPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// unsigned int alarm(unsigned int seconds);
@@ -2025,12 +2444,12 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
ArgumentCondition(0, WithinRange, Range(0, UnsignedIntMax))));
// int closedir(DIR *dir);
- addToFunctionSummaryMap("closedir",
- Signature(ArgTypes{DirPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "closedir", Signature(ArgTypes{DirPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// char *strdup(const char *s);
addToFunctionSummaryMap(
@@ -2055,8 +2474,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"mkstemp", Signature(ArgTypes{CharPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// char *mkdtemp(char *template);
@@ -2077,8 +2497,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"mkdir", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int mkdirat(int dirfd, const char *pathname, mode_t mode);
@@ -2086,8 +2506,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"mkdirat",
Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
std::optional<QualType> Dev_tTy = lookupTy("dev_t");
@@ -2097,8 +2518,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"mknod",
Signature(ArgTypes{ConstCharPtrTy, Mode_tTy, Dev_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int mknodat(int dirfd, const char *pathname, mode_t mode, dev_t dev);
@@ -2107,16 +2528,17 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy, Dev_tTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
// int chmod(const char *path, mode_t mode);
addToFunctionSummaryMap(
"chmod", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int fchmodat(int dirfd, const char *pathname, mode_t mode, int flags);
@@ -2125,17 +2547,17 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
// int fchmod(int fildes, mode_t mode);
addToFunctionSummaryMap(
"fchmod", Signature(ArgTypes{IntTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2149,9 +2571,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, Uid_tTy, Gid_tTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
// int chown(const char *path, uid_t owner, gid_t group);
@@ -2159,8 +2581,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"chown",
Signature(ArgTypes{ConstCharPtrTy, Uid_tTy, Gid_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int lchown(const char *path, uid_t owner, gid_t group);
@@ -2168,42 +2590,42 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"lchown",
Signature(ArgTypes{ConstCharPtrTy, Uid_tTy, Gid_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int fchown(int fildes, uid_t owner, gid_t group);
addToFunctionSummaryMap(
"fchown", Signature(ArgTypes{IntTy, Uid_tTy, Gid_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// int rmdir(const char *pathname);
- addToFunctionSummaryMap("rmdir",
- Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "rmdir", Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// int chdir(const char *path);
- addToFunctionSummaryMap("chdir",
- Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "chdir", Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// int link(const char *oldpath, const char *newpath);
addToFunctionSummaryMap(
"link",
Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -2214,29 +2636,29 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy, ConstCharPtrTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1)))
- .ArgConstraint(ArgumentCondition(2, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(2)))
.ArgConstraint(NotNull(ArgNo(3))));
// int unlink(const char *pathname);
- addToFunctionSummaryMap("unlink",
- Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "unlink", Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// int unlinkat(int fd, const char *path, int flag);
addToFunctionSummaryMap(
"unlinkat",
Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
std::optional<QualType> StructStatTy = lookupTy("stat");
@@ -2248,8 +2670,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"fstat", Signature(ArgTypes{IntTy, StructStatPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -2259,8 +2681,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstCharPtrRestrictTy, StructStatPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -2270,8 +2692,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstCharPtrRestrictTy, StructStatPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -2283,9 +2705,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
StructStatPtrRestrictTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(NotNull(ArgNo(2))));
@@ -2328,12 +2750,13 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// int close(int fildes);
- addToFunctionSummaryMap("close", Signature(ArgTypes{IntTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(ArgumentCondition(
- 0, WithinRange, Range(-1, IntMax))));
+ addToFunctionSummaryMap(
+ "close", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(-1, IntMax))));
// long fpathconf(int fildes, int name);
addToFunctionSummaryMap("fpathconf",
@@ -2410,12 +2833,12 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
ArgumentCondition(4, WithinRange, Range(-1, IntMax))));
// int pipe(int fildes[2]);
- addToFunctionSummaryMap("pipe",
- Signature(ArgTypes{IntPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "pipe", Signature(ArgTypes{IntPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// off_t lseek(int fildes, off_t offset, int whence);
// In the first case we can not tell for sure if it failed or not.
@@ -2426,7 +2849,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"lseek", Signature(ArgTypes{IntTy, Off_tTy, IntTy}, RetType{Off_tTy}),
Summary(NoEvalCall)
.Case(ReturnsNonnegative, ErrnoIrrelevant)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2439,8 +2862,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
- ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
@@ -2458,9 +2881,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(3)),
ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
- ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(NotNull(ArgNo(2)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(2),
@@ -2475,9 +2898,11 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy, ConstCharPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(2)))
.ArgConstraint(NotNull(ArgNo(3))));
// char *realpath(const char *restrict file_name,
@@ -2542,10 +2967,20 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// standardized signature will not match, thus we try to match with another
// signature that has the joker Irrelevant type. We also remove those
// constraints which require pointer types for the sockaddr param.
+
+ // int socket(int domain, int type, int protocol);
+ addToFunctionSummaryMap(
+ "socket", Signature(ArgTypes{IntTy, IntTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg));
+
auto Accept =
Summary(NoEvalCall)
- .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)));
if (!addToFunctionSummaryMap(
"accept",
@@ -2568,8 +3003,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstStructSockaddrPtrTy, Socklen_tTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
@@ -2582,8 +3017,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"bind",
Signature(ArgTypes{IntTy, Irrelevant, Socklen_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(
@@ -2597,8 +3032,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
@@ -2608,8 +3043,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, Irrelevant, Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2621,8 +3056,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
@@ -2632,8 +3067,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, Irrelevant, Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2644,8 +3079,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstStructSockaddrPtrTy, Socklen_tTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))))
@@ -2653,8 +3088,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"connect",
Signature(ArgTypes{IntTy, Irrelevant, Socklen_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2662,8 +3097,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
- ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
/*BufSize=*/ArgNo(2)));
@@ -2689,8 +3124,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
- ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
/*BufSize=*/ArgNo(2)));
@@ -2711,13 +3146,13 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Sendto);
// int listen(int sockfd, int backlog);
- addToFunctionSummaryMap("listen",
- Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(ArgumentCondition(
- 0, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap(
+ "listen", Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// ssize_t recv(int sockfd, void *buf, size_t len, int flags);
addToFunctionSummaryMap(
@@ -2727,8 +3162,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
- ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
/*BufSize=*/ArgNo(2))));
@@ -2745,8 +3180,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
RetType{Ssize_tTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
- ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2757,8 +3192,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
RetType{Ssize_tTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
- ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2769,8 +3204,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, IntTy, IntTy, ConstVoidPtrTy, Socklen_tTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(3)))
.ArgConstraint(
BufferSize(/*Buffer=*/ArgNo(3), /*BufSize=*/ArgNo(4)))
@@ -2786,8 +3221,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(3)))
.ArgConstraint(NotNull(ArgNo(4))));
@@ -2799,8 +3234,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
- ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
/*BufSize=*/ArgNo(2))));
@@ -2810,10 +3245,19 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"socketpair",
Signature(ArgTypes{IntTy, IntTy, IntTy, IntPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(3))));
+ // int shutdown(int socket, int how);
+ addToFunctionSummaryMap(
+ "shutdown", Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
// int getnameinfo(const struct sockaddr *restrict sa, socklen_t salen,
// char *restrict node, socklen_t nodelen,
// char *restrict service,
@@ -2849,8 +3293,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"utime",
Signature(ArgTypes{ConstCharPtrTy, StructUtimbufPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
std::optional<QualType> StructTimespecTy = lookupTy("timespec");
@@ -2864,21 +3308,22 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"futimens",
Signature(ArgTypes{IntTy, ConstStructTimespecPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// int utimensat(int dirfd, const char *pathname,
// const struct timespec times[2], int flags);
- addToFunctionSummaryMap("utimensat",
- Signature(ArgTypes{IntTy, ConstCharPtrTy,
- ConstStructTimespecPtrTy, IntTy},
- RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
- .ArgConstraint(NotNull(ArgNo(1))));
+ addToFunctionSummaryMap(
+ "utimensat",
+ Signature(
+ ArgTypes{IntTy, ConstCharPtrTy, ConstStructTimespecPtrTy, IntTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(1))));
std::optional<QualType> StructTimevalTy = lookupTy("timeval");
std::optional<QualType> ConstStructTimevalPtrTy =
@@ -2890,8 +3335,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstCharPtrTy, ConstStructTimevalPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int nanosleep(const struct timespec *rqtp, struct timespec *rmtp);
@@ -2900,8 +3345,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstStructTimespecPtrTy, StructTimespecPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
std::optional<QualType> Time_tTy = lookupTy("time_t");
@@ -2979,8 +3424,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"clock_gettime",
Signature(ArgTypes{Clockid_tTy, StructTimespecPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(1))));
std::optional<QualType> StructItimervalTy = lookupTy("itimerval");
@@ -2992,8 +3437,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"getitimer",
Signature(ArgTypes{IntTy, StructItimervalPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZero, ErrnoMustNotBeChecked)
- .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(1))));
std::optional<QualType> Pthread_cond_tTy = lookupTy("pthread_cond_t");
@@ -3096,12 +3541,20 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
}
// Functions for testing.
- if (ChecksEnabled[CK_StdCLibraryFunctionsTesterChecker]) {
+ if (AddTestFunctions) {
+ const RangeInt IntMin = BVF.getMinValue(IntTy).getLimitedValue();
+
addToFunctionSummaryMap(
"__not_null", Signature(ArgTypes{IntPtrTy}, RetType{IntTy}),
Summary(EvalCallAsPure).ArgConstraint(NotNull(ArgNo(0))));
- // Test range values.
+ addToFunctionSummaryMap(
+ "__not_null_buffer",
+ Signature(ArgTypes{VoidPtrTy, IntTy, IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(NotNullBuffer(ArgNo(0), ArgNo(1), ArgNo(2))));
+
+ // Test inside range constraints.
addToFunctionSummaryMap(
"__single_val_0", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
@@ -3114,11 +3567,124 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"__range_1_2", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.ArgConstraint(ArgumentCondition(0U, WithinRange, Range(1, 2))));
- addToFunctionSummaryMap("__range_1_2__4_5",
+ addToFunctionSummaryMap(
+ "__range_m1_1", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange, Range(-1, 1))));
+ addToFunctionSummaryMap(
+ "__range_m2_m1", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange, Range(-2, -1))));
+ addToFunctionSummaryMap(
+ "__range_m10_10", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange, Range(-10, 10))));
+ addToFunctionSummaryMap("__range_m1_inf",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range(-1, IntMax))));
+ addToFunctionSummaryMap("__range_0_inf",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap("__range_1_inf",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range(1, IntMax))));
+ addToFunctionSummaryMap("__range_minf_m1",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range(IntMin, -1))));
+ addToFunctionSummaryMap("__range_minf_0",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range(IntMin, 0))));
+ addToFunctionSummaryMap("__range_minf_1",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range(IntMin, 1))));
+ addToFunctionSummaryMap("__range_1_2__4_6",
Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.ArgConstraint(ArgumentCondition(
- 0U, WithinRange, Range({1, 2}, {4, 5}))));
+ 0U, WithinRange, Range({1, 2}, {4, 6}))));
+ addToFunctionSummaryMap(
+ "__range_1_2__4_inf", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange,
+ Range({1, 2}, {4, IntMax}))));
+
+ // Test out of range constraints.
+ addToFunctionSummaryMap(
+ "__single_val_out_0", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, SingleValue(0))));
+ addToFunctionSummaryMap(
+ "__single_val_out_1", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, SingleValue(1))));
+ addToFunctionSummaryMap(
+ "__range_out_1_2", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, Range(1, 2))));
+ addToFunctionSummaryMap(
+ "__range_out_m1_1", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, Range(-1, 1))));
+ addToFunctionSummaryMap(
+ "__range_out_m2_m1", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, Range(-2, -1))));
+ addToFunctionSummaryMap(
+ "__range_out_m10_10", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, Range(-10, 10))));
+ addToFunctionSummaryMap("__range_out_m1_inf",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, OutOfRange, Range(-1, IntMax))));
+ addToFunctionSummaryMap("__range_out_0_inf",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, OutOfRange, Range(0, IntMax))));
+ addToFunctionSummaryMap("__range_out_1_inf",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, OutOfRange, Range(1, IntMax))));
+ addToFunctionSummaryMap("__range_out_minf_m1",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, OutOfRange, Range(IntMin, -1))));
+ addToFunctionSummaryMap("__range_out_minf_0",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, OutOfRange, Range(IntMin, 0))));
+ addToFunctionSummaryMap("__range_out_minf_1",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, OutOfRange, Range(IntMin, 1))));
+ addToFunctionSummaryMap("__range_out_1_2__4_6",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, OutOfRange, Range({1, 2}, {4, 6}))));
+ addToFunctionSummaryMap(
+ "__range_out_1_2__4_inf", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(
+ ArgumentCondition(0U, OutOfRange, Range({1, 2}, {4, IntMax}))));
// Test range kind.
addToFunctionSummaryMap(
@@ -3183,13 +3749,33 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
ErrnoIrrelevant, "Function returns 0")
.Case({ReturnValueCondition(WithinRange, SingleValue(1))},
ErrnoIrrelevant, "Function returns 1"));
+ addToFunctionSummaryMap(
+ "__test_case_range_1_2__4_6",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange,
+ IntRangeVector{{IntMin, 0}, {3, 3}}),
+ ReturnValueCondition(WithinRange, SingleValue(1))},
+ ErrnoIrrelevant)
+ .Case({ArgumentCondition(0U, WithinRange,
+ IntRangeVector{{3, 3}, {7, IntMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(2))},
+ ErrnoIrrelevant)
+ .Case({ArgumentCondition(0U, WithinRange,
+ IntRangeVector{{IntMin, 0}, {7, IntMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(3))},
+ ErrnoIrrelevant)
+ .Case({ArgumentCondition(
+ 0U, WithinRange,
+ IntRangeVector{{IntMin, 0}, {3, 3}, {7, IntMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(4))},
+ ErrnoIrrelevant));
}
-
- SummariesInitialized = true;
}
void ento::registerStdCLibraryFunctionsChecker(CheckerManager &mgr) {
auto *Checker = mgr.registerChecker<StdLibraryFunctionsChecker>();
+ Checker->CheckName = mgr.getCurrentCheckerName();
const AnalyzerOptions &Opts = mgr.getAnalyzerOptions();
Checker->DisplayLoadedSummaries =
Opts.getCheckerBooleanOption(Checker, "DisplayLoadedSummaries");
@@ -3203,16 +3789,12 @@ bool ento::shouldRegisterStdCLibraryFunctionsChecker(
return true;
}
-#define REGISTER_CHECKER(name) \
- void ento::register##name(CheckerManager &mgr) { \
- StdLibraryFunctionsChecker *checker = \
- mgr.getChecker<StdLibraryFunctionsChecker>(); \
- checker->ChecksEnabled[StdLibraryFunctionsChecker::CK_##name] = true; \
- checker->CheckNames[StdLibraryFunctionsChecker::CK_##name] = \
- mgr.getCurrentCheckerName(); \
- } \
- \
- bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
-
-REGISTER_CHECKER(StdCLibraryFunctionArgsChecker)
-REGISTER_CHECKER(StdCLibraryFunctionsTesterChecker)
+void ento::registerStdCLibraryFunctionsTesterChecker(CheckerManager &mgr) {
+ auto *Checker = mgr.getChecker<StdLibraryFunctionsChecker>();
+ Checker->AddTestFunctions = true;
+}
+
+bool ento::shouldRegisterStdCLibraryFunctionsTesterChecker(
+ const CheckerManager &mgr) {
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 3f61dd823940..bad86682c91e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -210,6 +210,7 @@ ProgramStateRef bindInt(uint64_t Value, ProgramStateRef State,
class StreamChecker : public Checker<check::PreCall, eval::Call,
check::DeadSymbols, check::PointerEscape> {
+ BugType BT_FileNull{this, "NULL stream pointer", "Stream handling error"};
BugType BT_UseAfterClose{this, "Closed stream", "Stream handling error"};
BugType BT_UseAfterOpenFailed{this, "Invalid stream",
"Stream handling error"};
@@ -284,7 +285,14 @@ private:
0}},
};
+ /// Expanded value of EOF, empty before initialization.
mutable std::optional<int> EofVal;
+ /// Expanded value of SEEK_SET, 0 if not found.
+ mutable int SeekSetVal = 0;
+ /// Expanded value of SEEK_CUR, 1 if not found.
+ mutable int SeekCurVal = 1;
+ /// Expanded value of SEEK_END, 2 if not found.
+ mutable int SeekEndVal = 2;
void evalFopen(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const;
@@ -338,7 +346,7 @@ private:
const StreamErrorState &ErrorKind) const;
/// Check that the stream (in StreamVal) is not NULL.
- /// If it can only be NULL a sink node is generated and nullptr returned.
+ /// If it can only be NULL a fatal error is emitted and nullptr returned.
/// Otherwise the return value is a new state where the stream is constrained
/// to be non-null.
ProgramStateRef ensureStreamNonNull(SVal StreamVal, const Expr *StreamE,
@@ -431,7 +439,7 @@ private:
});
}
- void initEof(CheckerContext &C) const {
+ void initMacroValues(CheckerContext &C) const {
if (EofVal)
return;
@@ -440,6 +448,15 @@ private:
EofVal = *OptInt;
else
EofVal = -1;
+ if (const std::optional<int> OptInt =
+ tryExpandAsInteger("SEEK_SET", C.getPreprocessor()))
+ SeekSetVal = *OptInt;
+ if (const std::optional<int> OptInt =
+ tryExpandAsInteger("SEEK_END", C.getPreprocessor()))
+ SeekEndVal = *OptInt;
+ if (const std::optional<int> OptInt =
+ tryExpandAsInteger("SEEK_CUR", C.getPreprocessor()))
+ SeekCurVal = *OptInt;
}
/// Searches for the ExplodedNode where the file descriptor was acquired for
@@ -487,7 +504,7 @@ const ExplodedNode *StreamChecker::getAcquisitionSite(const ExplodedNode *N,
void StreamChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- initEof(C);
+ initMacroValues(C);
const FnDescription *Desc = lookupFn(Call);
if (!Desc || !Desc->PreFn)
@@ -785,6 +802,11 @@ void StreamChecker::evalFseek(const FnDescription *Desc, const CallEvent &Call,
if (!State->get<StreamMap>(StreamSym))
return;
+ const llvm::APSInt *PosV =
+ C.getSValBuilder().getKnownValue(State, Call.getArgSVal(1));
+ const llvm::APSInt *WhenceV =
+ C.getSValBuilder().getKnownValue(State, Call.getArgSVal(2));
+
DefinedSVal RetVal = makeRetVal(C, CE);
// Make expression result.
@@ -803,9 +825,12 @@ void StreamChecker::evalFseek(const FnDescription *Desc, const CallEvent &Call,
// It is possible that fseek fails but sets none of the error flags.
// If fseek failed, assume that the file position becomes indeterminate in any
// case.
+ StreamErrorState NewErrS = ErrorNone | ErrorFError;
+ // Setting the position to start of file never produces EOF error.
+ if (!(PosV && *PosV == 0 && WhenceV && *WhenceV == SeekSetVal))
+ NewErrS = NewErrS | ErrorFEof;
StateFailed = StateFailed->set<StreamMap>(
- StreamSym,
- StreamState::getOpened(Desc, ErrorNone | ErrorFEof | ErrorFError, true));
+ StreamSym, StreamState::getOpened(Desc, NewErrS, true));
C.addTransition(StateNotFailed);
C.addTransition(StateFailed, constructSetEofNoteTag(C, StreamSym));
@@ -1039,11 +1064,13 @@ StreamChecker::ensureStreamNonNull(SVal StreamVal, const Expr *StreamE,
std::tie(StateNotNull, StateNull) = CM.assumeDual(C.getState(), *Stream);
if (!StateNotNull && StateNull) {
- // Stream argument is NULL, stop analysis on this path.
- // This case should occur only if StdLibraryFunctionsChecker (or ModelPOSIX
- // option of it) is not turned on, otherwise that checker ensures non-null
- // argument.
- C.generateSink(StateNull, C.getPredecessor());
+ if (ExplodedNode *N = C.generateErrorNode(StateNull)) {
+ auto R = std::make_unique<PathSensitiveBugReport>(
+ BT_FileNull, "Stream pointer might be NULL.", N);
+ if (StreamE)
+ bugreporter::trackExpressionValue(N, StreamE, *R);
+ C.emitReport(std::move(R));
+ }
return nullptr;
}
@@ -1150,7 +1177,7 @@ StreamChecker::ensureFseekWhenceCorrect(SVal WhenceVal, CheckerContext &C,
return State;
int64_t X = CI->getValue().getSExtValue();
- if (X >= 0 && X <= 2)
+ if (X == SeekSetVal || X == SeekCurVal || X == SeekEndVal)
return State;
if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
@@ -1201,10 +1228,12 @@ StreamChecker::reportLeaks(const SmallVector<SymbolRef, 2> &LeakedSyms,
// FIXME: Add a checker option to turn this uniqueing feature off.
const ExplodedNode *StreamOpenNode = getAcquisitionSite(Err, LeakSym, C);
assert(StreamOpenNode && "Could not find place of stream opening.");
- PathDiagnosticLocation LocUsedForUniqueing =
- PathDiagnosticLocation::createBegin(
- StreamOpenNode->getStmtForDiagnostics(), C.getSourceManager(),
- StreamOpenNode->getLocationContext());
+
+ PathDiagnosticLocation LocUsedForUniqueing;
+ if (const Stmt *StreamStmt = StreamOpenNode->getStmtForDiagnostics())
+ LocUsedForUniqueing = PathDiagnosticLocation::createBegin(
+ StreamStmt, C.getSourceManager(),
+ StreamOpenNode->getLocationContext());
std::unique_ptr<PathSensitiveBugReport> R =
std::make_unique<PathSensitiveBugReport>(
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
index a95c0e183284..4edb671753bf 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
@@ -146,62 +146,142 @@ ProgramStateRef taint::addPartialTaint(ProgramStateRef State,
bool taint::isTainted(ProgramStateRef State, const Stmt *S,
const LocationContext *LCtx, TaintTagType Kind) {
- SVal val = State->getSVal(S, LCtx);
- return isTainted(State, val, Kind);
+ return !getTaintedSymbolsImpl(State, S, LCtx, Kind, /*ReturnFirstOnly=*/true)
+ .empty();
}
bool taint::isTainted(ProgramStateRef State, SVal V, TaintTagType Kind) {
- if (SymbolRef Sym = V.getAsSymbol())
- return isTainted(State, Sym, Kind);
- if (const MemRegion *Reg = V.getAsRegion())
- return isTainted(State, Reg, Kind);
- return false;
+ return !getTaintedSymbolsImpl(State, V, Kind, /*ReturnFirstOnly=*/true)
+ .empty();
}
bool taint::isTainted(ProgramStateRef State, const MemRegion *Reg,
TaintTagType K) {
- if (!Reg)
- return false;
+ return !getTaintedSymbolsImpl(State, Reg, K, /*ReturnFirstOnly=*/true)
+ .empty();
+}
+
+bool taint::isTainted(ProgramStateRef State, SymbolRef Sym, TaintTagType Kind) {
+ return !getTaintedSymbolsImpl(State, Sym, Kind, /*ReturnFirstOnly=*/true)
+ .empty();
+}
+
+std::vector<SymbolRef> taint::getTaintedSymbols(ProgramStateRef State,
+ const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind) {
+ return getTaintedSymbolsImpl(State, S, LCtx, Kind, /*ReturnFirstOnly=*/false);
+}
+
+std::vector<SymbolRef> taint::getTaintedSymbols(ProgramStateRef State, SVal V,
+ TaintTagType Kind) {
+ return getTaintedSymbolsImpl(State, V, Kind, /*ReturnFirstOnly=*/false);
+}
+
+std::vector<SymbolRef> taint::getTaintedSymbols(ProgramStateRef State,
+ SymbolRef Sym,
+ TaintTagType Kind) {
+ return getTaintedSymbolsImpl(State, Sym, Kind, /*ReturnFirstOnly=*/false);
+}
+
+std::vector<SymbolRef> taint::getTaintedSymbols(ProgramStateRef State,
+ const MemRegion *Reg,
+ TaintTagType Kind) {
+ return getTaintedSymbolsImpl(State, Reg, Kind, /*ReturnFirstOnly=*/false);
+}
+
+std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
+ const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind,
+ bool returnFirstOnly) {
+ SVal val = State->getSVal(S, LCtx);
+ return getTaintedSymbolsImpl(State, val, Kind, returnFirstOnly);
+}
+std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
+ SVal V, TaintTagType Kind,
+ bool returnFirstOnly) {
+ if (SymbolRef Sym = V.getAsSymbol())
+ return getTaintedSymbolsImpl(State, Sym, Kind, returnFirstOnly);
+ if (const MemRegion *Reg = V.getAsRegion())
+ return getTaintedSymbolsImpl(State, Reg, Kind, returnFirstOnly);
+ return {};
+}
+
+std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
+ const MemRegion *Reg,
+ TaintTagType K,
+ bool returnFirstOnly) {
+ std::vector<SymbolRef> TaintedSymbols;
+ if (!Reg)
+ return TaintedSymbols;
// Element region (array element) is tainted if either the base or the offset
// are tainted.
- if (const ElementRegion *ER = dyn_cast<ElementRegion>(Reg))
- return isTainted(State, ER->getSuperRegion(), K) ||
- isTainted(State, ER->getIndex(), K);
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(Reg)) {
+ std::vector<SymbolRef> TaintedIndex =
+ getTaintedSymbolsImpl(State, ER->getIndex(), K, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedIndex);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
+ std::vector<SymbolRef> TaintedSuperRegion =
+ getTaintedSymbolsImpl(State, ER->getSuperRegion(), K, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedSuperRegion);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
+ }
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg))
- return isTainted(State, SR->getSymbol(), K);
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg)) {
+ std::vector<SymbolRef> TaintedRegions =
+ getTaintedSymbolsImpl(State, SR->getSymbol(), K, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedRegions);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
+ }
- if (const SubRegion *ER = dyn_cast<SubRegion>(Reg))
- return isTainted(State, ER->getSuperRegion(), K);
+ if (const SubRegion *ER = dyn_cast<SubRegion>(Reg)) {
+ std::vector<SymbolRef> TaintedSubRegions =
+ getTaintedSymbolsImpl(State, ER->getSuperRegion(), K, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedSubRegions);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
+ }
- return false;
+ return TaintedSymbols;
}
-bool taint::isTainted(ProgramStateRef State, SymbolRef Sym, TaintTagType Kind) {
+std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
+ SymbolRef Sym,
+ TaintTagType Kind,
+ bool returnFirstOnly) {
+ std::vector<SymbolRef> TaintedSymbols;
if (!Sym)
- return false;
+ return TaintedSymbols;
// Traverse all the symbols this symbol depends on to see if any are tainted.
- for (SymExpr::symbol_iterator SI = Sym->symbol_begin(),
- SE = Sym->symbol_end();
- SI != SE; ++SI) {
- if (!isa<SymbolData>(*SI))
+ for (SymbolRef SubSym : Sym->symbols()) {
+ if (!isa<SymbolData>(SubSym))
continue;
- if (const TaintTagType *Tag = State->get<TaintMap>(*SI)) {
- if (*Tag == Kind)
- return true;
+ if (const TaintTagType *Tag = State->get<TaintMap>(SubSym)) {
+ if (*Tag == Kind) {
+ TaintedSymbols.push_back(SubSym);
+ if (returnFirstOnly)
+ return TaintedSymbols; // return early if needed
+ }
}
- if (const auto *SD = dyn_cast<SymbolDerived>(*SI)) {
+ if (const auto *SD = dyn_cast<SymbolDerived>(SubSym)) {
// If this is a SymbolDerived with a tainted parent, it's also tainted.
- if (isTainted(State, SD->getParentSymbol(), Kind))
- return true;
+ std::vector<SymbolRef> TaintedParents = getTaintedSymbolsImpl(
+ State, SD->getParentSymbol(), Kind, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedParents);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
// If this is a SymbolDerived with the same parent symbol as another
- // tainted SymbolDerived and a region that's a sub-region of that tainted
- // symbol, it's also tainted.
+ // tainted SymbolDerived and a region that's a sub-region of that
+ // tainted symbol, it's also tainted.
if (const TaintedSubRegions *Regs =
State->get<DerivedSymTaint>(SD->getParentSymbol())) {
const TypedValueRegion *R = SD->getRegion();
@@ -210,46 +290,32 @@ bool taint::isTainted(ProgramStateRef State, SymbolRef Sym, TaintTagType Kind) {
// complete. For example, this would not currently identify
// overlapping fields in a union as tainted. To identify this we can
// check for overlapping/nested byte offsets.
- if (Kind == I.second && R->isSubRegionOf(I.first))
- return true;
+ if (Kind == I.second && R->isSubRegionOf(I.first)) {
+ TaintedSymbols.push_back(SD->getParentSymbol());
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
+ }
}
}
}
// If memory region is tainted, data is also tainted.
- if (const auto *SRV = dyn_cast<SymbolRegionValue>(*SI)) {
- if (isTainted(State, SRV->getRegion(), Kind))
- return true;
+ if (const auto *SRV = dyn_cast<SymbolRegionValue>(SubSym)) {
+ std::vector<SymbolRef> TaintedRegions =
+ getTaintedSymbolsImpl(State, SRV->getRegion(), Kind, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedRegions);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
}
// If this is a SymbolCast from a tainted value, it's also tainted.
- if (const auto *SC = dyn_cast<SymbolCast>(*SI)) {
- if (isTainted(State, SC->getOperand(), Kind))
- return true;
+ if (const auto *SC = dyn_cast<SymbolCast>(SubSym)) {
+ std::vector<SymbolRef> TaintedCasts =
+ getTaintedSymbolsImpl(State, SC->getOperand(), Kind, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedCasts);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
}
}
-
- return false;
-}
-
-PathDiagnosticPieceRef TaintBugVisitor::VisitNode(const ExplodedNode *N,
- BugReporterContext &BRC,
- PathSensitiveBugReport &BR) {
-
- // Find the ExplodedNode where the taint was first introduced
- if (!isTainted(N->getState(), V) ||
- isTainted(N->getFirstPred()->getState(), V))
- return nullptr;
-
- const Stmt *S = N->getStmtForDiagnostics();
- if (!S)
- return nullptr;
-
- const LocationContext *NCtx = N->getLocationContext();
- PathDiagnosticLocation L =
- PathDiagnosticLocation::createBegin(S, BRC.getSourceManager(), NCtx);
- if (!L.isValid() || !L.asLocation().isValid())
- return nullptr;
-
- return std::make_shared<PathDiagnosticEventPiece>(L, "Taint originated here");
-}
+ return TaintedSymbols;
+} \ No newline at end of file
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
index 28fe11d5ed06..9d3a909f50c1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
@@ -188,10 +188,7 @@ void TestAfterDivZeroChecker::checkEndFunction(const ReturnStmt *,
return;
DivZeroMapTy::Factory &F = State->get_context<DivZeroMap>();
- for (llvm::ImmutableSet<ZeroState>::iterator I = DivZeroes.begin(),
- E = DivZeroes.end();
- I != E; ++I) {
- ZeroState ZS = *I;
+ for (const ZeroState &ZS : DivZeroes) {
if (ZS.getStackFrameContext() == C.getStackFrame())
DivZeroes = F.remove(DivZeroes, ZS);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
index 5cc713172527..e2f8bd541c96 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
@@ -69,8 +69,7 @@ public:
if (!CondS || CondS->computeComplexity() > ComplexityThreshold)
return State;
- for (auto B=CondS->symbol_begin(), E=CondS->symbol_end(); B != E; ++B) {
- const SymbolRef Antecedent = *B;
+ for (SymbolRef Antecedent : CondS->symbols()) {
State = addImplication(Antecedent, State, true);
State = addImplication(Antecedent, State, false);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index 27f3345e67ac..2973dd5457c6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -57,13 +57,10 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
ProgramStateRef state = C.getState();
auto *R = cast<BlockDataRegion>(C.getSVal(BE).getAsRegion());
- BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
- E = R->referenced_vars_end();
-
- for (; I != E; ++I) {
+ for (auto Var : R->referenced_vars()) {
// This VarRegion is the region associated with the block; we need
// the one associated with the encompassing context.
- const VarRegion *VR = I.getCapturedRegion();
+ const VarRegion *VR = Var.getCapturedRegion();
const VarDecl *VD = VR->getDecl();
if (VD->hasAttr<BlocksAttr>() || !VD->hasLocalStorage())
@@ -71,7 +68,7 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
// Get the VarRegion associated with VD in the local stack frame.
if (std::optional<UndefinedVal> V =
- state->getSVal(I.getOriginalRegion()).getAs<UndefinedVal>()) {
+ state->getSVal(Var.getOriginalRegion()).getAs<UndefinedVal>()) {
if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT)
BT.reset(
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index 2658b473a477..f20b38a53151 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -70,7 +70,7 @@ static bool isLeftShiftResultUnrepresentable(const BinaryOperator *B,
const llvm::APSInt *LHS = SB.getKnownValue(State, C.getSVal(B->getLHS()));
const llvm::APSInt *RHS = SB.getKnownValue(State, C.getSVal(B->getRHS()));
assert(LHS && RHS && "Values unknown, inconsistent state");
- return (unsigned)RHS->getZExtValue() > LHS->countLeadingZeros();
+ return (unsigned)RHS->getZExtValue() > LHS->countl_zero();
}
void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index 3ad6858ead46..d24a124f5ffe 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -59,9 +59,8 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
const ParentMap *PM = nullptr;
const LocationContext *LC = nullptr;
// Iterate over ExplodedGraph
- for (ExplodedGraph::node_iterator I = G.nodes_begin(), E = G.nodes_end();
- I != E; ++I) {
- const ProgramPoint &P = I->getLocation();
+ for (const ExplodedNode &N : G.nodes()) {
+ const ProgramPoint &P = N.getLocation();
LC = P.getLocationContext();
if (!LC->inTopFrame())
continue;
@@ -93,8 +92,7 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
return;
// Find CFGBlocks that were not covered by any node
- for (CFG::const_iterator I = C->begin(), E = C->end(); I != E; ++I) {
- const CFGBlock *CB = *I;
+ for (const CFGBlock *CB : *C) {
// Check if the block is unreachable
if (reachable.count(CB->getBlockID()))
continue;
@@ -181,34 +179,30 @@ void UnreachableCodeChecker::FindUnreachableEntryPoints(const CFGBlock *CB,
CFGBlocksSet &visited) {
visited.insert(CB->getBlockID());
- for (CFGBlock::const_pred_iterator I = CB->pred_begin(), E = CB->pred_end();
- I != E; ++I) {
- if (!*I)
+ for (const CFGBlock *PredBlock : CB->preds()) {
+ if (!PredBlock)
continue;
- if (!reachable.count((*I)->getBlockID())) {
+ if (!reachable.count(PredBlock->getBlockID())) {
// If we find an unreachable predecessor, mark this block as reachable so
// we don't report this block
reachable.insert(CB->getBlockID());
- if (!visited.count((*I)->getBlockID()))
+ if (!visited.count(PredBlock->getBlockID()))
// If we haven't previously visited the unreachable predecessor, recurse
- FindUnreachableEntryPoints(*I, reachable, visited);
+ FindUnreachableEntryPoints(PredBlock, reachable, visited);
}
}
}
// Find the Stmt* in a CFGBlock for reporting a warning
const Stmt *UnreachableCodeChecker::getUnreachableStmt(const CFGBlock *CB) {
- for (CFGBlock::const_iterator I = CB->begin(), E = CB->end(); I != E; ++I) {
- if (std::optional<CFGStmt> S = I->getAs<CFGStmt>()) {
+ for (const CFGElement &Elem : *CB) {
+ if (std::optional<CFGStmt> S = Elem.getAs<CFGStmt>()) {
if (!isa<DeclStmt>(S->getStmt()))
return S->getStmt();
}
}
- if (const Stmt *S = CB->getTerminatorStmt())
- return S;
- else
- return nullptr;
+ return CB->getTerminatorStmt();
}
// Determines if the path to this CFGBlock contained an element that infers this
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index fe910ce35302..b195d912cadf 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -35,13 +35,8 @@ class VLASizeChecker
: public Checker<check::PreStmt<DeclStmt>,
check::PreStmt<UnaryExprOrTypeTraitExpr>> {
mutable std::unique_ptr<BugType> BT;
- enum VLASize_Kind {
- VLA_Garbage,
- VLA_Zero,
- VLA_Tainted,
- VLA_Negative,
- VLA_Overflow
- };
+ mutable std::unique_ptr<BugType> TaintBT;
+ enum VLASize_Kind { VLA_Garbage, VLA_Zero, VLA_Negative, VLA_Overflow };
/// Check a VLA for validity.
/// Every dimension of the array and the total size is checked for validity.
@@ -55,8 +50,10 @@ class VLASizeChecker
const Expr *SizeE) const;
void reportBug(VLASize_Kind Kind, const Expr *SizeE, ProgramStateRef State,
- CheckerContext &C,
- std::unique_ptr<BugReporterVisitor> Visitor = nullptr) const;
+ CheckerContext &C) const;
+
+ void reportTaintBug(const Expr *SizeE, ProgramStateRef State,
+ CheckerContext &C, SVal TaintedSVal) const;
public:
void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
@@ -167,8 +164,7 @@ ProgramStateRef VLASizeChecker::checkVLAIndexSize(CheckerContext &C,
// Check if the size is tainted.
if (isTainted(State, SizeV)) {
- reportBug(VLA_Tainted, SizeE, nullptr, C,
- std::make_unique<TaintBugVisitor>(SizeV));
+ reportTaintBug(SizeE, State, C, SizeV);
return nullptr;
}
@@ -209,17 +205,44 @@ ProgramStateRef VLASizeChecker::checkVLAIndexSize(CheckerContext &C,
return State;
}
-void VLASizeChecker::reportBug(
- VLASize_Kind Kind, const Expr *SizeE, ProgramStateRef State,
- CheckerContext &C, std::unique_ptr<BugReporterVisitor> Visitor) const {
+void VLASizeChecker::reportTaintBug(const Expr *SizeE, ProgramStateRef State,
+ CheckerContext &C, SVal TaintedSVal) const {
+ // Generate an error node.
+ ExplodedNode *N = C.generateErrorNode(State);
+ if (!N)
+ return;
+
+ if (!TaintBT)
+ TaintBT.reset(
+ new BugType(this, "Dangerous variable-length array (VLA) declaration",
+ categories::TaintedData));
+
+ SmallString<256> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "Declared variable-length array (VLA) ";
+ os << "has tainted size";
+
+ auto report = std::make_unique<PathSensitiveBugReport>(*TaintBT, os.str(), N);
+ report->addRange(SizeE->getSourceRange());
+ bugreporter::trackExpressionValue(N, SizeE, *report);
+ // The vla size may be a complex expression where multiple memory locations
+ // are tainted.
+ for (auto Sym : getTaintedSymbols(State, TaintedSVal))
+ report->markInteresting(Sym);
+ C.emitReport(std::move(report));
+}
+
+void VLASizeChecker::reportBug(VLASize_Kind Kind, const Expr *SizeE,
+ ProgramStateRef State, CheckerContext &C) const {
// Generate an error node.
ExplodedNode *N = C.generateErrorNode(State);
if (!N)
return;
if (!BT)
- BT.reset(new BuiltinBug(
- this, "Dangerous variable-length array (VLA) declaration"));
+ BT.reset(new BugType(this,
+ "Dangerous variable-length array (VLA) declaration",
+ categories::LogicError));
SmallString<256> buf;
llvm::raw_svector_ostream os(buf);
@@ -231,9 +254,6 @@ void VLASizeChecker::reportBug(
case VLA_Zero:
os << "has zero size";
break;
- case VLA_Tainted:
- os << "has tainted size";
- break;
case VLA_Negative:
os << "has negative size";
break;
@@ -243,7 +263,6 @@ void VLASizeChecker::reportBug(
}
auto report = std::make_unique<PathSensitiveBugReport>(*BT, os.str(), N);
- report->addVisitor(std::move(Visitor));
report->addRange(SizeE->getSourceRange());
bugreporter::trackExpressionValue(N, SizeE, *report);
C.emitReport(std::move(report));
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp
index 004b0b9d398b..a226a01ec0a5 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp
@@ -25,7 +25,7 @@ class UncountedLambdaCapturesChecker
private:
BugType Bug{this, "Lambda capture of uncounted variable",
"WebKit coding guidelines"};
- mutable BugReporter *BR;
+ mutable BugReporter *BR = nullptr;
public:
void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/APSIntType.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/APSIntType.cpp
index a1de10c89ed9..1185cdaa044a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/APSIntType.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/APSIntType.cpp
@@ -23,7 +23,7 @@ APSIntType::testInRange(const llvm::APSInt &Value,
unsigned MinBits;
if (AllowSignConversions) {
if (Value.isSigned() && !IsUnsigned)
- MinBits = Value.getMinSignedBits();
+ MinBits = Value.getSignificantBits();
else
MinBits = Value.getActiveBits();
@@ -33,7 +33,7 @@ APSIntType::testInRange(const llvm::APSInt &Value,
// Unsigned integers can be converted to unsigned integers of the same width
// or signed integers with one more bit.
if (Value.isSigned())
- MinBits = Value.getMinSignedBits() - IsUnsigned;
+ MinBits = Value.getSignificantBits() - IsUnsigned;
else
MinBits = Value.getActiveBits() + !IsUnsigned;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index ecfc7106560e..f9750db7b501 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -50,17 +50,14 @@ AnalysisManager::AnalysisManager(ASTContext &ASTCtx, Preprocessor &PP,
AnalysisManager::~AnalysisManager() {
FlushDiagnostics();
- for (PathDiagnosticConsumers::iterator I = PathConsumers.begin(),
- E = PathConsumers.end(); I != E; ++I) {
- delete *I;
+ for (PathDiagnosticConsumer *Consumer : PathConsumers) {
+ delete Consumer;
}
}
void AnalysisManager::FlushDiagnostics() {
PathDiagnosticConsumer::FilesMade filesMade;
- for (PathDiagnosticConsumers::iterator I = PathConsumers.begin(),
- E = PathConsumers.end();
- I != E; ++I) {
- (*I)->FlushDiagnostics(&filesMade);
+ for (PathDiagnosticConsumer *Consumer : PathConsumers) {
+ Consumer->FlushDiagnostics(&filesMade);
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index 40cdaef1bfa7..5924f6a671c2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -97,8 +97,7 @@ const llvm::APSInt& BasicValueFactory::getValue(const llvm::APSInt& X) {
FoldNodeTy* P = APSIntSet.FindNodeOrInsertPos(ID, InsertPos);
if (!P) {
- P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
- new (P) FoldNodeTy(X);
+ P = new (BPAlloc) FoldNodeTy(X);
APSIntSet.InsertNode(P, InsertPos);
}
@@ -132,8 +131,7 @@ BasicValueFactory::getCompoundValData(QualType T,
CompoundValData* D = CompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!D) {
- D = (CompoundValData*) BPAlloc.Allocate<CompoundValData>();
- new (D) CompoundValData(T, Vals);
+ D = new (BPAlloc) CompoundValData(T, Vals);
CompoundValDataSet.InsertNode(D, InsertPos);
}
@@ -151,8 +149,7 @@ BasicValueFactory::getLazyCompoundValData(const StoreRef &store,
LazyCompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!D) {
- D = (LazyCompoundValData*) BPAlloc.Allocate<LazyCompoundValData>();
- new (D) LazyCompoundValData(store, region);
+ D = new (BPAlloc) LazyCompoundValData(store, region);
LazyCompoundValDataSet.InsertNode(D, InsertPos);
}
@@ -169,8 +166,7 @@ const PointerToMemberData *BasicValueFactory::getPointerToMemberData(
PointerToMemberDataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!D) {
- D = (PointerToMemberData *)BPAlloc.Allocate<PointerToMemberData>();
- new (D) PointerToMemberData(ND, L);
+ D = new (BPAlloc) PointerToMemberData(ND, L);
PointerToMemberDataSet.InsertNode(D, InsertPos);
}
@@ -288,7 +284,7 @@ BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
if (V1.isSigned() && V1.isNegative())
return nullptr;
- if (V1.isSigned() && Amt > V1.countLeadingZeros())
+ if (V1.isSigned() && Amt > V1.countl_zero())
return nullptr;
}
@@ -358,8 +354,7 @@ BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) {
FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
if (!P) {
- P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
- new (P) FoldNodeTy(std::make_pair(V, Data));
+ P = new (BPAlloc) FoldNodeTy(std::make_pair(V, Data));
Map.InsertNode(P, InsertPos);
}
@@ -383,8 +378,7 @@ BasicValueFactory::getPersistentSValPair(const SVal& V1, const SVal& V2) {
FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
if (!P) {
- P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
- new (P) FoldNodeTy(std::make_pair(V1, V2));
+ P = new (BPAlloc) FoldNodeTy(std::make_pair(V1, V2));
Map.InsertNode(P, InsertPos);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index c3bd4876faf2..dc9820b61f1f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -296,26 +296,24 @@ std::string StackHintGeneratorForSymbol::getMessage(const ExplodedNode *N){
return {};
// Check if one of the parameters are set to the interesting symbol.
- unsigned ArgIndex = 0;
- for (CallExpr::const_arg_iterator I = CE->arg_begin(),
- E = CE->arg_end(); I != E; ++I, ++ArgIndex){
- SVal SV = N->getSVal(*I);
+ for (auto [Idx, ArgExpr] : llvm::enumerate(CE->arguments())) {
+ SVal SV = N->getSVal(ArgExpr);
// Check if the variable corresponding to the symbol is passed by value.
SymbolRef AS = SV.getAsLocSymbol();
if (AS == Sym) {
- return getMessageForArg(*I, ArgIndex);
+ return getMessageForArg(ArgExpr, Idx);
}
// Check if the parameter is a pointer to the symbol.
if (std::optional<loc::MemRegionVal> Reg = SV.getAs<loc::MemRegionVal>()) {
// Do not attempt to dereference void*.
- if ((*I)->getType()->isVoidPointerType())
+ if (ArgExpr->getType()->isVoidPointerType())
continue;
SVal PSV = N->getState()->getSVal(Reg->getRegion());
SymbolRef AS = PSV.getAsLocSymbol();
if (AS == Sym) {
- return getMessageForArg(*I, ArgIndex);
+ return getMessageForArg(ArgExpr, Idx);
}
}
}
@@ -2627,8 +2625,7 @@ BugPathInfo *BugPathGetter::getNextBugPath() {
const ExplodedNode *OrigN;
std::tie(CurrentBugPath.Report, OrigN) = ReportNodes.pop_back_val();
- assert(PriorityMap.find(OrigN) != PriorityMap.end() &&
- "error node not accessible from root");
+ assert(PriorityMap.contains(OrigN) && "error node not accessible from root");
// Create a new graph with a single path. This is the graph that will be
// returned to the caller.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 2b461acf9a73..42d03f67510c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -1786,6 +1786,7 @@ PathDiagnosticPieceRef StoreSiteFinder::VisitNode(const ExplodedNode *Succ,
void TrackConstraintBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
static int tag = 0;
ID.AddPointer(&tag);
+ ID.AddString(Message);
ID.AddBoolean(Assumption);
ID.Add(Constraint);
}
@@ -1796,8 +1797,12 @@ const char *TrackConstraintBRVisitor::getTag() {
return "TrackConstraintBRVisitor";
}
+bool TrackConstraintBRVisitor::isZeroCheck() const {
+ return !Assumption && Constraint.getAs<Loc>();
+}
+
bool TrackConstraintBRVisitor::isUnderconstrained(const ExplodedNode *N) const {
- if (IsZeroCheck)
+ if (isZeroCheck())
return N->getState()->isNull(Constraint).isUnderconstrained();
return (bool)N->getState()->assume(Constraint, !Assumption);
}
@@ -1827,19 +1832,6 @@ PathDiagnosticPieceRef TrackConstraintBRVisitor::VisitNode(
// the transition point.
assert(!isUnderconstrained(N));
- // We found the transition point for the constraint. We now need to
- // pretty-print the constraint. (work-in-progress)
- SmallString<64> sbuf;
- llvm::raw_svector_ostream os(sbuf);
-
- if (isa<Loc>(Constraint)) {
- os << "Assuming pointer value is ";
- os << (Assumption ? "non-null" : "null");
- }
-
- if (os.str().empty())
- return nullptr;
-
// Construct a new PathDiagnosticPiece.
ProgramPoint P = N->getLocation();
@@ -1854,7 +1846,7 @@ PathDiagnosticPieceRef TrackConstraintBRVisitor::VisitNode(
if (!L.isValid())
return nullptr;
- auto X = std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+ auto X = std::make_shared<PathDiagnosticEventPiece>(L, Message);
X->setTag(getTag());
return std::move(X);
}
@@ -2366,8 +2358,9 @@ public:
// null.
if (V.getAsLocSymbol(/*IncludeBaseRegions=*/true))
if (LVState->isNull(V).isConstrainedTrue())
- Report.addVisitor<TrackConstraintBRVisitor>(V.castAs<DefinedSVal>(),
- false);
+ Report.addVisitor<TrackConstraintBRVisitor>(
+ V.castAs<DefinedSVal>(),
+ /*Assumption=*/false, "Assuming pointer value is null");
// Add visitor, which will suppress inline defensive checks.
if (auto DV = V.getAs<DefinedSVal>())
@@ -2531,7 +2524,7 @@ public:
Report.markInteresting(RegionRVal, Opts.Kind);
Report.addVisitor<TrackConstraintBRVisitor>(
loc::MemRegionVal(RegionRVal),
- /*assumption=*/false);
+ /*Assumption=*/false, "Assuming pointer value is null");
Result.FoundSomethingToTrack = true;
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index 8516e3643425..195940e5e643 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -287,6 +287,7 @@ ProgramStateRef CallEvent::invalidateRegions(unsigned BlockCount,
ProgramPoint CallEvent::getProgramPoint(bool IsPreVisit,
const ProgramPointTag *Tag) const {
+
if (const Expr *E = getOriginExpr()) {
if (IsPreVisit)
return PreStmt(E, getLocationContext(), Tag);
@@ -295,11 +296,13 @@ ProgramPoint CallEvent::getProgramPoint(bool IsPreVisit,
const Decl *D = getDecl();
assert(D && "Cannot get a program point without a statement or decl");
+ assert(ElemRef.getParent() &&
+ "Cannot get a program point without a CFGElementRef");
SourceLocation Loc = getSourceRange().getBegin();
if (IsPreVisit)
- return PreImplicitCall(D, Loc, getLocationContext(), Tag);
- return PostImplicitCall(D, Loc, getLocationContext(), Tag);
+ return PreImplicitCall(D, Loc, getLocationContext(), ElemRef, Tag);
+ return PostImplicitCall(D, Loc, getLocationContext(), ElemRef, Tag);
}
SVal CallEvent::getArgSVal(unsigned Index) const {
@@ -1373,23 +1376,24 @@ void ObjCMethodCall::getInitialStackFrameContents(
CallEventRef<>
CallEventManager::getSimpleCall(const CallExpr *CE, ProgramStateRef State,
- const LocationContext *LCtx) {
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
if (const auto *MCE = dyn_cast<CXXMemberCallExpr>(CE))
- return create<CXXMemberCall>(MCE, State, LCtx);
+ return create<CXXMemberCall>(MCE, State, LCtx, ElemRef);
if (const auto *OpCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
const FunctionDecl *DirectCallee = OpCE->getDirectCallee();
if (const auto *MD = dyn_cast<CXXMethodDecl>(DirectCallee))
if (MD->isInstance())
- return create<CXXMemberOperatorCall>(OpCE, State, LCtx);
+ return create<CXXMemberOperatorCall>(OpCE, State, LCtx, ElemRef);
} else if (CE->getCallee()->getType()->isBlockPointerType()) {
- return create<BlockCall>(CE, State, LCtx);
+ return create<BlockCall>(CE, State, LCtx, ElemRef);
}
// Otherwise, it's a normal function call, static member function call, or
// something we can't reason about.
- return create<SimpleFunctionCall>(CE, State, LCtx);
+ return create<SimpleFunctionCall>(CE, State, LCtx, ElemRef);
}
CallEventRef<>
@@ -1397,12 +1401,14 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
ProgramStateRef State) {
const LocationContext *ParentCtx = CalleeCtx->getParent();
const LocationContext *CallerCtx = ParentCtx->getStackFrame();
+ CFGBlock::ConstCFGElementRef ElemRef = {CalleeCtx->getCallSiteBlock(),
+ CalleeCtx->getIndex()};
assert(CallerCtx && "This should not be used for top-level stack frames");
const Stmt *CallSite = CalleeCtx->getCallSite();
if (CallSite) {
- if (CallEventRef<> Out = getCall(CallSite, State, CallerCtx))
+ if (CallEventRef<> Out = getCall(CallSite, State, CallerCtx, ElemRef))
return Out;
SValBuilder &SVB = State->getStateManager().getSValBuilder();
@@ -1411,10 +1417,11 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
SVal ThisVal = State->getSVal(ThisPtr);
if (const auto *CE = dyn_cast<CXXConstructExpr>(CallSite))
- return getCXXConstructorCall(CE, ThisVal.getAsRegion(), State, CallerCtx);
+ return getCXXConstructorCall(CE, ThisVal.getAsRegion(), State, CallerCtx,
+ ElemRef);
else if (const auto *CIE = dyn_cast<CXXInheritedCtorInitExpr>(CallSite))
return getCXXInheritedConstructorCall(CIE, ThisVal.getAsRegion(), State,
- CallerCtx);
+ CallerCtx, ElemRef);
else {
// All other cases are handled by getCall.
llvm_unreachable("This is not an inlineable statement");
@@ -1444,19 +1451,20 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
return getCXXDestructorCall(Dtor, Trigger, ThisVal.getAsRegion(),
E.getAs<CFGBaseDtor>().has_value(), State,
- CallerCtx);
+ CallerCtx, ElemRef);
}
CallEventRef<> CallEventManager::getCall(const Stmt *S, ProgramStateRef State,
- const LocationContext *LC) {
+ const LocationContext *LC,
+ CFGBlock::ConstCFGElementRef ElemRef) {
if (const auto *CE = dyn_cast<CallExpr>(S)) {
- return getSimpleCall(CE, State, LC);
+ return getSimpleCall(CE, State, LC, ElemRef);
} else if (const auto *NE = dyn_cast<CXXNewExpr>(S)) {
- return getCXXAllocatorCall(NE, State, LC);
+ return getCXXAllocatorCall(NE, State, LC, ElemRef);
} else if (const auto *DE = dyn_cast<CXXDeleteExpr>(S)) {
- return getCXXDeallocatorCall(DE, State, LC);
+ return getCXXDeallocatorCall(DE, State, LC, ElemRef);
} else if (const auto *ME = dyn_cast<ObjCMessageExpr>(S)) {
- return getObjCMethodCall(ME, State, LC);
+ return getObjCMethodCall(ME, State, LC, ElemRef);
} else {
return nullptr;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
index 1e2532d27633..c25165cce128 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -14,6 +14,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/Basic/Builtins.h"
#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace ento;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
index d12c35ef156a..e9cc080caf5f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
@@ -23,6 +23,7 @@ const char *const CXXObjectLifecycle = "C++ object lifecycle";
const char *const CXXMoveSemantics = "C++ move semantics";
const char *const SecurityError = "Security error";
const char *const UnusedCode = "Unused code";
+const char *const TaintedData = "Tainted data used";
} // namespace categories
} // namespace ento
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp
index 3d017b81762a..0102f743c911 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp
@@ -17,9 +17,9 @@
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Basic/JsonSupport.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
-#include "clang/Basic/JsonSupport.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index d274d4d16db3..f84da769d182 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -233,8 +233,7 @@ void ExplodedNode::NodeGroup::addNode(ExplodedNode *N, ExplodedGraph &G) {
ExplodedNode *Old = Storage.get<ExplodedNode *>();
BumpVectorContext &Ctx = G.getNodeAllocator();
- V = G.getAllocator().Allocate<ExplodedNodeVector>();
- new (V) ExplodedNodeVector(Ctx, 4);
+ V = new (G.getAllocator()) ExplodedNodeVector(Ctx, 4);
V->push_back(Old, Ctx);
Storage = V;
@@ -408,7 +407,7 @@ ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L,
}
else {
// Allocate a new node.
- V = (NodeTy*) getAllocator().Allocate<NodeTy>();
+ V = getAllocator().Allocate<NodeTy>();
}
++NumNodes;
@@ -432,7 +431,7 @@ ExplodedNode *ExplodedGraph::createUncachedNode(const ProgramPoint &L,
ProgramStateRef State,
int64_t Id,
bool IsSink) {
- NodeTy *V = (NodeTy *) getAllocator().Allocate<NodeTy>();
+ NodeTy *V = getAllocator().Allocate<NodeTy>();
new (V) NodeTy(L, State, Id, IsSink);
return V;
}
@@ -488,7 +487,7 @@ ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
const ExplodedNode *N = WL2.pop_back_val();
// Skip this node if we have already processed it.
- if (Pass2.find(N) != Pass2.end())
+ if (Pass2.contains(N))
continue;
// Create the corresponding node in the new graph and record the mapping
@@ -509,9 +508,8 @@ ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
// Walk through the predecessors of 'N' and hook up their corresponding
// nodes in the new graph (if any) to the freshly created node.
- for (ExplodedNode::pred_iterator I = N->Preds.begin(), E = N->Preds.end();
- I != E; ++I) {
- Pass2Ty::iterator PI = Pass2.find(*I);
+ for (const ExplodedNode *Pred : N->Preds) {
+ Pass2Ty::iterator PI = Pass2.find(Pred);
if (PI == Pass2.end())
continue;
@@ -522,17 +520,16 @@ ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
// been created, we should hook them up as successors. Otherwise, enqueue
// the new nodes from the original graph that should have nodes created
// in the new graph.
- for (ExplodedNode::succ_iterator I = N->Succs.begin(), E = N->Succs.end();
- I != E; ++I) {
- Pass2Ty::iterator PI = Pass2.find(*I);
+ for (const ExplodedNode *Succ : N->Succs) {
+ Pass2Ty::iterator PI = Pass2.find(Succ);
if (PI != Pass2.end()) {
const_cast<ExplodedNode *>(PI->second)->addPredecessor(NewN, *G);
continue;
}
// Enqueue nodes to the worklist that were marked during pass 1.
- if (Pass1.count(*I))
- WL2.push_back(*I);
+ if (Pass1.count(Succ))
+ WL2.push_back(Succ);
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 977c2b7f51fd..144f034a9dfe 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -65,6 +65,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
@@ -386,15 +387,19 @@ ProgramStateRef ExprEngine::createTemporaryRegionIfNeeded(
State = finishObjectConstruction(State, MT, LC);
State = State->BindExpr(Result, LC, *V);
return State;
- } else {
+ } else if (const ValueDecl *VD = MT->getExtendingDecl()) {
StorageDuration SD = MT->getStorageDuration();
+ assert(SD != SD_FullExpression);
// If this object is bound to a reference with static storage duration, we
// put it in a different region to prevent "address leakage" warnings.
if (SD == SD_Static || SD == SD_Thread) {
- TR = MRMgr.getCXXStaticTempObjectRegion(Init);
+ TR = MRMgr.getCXXStaticLifetimeExtendedObjectRegion(Init, VD);
} else {
- TR = MRMgr.getCXXTempObjectRegion(Init, LC);
+ TR = MRMgr.getCXXLifetimeExtendedObjectRegion(Init, VD, LC);
}
+ } else {
+ assert(MT->getStorageDuration() == SD_FullExpression);
+ TR = MRMgr.getCXXTempObjectRegion(Init, LC);
}
} else {
TR = MRMgr.getCXXTempObjectRegion(Init, LC);
@@ -1242,7 +1247,7 @@ ExprEngine::prepareStateForArrayDestruction(const ProgramStateRef State,
const QualType &ElementTy,
const LocationContext *LCtx,
SVal *ElementCountVal) {
- assert(Region != nullptr && "Not-null region expected");
+ assert(Region != nullptr && "Not-null region expected");
QualType Ty = ElementTy.getDesugaredType(getContext());
while (const auto *NTy = dyn_cast<ArrayType>(Ty))
@@ -1313,7 +1318,8 @@ void ExprEngine::ProcessNewAllocator(const CXXNewExpr *NE,
else {
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
const LocationContext *LCtx = Pred->getLocationContext();
- PostImplicitCall PP(NE->getOperatorNew(), NE->getBeginLoc(), LCtx);
+ PostImplicitCall PP(NE->getOperatorNew(), NE->getBeginLoc(), LCtx,
+ getCFGElementRef());
Bldr.generateNode(PP, Pred->getState(), Pred);
}
Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
@@ -1361,7 +1367,8 @@ void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
static SimpleProgramPointTag PT(
"ExprEngine", "Skipping automatic 0 length array destruction, "
"which shouldn't be in the CFG.");
- PostImplicitCall PP(DtorDecl, varDecl->getLocation(), LCtx, &PT);
+ PostImplicitCall PP(DtorDecl, varDecl->getLocation(), LCtx,
+ getCFGElementRef(), &PT);
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
Bldr.generateSink(PP, Pred->getState(), Pred);
return;
@@ -1378,7 +1385,8 @@ void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
static SimpleProgramPointTag PT("ExprEngine",
"Prepare for object destruction");
- PreImplicitCall PP(DtorDecl, varDecl->getLocation(), LCtx, &PT);
+ PreImplicitCall PP(DtorDecl, varDecl->getLocation(), LCtx, getCFGElementRef(),
+ &PT);
Pred = Bldr.generateNode(PP, state, Pred);
if (!Pred)
@@ -1406,7 +1414,7 @@ void ExprEngine::ProcessDeleteDtor(const CFGDeleteDtor Dtor,
const CXXRecordDecl *RD = BTy->getAsCXXRecordDecl();
const CXXDestructorDecl *Dtor = RD->getDestructor();
- PostImplicitCall PP(Dtor, DE->getBeginLoc(), LCtx);
+ PostImplicitCall PP(Dtor, DE->getBeginLoc(), LCtx, getCFGElementRef());
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
Bldr.generateNode(PP, Pred->getState(), Pred);
return;
@@ -1439,7 +1447,8 @@ void ExprEngine::ProcessDeleteDtor(const CFGDeleteDtor Dtor,
static SimpleProgramPointTag PT(
"ExprEngine", "Skipping 0 length array delete destruction");
- PostImplicitCall PP(getDtorDecl(DTy), DE->getBeginLoc(), LCtx, &PT);
+ PostImplicitCall PP(getDtorDecl(DTy), DE->getBeginLoc(), LCtx,
+ getCFGElementRef(), &PT);
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
Bldr.generateNode(PP, Pred->getState(), Pred);
return;
@@ -1453,7 +1462,8 @@ void ExprEngine::ProcessDeleteDtor(const CFGDeleteDtor Dtor,
NodeBuilder Bldr(Pred, Dst, getBuilderContext());
static SimpleProgramPointTag PT("ExprEngine",
"Prepare for object destruction");
- PreImplicitCall PP(getDtorDecl(DTy), DE->getBeginLoc(), LCtx, &PT);
+ PreImplicitCall PP(getDtorDecl(DTy), DE->getBeginLoc(), LCtx,
+ getCFGElementRef(), &PT);
Pred = Bldr.generateNode(PP, State, Pred);
if (!Pred)
@@ -1513,7 +1523,8 @@ void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
static SimpleProgramPointTag PT(
"ExprEngine", "Skipping member 0 length array destruction, which "
"shouldn't be in the CFG.");
- PostImplicitCall PP(DtorDecl, Member->getLocation(), LCtx, &PT);
+ PostImplicitCall PP(DtorDecl, Member->getLocation(), LCtx,
+ getCFGElementRef(), &PT);
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
Bldr.generateSink(PP, Pred->getState(), Pred);
return;
@@ -1529,7 +1540,8 @@ void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
static SimpleProgramPointTag PT("ExprEngine",
"Prepare for object destruction");
- PreImplicitCall PP(DtorDecl, Member->getLocation(), LCtx, &PT);
+ PreImplicitCall PP(DtorDecl, Member->getLocation(), LCtx, getCFGElementRef(),
+ &PT);
Pred = Bldr.generateNode(PP, State, Pred);
if (!Pred)
@@ -1565,7 +1577,7 @@ void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
PostImplicitCall PP(D.getDestructorDecl(getContext()),
D.getBindTemporaryExpr()->getBeginLoc(),
- Pred->getLocationContext());
+ Pred->getLocationContext(), getCFGElementRef());
Bldr.generateNode(PP, State, Pred);
return;
}
@@ -2114,7 +2126,6 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
}
}
}
- // FALLTHROUGH
[[fallthrough]];
}
@@ -2630,9 +2641,7 @@ static const Stmt *ResolveCondition(const Stmt *Condition,
// The invariants are still shifting, but it is possible that the
// last element in a CFGBlock is not a CFGStmt. Look for the last
// CFGStmt as the value of the condition.
- CFGBlock::const_reverse_iterator I = B->rbegin(), E = B->rend();
- for (; I != E; ++I) {
- CFGElement Elem = *I;
+ for (CFGElement Elem : llvm::reverse(*B)) {
std::optional<CFGStmt> CS = Elem.getAs<CFGStmt>();
if (!CS)
continue;
@@ -2840,9 +2849,9 @@ void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
if (std::optional<loc::GotoLabel> LV = V.getAs<loc::GotoLabel>()) {
const LabelDecl *L = LV->getLabel();
- for (iterator I = builder.begin(), E = builder.end(); I != E; ++I) {
- if (I.getLabel() == L) {
- builder.generateNode(I, state);
+ for (iterator Succ : builder) {
+ if (Succ.getLabel() == L) {
+ builder.generateNode(Succ, state);
return;
}
}
@@ -2861,8 +2870,8 @@ void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
// This is really a catch-all. We don't support symbolics yet.
// FIXME: Implement dispatch for symbolic pointers.
- for (iterator I = builder.begin(), E = builder.end(); I != E; ++I)
- builder.generateNode(I, state);
+ for (iterator Succ : builder)
+ builder.generateNode(Succ, state);
}
void ExprEngine::processBeginOfFunction(NodeBuilderContext &BC,
@@ -3795,12 +3804,9 @@ struct DOTGraphTraits<ExplodedGraph*> : public DefaultDOTGraphTraits {
BugReporter &BR = static_cast<ExprEngine &>(
N->getState()->getStateManager().getOwningEngine()).getBugReporter();
- const auto EQClasses =
- llvm::make_range(BR.EQClasses_begin(), BR.EQClasses_end());
-
- for (const auto &EQ : EQClasses) {
- for (const auto &I : EQ.getReports()) {
- const auto *PR = dyn_cast<PathSensitiveBugReport>(I.get());
+ for (const auto &Class : BR.equivalenceClasses()) {
+ for (const auto &Report : Class.getReports()) {
+ const auto *PR = dyn_cast<PathSensitiveBugReport>(Report.get());
if (!PR)
continue;
const ExplodedNode *EN = PR->getErrorNode();
@@ -3898,10 +3904,9 @@ std::string ExprEngine::DumpGraph(bool trim, StringRef Filename) {
std::vector<const ExplodedNode *> Src;
// Iterate through the reports and get their nodes.
- for (BugReporter::EQClasses_iterator
- EI = BR.EQClasses_begin(), EE = BR.EQClasses_end(); EI != EE; ++EI) {
+ for (const auto &Class : BR.equivalenceClasses()) {
const auto *R =
- dyn_cast<PathSensitiveBugReport>(EI->getReports()[0].get());
+ dyn_cast<PathSensitiveBugReport>(Class.getReports()[0].get());
if (!R)
continue;
const auto *N = const_cast<ExplodedNode *>(R->getErrorNode());
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 6652c065e04f..2a47116db55a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/ExprCXX.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include <optional>
@@ -133,11 +133,9 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
SVal location = LeftV;
evalLoad(Tmp, B, LHS, *it, state, location);
- for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E;
- ++I) {
-
- state = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
+ for (ExplodedNode *N : Tmp) {
+ state = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
SVal V = state->getSVal(LHS, LCtx);
// Get the computation type.
@@ -171,8 +169,7 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
currBldrCtx->blockCount());
// However, we need to convert the symbol to the computation type.
Result = svalBuilder.evalCast(LHSVal, CTy, LTy);
- }
- else {
+ } else {
// The left-hand side may bind to a different value then the
// computation type.
LHSVal = svalBuilder.evalCast(Result, LTy, CTy);
@@ -185,7 +182,7 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
else
state = state->BindExpr(B, LCtx, Result);
- evalStore(Tmp2, B, LHS, *I, state, location, LHSVal);
+ evalStore(Tmp2, B, LHS, N, state, location, LHSVal);
}
}
@@ -211,14 +208,12 @@ void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
if (const BlockDataRegion *BDR =
dyn_cast_or_null<BlockDataRegion>(V.getAsRegion())) {
- BlockDataRegion::referenced_vars_iterator I = BDR->referenced_vars_begin(),
- E = BDR->referenced_vars_end();
-
+ auto ReferencedVars = BDR->referenced_vars();
auto CI = BD->capture_begin();
auto CE = BD->capture_end();
- for (; I != E; ++I) {
- const VarRegion *capturedR = I.getCapturedRegion();
- const TypedValueRegion *originalR = I.getOriginalRegion();
+ for (auto Var : ReferencedVars) {
+ const VarRegion *capturedR = Var.getCapturedRegion();
+ const TypedValueRegion *originalR = Var.getOriginalRegion();
// If the capture had a copy expression, use the result of evaluating
// that expression, otherwise use the original value.
@@ -291,9 +286,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
if (CastE->getCastKind() == CK_LValueToRValue ||
CastE->getCastKind() == CK_LValueToRValueBitCast) {
- for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
- I!=E; ++I) {
- ExplodedNode *subExprNode = *I;
+ for (ExplodedNode *subExprNode : dstPreStmt) {
ProgramStateRef state = subExprNode->getState();
const LocationContext *LCtx = subExprNode->getLocationContext();
evalLoad(Dst, CastE, CastE, subExprNode, state, state->getSVal(Ex, LCtx));
@@ -309,10 +302,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
T = ExCast->getTypeAsWritten();
StmtNodeBuilder Bldr(dstPreStmt, Dst, *currBldrCtx);
- for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
- I != E; ++I) {
-
- Pred = *I;
+ for (ExplodedNode *Pred : dstPreStmt) {
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
@@ -883,8 +873,7 @@ VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
QualType T = Ex->getTypeOfArgument();
- for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
- I != E; ++I) {
+ for (ExplodedNode *N : CheckedSet) {
if (Ex->getKind() == UETT_SizeOf) {
if (!T->isIncompleteType() && !T->isConstantSizeType()) {
assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");
@@ -903,18 +892,17 @@ VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
APSInt Value = Ex->EvaluateKnownConstInt(getContext());
CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());
- ProgramStateRef state = (*I)->getState();
- state = state->BindExpr(Ex, (*I)->getLocationContext(),
- svalBuilder.makeIntVal(amt.getQuantity(),
- Ex->getType()));
- Bldr.generateNode(Ex, *I, state);
+ ProgramStateRef state = N->getState();
+ state = state->BindExpr(
+ Ex, N->getLocationContext(),
+ svalBuilder.makeIntVal(amt.getQuantity(), Ex->getType()));
+ Bldr.generateNode(Ex, N, state);
}
getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, Ex, *this);
}
-void ExprEngine::handleUOExtension(ExplodedNodeSet::iterator I,
- const UnaryOperator *U,
+void ExprEngine::handleUOExtension(ExplodedNode *N, const UnaryOperator *U,
StmtNodeBuilder &Bldr) {
// FIXME: We can probably just have some magic in Environment::getSVal()
// that propagates values, instead of creating a new node here.
@@ -924,10 +912,9 @@ void ExprEngine::handleUOExtension(ExplodedNodeSet::iterator I,
// generate an extra node that just propagates the value of the
// subexpression.
const Expr *Ex = U->getSubExpr()->IgnoreParens();
- ProgramStateRef state = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
- Bldr.generateNode(U, *I, state->BindExpr(U, LCtx,
- state->getSVal(Ex, LCtx)));
+ ProgramStateRef state = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
+ Bldr.generateNode(U, N, state->BindExpr(U, LCtx, state->getSVal(Ex, LCtx)));
}
void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
@@ -939,13 +926,12 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
ExplodedNodeSet EvalSet;
StmtNodeBuilder Bldr(CheckedSet, EvalSet, *currBldrCtx);
- for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
- I != E; ++I) {
+ for (ExplodedNode *N : CheckedSet) {
switch (U->getOpcode()) {
default: {
- Bldr.takeNodes(*I);
+ Bldr.takeNodes(N);
ExplodedNodeSet Tmp;
- VisitIncrementDecrementOperator(U, *I, Tmp);
+ VisitIncrementDecrementOperator(U, N, Tmp);
Bldr.addNodes(Tmp);
break;
}
@@ -960,10 +946,10 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
// For all other types, UO_Real is an identity operation.
assert (U->getType() == Ex->getType());
- ProgramStateRef state = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
- Bldr.generateNode(U, *I, state->BindExpr(U, LCtx,
- state->getSVal(Ex, LCtx)));
+ ProgramStateRef state = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
+ Bldr.generateNode(U, N,
+ state->BindExpr(U, LCtx, state->getSVal(Ex, LCtx)));
break;
}
@@ -975,10 +961,10 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
break;
}
// For all other types, UO_Imag returns 0.
- ProgramStateRef state = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
+ ProgramStateRef state = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
SVal X = svalBuilder.makeZeroVal(Ex->getType());
- Bldr.generateNode(U, *I, state->BindExpr(U, LCtx, X));
+ Bldr.generateNode(U, N, state->BindExpr(U, LCtx, X));
break;
}
@@ -989,15 +975,15 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
const ValueDecl *VD = DRE->getDecl();
if (isa<CXXMethodDecl, FieldDecl, IndirectFieldDecl>(VD)) {
- ProgramStateRef State = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
+ ProgramStateRef State = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
SVal SV = svalBuilder.getMemberPointer(cast<NamedDecl>(VD));
- Bldr.generateNode(U, *I, State->BindExpr(U, LCtx, SV));
+ Bldr.generateNode(U, N, State->BindExpr(U, LCtx, SV));
break;
}
}
// Explicitly proceed with default handler for this case cascade.
- handleUOExtension(I, U, Bldr);
+ handleUOExtension(N, U, Bldr);
break;
}
case UO_Plus:
@@ -1005,7 +991,7 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
[[fallthrough]];
case UO_Deref:
case UO_Extension: {
- handleUOExtension(I, U, Bldr);
+ handleUOExtension(N, U, Bldr);
break;
}
@@ -1014,14 +1000,14 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
case UO_Not: {
assert (!U->isGLValue());
const Expr *Ex = U->getSubExpr()->IgnoreParens();
- ProgramStateRef state = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
+ ProgramStateRef state = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
// Get the value of the subexpression.
SVal V = state->getSVal(Ex, LCtx);
if (V.isUnknownOrUndef()) {
- Bldr.generateNode(U, *I, state->BindExpr(U, LCtx, V));
+ Bldr.generateNode(U, N, state->BindExpr(U, LCtx, V));
break;
}
@@ -1058,7 +1044,7 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
state = state->BindExpr(U, LCtx, Result);
break;
}
- Bldr.generateNode(U, *I, state);
+ Bldr.generateNode(U, N, state);
break;
}
}
@@ -1084,10 +1070,9 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
ExplodedNodeSet Dst2;
StmtNodeBuilder Bldr(Tmp, Dst2, *currBldrCtx);
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end();I!=E;++I) {
-
- state = (*I)->getState();
- assert(LCtx == (*I)->getLocationContext());
+ for (ExplodedNode *N : Tmp) {
+ state = N->getState();
+ assert(LCtx == N->getLocationContext());
SVal V2_untested = state->getSVal(Ex, LCtx);
// Propagate unknown and undefined values.
@@ -1095,9 +1080,9 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
state = state->BindExpr(U, LCtx, V2_untested);
// Perform the store, so that the uninitialized value detection happens.
- Bldr.takeNodes(*I);
+ Bldr.takeNodes(N);
ExplodedNodeSet Dst3;
- evalStore(Dst3, U, Ex, *I, state, loc, V2_untested);
+ evalStore(Dst3, U, Ex, N, state, loc, V2_untested);
Bldr.addNodes(Dst3);
continue;
@@ -1163,9 +1148,9 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
state = state->BindExpr(U, LCtx, U->isPostfix() ? V2 : Result);
// Perform the store.
- Bldr.takeNodes(*I);
+ Bldr.takeNodes(N);
ExplodedNodeSet Dst3;
- evalStore(Dst3, U, Ex, *I, state, loc, Result);
+ evalStore(Dst3, U, Ex, N, state, loc, Result);
Bldr.addNodes(Dst3);
}
Dst.insert(Dst2);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index 6eb37287b136..7ee7c1394a67 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -20,6 +20,8 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Sequence.h"
#include <optional>
using namespace clang;
@@ -59,40 +61,39 @@ void ExprEngine::performTrivialCopy(NodeBuilder &Bldr, ExplodedNode *Pred,
AlwaysReturnsLValue = true;
}
- assert(ThisRD);
- if (ThisRD->isEmpty()) {
- // Do nothing for empty classes. Otherwise it'd retrieve an UnknownVal
- // and bind it and RegionStore would think that the actual value
- // in this region at this offset is unknown.
- return;
- }
-
const LocationContext *LCtx = Pred->getLocationContext();
+ const Expr *CallExpr = Call.getOriginExpr();
ExplodedNodeSet Dst;
Bldr.takeNodes(Pred);
- SVal V = Call.getArgSVal(0);
-
- // If the value being copied is not unknown, load from its location to get
- // an aggregate rvalue.
- if (std::optional<Loc> L = V.getAs<Loc>())
- V = Pred->getState()->getSVal(*L);
- else
- assert(V.isUnknownOrUndef());
+ assert(ThisRD);
+ if (!ThisRD->isEmpty()) {
+ // Load the source value only for non-empty classes.
+ // Otherwise it'd retrieve an UnknownVal
+ // and bind it and RegionStore would think that the actual value
+ // in this region at this offset is unknown.
+ SVal V = Call.getArgSVal(0);
- const Expr *CallExpr = Call.getOriginExpr();
- evalBind(Dst, CallExpr, Pred, ThisVal, V, true);
+ // If the value being copied is not unknown, load from its location to get
+ // an aggregate rvalue.
+ if (std::optional<Loc> L = V.getAs<Loc>())
+ V = Pred->getState()->getSVal(*L);
+ else
+ assert(V.isUnknownOrUndef());
+ evalBind(Dst, CallExpr, Pred, ThisVal, V, true);
+ } else {
+ Dst.Add(Pred);
+ }
PostStmt PS(CallExpr, LCtx);
- for (ExplodedNodeSet::iterator I = Dst.begin(), E = Dst.end();
- I != E; ++I) {
- ProgramStateRef State = (*I)->getState();
+ for (ExplodedNode *N : Dst) {
+ ProgramStateRef State = N->getState();
if (AlwaysReturnsLValue)
State = State->BindExpr(CallExpr, LCtx, ThisVal);
else
State = bindReturnValue(Call, LCtx, State);
- Bldr.generateNode(PS, State, *I);
+ Bldr.generateNode(PS, State, N);
}
}
@@ -284,7 +285,8 @@ SVal ExprEngine::computeObjectUnderConstruction(
CallOpts.IsTemporaryCtorOrDtor = true;
if (MTE) {
if (const ValueDecl *VD = MTE->getExtendingDecl()) {
- assert(MTE->getStorageDuration() != SD_FullExpression);
+ StorageDuration SD = MTE->getStorageDuration();
+ assert(SD != SD_FullExpression);
if (!VD->getType()->isReferenceType()) {
// We're lifetime-extended by a surrounding aggregate.
// Automatic destructors aren't quite working in this case
@@ -293,11 +295,15 @@ SVal ExprEngine::computeObjectUnderConstruction(
// the MaterializeTemporaryExpr?
CallOpts.IsTemporaryLifetimeExtendedViaAggregate = true;
}
- }
- if (MTE->getStorageDuration() == SD_Static ||
- MTE->getStorageDuration() == SD_Thread)
- return loc::MemRegionVal(MRMgr.getCXXStaticTempObjectRegion(E));
+ if (SD == SD_Static || SD == SD_Thread)
+ return loc::MemRegionVal(
+ MRMgr.getCXXStaticLifetimeExtendedObjectRegion(E, VD));
+
+ return loc::MemRegionVal(
+ MRMgr.getCXXLifetimeExtendedObjectRegion(E, VD, LCtx));
+ }
+ assert(MTE->getStorageDuration() == SD_FullExpression);
}
return loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
@@ -357,7 +363,8 @@ SVal ExprEngine::computeObjectUnderConstruction(
};
if (const auto *CE = dyn_cast<CallExpr>(E)) {
- CallEventRef<> Caller = CEMgr.getSimpleCall(CE, State, LCtx);
+ CallEventRef<> Caller =
+ CEMgr.getSimpleCall(CE, State, LCtx, getCFGElementRef());
if (std::optional<SVal> V = getArgLoc(Caller))
return *V;
else
@@ -365,14 +372,15 @@ SVal ExprEngine::computeObjectUnderConstruction(
} else if (const auto *CCE = dyn_cast<CXXConstructExpr>(E)) {
// Don't bother figuring out the target region for the future
// constructor because we won't need it.
- CallEventRef<> Caller =
- CEMgr.getCXXConstructorCall(CCE, /*Target=*/nullptr, State, LCtx);
+ CallEventRef<> Caller = CEMgr.getCXXConstructorCall(
+ CCE, /*Target=*/nullptr, State, LCtx, getCFGElementRef());
if (std::optional<SVal> V = getArgLoc(Caller))
return *V;
else
break;
} else if (const auto *ME = dyn_cast<ObjCMessageExpr>(E)) {
- CallEventRef<> Caller = CEMgr.getObjCMethodCall(ME, State, LCtx);
+ CallEventRef<> Caller =
+ CEMgr.getObjCMethodCall(ME, State, LCtx, getCFGElementRef());
if (std::optional<SVal> V = getArgLoc(Caller))
return *V;
else
@@ -726,9 +734,9 @@ void ExprEngine::handleConstructor(const Expr *E,
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<> Call =
CIE ? (CallEventRef<>)CEMgr.getCXXInheritedConstructorCall(
- CIE, TargetRegion, State, LCtx)
+ CIE, TargetRegion, State, LCtx, getCFGElementRef())
: (CallEventRef<>)CEMgr.getCXXConstructorCall(
- CE, TargetRegion, State, LCtx);
+ CE, TargetRegion, State, LCtx, getCFGElementRef());
ExplodedNodeSet DstPreVisit;
getCheckerManager().runCheckersForPreStmt(DstPreVisit, Pred, E, *this);
@@ -737,10 +745,8 @@ void ExprEngine::handleConstructor(const Expr *E,
if (CE) {
// FIXME: Is it possible and/or useful to do this before PreStmt?
StmtNodeBuilder Bldr(DstPreVisit, PreInitialized, *currBldrCtx);
- for (ExplodedNodeSet::iterator I = DstPreVisit.begin(),
- E = DstPreVisit.end();
- I != E; ++I) {
- ProgramStateRef State = (*I)->getState();
+ for (ExplodedNode *N : DstPreVisit) {
+ ProgramStateRef State = N->getState();
if (CE->requiresZeroInitialization()) {
// FIXME: Once we properly handle constructors in new-expressions, we'll
// need to invalidate the region before setting a default value, to make
@@ -757,7 +763,7 @@ void ExprEngine::handleConstructor(const Expr *E,
State = State->bindDefaultZero(Target, LCtx);
}
- Bldr.generateNode(CE, *I, State, /*tag=*/nullptr,
+ Bldr.generateNode(CE, N, State, /*tag=*/nullptr,
ProgramPoint::PreStmtKind);
}
} else {
@@ -775,14 +781,12 @@ void ExprEngine::handleConstructor(const Expr *E,
!CallOpts.IsArrayCtorOrDtor) {
StmtNodeBuilder Bldr(DstPreCall, DstEvaluated, *currBldrCtx);
// FIXME: Handle other kinds of trivial constructors as well.
- for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
- I != E; ++I)
- performTrivialCopy(Bldr, *I, *Call);
+ for (ExplodedNode *N : DstPreCall)
+ performTrivialCopy(Bldr, N, *Call);
} else {
- for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
- I != E; ++I)
- getCheckerManager().runCheckersForEvalCall(DstEvaluated, *I, *Call, *this,
+ for (ExplodedNode *N : DstPreCall)
+ getCheckerManager().runCheckersForEvalCall(DstEvaluated, N, *Call, *this,
CallOpts);
}
@@ -797,7 +801,8 @@ void ExprEngine::handleConstructor(const Expr *E,
StmtNodeBuilder Bldr(DstEvaluated, DstEvaluatedPostProcessed, *currBldrCtx);
const AnalysisDeclContext *ADC = LCtx->getAnalysisDeclContext();
if (!ADC->getCFGBuildOptions().AddTemporaryDtors) {
- if (llvm::isa_and_nonnull<CXXTempObjectRegion>(TargetRegion) &&
+ if (llvm::isa_and_nonnull<CXXTempObjectRegion,
+ CXXLifetimeExtendedObjectRegion>(TargetRegion) &&
cast<CXXConstructorDecl>(Call->getDecl())
->getParent()
->isAnyDestructorNoReturn()) {
@@ -869,7 +874,8 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
// it would interrupt the analysis instead.
static SimpleProgramPointTag T("ExprEngine", "SkipInvalidDestructor");
// FIXME: PostImplicitCall with a null decl may crash elsewhere anyway.
- PostImplicitCall PP(/*Decl=*/nullptr, S->getEndLoc(), LCtx, &T);
+ PostImplicitCall PP(/*Decl=*/nullptr, S->getEndLoc(), LCtx,
+ getCFGElementRef(), &T);
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
Bldr.generateNode(PP, Pred->getState(), Pred);
return;
@@ -894,8 +900,8 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
}
CallEventManager &CEMgr = getStateManager().getCallEventManager();
- CallEventRef<CXXDestructorCall> Call =
- CEMgr.getCXXDestructorCall(DtorDecl, S, Dest, IsBaseDtor, State, LCtx);
+ CallEventRef<CXXDestructorCall> Call = CEMgr.getCXXDestructorCall(
+ DtorDecl, S, Dest, IsBaseDtor, State, LCtx, getCFGElementRef());
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
Call->getSourceRange().getBegin(),
@@ -907,9 +913,8 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
ExplodedNodeSet DstInvalidated;
StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currBldrCtx);
- for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
- I != E; ++I)
- defaultEvalCall(Bldr, *I, *Call, CallOpts);
+ for (ExplodedNode *N : DstPreCall)
+ defaultEvalCall(Bldr, N, *Call, CallOpts);
getCheckerManager().runCheckersForPostCall(Dst, DstInvalidated,
*Call, *this);
@@ -925,7 +930,7 @@ void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE,
"Error evaluating New Allocator Call");
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<CXXAllocatorCall> Call =
- CEMgr.getCXXAllocatorCall(CNE, State, LCtx);
+ CEMgr.getCXXAllocatorCall(CNE, State, LCtx, getCFGElementRef());
ExplodedNodeSet DstPreCall;
getCheckerManager().runCheckersForPreCall(DstPreCall, Pred,
@@ -1023,7 +1028,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<CXXAllocatorCall> Call =
- CEMgr.getCXXAllocatorCall(CNE, State, LCtx);
+ CEMgr.getCXXAllocatorCall(CNE, State, LCtx, getCFGElementRef());
if (!AMgr.getAnalyzerOptions().MayInlineCXXAllocator) {
// Invalidate placement args.
@@ -1124,7 +1129,7 @@ void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<CXXDeallocatorCall> Call = CEMgr.getCXXDeallocatorCall(
- CDE, Pred->getState(), Pred->getLocationContext());
+ CDE, Pred->getState(), Pred->getLocationContext(), getCFGElementRef());
ExplodedNodeSet DstPreCall;
getCheckerManager().runCheckersForPreCall(DstPreCall, Pred, *Call, *this);
@@ -1188,18 +1193,13 @@ void ExprEngine::VisitLambdaExpr(const LambdaExpr *LE, ExplodedNode *Pred,
// If we created a new MemRegion for the lambda, we should explicitly bind
// the captures.
- unsigned Idx = 0;
- CXXRecordDecl::field_iterator CurField = LE->getLambdaClass()->field_begin();
- for (LambdaExpr::const_capture_init_iterator i = LE->capture_init_begin(),
- e = LE->capture_init_end();
- i != e; ++i, ++CurField, ++Idx) {
- FieldDecl *FieldForCapture = *CurField;
+ for (auto const [Idx, FieldForCapture, InitExpr] :
+ llvm::zip(llvm::seq<unsigned>(0, -1), LE->getLambdaClass()->fields(),
+ LE->capture_inits())) {
SVal FieldLoc = State->getLValue(FieldForCapture, V);
SVal InitVal;
if (!FieldForCapture->hasCapturedVLAType()) {
- const Expr *InitExpr = *i;
-
assert(InitExpr && "Capture missing initialization expression");
// Capturing a 0 length array is a no-op, so we ignore it to get a more
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 54528475cb31..b987ce278936 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -374,17 +374,15 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
CleanedNodes.Add(CEBNode);
}
- for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
- E = CleanedNodes.end(); I != E; ++I) {
-
+ for (ExplodedNode *N : CleanedNodes) {
// Step 4: Generate the CallExit and leave the callee's context.
// CleanedNodes -> CEENode
CallExitEnd Loc(calleeCtx, callerCtx);
bool isNew;
- ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
+ ProgramStateRef CEEState = (N == CEBNode) ? state : N->getState();
ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
- CEENode->addPredecessor(*I, G);
+ CEENode->addPredecessor(N, G);
if (!isNew)
return;
@@ -610,15 +608,14 @@ void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
// Get the call in its initial state. We use this as a template to perform
// all the checks.
CallEventManager &CEMgr = getStateManager().getCallEventManager();
- CallEventRef<> CallTemplate
- = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());
+ CallEventRef<> CallTemplate = CEMgr.getSimpleCall(
+ CE, Pred->getState(), Pred->getLocationContext(), getCFGElementRef());
// Evaluate the function call. We try each of the checkers
// to see if the can evaluate the function call.
ExplodedNodeSet dstCallEvaluated;
- for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
- I != E; ++I) {
- evalCall(dstCallEvaluated, *I, *CallTemplate);
+ for (ExplodedNode *N : dstPreVisit) {
+ evalCall(dstCallEvaluated, N, *CallTemplate);
}
// Finally, perform the post-condition check of the CallExpr and store
@@ -837,7 +834,8 @@ void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
State = bindReturnValue(Call, Pred->getLocationContext(), State);
// And make the result node.
- Bldr.generateNode(Call.getProgramPoint(), State, Pred);
+ static SimpleProgramPointTag PT("ExprEngine", "Conservative eval call");
+ Bldr.generateNode(Call.getProgramPoint(false, &PT), State, Pred);
}
ExprEngine::CallInlinePolicy
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
index 25c36e9aea24..8072531ef6fd 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -148,8 +148,8 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
CallEventManager &CEMgr = getStateManager().getCallEventManager();
- CallEventRef<ObjCMethodCall> Msg =
- CEMgr.getObjCMethodCall(ME, Pred->getState(), Pred->getLocationContext());
+ CallEventRef<ObjCMethodCall> Msg = CEMgr.getObjCMethodCall(
+ ME, Pred->getState(), Pred->getLocationContext(), getCFGElementRef());
// There are three cases for the receiver:
// (1) it is definitely nil,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index b4578385a147..0fe0c93dc016 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -602,9 +602,9 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
// Output any other meta data.
- for (PathDiagnostic::meta_iterator I = D.meta_begin(), E = D.meta_end();
- I != E; ++I) {
- os << "<tr><td></td><td>" << html::EscapeText(*I) << "</td></tr>\n";
+ for (const std::string &Metadata :
+ llvm::make_range(D.meta_begin(), D.meta_end())) {
+ os << "<tr><td></td><td>" << html::EscapeText(Metadata) << "</td></tr>\n";
}
os << R"<<<(
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index 0c126a632f74..16db6b249dc9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -39,6 +39,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CheckedArithmetic.h"
@@ -73,8 +74,7 @@ RegionTy* MemRegionManager::getSubRegion(const Arg1Ty arg1,
auto *R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos));
if (!R) {
- R = A.Allocate<RegionTy>();
- new (R) RegionTy(arg1, superRegion);
+ R = new (A) RegionTy(arg1, superRegion);
Regions.InsertNode(R, InsertPos);
}
@@ -90,8 +90,7 @@ RegionTy* MemRegionManager::getSubRegion(const Arg1Ty arg1, const Arg2Ty arg2,
auto *R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos));
if (!R) {
- R = A.Allocate<RegionTy>();
- new (R) RegionTy(arg1, arg2, superRegion);
+ R = new (A) RegionTy(arg1, arg2, superRegion);
Regions.InsertNode(R, InsertPos);
}
@@ -109,8 +108,7 @@ RegionTy* MemRegionManager::getSubRegion(const Arg1Ty arg1, const Arg2Ty arg2,
auto *R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos));
if (!R) {
- R = A.Allocate<RegionTy>();
- new (R) RegionTy(arg1, arg2, arg3, superRegion);
+ R = new (A) RegionTy(arg1, arg2, arg3, superRegion);
Regions.InsertNode(R, InsertPos);
}
@@ -161,6 +159,18 @@ const StackFrameContext *VarRegion::getStackFrame() const {
return SSR ? SSR->getStackFrame() : nullptr;
}
+const StackFrameContext *
+CXXLifetimeExtendedObjectRegion::getStackFrame() const {
+ const auto *SSR = dyn_cast<StackSpaceRegion>(getMemorySpace());
+ return SSR ? SSR->getStackFrame() : nullptr;
+}
+
+const StackFrameContext *CXXTempObjectRegion::getStackFrame() const {
+ assert(isa<StackSpaceRegion>(getMemorySpace()) &&
+ "A temporary object can only be allocated on the stack");
+ return cast<StackSpaceRegion>(getMemorySpace())->getStackFrame();
+}
+
ObjCIvarRegion::ObjCIvarRegion(const ObjCIvarDecl *ivd, const SubRegion *sReg)
: DeclRegion(sReg, ObjCIvarRegionKind), IVD(ivd) {
assert(IVD);
@@ -392,6 +402,20 @@ void CXXTempObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
ProfileRegion(ID, Ex, getSuperRegion());
}
+void CXXLifetimeExtendedObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const Expr *E,
+ const ValueDecl *D,
+ const MemRegion *sReg) {
+ ID.AddPointer(E);
+ ID.AddPointer(D);
+ ID.AddPointer(sReg);
+}
+
+void CXXLifetimeExtendedObjectRegion::Profile(
+ llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, Ex, ExD, getSuperRegion());
+}
+
void CXXBaseObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
const CXXRecordDecl *RD,
bool IsVirtual,
@@ -468,11 +492,9 @@ void BlockCodeRegion::dumpToStream(raw_ostream &os) const {
void BlockDataRegion::dumpToStream(raw_ostream &os) const {
os << "block_data{" << BC;
os << "; ";
- for (BlockDataRegion::referenced_vars_iterator
- I = referenced_vars_begin(),
- E = referenced_vars_end(); I != E; ++I)
- os << "(" << I.getCapturedRegion() << "<-" <<
- I.getOriginalRegion() << ") ";
+ for (auto Var : referenced_vars())
+ os << "(" << Var.getCapturedRegion() << "<-" << Var.getOriginalRegion()
+ << ") ";
os << '}';
}
@@ -486,6 +508,16 @@ void CXXTempObjectRegion::dumpToStream(raw_ostream &os) const {
<< "S" << Ex->getID(getContext()) << '}';
}
+void CXXLifetimeExtendedObjectRegion::dumpToStream(raw_ostream &os) const {
+ os << "lifetime_extended_object{" << getValueType() << ", ";
+ if (const IdentifierInfo *ID = ExD->getIdentifier())
+ os << ID->getName();
+ else
+ os << "D" << ExD->getID();
+ os << ", "
+ << "S" << Ex->getID(getContext()) << '}';
+}
+
void CXXBaseObjectRegion::dumpToStream(raw_ostream &os) const {
os << "Base{" << superRegion << ',' << getDecl()->getName() << '}';
}
@@ -712,21 +744,17 @@ std::string MemRegion::getDescriptiveName(bool UseQuotes) const {
}
SourceRange MemRegion::sourceRange() const {
- const auto *const VR = dyn_cast<VarRegion>(this->getBaseRegion());
- const auto *const FR = dyn_cast<FieldRegion>(this);
-
// Check for more specific regions first.
- // FieldRegion
- if (FR) {
+ if (auto *FR = dyn_cast<FieldRegion>(this)) {
return FR->getDecl()->getSourceRange();
}
- // VarRegion
- else if (VR) {
+
+ if (auto *VR = dyn_cast<VarRegion>(this->getBaseRegion())) {
return VR->getDecl()->getSourceRange();
}
+
// Return invalid source range (can be checked by client).
- else
- return {};
+ return {};
}
//===----------------------------------------------------------------------===//
@@ -750,6 +778,7 @@ DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
case MemRegion::CXXBaseObjectRegionKind:
case MemRegion::CXXDerivedObjectRegionKind:
case MemRegion::CXXTempObjectRegionKind:
+ case MemRegion::CXXLifetimeExtendedObjectRegionKind:
case MemRegion::CXXThisRegionKind:
case MemRegion::ObjCIvarRegionKind:
case MemRegion::NonParamVarRegionKind:
@@ -776,49 +805,46 @@ DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
// We currently don't model flexible array members (FAMs), which are:
// - int array[]; of IncompleteArrayType
// - int array[0]; of ConstantArrayType with size 0
- // - int array[1]; of ConstantArrayType with size 1 (*)
- // (*): Consider single element array object members as FAM candidates only
- // if the consider-single-element-arrays-as-flexible-array-members
- // analyzer option is true.
+ // - int array[1]; of ConstantArrayType with size 1
// https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
- const auto isFlexibleArrayMemberCandidate = [this,
- &SVB](QualType Ty) -> bool {
- const ArrayType *AT = Ctx.getAsArrayType(Ty);
+ const auto isFlexibleArrayMemberCandidate =
+ [this](const ArrayType *AT) -> bool {
if (!AT)
return false;
- if (isa<IncompleteArrayType>(AT))
- return true;
- if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
- using FAMKind = LangOptions::StrictFlexArraysLevelKind;
- const FAMKind StrictFlexArraysLevel =
+ auto IsIncompleteArray = [](const ArrayType *AT) {
+ return isa<IncompleteArrayType>(AT);
+ };
+ auto IsArrayOfZero = [](const ArrayType *AT) {
+ const auto *CAT = dyn_cast<ConstantArrayType>(AT);
+ return CAT && CAT->getSize() == 0;
+ };
+ auto IsArrayOfOne = [](const ArrayType *AT) {
+ const auto *CAT = dyn_cast<ConstantArrayType>(AT);
+ return CAT && CAT->getSize() == 1;
+ };
+
+ using FAMKind = LangOptions::StrictFlexArraysLevelKind;
+ const FAMKind StrictFlexArraysLevel =
Ctx.getLangOpts().getStrictFlexArraysLevel();
- const AnalyzerOptions &Opts = SVB.getAnalyzerOptions();
- const llvm::APInt &Size = CAT->getSize();
-
- if (StrictFlexArraysLevel <= FAMKind::ZeroOrIncomplete && Size.isZero())
- return true;
-
- // The "-fstrict-flex-arrays" should have precedence over
- // consider-single-element-arrays-as-flexible-array-members
- // analyzer-config when checking single element arrays.
- if (StrictFlexArraysLevel == FAMKind::Default) {
- // FIXME: After clang-17 released, we should remove this branch.
- if (Opts.ShouldConsiderSingleElementArraysAsFlexibleArrayMembers &&
- Size.isOne())
- return true;
- } else {
- // -fstrict-flex-arrays was specified, since it's not the default, so
- // ignore analyzer-config.
- if (StrictFlexArraysLevel <= FAMKind::OneZeroOrIncomplete &&
- Size.isOne())
- return true;
- }
- }
- return false;
+
+ // "Default": Any trailing array member is a FAM.
+ // Since we cannot tell at this point if this array is a trailing member
+ // or not, let's just do the same as for "OneZeroOrIncomplete".
+ if (StrictFlexArraysLevel == FAMKind::Default)
+ return IsArrayOfOne(AT) || IsArrayOfZero(AT) || IsIncompleteArray(AT);
+
+ if (StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
+ return IsArrayOfOne(AT) || IsArrayOfZero(AT) || IsIncompleteArray(AT);
+
+ if (StrictFlexArraysLevel == FAMKind::ZeroOrIncomplete)
+ return IsArrayOfZero(AT) || IsIncompleteArray(AT);
+
+ assert(StrictFlexArraysLevel == FAMKind::IncompleteOnly);
+ return IsIncompleteArray(AT);
};
- if (isFlexibleArrayMemberCandidate(Ty))
+ if (isFlexibleArrayMemberCandidate(Ctx.getAsArrayType(Ty)))
return UnknownVal();
return Size;
@@ -838,8 +864,7 @@ DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
template <typename REG>
const REG *MemRegionManager::LazyAllocate(REG*& region) {
if (!region) {
- region = A.Allocate<REG>();
- new (region) REG(*this);
+ region = new (A) REG(*this);
}
return region;
@@ -848,8 +873,7 @@ const REG *MemRegionManager::LazyAllocate(REG*& region) {
template <typename REG, typename ARG>
const REG *MemRegionManager::LazyAllocate(REG*& region, ARG a) {
if (!region) {
- region = A.Allocate<REG>();
- new (region) REG(this, a);
+ region = new (A) REG(this, a);
}
return region;
@@ -863,8 +887,7 @@ MemRegionManager::getStackLocalsRegion(const StackFrameContext *STC) {
if (R)
return R;
- R = A.Allocate<StackLocalsSpaceRegion>();
- new (R) StackLocalsSpaceRegion(*this, STC);
+ R = new (A) StackLocalsSpaceRegion(*this, STC);
return R;
}
@@ -876,8 +899,7 @@ MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) {
if (R)
return R;
- R = A.Allocate<StackArgumentsSpaceRegion>();
- new (R) StackArgumentsSpaceRegion(*this, STC);
+ R = new (A) StackArgumentsSpaceRegion(*this, STC);
return R;
}
@@ -898,8 +920,7 @@ const GlobalsSpaceRegion
if (R)
return R;
- R = A.Allocate<StaticGlobalSpaceRegion>();
- new (R) StaticGlobalSpaceRegion(*this, CR);
+ R = new (A) StaticGlobalSpaceRegion(*this, CR);
return R;
}
@@ -945,13 +966,11 @@ getStackOrCaptureRegionForDeclContext(const LocationContext *LC,
if (const auto *BC = dyn_cast<BlockInvocationContext>(LC)) {
const auto *BR = static_cast<const BlockDataRegion *>(BC->getData());
// FIXME: This can be made more efficient.
- for (BlockDataRegion::referenced_vars_iterator
- I = BR->referenced_vars_begin(),
- E = BR->referenced_vars_end(); I != E; ++I) {
- const TypedValueRegion *OrigR = I.getOriginalRegion();
+ for (auto Var : BR->referenced_vars()) {
+ const TypedValueRegion *OrigR = Var.getOriginalRegion();
if (const auto *VR = dyn_cast<VarRegion>(OrigR)) {
if (VR->getDecl() == VD)
- return cast<VarRegion>(I.getCapturedRegion());
+ return cast<VarRegion>(Var.getCapturedRegion());
}
}
}
@@ -1058,13 +1077,16 @@ const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
}
}
- return getSubRegion<NonParamVarRegion>(D, sReg);
+ return getNonParamVarRegion(D, sReg);
}
const NonParamVarRegion *
MemRegionManager::getNonParamVarRegion(const VarDecl *D,
const MemRegion *superR) {
+ // Prefer the definition over the canonical decl as the canonical form.
D = D->getCanonicalDecl();
+ if (const VarDecl *Def = D->getDefinition())
+ D = Def;
return getSubRegion<NonParamVarRegion>(D, superR);
}
@@ -1109,12 +1131,6 @@ MemRegionManager::getBlockDataRegion(const BlockCodeRegion *BC,
return getSubRegion<BlockDataRegion>(BC, LC, blockCount, sReg);
}
-const CXXTempObjectRegion *
-MemRegionManager::getCXXStaticTempObjectRegion(const Expr *Ex) {
- return getSubRegion<CXXTempObjectRegion>(
- Ex, getGlobalsRegion(MemRegion::GlobalInternalSpaceRegionKind, nullptr));
-}
-
const CompoundLiteralRegion*
MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr *CL,
const LocationContext *LC) {
@@ -1145,8 +1161,7 @@ MemRegionManager::getElementRegion(QualType elementType, NonLoc Idx,
auto *R = cast_or_null<ElementRegion>(data);
if (!R) {
- R = A.Allocate<ElementRegion>();
- new (R) ElementRegion(T, Idx, superRegion);
+ R = new (A) ElementRegion(T, Idx, superRegion);
Regions.InsertNode(R, InsertPos);
}
@@ -1197,6 +1212,23 @@ MemRegionManager::getCXXTempObjectRegion(Expr const *E,
return getSubRegion<CXXTempObjectRegion>(E, getStackLocalsRegion(SFC));
}
+const CXXLifetimeExtendedObjectRegion *
+MemRegionManager::getCXXLifetimeExtendedObjectRegion(
+ const Expr *Ex, const ValueDecl *VD, const LocationContext *LC) {
+ const StackFrameContext *SFC = LC->getStackFrame();
+ assert(SFC);
+ return getSubRegion<CXXLifetimeExtendedObjectRegion>(
+ Ex, VD, getStackLocalsRegion(SFC));
+}
+
+const CXXLifetimeExtendedObjectRegion *
+MemRegionManager::getCXXStaticLifetimeExtendedObjectRegion(
+ const Expr *Ex, const ValueDecl *VD) {
+ return getSubRegion<CXXLifetimeExtendedObjectRegion>(
+ Ex, VD,
+ getGlobalsRegion(MemRegion::GlobalInternalSpaceRegionKind, nullptr));
+}
+
/// Checks whether \p BaseClass is a valid virtual or direct non-virtual base
/// class of the type of \p Super.
static bool isValidBaseClass(const CXXRecordDecl *BaseClass,
@@ -1283,7 +1315,7 @@ const MemSpaceRegion *MemRegion::getMemorySpace() const {
SR = dyn_cast<SubRegion>(R);
}
- return dyn_cast<MemSpaceRegion>(R);
+ return cast<MemSpaceRegion>(R);
}
bool MemRegion::hasStackStorage() const {
@@ -1298,10 +1330,6 @@ bool MemRegion::hasStackParametersStorage() const {
return isa<StackArgumentsSpaceRegion>(getMemorySpace());
}
-bool MemRegion::hasGlobalsOrParametersStorage() const {
- return isa<StackArgumentsSpaceRegion, GlobalsSpaceRegion>(getMemorySpace());
-}
-
// Strips away all elements and fields.
// Returns the base region of them.
const MemRegion *MemRegion::getBaseRegion() const {
@@ -1474,6 +1502,7 @@ static RegionOffset calculateOffset(const MemRegion *R) {
case MemRegion::NonParamVarRegionKind:
case MemRegion::ParamVarRegionKind:
case MemRegion::CXXTempObjectRegionKind:
+ case MemRegion::CXXLifetimeExtendedObjectRegionKind:
// Usual base regions.
goto Finish;
@@ -1664,10 +1693,8 @@ void BlockDataRegion::LazyInitializeReferencedVars() {
using VarVec = BumpVector<const MemRegion *>;
- auto *BV = A.Allocate<VarVec>();
- new (BV) VarVec(BC, NumBlockVars);
- auto *BVOriginal = A.Allocate<VarVec>();
- new (BVOriginal) VarVec(BC, NumBlockVars);
+ auto *BV = new (A) VarVec(BC, NumBlockVars);
+ auto *BVOriginal = new (A) VarVec(BC, NumBlockVars);
for (const auto *VD : ReferencedBlockVars) {
const VarRegion *VR = nullptr;
@@ -1715,10 +1742,13 @@ BlockDataRegion::referenced_vars_end() const {
VecOriginal->end());
}
+llvm::iterator_range<BlockDataRegion::referenced_vars_iterator>
+BlockDataRegion::referenced_vars() const {
+ return llvm::make_range(referenced_vars_begin(), referenced_vars_end());
+}
+
const VarRegion *BlockDataRegion::getOriginalRegion(const VarRegion *R) const {
- for (referenced_vars_iterator I = referenced_vars_begin(),
- E = referenced_vars_end();
- I != E; ++I) {
+ for (const auto &I : referenced_vars()) {
if (I.getCapturedRegion() == R)
return I.getOriginalRegion();
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index a3b08d4581a5..bdf485364cef 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -367,10 +367,8 @@ void PlistPrinter::ReportMacroSubPieces(raw_ostream &o,
unsigned indent, unsigned depth) {
MacroPieces.push_back(&P);
- for (PathPieces::const_iterator I = P.subPieces.begin(),
- E = P.subPieces.end();
- I != E; ++I) {
- ReportPiece(o, **I, indent, depth, /*includeControlFlow*/ false);
+ for (const auto &SubPiece : P.subPieces) {
+ ReportPiece(o, *SubPiece, indent, depth, /*includeControlFlow*/ false);
}
assert(P.getFixits().size() == 0 &&
@@ -500,12 +498,12 @@ static void printCoverage(const PathDiagnostic *D,
// Mapping from file IDs to executed lines.
const FilesToLineNumsMap &ExecutedLines = D->getExecutedLines();
- for (auto I = ExecutedLines.begin(), E = ExecutedLines.end(); I != E; ++I) {
- unsigned FileKey = AddFID(FM, Fids, I->first);
+ for (const auto &[FID, Lines] : ExecutedLines) {
+ unsigned FileKey = AddFID(FM, Fids, FID);
Indent(o, IndentLevel) << "<key>" << FileKey << "</key>\n";
Indent(o, IndentLevel) << "<array>\n";
IndentLevel++;
- for (unsigned LineNo : I->second) {
+ for (unsigned LineNo : Lines) {
Indent(o, IndentLevel);
EmitInteger(o, LineNo) << "\n";
}
@@ -597,8 +595,8 @@ void PlistDiagnostics::printBugPath(llvm::raw_ostream &o, const FIDMap &FM,
o << " <array>\n";
- for (PathPieces::const_iterator E = Path.end(); I != E; ++I)
- Printer.ReportDiag(o, **I);
+ for (const auto &Piece : llvm::make_range(I, Path.end()))
+ Printer.ReportDiag(o, *Piece);
o << " </array>\n";
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index 90ebbaad2bf3..f12f1a5ac970 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -156,9 +156,8 @@ ProgramState::invalidateRegions(RegionList Regions,
const CallEvent *Call,
RegionAndSymbolInvalidationTraits *ITraits) const {
SmallVector<SVal, 8> Values;
- for (RegionList::const_iterator I = Regions.begin(),
- End = Regions.end(); I != End; ++I)
- Values.push_back(loc::MemRegionVal(*I));
+ for (const MemRegion *Reg : Regions)
+ Values.push_back(loc::MemRegionVal(Reg));
return invalidateRegionsImpl(Values, E, Count, LCtx, CausedByPointerEscape,
IS, ITraits, Call);
@@ -424,7 +423,7 @@ ProgramStateRef ProgramStateManager::getPersistentState(ProgramState &State) {
freeStates.pop_back();
}
else {
- newState = (ProgramState*) Alloc.Allocate<ProgramState>();
+ newState = Alloc.Allocate<ProgramState>();
}
new (newState) ProgramState(State);
StateSet.InsertNode(newState, InsertPos);
@@ -556,22 +555,20 @@ bool ScanReachableSymbols::scan(nonloc::LazyCompoundVal val) {
}
bool ScanReachableSymbols::scan(nonloc::CompoundVal val) {
- for (nonloc::CompoundVal::iterator I=val.begin(), E=val.end(); I!=E; ++I)
- if (!scan(*I))
+ for (SVal V : val)
+ if (!scan(V))
return false;
return true;
}
bool ScanReachableSymbols::scan(const SymExpr *sym) {
- for (SymExpr::symbol_iterator SI = sym->symbol_begin(),
- SE = sym->symbol_end();
- SI != SE; ++SI) {
- bool wasVisited = !visited.insert(*SI).second;
+ for (SymbolRef SubSym : sym->symbols()) {
+ bool wasVisited = !visited.insert(SubSym).second;
if (wasVisited)
continue;
- if (!visitor.VisitSymbol(*SI))
+ if (!visitor.VisitSymbol(SubSym))
return false;
}
@@ -630,10 +627,8 @@ bool ScanReachableSymbols::scan(const MemRegion *R) {
// Regions captured by a block are also implicitly reachable.
if (const BlockDataRegion *BDR = dyn_cast<BlockDataRegion>(R)) {
- BlockDataRegion::referenced_vars_iterator I = BDR->referenced_vars_begin(),
- E = BDR->referenced_vars_end();
- for ( ; I != E; ++I) {
- if (!scan(I.getCapturedRegion()))
+ for (auto Var : BDR->referenced_vars()) {
+ if (!scan(Var.getCapturedRegion()))
return false;
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index a275d36286d3..5de99384449a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -1083,7 +1083,7 @@ areFeasible(ConstraintRangeTy Constraints) {
///
/// \returns true if assuming this Sym to be true means equality of operands
/// false if it means disequality of operands
-/// None otherwise
+/// std::nullopt otherwise
std::optional<bool> meansEquality(const SymSymExpr *Sym) {
switch (Sym->getOpcode()) {
case BO_Sub:
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 49855305cecc..c773cef30d5e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -28,6 +28,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <optional>
#include <utility>
@@ -231,7 +232,7 @@ public:
void printJson(raw_ostream &Out, const char *NL = "\n",
unsigned int Space = 0, bool IsDot = false) const {
- for (iterator I = begin(); I != end(); ++I) {
+ for (iterator I = begin(), E = end(); I != E; ++I) {
// TODO: We might need a .printJson for I.getKey() as well.
Indent(Out, Space, IsDot)
<< "{ \"cluster\": \"" << I.getKey() << "\", \"pointer\": \""
@@ -239,18 +240,19 @@ public:
++Space;
const ClusterBindings &CB = I.getData();
- for (ClusterBindings::iterator CI = CB.begin(); CI != CB.end(); ++CI) {
+ for (ClusterBindings::iterator CI = CB.begin(), CE = CB.end(); CI != CE;
+ ++CI) {
Indent(Out, Space, IsDot) << "{ " << CI.getKey() << ", \"value\": ";
CI.getData().printJson(Out, /*AddQuotes=*/true);
Out << " }";
- if (std::next(CI) != CB.end())
+ if (std::next(CI) != CE)
Out << ',';
Out << NL;
}
--Space;
Indent(Out, Space, IsDot) << "]}";
- if (std::next(I) != end())
+ if (std::next(I) != E)
Out << ',';
Out << NL;
}
@@ -644,16 +646,13 @@ public: // Part of public interface to class.
void iterBindings(Store store, BindingsHandler& f) override {
RegionBindingsRef B = getRegionBindings(store);
- for (RegionBindingsRef::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- const ClusterBindings &Cluster = I.getData();
- for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
- CI != CE; ++CI) {
- const BindingKey &K = CI.getKey();
- if (!K.isDirect())
+ for (const auto &[Region, Cluster] : B) {
+ for (const auto &[Key, Value] : Cluster) {
+ if (!Key.isDirect())
continue;
- if (const SubRegion *R = dyn_cast<SubRegion>(K.getRegion())) {
+ if (const SubRegion *R = dyn_cast<SubRegion>(Key.getRegion())) {
// FIXME: Possibly incorporate the offset?
- if (!f.HandleBinding(*this, store, R, CI.getData()))
+ if (!f.HandleBinding(*this, store, R, Value))
return;
}
}
@@ -874,9 +873,8 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
Length = FR->getDecl()->getBitWidthValue(SVB.getContext());
}
- for (ClusterBindings::iterator I = Cluster.begin(), E = Cluster.end();
- I != E; ++I) {
- BindingKey NextKey = I.getKey();
+ for (const auto &StoreEntry : Cluster) {
+ BindingKey NextKey = StoreEntry.first;
if (NextKey.getRegion() == TopKey.getRegion()) {
// FIXME: This doesn't catch the case where we're really invalidating a
// region with a symbolic offset. Example:
@@ -887,7 +885,7 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
NextKey.getOffset() - TopKey.getOffset() < Length) {
// Case 1: The next binding is inside the region we're invalidating.
// Include it.
- Bindings.push_back(*I);
+ Bindings.push_back(StoreEntry);
} else if (NextKey.getOffset() == TopKey.getOffset()) {
// Case 2: The next binding is at the same offset as the region we're
@@ -897,7 +895,7 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
// FIXME: This is probably incorrect; consider invalidating an outer
// struct whose first field is bound to a LazyCompoundVal.
if (IncludeAllDefaultBindings || NextKey.isDirect())
- Bindings.push_back(*I);
+ Bindings.push_back(StoreEntry);
}
} else if (NextKey.hasSymbolicOffset()) {
@@ -908,13 +906,13 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
// we'll be conservative and include it.
if (IncludeAllDefaultBindings || NextKey.isDirect())
if (isCompatibleWithFields(NextKey, FieldsInSymbolicSubregions))
- Bindings.push_back(*I);
+ Bindings.push_back(StoreEntry);
} else if (const SubRegion *BaseSR = dyn_cast<SubRegion>(Base)) {
// Case 4: The next key is symbolic, but we changed a known
// super-region. In this case the binding is certainly included.
if (BaseSR->isSubRegionOf(Top))
if (isCompatibleWithFields(NextKey, FieldsInSymbolicSubregions))
- Bindings.push_back(*I);
+ Bindings.push_back(StoreEntry);
}
}
}
@@ -956,10 +954,8 @@ RegionStoreManager::removeSubRegionBindings(RegionBindingsConstRef B,
/*IncludeAllDefaultBindings=*/false);
ClusterBindingsRef Result(*Cluster, CBFactory);
- for (SmallVectorImpl<BindingPair>::const_iterator I = Bindings.begin(),
- E = Bindings.end();
- I != E; ++I)
- Result = Result.remove(I->first);
+ for (BindingKey Key : llvm::make_first_range(Bindings))
+ Result = Result.remove(Key);
// If we're invalidating a region with a symbolic offset, we need to make sure
// we don't treat the base region as uninitialized anymore.
@@ -1056,8 +1052,8 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
RegionAndSymbolInvalidationTraits::TK_PreserveContents);
if (C) {
- for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E; ++I)
- VisitBinding(I.getData());
+ for (SVal Val : llvm::make_second_range(*C))
+ VisitBinding(Val);
// Invalidate regions contents.
if (!PreserveRegionsContents)
@@ -1093,10 +1089,8 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
// BlockDataRegion? If so, invalidate captured variables that are passed
// by reference.
if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(baseR)) {
- for (BlockDataRegion::referenced_vars_iterator
- BI = BR->referenced_vars_begin(), BE = BR->referenced_vars_end() ;
- BI != BE; ++BI) {
- const VarRegion *VR = BI.getCapturedRegion();
+ for (auto Var : BR->referenced_vars()) {
+ const VarRegion *VR = Var.getCapturedRegion();
const VarDecl *VD = VR->getDecl();
if (VD->hasAttr<BlocksAttr>() || !VD->hasLocalStorage()) {
AddToWorkList(VR);
@@ -1200,9 +1194,7 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
if (!C)
goto conjure_default;
- for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E;
- ++I) {
- const BindingKey &BK = I.getKey();
+ for (const auto &[BK, V] : *C) {
std::optional<uint64_t> ROffset =
BK.hasSymbolicOffset() ? std::optional<uint64_t>() : BK.getOffset();
@@ -1213,10 +1205,9 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
(UpperOverflow &&
(*ROffset >= LowerOffset || *ROffset < UpperOffset)) ||
(LowerOffset == UpperOffset && *ROffset == LowerOffset))) {
- B = B.removeBinding(I.getKey());
+ B = B.removeBinding(BK);
// Bound symbolic regions need to be invalidated for dead symbol
// detection.
- SVal V = I.getData();
const MemRegion *R = V.getAsRegion();
if (isa_and_nonnull<SymbolicRegion>(R))
VisitBinding(V);
@@ -1289,12 +1280,8 @@ RegionStoreManager::invalidateGlobalRegion(MemRegion::Kind K,
void RegionStoreManager::populateWorkList(InvalidateRegionsWorker &W,
ArrayRef<SVal> Values,
InvalidatedRegions *TopLevelRegions) {
- for (ArrayRef<SVal>::iterator I = Values.begin(),
- E = Values.end(); I != E; ++I) {
- SVal V = *I;
- if (std::optional<nonloc::LazyCompoundVal> LCS =
- V.getAs<nonloc::LazyCompoundVal>()) {
-
+ for (SVal V : Values) {
+ if (auto LCS = V.getAs<nonloc::LazyCompoundVal>()) {
for (SVal S : getInterestingValues(*LCS))
if (const MemRegion *R = S.getAsRegion())
W.AddToWorkList(R);
@@ -2281,10 +2268,7 @@ RegionStoreManager::getInterestingValues(nonloc::LazyCompoundVal LCV) {
SmallVector<BindingPair, 32> Bindings;
collectSubRegionBindings(Bindings, svalBuilder, *Cluster, LazyR,
/*IncludeAllDefaultBindings=*/true);
- for (SmallVectorImpl<BindingPair>::const_iterator I = Bindings.begin(),
- E = Bindings.end();
- I != E; ++I) {
- SVal V = I->second;
+ for (SVal V : llvm::make_second_range(Bindings)) {
if (V.isUnknownOrUndef() || V.isConstant())
continue;
@@ -2609,11 +2593,11 @@ std::optional<RegionBindingsRef> RegionStoreManager::tryBindSmallStruct(
RegionBindingsRef NewB = B;
- for (FieldVector::iterator I = Fields.begin(), E = Fields.end(); I != E; ++I){
- const FieldRegion *SourceFR = MRMgr.getFieldRegion(*I, LCV.getRegion());
+ for (const FieldDecl *Field : Fields) {
+ const FieldRegion *SourceFR = MRMgr.getFieldRegion(Field, LCV.getRegion());
SVal V = getBindingForField(getRegionBindings(LCV.getStore()), SourceFR);
- const FieldRegion *DestFR = MRMgr.getFieldRegion(*I, R);
+ const FieldRegion *DestFR = MRMgr.getFieldRegion(Field, R);
NewB = bind(NewB, loc::MemRegionVal(DestFR), V);
}
@@ -2829,11 +2813,11 @@ void RemoveDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(baseR))
SymReaper.markLive(SymR->getSymbol());
- for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E; ++I) {
+ for (const auto &[Key, Val] : *C) {
// Element index of a binding key is live.
- SymReaper.markElementIndicesLive(I.getKey().getRegion());
+ SymReaper.markElementIndicesLive(Key.getRegion());
- VisitBinding(I.getData());
+ VisitBinding(Val);
}
}
@@ -2860,17 +2844,15 @@ void RemoveDeadBindingsWorker::VisitBinding(SVal V) {
// All regions captured by a block are also live.
if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
- BlockDataRegion::referenced_vars_iterator I = BR->referenced_vars_begin(),
- E = BR->referenced_vars_end();
- for ( ; I != E; ++I)
- AddToWorkList(I.getCapturedRegion());
+ for (auto Var : BR->referenced_vars())
+ AddToWorkList(Var.getCapturedRegion());
}
}
// Update the set of live symbols.
- for (auto SI = V.symbol_begin(), SE = V.symbol_end(); SI!=SE; ++SI)
- SymReaper.markLive(*SI);
+ for (SymbolRef Sym : V.symbols())
+ SymReaper.markLive(Sym);
}
bool RemoveDeadBindingsWorker::UpdatePostponed() {
@@ -2878,12 +2860,10 @@ bool RemoveDeadBindingsWorker::UpdatePostponed() {
// having done a scan.
bool Changed = false;
- for (auto I = Postponed.begin(), E = Postponed.end(); I != E; ++I) {
- if (const SymbolicRegion *SR = *I) {
- if (SymReaper.isLive(SR->getSymbol())) {
- Changed |= AddToWorkList(SR);
- *I = nullptr;
- }
+ for (const SymbolicRegion *SR : Postponed) {
+ if (SymReaper.isLive(SR->getSymbol())) {
+ Changed |= AddToWorkList(SR);
+ SR = nullptr;
}
}
@@ -2898,9 +2878,8 @@ StoreRef RegionStoreManager::removeDeadBindings(Store store,
W.GenerateClusters();
// Enqueue the region roots onto the worklist.
- for (SymbolReaper::region_iterator I = SymReaper.region_begin(),
- E = SymReaper.region_end(); I != E; ++I) {
- W.AddToWorkList(*I);
+ for (const MemRegion *Reg : SymReaper.regions()) {
+ W.AddToWorkList(Reg);
}
do W.RunWorkList(); while (W.UpdatePostponed());
@@ -2908,9 +2887,7 @@ StoreRef RegionStoreManager::removeDeadBindings(Store store,
// We have now scanned the store, marking reachable regions and symbols
// as live. We now remove all the regions that are dead from the store
// as well as update DSymbols with the set symbols that are now dead.
- for (RegionBindingsRef::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- const MemRegion *Base = I.getKey();
-
+ for (const MemRegion *Base : llvm::make_first_range(B)) {
// If the cluster has been visited, we know the region has been marked.
// Otherwise, remove the dead entry.
if (!W.isVisited(Base))
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index fed17c77f03d..4fe828bdf768 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -395,7 +395,6 @@ std::optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
return evalCast(*Val, CE->getType(), SE->getType());
}
}
- // FALLTHROUGH
[[fallthrough]];
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp
index bc9c1e40d808..2a43a01ff886 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -174,6 +174,9 @@ public:
QualType VisitSymbolicRegion(const SymbolicRegion *SR) {
return Visit(SR->getSymbol());
}
+ QualType VisitAllocaRegion(const AllocaRegion *) {
+ return QualType{Context.VoidPtrTy};
+ }
QualType VisitTypedRegion(const TypedRegion *TR) {
return TR->getLocationType();
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
index fe1fa22af7ab..7577b7682a95 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -144,6 +144,7 @@ std::optional<const MemRegion *> StoreManager::castRegion(const MemRegion *R,
case MemRegion::NonParamVarRegionKind:
case MemRegion::ParamVarRegionKind:
case MemRegion::CXXTempObjectRegionKind:
+ case MemRegion::CXXLifetimeExtendedObjectRegionKind:
case MemRegion::CXXBaseObjectRegionKind:
case MemRegion::CXXDerivedObjectRegionKind:
return MakeElementRegion(cast<SubRegion>(R), PointeeTy);
@@ -256,10 +257,8 @@ SVal StoreManager::evalDerivedToBase(SVal Derived, const CastExpr *Cast) {
// Walk through the cast path to create nested CXXBaseRegions.
SVal Result = Derived;
- for (CastExpr::path_const_iterator I = Cast->path_begin(),
- E = Cast->path_end();
- I != E; ++I) {
- Result = evalDerivedToBase(Result, (*I)->getType(), (*I)->isVirtual());
+ for (const CXXBaseSpecifier *Base : Cast->path()) {
+ Result = evalDerivedToBase(Result, Base->getType(), Base->isVirtual());
}
return Result;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
index 3e97f0c95fc3..9025e11a3f51 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -170,8 +170,7 @@ SymbolManager::getRegionValueSymbol(const TypedValueRegion* R) {
void *InsertPos;
SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
if (!SD) {
- SD = (SymExpr*) BPAlloc.Allocate<SymbolRegionValue>();
- new (SD) SymbolRegionValue(SymbolCounter, R);
+ SD = new (BPAlloc) SymbolRegionValue(SymbolCounter, R);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
@@ -189,8 +188,7 @@ const SymbolConjured* SymbolManager::conjureSymbol(const Stmt *E,
void *InsertPos;
SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
if (!SD) {
- SD = (SymExpr*) BPAlloc.Allocate<SymbolConjured>();
- new (SD) SymbolConjured(SymbolCounter, E, LCtx, T, Count, SymbolTag);
+ SD = new (BPAlloc) SymbolConjured(SymbolCounter, E, LCtx, T, Count, SymbolTag);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
@@ -206,8 +204,7 @@ SymbolManager::getDerivedSymbol(SymbolRef parentSymbol,
void *InsertPos;
SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
if (!SD) {
- SD = (SymExpr*) BPAlloc.Allocate<SymbolDerived>();
- new (SD) SymbolDerived(SymbolCounter, parentSymbol, R);
+ SD = new (BPAlloc) SymbolDerived(SymbolCounter, parentSymbol, R);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
@@ -222,8 +219,7 @@ SymbolManager::getExtentSymbol(const SubRegion *R) {
void *InsertPos;
SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
if (!SD) {
- SD = (SymExpr*) BPAlloc.Allocate<SymbolExtent>();
- new (SD) SymbolExtent(SymbolCounter, R);
+ SD = new (BPAlloc) SymbolExtent(SymbolCounter, R);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
@@ -240,8 +236,7 @@ SymbolManager::getMetadataSymbol(const MemRegion* R, const Stmt *S, QualType T,
void *InsertPos;
SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
if (!SD) {
- SD = (SymExpr*) BPAlloc.Allocate<SymbolMetadata>();
- new (SD) SymbolMetadata(SymbolCounter, R, S, T, LCtx, Count, SymbolTag);
+ SD = new (BPAlloc) SymbolMetadata(SymbolCounter, R, S, T, LCtx, Count, SymbolTag);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
@@ -257,8 +252,7 @@ SymbolManager::getCastSymbol(const SymExpr *Op,
void *InsertPos;
SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!data) {
- data = (SymbolCast*) BPAlloc.Allocate<SymbolCast>();
- new (data) SymbolCast(Op, From, To);
+ data = new (BPAlloc) SymbolCast(Op, From, To);
DataSet.InsertNode(data, InsertPos);
}
@@ -275,8 +269,7 @@ const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!data) {
- data = (SymIntExpr*) BPAlloc.Allocate<SymIntExpr>();
- new (data) SymIntExpr(lhs, op, v, t);
+ data = new (BPAlloc) SymIntExpr(lhs, op, v, t);
DataSet.InsertNode(data, InsertPos);
}
@@ -293,8 +286,7 @@ const IntSymExpr *SymbolManager::getIntSymExpr(const llvm::APSInt& lhs,
SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!data) {
- data = (IntSymExpr*) BPAlloc.Allocate<IntSymExpr>();
- new (data) IntSymExpr(lhs, op, rhs, t);
+ data = new (BPAlloc) IntSymExpr(lhs, op, rhs, t);
DataSet.InsertNode(data, InsertPos);
}
@@ -311,8 +303,7 @@ const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs,
SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!data) {
- data = (SymSymExpr*) BPAlloc.Allocate<SymSymExpr>();
- new (data) SymSymExpr(lhs, op, rhs, t);
+ data = new (BPAlloc) SymSymExpr(lhs, op, rhs, t);
DataSet.InsertNode(data, InsertPos);
}
@@ -327,8 +318,7 @@ const UnarySymExpr *SymbolManager::getUnarySymExpr(const SymExpr *Operand,
void *InsertPos;
SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!data) {
- data = (UnarySymExpr *)BPAlloc.Allocate<UnarySymExpr>();
- new (data) UnarySymExpr(Operand, Opc, T);
+ data = new (BPAlloc) UnarySymExpr(Operand, Opc, T);
DataSet.InsertNode(data, InsertPos);
}
@@ -398,7 +388,7 @@ void SymbolReaper::markDependentsLive(SymbolRef sym) {
if (const SymbolRefSmallVectorTy *Deps = SymMgr.getDependentSymbols(sym)) {
for (const auto I : *Deps) {
- if (TheLiving.find(I) != TheLiving.end())
+ if (TheLiving.contains(I))
continue;
markLive(I);
}
@@ -424,8 +414,8 @@ void SymbolReaper::markElementIndicesLive(const MemRegion *region) {
SR = dyn_cast<SubRegion>(SR->getSuperRegion())) {
if (const auto ER = dyn_cast<ElementRegion>(SR)) {
SVal Idx = ER->getIndex();
- for (auto SI = Idx.symbol_begin(), SE = Idx.symbol_end(); SI != SE; ++SI)
- markLive(*SI);
+ for (SymbolRef Sym : Idx.symbols())
+ markLive(Sym);
}
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
index 05f4d19ebda0..71268af22e24 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
@@ -86,10 +86,7 @@ public:
}
};
- for (std::vector<const PathDiagnostic *>::iterator I = Diags.begin(),
- E = Diags.end();
- I != E; ++I) {
- const PathDiagnostic *PD = *I;
+ for (const PathDiagnostic *PD : Diags) {
std::string WarningMsg = (DiagOpts.ShouldDisplayDiagnosticName
? " [" + PD->getCheckerName() + "]"
: "")
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
index 4618d17577dd..f0d3f43c414c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
@@ -233,7 +233,7 @@ void CheckerRegistry::initializeRegistry(const CheckerManager &Mgr) {
// done recursively, its arguably cheaper, but for sure less error prone to
// recalculate from scratch.
auto IsEnabled = [&](const CheckerInfo *Checker) {
- return llvm::is_contained(Tmp, Checker);
+ return Tmp.contains(Checker);
};
for (const CheckerInfo &Checker : Data.Checkers) {
if (!Checker.isEnabled(Mgr))
@@ -525,4 +525,3 @@ void CheckerRegistry::validateCheckerOptions() const {
<< SuppliedCheckerOrPackage;
}
}
-
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
index 276f7313b08f..0f1039d81d52 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
@@ -28,11 +28,10 @@ using namespace ento;
ModelConsumer::ModelConsumer(llvm::StringMap<Stmt *> &Bodies)
: Bodies(Bodies) {}
-bool ModelConsumer::HandleTopLevelDecl(DeclGroupRef D) {
- for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
-
+bool ModelConsumer::HandleTopLevelDecl(DeclGroupRef DeclGroup) {
+ for (const Decl *D : DeclGroup) {
// Only interested in definitions.
- const FunctionDecl *func = llvm::dyn_cast<FunctionDecl>(*I);
+ const auto *func = llvm::dyn_cast<FunctionDecl>(D);
if (func && func->hasBody()) {
Bodies.insert(std::make_pair(func->getName(), func->getBody()));
}
diff --git a/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 86da7e86f831..4b9736b6009c 100644
--- a/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -113,6 +113,10 @@ bool RVVType::verifyType() const {
return false;
if (isFloat() && ElementBitwidth == 8)
return false;
+ if (IsTuple && (NF == 1 || NF > 8))
+ return false;
+ if (IsTuple && (1 << std::max(0, LMUL.Log2LMUL)) * NF > 8)
+ return false;
unsigned V = *Scale;
switch (ElementBitwidth) {
case 1:
@@ -214,6 +218,9 @@ void RVVType::initBuiltinStr() {
// vector values.
if (IsPointer)
BuiltinStr += "*";
+
+ if (IsTuple)
+ BuiltinStr = "T" + utostr(NF) + BuiltinStr;
}
void RVVType::initClangBuiltinStr() {
@@ -237,7 +244,8 @@ void RVVType::initClangBuiltinStr() {
default:
llvm_unreachable("ScalarTypeKind is invalid");
}
- ClangBuiltinStr += utostr(ElementBitwidth) + LMUL.str() + "_t";
+ ClangBuiltinStr += utostr(ElementBitwidth) + LMUL.str() +
+ (IsTuple ? "x" + utostr(NF) : "") + "_t";
}
void RVVType::initTypeStr() {
@@ -249,7 +257,8 @@ void RVVType::initTypeStr() {
auto getTypeString = [&](StringRef TypeStr) {
if (isScalar())
return Twine(TypeStr + Twine(ElementBitwidth) + "_t").str();
- return Twine("v" + TypeStr + Twine(ElementBitwidth) + LMUL.str() + "_t")
+ return Twine("v" + TypeStr + Twine(ElementBitwidth) + LMUL.str() +
+ (IsTuple ? "x" + utostr(NF) : "") + "_t")
.str();
};
@@ -325,6 +334,14 @@ void RVVType::initShortStr() {
}
if (isVector())
ShortStr += LMUL.str();
+ if (isTuple())
+ ShortStr += "x" + utostr(NF);
+}
+
+static VectorTypeModifier getTupleVTM(unsigned NF) {
+ assert(2 <= NF && NF <= 8 && "2 <= NF <= 8");
+ return static_cast<VectorTypeModifier>(
+ static_cast<uint8_t>(VectorTypeModifier::Tuple2) + (NF - 2));
}
void RVVType::applyBasicType() {
@@ -542,6 +559,13 @@ PrototypeDescriptor::parsePrototypeDescriptor(
return std::nullopt;
}
+ } else if (ComplexTT.first == "Tuple") {
+ unsigned NF = 0;
+ if (ComplexTT.second.getAsInteger(10, NF)) {
+ llvm_unreachable("Invalid NF value!");
+ return std::nullopt;
+ }
+ VTM = getTupleVTM(NF);
} else {
llvm_unreachable("Illegal complex type transformers!");
}
@@ -702,6 +726,18 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
case VectorTypeModifier::SFixedLog2LMUL3:
applyFixedLog2LMUL(3, FixedLMULType::SmallerThan);
break;
+ case VectorTypeModifier::Tuple2:
+ case VectorTypeModifier::Tuple3:
+ case VectorTypeModifier::Tuple4:
+ case VectorTypeModifier::Tuple5:
+ case VectorTypeModifier::Tuple6:
+ case VectorTypeModifier::Tuple7:
+ case VectorTypeModifier::Tuple8: {
+ IsTuple = true;
+ NF = 2 + static_cast<uint8_t>(Transformer.VTM) -
+ static_cast<uint8_t>(VectorTypeModifier::Tuple2);
+ break;
+ }
case VectorTypeModifier::NoModifier:
break;
}
@@ -788,10 +824,6 @@ void RVVType::applyFixedLog2LMUL(int Log2LMUL, enum FixedLMULType Type) {
std::optional<RVVTypes>
RVVTypeCache::computeTypes(BasicType BT, int Log2LMUL, unsigned NF,
ArrayRef<PrototypeDescriptor> Prototype) {
- // LMUL x NF must be less than or equal to 8.
- if ((Log2LMUL >= 1) && (1 << Log2LMUL) * NF > 8)
- return std::nullopt;
-
RVVTypes Types;
for (const PrototypeDescriptor &Proto : Prototype) {
auto T = computeType(BT, Log2LMUL, Proto);
@@ -843,16 +875,14 @@ std::optional<RVVTypePtr> RVVTypeCache::computeType(BasicType BT, int Log2LMUL,
//===----------------------------------------------------------------------===//
// RVVIntrinsic implementation
//===----------------------------------------------------------------------===//
-RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
- StringRef NewOverloadedName,
- StringRef OverloadedSuffix, StringRef IRName,
- bool IsMasked, bool HasMaskedOffOperand, bool HasVL,
- PolicyScheme Scheme, bool SupportOverloading,
- bool HasBuiltinAlias, StringRef ManualCodegen,
- const RVVTypes &OutInTypes,
- const std::vector<int64_t> &NewIntrinsicTypes,
- const std::vector<StringRef> &RequiredFeatures,
- unsigned NF, Policy NewPolicyAttrs)
+RVVIntrinsic::RVVIntrinsic(
+ StringRef NewName, StringRef Suffix, StringRef NewOverloadedName,
+ StringRef OverloadedSuffix, StringRef IRName, bool IsMasked,
+ bool HasMaskedOffOperand, bool HasVL, PolicyScheme Scheme,
+ bool SupportOverloading, bool HasBuiltinAlias, StringRef ManualCodegen,
+ const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes,
+ const std::vector<StringRef> &RequiredFeatures, unsigned NF,
+ Policy NewPolicyAttrs, bool HasFRMRoundModeOp)
: IRName(IRName), IsMasked(IsMasked),
HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), Scheme(Scheme),
SupportOverloading(SupportOverloading), HasBuiltinAlias(HasBuiltinAlias),
@@ -871,7 +901,7 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
OverloadedName += "_" + OverloadedSuffix.str();
updateNamesAndPolicy(IsMasked, hasPolicy(), Name, BuiltinName, OverloadedName,
- PolicyAttrs);
+ PolicyAttrs, HasFRMRoundModeOp);
// Init OutputType and InputTypes
OutputType = OutInTypes[0];
@@ -912,7 +942,7 @@ std::string RVVIntrinsic::getSuffixStr(
llvm::SmallVector<PrototypeDescriptor> RVVIntrinsic::computeBuiltinTypes(
llvm::ArrayRef<PrototypeDescriptor> Prototype, bool IsMasked,
bool HasMaskedOffOperand, bool HasVL, unsigned NF,
- PolicyScheme DefaultScheme, Policy PolicyAttrs) {
+ PolicyScheme DefaultScheme, Policy PolicyAttrs, bool IsTuple) {
SmallVector<PrototypeDescriptor> NewPrototype(Prototype.begin(),
Prototype.end());
bool HasPassthruOp = DefaultScheme == PolicyScheme::HasPassthruOperand;
@@ -923,13 +953,22 @@ llvm::SmallVector<PrototypeDescriptor> RVVIntrinsic::computeBuiltinTypes(
if (NF == 1) {
NewPrototype.insert(NewPrototype.begin() + 1, NewPrototype[0]);
} else if (NF > 1) {
- // Convert
- // (void, op0 address, op1 address, ...)
- // to
- // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
- PrototypeDescriptor MaskoffType = NewPrototype[1];
- MaskoffType.TM &= ~static_cast<uint8_t>(TypeModifier::Pointer);
- NewPrototype.insert(NewPrototype.begin() + NF + 1, NF, MaskoffType);
+ if (IsTuple) {
+ PrototypeDescriptor BasePtrOperand = Prototype[1];
+ PrototypeDescriptor MaskoffType = PrototypeDescriptor(
+ static_cast<uint8_t>(BaseTypeModifier::Vector),
+ static_cast<uint8_t>(getTupleVTM(NF)),
+ BasePtrOperand.TM & ~static_cast<uint8_t>(TypeModifier::Pointer));
+ NewPrototype.insert(NewPrototype.begin() + 1, MaskoffType);
+ } else {
+ // Convert
+ // (void, op0 address, op1 address, ...)
+ // to
+ // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
+ PrototypeDescriptor MaskoffType = NewPrototype[1];
+ MaskoffType.TM &= ~static_cast<uint8_t>(TypeModifier::Pointer);
+ NewPrototype.insert(NewPrototype.begin() + NF + 1, NF, MaskoffType);
+ }
}
}
if (HasMaskedOffOperand && NF > 1) {
@@ -938,8 +977,12 @@ llvm::SmallVector<PrototypeDescriptor> RVVIntrinsic::computeBuiltinTypes(
// to
// (void, op0 address, op1 address, ..., mask, maskedoff0, maskedoff1,
// ...)
- NewPrototype.insert(NewPrototype.begin() + NF + 1,
- PrototypeDescriptor::Mask);
+ if (IsTuple)
+ NewPrototype.insert(NewPrototype.begin() + 1,
+ PrototypeDescriptor::Mask);
+ else
+ NewPrototype.insert(NewPrototype.begin() + NF + 1,
+ PrototypeDescriptor::Mask);
} else {
// If IsMasked, insert PrototypeDescriptor:Mask as first input operand.
NewPrototype.insert(NewPrototype.begin() + 1, PrototypeDescriptor::Mask);
@@ -949,20 +992,30 @@ llvm::SmallVector<PrototypeDescriptor> RVVIntrinsic::computeBuiltinTypes(
if (PolicyAttrs.isTUPolicy() && HasPassthruOp)
NewPrototype.insert(NewPrototype.begin(), NewPrototype[0]);
} else if (PolicyAttrs.isTUPolicy() && HasPassthruOp) {
- // NF > 1 cases for segment load operations.
- // Convert
- // (void, op0 address, op1 address, ...)
- // to
- // (void, op0 address, op1 address, maskedoff0, maskedoff1, ...)
- PrototypeDescriptor MaskoffType = Prototype[1];
- MaskoffType.TM &= ~static_cast<uint8_t>(TypeModifier::Pointer);
- NewPrototype.insert(NewPrototype.begin() + NF + 1, NF, MaskoffType);
+ if (IsTuple) {
+ PrototypeDescriptor BasePtrOperand = Prototype[0];
+ PrototypeDescriptor MaskoffType = PrototypeDescriptor(
+ static_cast<uint8_t>(BaseTypeModifier::Vector),
+ static_cast<uint8_t>(getTupleVTM(NF)),
+ BasePtrOperand.TM & ~static_cast<uint8_t>(TypeModifier::Pointer));
+ NewPrototype.insert(NewPrototype.begin(), MaskoffType);
+ } else {
+ // NF > 1 cases for segment load operations.
+ // Convert
+ // (void, op0 address, op1 address, ...)
+ // to
+ // (void, op0 address, op1 address, maskedoff0, maskedoff1, ...)
+ PrototypeDescriptor MaskoffType = Prototype[1];
+ MaskoffType.TM &= ~static_cast<uint8_t>(TypeModifier::Pointer);
+ NewPrototype.insert(NewPrototype.begin() + NF + 1, NF, MaskoffType);
+ }
}
}
// If HasVL, append PrototypeDescriptor:VL to last operand
if (HasVL)
NewPrototype.push_back(PrototypeDescriptor::VL);
+
return NewPrototype;
}
@@ -990,11 +1043,9 @@ RVVIntrinsic::getSupportedMaskedPolicies(bool HasTailPolicy,
"and mask policy");
}
-void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
- std::string &Name,
- std::string &BuiltinName,
- std::string &OverloadedName,
- Policy &PolicyAttrs) {
+void RVVIntrinsic::updateNamesAndPolicy(
+ bool IsMasked, bool HasPolicy, std::string &Name, std::string &BuiltinName,
+ std::string &OverloadedName, Policy &PolicyAttrs, bool HasFRMRoundModeOp) {
auto appendPolicySuffix = [&](const std::string &suffix) {
Name += suffix;
@@ -1007,6 +1058,11 @@ void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
Name = "__riscv_" + Name;
OverloadedName = "__riscv_" + OverloadedName;
+ if (HasFRMRoundModeOp) {
+ Name += "_rm";
+ BuiltinName += "_rm";
+ }
+
if (IsMasked) {
if (PolicyAttrs.isTUMUPolicy())
appendPolicySuffix("_tumu");
@@ -1016,19 +1072,15 @@ void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
appendPolicySuffix("_mu");
else if (PolicyAttrs.isTAMAPolicy()) {
Name += "_m";
- if (HasPolicy)
- BuiltinName += "_tama";
- else
- BuiltinName += "_m";
+ BuiltinName += "_m";
} else
llvm_unreachable("Unhandled policy condition");
} else {
if (PolicyAttrs.isTUPolicy())
appendPolicySuffix("_tu");
- else if (PolicyAttrs.isTAPolicy()) {
- if (HasPolicy)
- BuiltinName += "_ta";
- } else
+ else if (PolicyAttrs.isTAPolicy()) // no suffix needed
+ return;
+ else
llvm_unreachable("Unhandled policy condition");
}
}
@@ -1077,6 +1129,8 @@ raw_ostream &operator<<(raw_ostream &OS, const RVVIntrinsicRecord &Record) {
OS << (int)Record.HasMaskedOffOperand << ",";
OS << (int)Record.HasTailPolicy << ",";
OS << (int)Record.HasMaskPolicy << ",";
+ OS << (int)Record.HasFRMRoundModeOp << ",";
+ OS << (int)Record.IsTuple << ",";
OS << (int)Record.UnMaskedPolicyScheme << ",";
OS << (int)Record.MaskedPolicyScheme << ",";
OS << "},\n";
diff --git a/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp b/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
index bf517e2dd312..0da087c33e3f 100644
--- a/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Testing/CommandLineArgs.h"
+#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
namespace clang {
@@ -109,4 +110,18 @@ StringRef getFilenameForTesting(TestLanguage Lang) {
llvm_unreachable("Unhandled TestLanguage enum");
}
+std::string getAnyTargetForTesting() {
+ for (const auto &Target : llvm::TargetRegistry::targets()) {
+ std::string Error;
+ StringRef TargetName(Target.getName());
+ if (TargetName == "x86-64")
+ TargetName = "x86_64";
+ if (llvm::TargetRegistry::lookupTarget(std::string(TargetName), Error) ==
+ &Target) {
+ return std::string(TargetName);
+ }
+ }
+ return "";
+}
+
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Testing/TestAST.cpp b/contrib/llvm-project/clang/lib/Testing/TestAST.cpp
index 8c79fcd7d636..3a50c2d9b5d0 100644
--- a/contrib/llvm-project/clang/lib/Testing/TestAST.cpp
+++ b/contrib/llvm-project/clang/lib/Testing/TestAST.cpp
@@ -16,6 +16,7 @@
#include "llvm/Support/VirtualFileSystem.h"
#include "gtest/gtest.h"
+#include <string>
namespace clang {
namespace {
@@ -91,7 +92,9 @@ TestAST::TestAST(const TestInputs &In) {
Argv.push_back(S.c_str());
for (const auto &S : In.ExtraArgs)
Argv.push_back(S.c_str());
- std::string Filename = getFilenameForTesting(In.Language).str();
+ std::string Filename = In.FileName;
+ if (Filename.empty())
+ Filename = getFilenameForTesting(In.Language).str();
Argv.push_back(Filename.c_str());
Clang->setInvocation(std::make_unique<CompilerInvocation>());
if (!CompilerInvocation::CreateFromArgs(Clang->getInvocation(), Argv,
diff --git a/contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp
index 1e19e68633d2..fdf6015508d9 100644
--- a/contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp
@@ -37,11 +37,11 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
#include <algorithm>
#include <cassert>
#include <cstring>
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
index 97b41fc68917..31404855e3b1 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
@@ -181,7 +181,11 @@ static bool shouldCacheStatFailures(StringRef Filename) {
StringRef Ext = llvm::sys::path::extension(Filename);
if (Ext.empty())
return false; // This may be the module cache directory.
- // Only cache stat failures on source files.
+ // Only cache stat failures on files that are not expected to change during
+ // the build.
+ StringRef FName = llvm::sys::path::filename(Filename);
+ if (FName == "module.modulemap" || FName == "module.map")
+ return true;
return shouldScanForDirectivesBasedOnExtension(Filename);
}
@@ -258,6 +262,9 @@ DependencyScanningWorkerFilesystem::status(const Twine &Path) {
SmallString<256> OwnedFilename;
StringRef Filename = Path.toStringRef(OwnedFilename);
+ if (Filename.endswith(".pcm"))
+ return getUnderlyingFS().status(Path);
+
llvm::ErrorOr<EntryRef> Result = getOrCreateFileSystemEntry(Filename);
if (!Result)
return Result.getError();
@@ -315,6 +322,9 @@ DependencyScanningWorkerFilesystem::openFileForRead(const Twine &Path) {
SmallString<256> OwnedFilename;
StringRef Filename = Path.toStringRef(OwnedFilename);
+ if (Filename.endswith(".pcm"))
+ return getUnderlyingFS().openFileForRead(Path);
+
llvm::ErrorOr<EntryRef> Result = getOrCreateFileSystemEntry(Filename);
if (!Result)
return Result.getError();
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
index ae1662237e87..6641518b572b 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
@@ -14,27 +14,6 @@ using namespace clang;
using namespace tooling;
using namespace dependencies;
-static std::vector<std::string>
-makeTUCommandLineWithoutPaths(ArrayRef<std::string> OriginalCommandLine) {
- std::vector<std::string> Args = OriginalCommandLine;
-
- Args.push_back("-fno-implicit-modules");
- Args.push_back("-fno-implicit-module-maps");
-
- // These arguments are unused in explicit compiles.
- llvm::erase_if(Args, [](StringRef Arg) {
- if (Arg.consume_front("-fmodules-")) {
- return Arg.startswith("cache-path=") ||
- Arg.startswith("prune-interval=") ||
- Arg.startswith("prune-after=") ||
- Arg == "validate-once-per-build-session";
- }
- return Arg.startswith("-fbuild-session-file=");
- });
-
- return Args;
-}
-
DependencyScanningTool::DependencyScanningTool(
DependencyScanningService &Service,
llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS)
@@ -67,11 +46,6 @@ public:
void handleContextHash(std::string Hash) override {}
- std::string lookupModuleOutput(const ModuleID &ID,
- ModuleOutputKind Kind) override {
- llvm::report_fatal_error("unexpected call to lookupModuleOutput");
- }
-
void printDependencies(std::string &S) {
assert(Opts && "Handled dependency output options.");
@@ -101,11 +75,11 @@ protected:
} // anonymous namespace
llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
- const std::vector<std::string> &CommandLine, StringRef CWD,
- std::optional<StringRef> ModuleName) {
+ const std::vector<std::string> &CommandLine, StringRef CWD) {
MakeDependencyPrinterConsumer Consumer;
+ CallbackActionController Controller(nullptr);
auto Result =
- Worker.computeDependencies(CWD, CommandLine, Consumer, ModuleName);
+ Worker.computeDependencies(CWD, CommandLine, Consumer, Controller);
if (Result)
return std::move(Result);
std::string Output;
@@ -114,8 +88,8 @@ llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
}
llvm::Expected<P1689Rule> DependencyScanningTool::getP1689ModuleDependencyFile(
- const CompileCommand &Command, StringRef CWD,
- std::string &MakeformatOutput, std::string &MakeformatOutputPath) {
+ const CompileCommand &Command, StringRef CWD, std::string &MakeformatOutput,
+ std::string &MakeformatOutputPath) {
class P1689ModuleDependencyPrinterConsumer
: public MakeDependencyPrinterConsumer {
public:
@@ -145,9 +119,20 @@ llvm::Expected<P1689Rule> DependencyScanningTool::getP1689ModuleDependencyFile(
P1689Rule &Rule;
};
+ class P1689ActionController : public DependencyActionController {
+ public:
+ // The lookupModuleOutput is for clang modules. P1689 format don't need it.
+ std::string lookupModuleOutput(const ModuleID &,
+ ModuleOutputKind Kind) override {
+ return "";
+ }
+ };
+
P1689Rule Rule;
P1689ModuleDependencyPrinterConsumer Consumer(Rule, Command);
- auto Result = Worker.computeDependencies(CWD, Command.CommandLine, Consumer);
+ P1689ActionController Controller;
+ auto Result = Worker.computeDependencies(CWD, Command.CommandLine, Consumer,
+ Controller);
if (Result)
return std::move(Result);
@@ -157,102 +142,68 @@ llvm::Expected<P1689Rule> DependencyScanningTool::getP1689ModuleDependencyFile(
return Rule;
}
-llvm::Expected<FullDependenciesResult>
-DependencyScanningTool::getFullDependencies(
+llvm::Expected<TranslationUnitDeps>
+DependencyScanningTool::getTranslationUnitDependencies(
const std::vector<std::string> &CommandLine, StringRef CWD,
const llvm::StringSet<> &AlreadySeen,
- LookupModuleOutputCallback LookupModuleOutput,
- std::optional<StringRef> ModuleName) {
- FullDependencyConsumer Consumer(AlreadySeen, LookupModuleOutput,
- Worker.shouldEagerLoadModules());
+ LookupModuleOutputCallback LookupModuleOutput) {
+ FullDependencyConsumer Consumer(AlreadySeen);
+ CallbackActionController Controller(LookupModuleOutput);
llvm::Error Result =
- Worker.computeDependencies(CWD, CommandLine, Consumer, ModuleName);
+ Worker.computeDependencies(CWD, CommandLine, Consumer, Controller);
if (Result)
return std::move(Result);
- return Consumer.takeFullDependencies();
+ return Consumer.takeTranslationUnitDeps();
}
-llvm::Expected<FullDependenciesResult>
-DependencyScanningTool::getFullDependenciesLegacyDriverCommand(
- const std::vector<std::string> &CommandLine, StringRef CWD,
- const llvm::StringSet<> &AlreadySeen,
- LookupModuleOutputCallback LookupModuleOutput,
- std::optional<StringRef> ModuleName) {
- FullDependencyConsumer Consumer(AlreadySeen, LookupModuleOutput,
- Worker.shouldEagerLoadModules());
- llvm::Error Result =
- Worker.computeDependencies(CWD, CommandLine, Consumer, ModuleName);
+llvm::Expected<ModuleDepsGraph> DependencyScanningTool::getModuleDependencies(
+ StringRef ModuleName, const std::vector<std::string> &CommandLine,
+ StringRef CWD, const llvm::StringSet<> &AlreadySeen,
+ LookupModuleOutputCallback LookupModuleOutput) {
+ FullDependencyConsumer Consumer(AlreadySeen);
+ CallbackActionController Controller(LookupModuleOutput);
+ llvm::Error Result = Worker.computeDependencies(CWD, CommandLine, Consumer,
+ Controller, ModuleName);
if (Result)
return std::move(Result);
- return Consumer.getFullDependenciesLegacyDriverCommand(CommandLine);
+ return Consumer.takeModuleGraphDeps();
}
-FullDependenciesResult FullDependencyConsumer::takeFullDependencies() {
- FullDependenciesResult FDR;
- FullDependencies &FD = FDR.FullDeps;
+TranslationUnitDeps FullDependencyConsumer::takeTranslationUnitDeps() {
+ TranslationUnitDeps TU;
- FD.ID.ContextHash = std::move(ContextHash);
- FD.FileDeps = std::move(Dependencies);
- FD.PrebuiltModuleDeps = std::move(PrebuiltModuleDeps);
- FD.Commands = std::move(Commands);
+ TU.ID.ContextHash = std::move(ContextHash);
+ TU.FileDeps = std::move(Dependencies);
+ TU.PrebuiltModuleDeps = std::move(PrebuiltModuleDeps);
+ TU.Commands = std::move(Commands);
for (auto &&M : ClangModuleDeps) {
auto &MD = M.second;
if (MD.ImportedByMainFile)
- FD.ClangModuleDeps.push_back(MD.ID);
+ TU.ClangModuleDeps.push_back(MD.ID);
// TODO: Avoid handleModuleDependency even being called for modules
// we've already seen.
if (AlreadySeen.count(M.first))
continue;
- FDR.DiscoveredModules.push_back(std::move(MD));
+ TU.ModuleGraph.push_back(std::move(MD));
}
- return FDR;
+ return TU;
}
-FullDependenciesResult
-FullDependencyConsumer::getFullDependenciesLegacyDriverCommand(
- const std::vector<std::string> &OriginalCommandLine) const {
- FullDependencies FD;
-
- FD.DriverCommandLine = makeTUCommandLineWithoutPaths(
- ArrayRef<std::string>(OriginalCommandLine).slice(1));
-
- FD.ID.ContextHash = std::move(ContextHash);
-
- FD.FileDeps.assign(Dependencies.begin(), Dependencies.end());
-
- for (const PrebuiltModuleDep &PMD : PrebuiltModuleDeps)
- FD.DriverCommandLine.push_back("-fmodule-file=" + PMD.PCMFile);
+ModuleDepsGraph FullDependencyConsumer::takeModuleGraphDeps() {
+ ModuleDepsGraph ModuleGraph;
for (auto &&M : ClangModuleDeps) {
auto &MD = M.second;
- if (MD.ImportedByMainFile) {
- FD.ClangModuleDeps.push_back(MD.ID);
- auto PCMPath = LookupModuleOutput(MD.ID, ModuleOutputKind::ModuleFile);
- if (EagerLoadModules) {
- FD.DriverCommandLine.push_back("-fmodule-file=" + PCMPath);
- } else {
- FD.DriverCommandLine.push_back("-fmodule-map-file=" +
- MD.ClangModuleMapFile);
- FD.DriverCommandLine.push_back("-fmodule-file=" + MD.ID.ModuleName +
- "=" + PCMPath);
- }
- }
- }
-
- FD.PrebuiltModuleDeps = std::move(PrebuiltModuleDeps);
-
- FullDependenciesResult FDR;
-
- for (auto &&M : ClangModuleDeps) {
// TODO: Avoid handleModuleDependency even being called for modules
// we've already seen.
if (AlreadySeen.count(M.first))
continue;
- FDR.DiscoveredModules.push_back(std::move(M.second));
+ ModuleGraph.push_back(std::move(MD));
}
- FDR.FullDeps = std::move(FD);
- return FDR;
+ return ModuleGraph;
}
+
+CallbackActionController::~CallbackActionController() {}
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
index 8eb0328d6322..28206dceadd9 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/DependencyScanning/DependencyScanningWorker.h"
+#include "clang/Basic/DiagnosticDriver.h"
#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/CodeGen/ObjectFilePCHContainerOperations.h"
#include "clang/Driver/Compilation.h"
@@ -22,7 +23,9 @@
#include "clang/Tooling/DependencyScanning/DependencyScanningService.h"
#include "clang/Tooling/DependencyScanning/ModuleDepCollector.h"
#include "clang/Tooling/Tooling.h"
-#include "llvm/Support/Host.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Error.h"
+#include "llvm/TargetParser/Host.h"
#include <optional>
using namespace clang;
@@ -63,30 +66,19 @@ using PrebuiltModuleFilesT = decltype(HeaderSearchOptions::PrebuiltModuleFiles);
class PrebuiltModuleListener : public ASTReaderListener {
public:
PrebuiltModuleListener(PrebuiltModuleFilesT &PrebuiltModuleFiles,
- llvm::StringSet<> &InputFiles, bool VisitInputFiles,
llvm::SmallVector<std::string> &NewModuleFiles)
- : PrebuiltModuleFiles(PrebuiltModuleFiles), InputFiles(InputFiles),
- VisitInputFiles(VisitInputFiles), NewModuleFiles(NewModuleFiles) {}
+ : PrebuiltModuleFiles(PrebuiltModuleFiles),
+ NewModuleFiles(NewModuleFiles) {}
bool needsImportVisitation() const override { return true; }
- bool needsInputFileVisitation() override { return VisitInputFiles; }
- bool needsSystemInputFileVisitation() override { return VisitInputFiles; }
void visitImport(StringRef ModuleName, StringRef Filename) override {
if (PrebuiltModuleFiles.insert({ModuleName.str(), Filename.str()}).second)
NewModuleFiles.push_back(Filename.str());
}
- bool visitInputFile(StringRef Filename, bool isSystem, bool isOverridden,
- bool isExplicitModule) override {
- InputFiles.insert(Filename);
- return true;
- }
-
private:
PrebuiltModuleFilesT &PrebuiltModuleFiles;
- llvm::StringSet<> &InputFiles;
- bool VisitInputFiles;
llvm::SmallVector<std::string> &NewModuleFiles;
};
@@ -94,13 +86,10 @@ private:
/// transitively imports and contributing input files.
static void visitPrebuiltModule(StringRef PrebuiltModuleFilename,
CompilerInstance &CI,
- PrebuiltModuleFilesT &ModuleFiles,
- llvm::StringSet<> &InputFiles,
- bool VisitInputFiles) {
+ PrebuiltModuleFilesT &ModuleFiles) {
// List of module files to be processed.
llvm::SmallVector<std::string> Worklist{PrebuiltModuleFilename.str()};
- PrebuiltModuleListener Listener(ModuleFiles, InputFiles, VisitInputFiles,
- Worklist);
+ PrebuiltModuleListener Listener(ModuleFiles, Worklist);
while (!Worklist.empty())
ASTReader::readASTFileControlBlock(
@@ -146,13 +135,14 @@ class DependencyScanningAction : public tooling::ToolAction {
public:
DependencyScanningAction(
StringRef WorkingDirectory, DependencyConsumer &Consumer,
+ DependencyActionController &Controller,
llvm::IntrusiveRefCntPtr<DependencyScanningWorkerFilesystem> DepFS,
ScanningOutputFormat Format, bool OptimizeArgs, bool EagerLoadModules,
bool DisableFree, std::optional<StringRef> ModuleName = std::nullopt)
: WorkingDirectory(WorkingDirectory), Consumer(Consumer),
- DepFS(std::move(DepFS)), Format(Format), OptimizeArgs(OptimizeArgs),
- EagerLoadModules(EagerLoadModules), DisableFree(DisableFree),
- ModuleName(ModuleName) {}
+ Controller(Controller), DepFS(std::move(DepFS)), Format(Format),
+ OptimizeArgs(OptimizeArgs), EagerLoadModules(EagerLoadModules),
+ DisableFree(DisableFree), ModuleName(ModuleName) {}
bool runInvocation(std::shared_ptr<CompilerInvocation> Invocation,
FileManager *FileMgr,
@@ -191,6 +181,7 @@ public:
ScanInstance.getFrontendOpts().GenerateGlobalModuleIndex = false;
ScanInstance.getFrontendOpts().UseGlobalModuleIndex = false;
ScanInstance.getFrontendOpts().ModulesShareFileManager = false;
+ ScanInstance.getHeaderSearchOpts().ModuleFormat = "raw";
ScanInstance.setFileManager(FileMgr);
// Support for virtual file system overlays.
@@ -200,15 +191,13 @@ public:
ScanInstance.createSourceManager(*FileMgr);
- llvm::StringSet<> PrebuiltModulesInputFiles;
// Store the list of prebuilt module files into header search options. This
// will prevent the implicit build to create duplicate modules and will
// force reuse of the existing prebuilt module files instead.
if (!ScanInstance.getPreprocessorOpts().ImplicitPCHInclude.empty())
visitPrebuiltModule(
ScanInstance.getPreprocessorOpts().ImplicitPCHInclude, ScanInstance,
- ScanInstance.getHeaderSearchOpts().PrebuiltModuleFiles,
- PrebuiltModulesInputFiles, /*VisitInputFiles=*/DepFS != nullptr);
+ ScanInstance.getHeaderSearchOpts().PrebuiltModuleFiles);
// Use the dependency scanning optimized file system if requested to do so.
if (DepFS) {
@@ -250,8 +239,8 @@ public:
case ScanningOutputFormat::P1689:
case ScanningOutputFormat::Full:
MDC = std::make_shared<ModuleDepCollector>(
- std::move(Opts), ScanInstance, Consumer, OriginalInvocation,
- OptimizeArgs, EagerLoadModules,
+ std::move(Opts), ScanInstance, Consumer, Controller,
+ OriginalInvocation, OptimizeArgs, EagerLoadModules,
Format == ScanningOutputFormat::P1689);
ScanInstance.addDependencyCollector(MDC);
break;
@@ -264,6 +253,9 @@ public:
// context hashing.
ScanInstance.getHeaderSearchOpts().ModulesStrictContextHash = true;
+ // Avoid some checks and module map parsing when loading PCM files.
+ ScanInstance.getPreprocessorOpts().ModulesCheckRelocated = false;
+
std::unique_ptr<FrontendAction> Action;
if (ModuleName)
@@ -300,6 +292,7 @@ private:
private:
StringRef WorkingDirectory;
DependencyConsumer &Consumer;
+ DependencyActionController &Controller;
llvm::IntrusiveRefCntPtr<DependencyScanningWorkerFilesystem> DepFS;
ScanningOutputFormat Format;
bool OptimizeArgs;
@@ -320,12 +313,11 @@ DependencyScanningWorker::DependencyScanningWorker(
: Format(Service.getFormat()), OptimizeArgs(Service.canOptimizeArgs()),
EagerLoadModules(Service.shouldEagerLoadModules()) {
PCHContainerOps = std::make_shared<PCHContainerOperations>();
+ // We need to read object files from PCH built outside the scanner.
PCHContainerOps->registerReader(
std::make_unique<ObjectFilePCHContainerReader>());
- // We don't need to write object files, but the current PCH implementation
- // requires the writer to be registered as well.
- PCHContainerOps->registerWriter(
- std::make_unique<ObjectFilePCHContainerWriter>());
+ // The scanner itself writes only raw ast files.
+ PCHContainerOps->registerWriter(std::make_unique<RawPCHContainerWriter>());
switch (Service.getMode()) {
case ScanningMode::DependencyDirectivesScan:
@@ -342,7 +334,8 @@ DependencyScanningWorker::DependencyScanningWorker(
llvm::Error DependencyScanningWorker::computeDependencies(
StringRef WorkingDirectory, const std::vector<std::string> &CommandLine,
- DependencyConsumer &Consumer, std::optional<StringRef> ModuleName) {
+ DependencyConsumer &Consumer, DependencyActionController &Controller,
+ std::optional<StringRef> ModuleName) {
std::vector<const char *> CLI;
for (const std::string &Arg : CommandLine)
CLI.push_back(Arg.c_str());
@@ -355,24 +348,37 @@ llvm::Error DependencyScanningWorker::computeDependencies(
llvm::raw_string_ostream DiagnosticsOS(DiagnosticOutput);
TextDiagnosticPrinter DiagPrinter(DiagnosticsOS, DiagOpts.release());
- if (computeDependencies(WorkingDirectory, CommandLine, Consumer, DiagPrinter,
- ModuleName))
+ if (computeDependencies(WorkingDirectory, CommandLine, Consumer, Controller,
+ DiagPrinter, ModuleName))
return llvm::Error::success();
return llvm::make_error<llvm::StringError>(DiagnosticsOS.str(),
llvm::inconvertibleErrorCode());
}
static bool forEachDriverJob(
- ArrayRef<std::string> Args, DiagnosticsEngine &Diags, FileManager &FM,
+ ArrayRef<std::string> ArgStrs, DiagnosticsEngine &Diags, FileManager &FM,
llvm::function_ref<bool(const driver::Command &Cmd)> Callback) {
+ SmallVector<const char *, 256> Argv;
+ Argv.reserve(ArgStrs.size());
+ for (const std::string &Arg : ArgStrs)
+ Argv.push_back(Arg.c_str());
+
+ llvm::vfs::FileSystem *FS = &FM.getVirtualFileSystem();
+
std::unique_ptr<driver::Driver> Driver = std::make_unique<driver::Driver>(
- Args[0], llvm::sys::getDefaultTargetTriple(), Diags,
- "clang LLVM compiler", &FM.getVirtualFileSystem());
+ Argv[0], llvm::sys::getDefaultTargetTriple(), Diags,
+ "clang LLVM compiler", FS);
Driver->setTitle("clang_based_tool");
- std::vector<const char *> Argv;
- for (const std::string &Arg : Args)
- Argv.push_back(Arg.c_str());
+ llvm::BumpPtrAllocator Alloc;
+ bool CLMode = driver::IsClangCL(
+ driver::getDriverMode(Argv[0], ArrayRef(Argv).slice(1)));
+
+ if (llvm::Error E = driver::expandResponseFiles(Argv, CLMode, Alloc, FS)) {
+ Diags.Report(diag::err_drv_expand_response_file)
+ << llvm::toString(std::move(E));
+ return false;
+ }
const std::unique_ptr<driver::Compilation> Compilation(
Driver->BuildCompilation(llvm::ArrayRef(Argv)));
@@ -388,37 +394,47 @@ static bool forEachDriverJob(
bool DependencyScanningWorker::computeDependencies(
StringRef WorkingDirectory, const std::vector<std::string> &CommandLine,
- DependencyConsumer &Consumer, DiagnosticConsumer &DC,
- std::optional<StringRef> ModuleName) {
+ DependencyConsumer &Consumer, DependencyActionController &Controller,
+ DiagnosticConsumer &DC, std::optional<StringRef> ModuleName) {
// Reset what might have been modified in the previous worker invocation.
BaseFS->setCurrentWorkingDirectory(WorkingDirectory);
std::optional<std::vector<std::string>> ModifiedCommandLine;
llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> ModifiedFS;
- if (ModuleName) {
- ModifiedCommandLine = CommandLine;
- ModifiedCommandLine->emplace_back(*ModuleName);
+ // If we're scanning based on a module name alone, we don't expect the client
+ // to provide us with an input file. However, the driver really wants to have
+ // one. Let's just make it up to make the driver happy.
+ if (ModuleName) {
auto OverlayFS =
llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(BaseFS);
auto InMemoryFS =
llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
InMemoryFS->setCurrentWorkingDirectory(WorkingDirectory);
- InMemoryFS->addFile(*ModuleName, 0, llvm::MemoryBuffer::getMemBuffer(""));
OverlayFS->pushOverlay(InMemoryFS);
ModifiedFS = OverlayFS;
+
+ SmallString<128> FakeInputPath;
+ // TODO: We should retry the creation if the path already exists.
+ llvm::sys::fs::createUniquePath(*ModuleName + "-%%%%%%%%.input",
+ FakeInputPath,
+ /*MakeAbsolute=*/false);
+ InMemoryFS->addFile(FakeInputPath, 0, llvm::MemoryBuffer::getMemBuffer(""));
+
+ ModifiedCommandLine = CommandLine;
+ ModifiedCommandLine->emplace_back(FakeInputPath);
}
const std::vector<std::string> &FinalCommandLine =
ModifiedCommandLine ? *ModifiedCommandLine : CommandLine;
+ auto &FinalFS = ModifiedFS ? ModifiedFS : BaseFS;
FileSystemOptions FSOpts;
FSOpts.WorkingDir = WorkingDirectory.str();
- auto FileMgr = llvm::makeIntrusiveRefCnt<FileManager>(
- FSOpts, ModifiedFS ? ModifiedFS : BaseFS);
+ auto FileMgr = llvm::makeIntrusiveRefCnt<FileManager>(FSOpts, FinalFS);
- std::vector<const char *> FinalCCommandLine(CommandLine.size(), nullptr);
- llvm::transform(CommandLine, FinalCCommandLine.begin(),
+ std::vector<const char *> FinalCCommandLine(FinalCommandLine.size(), nullptr);
+ llvm::transform(FinalCommandLine, FinalCCommandLine.begin(),
[](const std::string &Str) { return Str.c_str(); });
auto DiagOpts = CreateAndPopulateDiagOpts(FinalCCommandLine);
@@ -435,9 +451,9 @@ bool DependencyScanningWorker::computeDependencies(
// in-process; preserve the original value, which is
// always true for a driver invocation.
bool DisableFree = true;
- DependencyScanningAction Action(WorkingDirectory, Consumer, DepFS, Format,
- OptimizeArgs, EagerLoadModules, DisableFree,
- ModuleName);
+ DependencyScanningAction Action(WorkingDirectory, Consumer, Controller, DepFS,
+ Format, OptimizeArgs, EagerLoadModules,
+ DisableFree, ModuleName);
bool Success = forEachDriverJob(
FinalCommandLine, *Diags, *FileMgr, [&](const driver::Command &Cmd) {
if (StringRef(Cmd.getCreator().getName()) != "clang") {
@@ -475,3 +491,5 @@ bool DependencyScanningWorker::computeDependencies(
<< llvm::join(FinalCommandLine, " ");
return Success && Action.hasScanned();
}
+
+DependencyActionController::~DependencyActionController() {}
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
index d1cbf79a843e..aac24ca724aa 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
@@ -56,16 +56,16 @@ static std::vector<std::string> splitString(std::string S, char Separator) {
void ModuleDepCollector::addOutputPaths(CompilerInvocation &CI,
ModuleDeps &Deps) {
CI.getFrontendOpts().OutputFile =
- Consumer.lookupModuleOutput(Deps.ID, ModuleOutputKind::ModuleFile);
+ Controller.lookupModuleOutput(Deps.ID, ModuleOutputKind::ModuleFile);
if (!CI.getDiagnosticOpts().DiagnosticSerializationFile.empty())
CI.getDiagnosticOpts().DiagnosticSerializationFile =
- Consumer.lookupModuleOutput(
+ Controller.lookupModuleOutput(
Deps.ID, ModuleOutputKind::DiagnosticSerializationFile);
if (!CI.getDependencyOutputOpts().OutputFile.empty()) {
- CI.getDependencyOutputOpts().OutputFile =
- Consumer.lookupModuleOutput(Deps.ID, ModuleOutputKind::DependencyFile);
+ CI.getDependencyOutputOpts().OutputFile = Controller.lookupModuleOutput(
+ Deps.ID, ModuleOutputKind::DependencyFile);
CI.getDependencyOutputOpts().Targets =
- splitString(Consumer.lookupModuleOutput(
+ splitString(Controller.lookupModuleOutput(
Deps.ID, ModuleOutputKind::DependencyTargets),
'\0');
if (!CI.getDependencyOutputOpts().OutputFile.empty() &&
@@ -100,6 +100,8 @@ ModuleDepCollector::makeInvocationForModuleBuildWithoutOutputs(
if (!CI.getLangOpts()->ModulesCodegen) {
CI.getCodeGenOpts().DebugCompilationDir.clear();
CI.getCodeGenOpts().CoverageCompilationDir.clear();
+ CI.getCodeGenOpts().CoverageDataFile.clear();
+ CI.getCodeGenOpts().CoverageNotesFile.clear();
}
// Map output paths that affect behaviour to "-" so their existence is in the
@@ -111,6 +113,9 @@ ModuleDepCollector::makeInvocationForModuleBuildWithoutOutputs(
CI.getDependencyOutputOpts().Targets.clear();
CI.getFrontendOpts().ProgramAction = frontend::GenerateModule;
+ CI.getFrontendOpts().ARCMTAction = FrontendOptions::ARCMT_None;
+ CI.getFrontendOpts().ObjCMTAction = FrontendOptions::ObjCMT_None;
+ CI.getFrontendOpts().MTMigrateDir.clear();
CI.getLangOpts()->ModuleName = Deps.ID.ModuleName;
CI.getFrontendOpts().IsSystemModule = Deps.IsSystem;
@@ -202,7 +207,7 @@ void ModuleDepCollector::addModuleFiles(
CompilerInvocation &CI, ArrayRef<ModuleID> ClangModuleDeps) const {
for (const ModuleID &MID : ClangModuleDeps) {
std::string PCMPath =
- Consumer.lookupModuleOutput(MID, ModuleOutputKind::ModuleFile);
+ Controller.lookupModuleOutput(MID, ModuleOutputKind::ModuleFile);
if (EagerLoadModules)
CI.getFrontendOpts().ModuleFiles.push_back(std::move(PCMPath));
else
@@ -233,7 +238,7 @@ void ModuleDepCollector::applyDiscoveredDependencies(CompilerInvocation &CI) {
.getModuleMap()
.getModuleMapFileForUniquing(CurrentModule))
CI.getFrontendOpts().ModuleMapFiles.emplace_back(
- CurrentModuleMap->getName());
+ CurrentModuleMap->getNameAsRequested());
SmallVector<ModuleID> DirectDeps;
for (const auto &KV : ModularDeps)
@@ -264,13 +269,12 @@ static std::string getModuleContextHash(const ModuleDeps &MD,
HashBuilder.add(serialization::VERSION_MAJOR, serialization::VERSION_MINOR);
// Hash the BuildInvocation without any input files.
- SmallVector<const char *, 32> DummyArgs;
- CI.generateCC1CommandLine(DummyArgs, [&](const Twine &Arg) {
- Scratch.clear();
- StringRef Str = Arg.toStringRef(Scratch);
- HashBuilder.add(Str);
- return "<unused>";
- });
+ SmallVector<const char *, 32> Args;
+ llvm::BumpPtrAllocator Alloc;
+ llvm::StringSaver Saver(Alloc);
+ CI.generateCC1CommandLine(
+ Args, [&](const Twine &Arg) { return Saver.save(Arg).data(); });
+ HashBuilder.addRange(Args);
// Hash the module dependencies. These paths may differ even if the invocation
// is identical if they depend on the contents of the files in the TU -- for
@@ -299,11 +303,12 @@ void ModuleDepCollector::associateWithContextHash(const CompilerInvocation &CI,
assert(Inserted && "duplicate module mapping");
}
-void ModuleDepCollectorPP::FileChanged(SourceLocation Loc,
- FileChangeReason Reason,
- SrcMgr::CharacteristicKind FileType,
- FileID PrevFID) {
- if (Reason != PPCallbacks::EnterFile)
+void ModuleDepCollectorPP::LexedFileChanged(FileID FID,
+ LexedFileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID,
+ SourceLocation Loc) {
+ if (Reason != LexedFileChangeReason::EnterFile)
return;
// This has to be delayed as the context hash can change at the start of
@@ -318,8 +323,7 @@ void ModuleDepCollectorPP::FileChanged(SourceLocation Loc,
// Dependency generation really does want to go all the way to the
// file entry for a source location to find out what is depended on.
// We do not want #line markers to affect dependency generation!
- if (std::optional<StringRef> Filename =
- SM.getNonBuiltinFilenameForID(SM.getFileID(SM.getExpansionLoc(Loc))))
+ if (std::optional<StringRef> Filename = SM.getNonBuiltinFilenameForID(FID))
MDC.addFileDep(llvm::sys::path::remove_leading_dotslash(*Filename));
}
@@ -496,8 +500,7 @@ static void forEachSubmoduleSorted(const Module *M,
// Submodule order depends on order of header includes for inferred submodules
// we don't care about the exact order, so sort so that it's consistent across
// TUs to improve sharing.
- SmallVector<const Module *> Submodules(M->submodule_begin(),
- M->submodule_end());
+ SmallVector<const Module *> Submodules(M->submodules());
llvm::stable_sort(Submodules, [](const Module *A, const Module *B) {
return A->Name < B->Name;
});
@@ -575,11 +578,11 @@ void ModuleDepCollectorPP::addAffectingClangModule(
ModuleDepCollector::ModuleDepCollector(
std::unique_ptr<DependencyOutputOptions> Opts,
CompilerInstance &ScanInstance, DependencyConsumer &C,
- CompilerInvocation OriginalCI, bool OptimizeArgs, bool EagerLoadModules,
- bool IsStdModuleP1689Format)
- : ScanInstance(ScanInstance), Consumer(C), Opts(std::move(Opts)),
- OriginalInvocation(std::move(OriginalCI)), OptimizeArgs(OptimizeArgs),
- EagerLoadModules(EagerLoadModules),
+ DependencyActionController &Controller, CompilerInvocation OriginalCI,
+ bool OptimizeArgs, bool EagerLoadModules, bool IsStdModuleP1689Format)
+ : ScanInstance(ScanInstance), Consumer(C), Controller(Controller),
+ Opts(std::move(Opts)), OriginalInvocation(std::move(OriginalCI)),
+ OptimizeArgs(OptimizeArgs), EagerLoadModules(EagerLoadModules),
IsStdModuleP1689Format(IsStdModuleP1689Format) {}
void ModuleDepCollector::attachToPreprocessor(Preprocessor &PP) {
diff --git a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp b/contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp
index e67ffb037095..f48fbb0f3c6a 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp
@@ -18,8 +18,8 @@
#include "clang/Tooling/Tooling.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/JSON.h"
+#include "llvm/TargetParser/Host.h"
#include "ASTSrcLocProcessor.h"
diff --git a/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
index 88d20ba3957d..ebf8aa2a7628 100644
--- a/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
@@ -7,15 +7,16 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/CompilationDatabase.h"
+#include "clang/Tooling/Tooling.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/StringSaver.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace tooling {
@@ -48,28 +49,9 @@ public:
private:
std::vector<CompileCommand> expand(std::vector<CompileCommand> Cmds) const {
- for (auto &Cmd : Cmds) {
- bool SeenRSPFile = false;
- llvm::SmallVector<const char *, 20> Argv;
- Argv.reserve(Cmd.CommandLine.size());
- for (auto &Arg : Cmd.CommandLine) {
- Argv.push_back(Arg.c_str());
- if (!Arg.empty())
- SeenRSPFile |= Arg.front() == '@';
- }
- if (!SeenRSPFile)
- continue;
- llvm::BumpPtrAllocator Alloc;
- llvm::cl::ExpansionContext ECtx(Alloc, Tokenizer);
- llvm::Error Err = ECtx.setVFS(FS.get())
- .setCurrentDir(Cmd.Directory)
- .expandResponseFiles(Argv);
- if (Err)
- llvm::errs() << Err;
- // Don't assign directly, Argv aliases CommandLine.
- std::vector<std::string> ExpandedArgv(Argv.begin(), Argv.end());
- Cmd.CommandLine = std::move(ExpandedArgv);
- }
+ for (auto &Cmd : Cmds)
+ tooling::addExpandedResponseFiles(Cmd.CommandLine, Cmd.Directory,
+ Tokenizer, *FS);
return Cmds;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderAnalysis.cpp b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderAnalysis.cpp
index 49d23908d33b..f83e19f10cba 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderAnalysis.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderAnalysis.cpp
@@ -67,7 +67,7 @@ llvm::StringRef getFileContents(const FileEntry *FE, const SourceManager &SM) {
} // namespace
bool isSelfContainedHeader(const FileEntry *FE, const SourceManager &SM,
- HeaderSearch &HeaderInfo) {
+ const HeaderSearch &HeaderInfo) {
assert(FE);
if (!HeaderInfo.isFileMultipleIncludeGuarded(FE) &&
!HeaderInfo.hasFileBeenImported(FE) &&
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
index 172eff1bf6ab..15a2024c4788 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
@@ -253,7 +253,7 @@ bool IncludeCategoryManager::isMainHeader(StringRef IncludeName) const {
// 1) foo.h => bar.cc
// 2) foo.proto.h => foo.cc
StringRef Matching;
- if (MatchingFileStem.startswith_insensitive(HeaderStem))
+ if (MatchingFileStem.starts_with_insensitive(HeaderStem))
Matching = MatchingFileStem; // example 1), 2)
else if (FileStem.equals_insensitive(HeaderStem))
Matching = FileStem; // example 3)
@@ -276,6 +276,7 @@ HeaderIncludes::HeaderIncludes(StringRef FileName, StringRef Code,
MaxInsertOffset(MinInsertOffset +
getMaxHeaderInsertionOffset(
FileName, Code.drop_front(MinInsertOffset), Style)),
+ MainIncludeFound(false),
Categories(Style, FileName) {
// Add 0 for main header and INT_MAX for headers that are not in any
// category.
@@ -335,7 +336,9 @@ void HeaderIncludes::addExistingInclude(Include IncludeToAdd,
// Only record the offset of current #include if we can insert after it.
if (CurInclude.R.getOffset() <= MaxInsertOffset) {
int Priority = Categories.getIncludePriority(
- CurInclude.Name, /*CheckMainHeader=*/FirstIncludeOffset < 0);
+ CurInclude.Name, /*CheckMainHeader=*/!MainIncludeFound);
+ if (Priority == 0)
+ MainIncludeFound = true;
CategoryEndOffsets[Priority] = NextLineOffset;
IncludesByPriority[Priority].push_back(&CurInclude);
if (FirstIncludeOffset < 0)
@@ -362,7 +365,7 @@ HeaderIncludes::insert(llvm::StringRef IncludeName, bool IsAngled,
std::string(llvm::formatv(IsAngled ? "<{0}>" : "\"{0}\"", IncludeName));
StringRef QuotedName = Quoted;
int Priority = Categories.getIncludePriority(
- QuotedName, /*CheckMainHeader=*/FirstIncludeOffset < 0);
+ QuotedName, /*CheckMainHeader=*/!MainIncludeFound);
auto CatOffset = CategoryEndOffsets.find(Priority);
assert(CatOffset != CategoryEndOffsets.end());
unsigned InsertOffset = CatOffset->second; // Fall back offset
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/CSymbolMap.inc b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/CSymbolMap.inc
index 463ce921f067..463ce921f067 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/CSymbolMap.inc
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/CSymbolMap.inc
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp
index 9e5e421fdebc..cfcb955831ad 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp
@@ -8,108 +8,230 @@
#include "clang/Tooling/Inclusions/StandardLibrary.h"
#include "clang/AST/Decl.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
+#include <optional>
namespace clang {
namespace tooling {
namespace stdlib {
-static llvm::StringRef *HeaderNames;
-static std::pair<llvm::StringRef, llvm::StringRef> *SymbolNames;
-static unsigned *SymbolHeaderIDs;
-static llvm::DenseMap<llvm::StringRef, unsigned> *HeaderIDs;
-// Maps symbol name -> Symbol::ID, within a namespace.
+namespace {
+// Symbol name -> Symbol::ID, within a namespace.
using NSSymbolMap = llvm::DenseMap<llvm::StringRef, unsigned>;
-static llvm::DenseMap<llvm::StringRef, NSSymbolMap *> *NamespaceSymbols;
-static int initialize() {
- unsigned SymCount = 0;
-#define SYMBOL(Name, NS, Header) ++SymCount;
-#include "clang/Tooling/Inclusions/CSymbolMap.inc"
-#include "clang/Tooling/Inclusions/StdSymbolMap.inc"
+// A Mapping per language.
+struct SymbolHeaderMapping {
+ llvm::StringRef *HeaderNames = nullptr;
+ // Header name => Header::ID
+ llvm::DenseMap<llvm::StringRef, unsigned> *HeaderIDs;
+
+ unsigned SymbolCount = 0;
+ // Symbol::ID => symbol qualified_name/name/scope
+ struct SymbolName {
+ const char *Data; // std::vector
+ unsigned ScopeLen; // ~~~~~
+ unsigned NameLen; // ~~~~~~
+ StringRef scope() const { return StringRef(Data, ScopeLen); }
+ StringRef name() const { return StringRef(Data + ScopeLen, NameLen); }
+ StringRef qualifiedName() const {
+ return StringRef(Data, ScopeLen + NameLen);
+ }
+ } *SymbolNames = nullptr;
+ // Symbol name -> Symbol::ID, within a namespace.
+ llvm::DenseMap<llvm::StringRef, NSSymbolMap *> *NamespaceSymbols = nullptr;
+ // Symbol::ID => Header::ID
+ llvm::SmallVector<unsigned> *SymbolHeaderIDs = nullptr;
+};
+} // namespace
+static SymbolHeaderMapping
+ *LanguageMappings[static_cast<unsigned>(Lang::LastValue) + 1];
+static const SymbolHeaderMapping *getMappingPerLang(Lang L) {
+ return LanguageMappings[static_cast<unsigned>(L)];
+}
+
+static int countSymbols(Lang Language) {
+ llvm::DenseSet<llvm::StringRef> Set;
+#define SYMBOL(Name, NS, Header) Set.insert(#NS #Name);
+ switch (Language) {
+ case Lang::C:
+#include "CSymbolMap.inc"
+ break;
+ case Lang::CXX:
+#include "StdSpecialSymbolMap.inc"
+#include "StdSymbolMap.inc"
+#include "StdTsSymbolMap.inc"
+ break;
+ }
#undef SYMBOL
- SymbolNames = new std::remove_reference_t<decltype(*SymbolNames)>[SymCount];
- SymbolHeaderIDs =
- new std::remove_reference_t<decltype(*SymbolHeaderIDs)>[SymCount];
- NamespaceSymbols = new std::remove_reference_t<decltype(*NamespaceSymbols)>;
- HeaderIDs = new std::remove_reference_t<decltype(*HeaderIDs)>;
+ return Set.size();
+}
+static int initialize(Lang Language) {
+ SymbolHeaderMapping *Mapping = new SymbolHeaderMapping();
+ LanguageMappings[static_cast<unsigned>(Language)] = Mapping;
+
+ unsigned SymCount = countSymbols(Language);
+ Mapping->SymbolCount = SymCount;
+ Mapping->SymbolNames =
+ new std::remove_reference_t<decltype(*Mapping->SymbolNames)>[SymCount];
+ Mapping->SymbolHeaderIDs = new std::remove_reference_t<
+ decltype(*Mapping->SymbolHeaderIDs)>[SymCount];
+ Mapping->NamespaceSymbols =
+ new std::remove_reference_t<decltype(*Mapping->NamespaceSymbols)>;
+ Mapping->HeaderIDs =
+ new std::remove_reference_t<decltype(*Mapping->HeaderIDs)>;
auto AddNS = [&](llvm::StringRef NS) -> NSSymbolMap & {
- auto R = NamespaceSymbols->try_emplace(NS, nullptr);
+ auto R = Mapping->NamespaceSymbols->try_emplace(NS, nullptr);
if (R.second)
R.first->second = new NSSymbolMap();
return *R.first->second;
};
auto AddHeader = [&](llvm::StringRef Header) -> unsigned {
- return HeaderIDs->try_emplace(Header, HeaderIDs->size()).first->second;
+ return Mapping->HeaderIDs->try_emplace(Header, Mapping->HeaderIDs->size())
+ .first->second;
};
- auto Add = [&, SymIndex(0)](llvm::StringRef Name, llvm::StringRef NS,
- llvm::StringRef HeaderName) mutable {
- if (NS == "None")
- NS = "";
-
- SymbolNames[SymIndex] = {NS, Name};
- SymbolHeaderIDs[SymIndex] = AddHeader(HeaderName);
+ auto Add = [&, SymIndex(-1)](llvm::StringRef QName, unsigned NSLen,
+ llvm::StringRef HeaderName) mutable {
+ // Correct "Nonefoo" => foo.
+ // FIXME: get rid of "None" from the generated mapping files.
+ if (QName.take_front(NSLen) == "None") {
+ QName = QName.drop_front(NSLen);
+ NSLen = 0;
+ }
- NSSymbolMap &NSSymbols = AddNS(NS);
- NSSymbols.try_emplace(Name, SymIndex);
+ if (SymIndex >= 0 &&
+ Mapping->SymbolNames[SymIndex].qualifiedName() == QName) {
+ // Not a new symbol, use the same index.
+ assert(llvm::none_of(llvm::ArrayRef(Mapping->SymbolNames, SymIndex),
+ [&QName](const SymbolHeaderMapping::SymbolName &S) {
+ return S.qualifiedName() == QName;
+ }) &&
+ "The symbol has been added before, make sure entries in the .inc "
+ "file are grouped by symbol name!");
+ } else {
+ // First symbol or new symbol, increment next available index.
+ ++SymIndex;
+ }
+ Mapping->SymbolNames[SymIndex] = {
+ QName.data(), NSLen, static_cast<unsigned int>(QName.size() - NSLen)};
+ if (!HeaderName.empty())
+ Mapping->SymbolHeaderIDs[SymIndex].push_back(AddHeader(HeaderName));
- ++SymIndex;
+ NSSymbolMap &NSSymbols = AddNS(QName.take_front(NSLen));
+ NSSymbols.try_emplace(QName.drop_front(NSLen), SymIndex);
};
-#define SYMBOL(Name, NS, Header) Add(#Name, #NS, #Header);
-#include "clang/Tooling/Inclusions/CSymbolMap.inc"
-#include "clang/Tooling/Inclusions/StdSymbolMap.inc"
+#define SYMBOL(Name, NS, Header) Add(#NS #Name, strlen(#NS), #Header);
+ switch (Language) {
+ case Lang::C:
+#include "CSymbolMap.inc"
+ break;
+ case Lang::CXX:
+#include "StdSpecialSymbolMap.inc"
+#include "StdSymbolMap.inc"
+#include "StdTsSymbolMap.inc"
+ break;
+ }
#undef SYMBOL
- HeaderNames = new llvm::StringRef[HeaderIDs->size()];
- for (const auto &E : *HeaderIDs)
- HeaderNames[E.second] = E.first;
+ Mapping->HeaderNames = new llvm::StringRef[Mapping->HeaderIDs->size()];
+ for (const auto &E : *Mapping->HeaderIDs)
+ Mapping->HeaderNames[E.second] = E.first;
return 0;
}
static void ensureInitialized() {
- static int Dummy = initialize();
+ static int Dummy = []() {
+ for (unsigned L = 0; L <= static_cast<unsigned>(Lang::LastValue); ++L)
+ initialize(static_cast<Lang>(L));
+ return 0;
+ }();
(void)Dummy;
}
-std::optional<Header> Header::named(llvm::StringRef Name) {
+std::vector<Header> Header::all(Lang L) {
ensureInitialized();
- auto It = HeaderIDs->find(Name);
- if (It == HeaderIDs->end())
+ std::vector<Header> Result;
+ const auto *Mapping = getMappingPerLang(L);
+ Result.reserve(Mapping->HeaderIDs->size());
+ for (unsigned I = 0, E = Mapping->HeaderIDs->size(); I < E; ++I)
+ Result.push_back(Header(I, L));
+ return Result;
+}
+std::optional<Header> Header::named(llvm::StringRef Name, Lang L) {
+ ensureInitialized();
+ const auto *Mapping = getMappingPerLang(L);
+ auto It = Mapping->HeaderIDs->find(Name);
+ if (It == Mapping->HeaderIDs->end())
return std::nullopt;
- return Header(It->second);
+ return Header(It->second, L);
+}
+llvm::StringRef Header::name() const {
+ return getMappingPerLang(Language)->HeaderNames[ID];
}
-llvm::StringRef Header::name() const { return HeaderNames[ID]; }
-llvm::StringRef Symbol::scope() const { return SymbolNames[ID].first; }
-llvm::StringRef Symbol::name() const { return SymbolNames[ID].second; }
-std::optional<Symbol> Symbol::named(llvm::StringRef Scope,
- llvm::StringRef Name) {
+
+std::vector<Symbol> Symbol::all(Lang L) {
+ ensureInitialized();
+ std::vector<Symbol> Result;
+ const auto *Mapping = getMappingPerLang(L);
+ Result.reserve(Mapping->SymbolCount);
+ for (unsigned I = 0, E = Mapping->SymbolCount; I < E; ++I)
+ Result.push_back(Symbol(I, L));
+ return Result;
+}
+llvm::StringRef Symbol::scope() const {
+ return getMappingPerLang(Language)->SymbolNames[ID].scope();
+}
+llvm::StringRef Symbol::name() const {
+ return getMappingPerLang(Language)->SymbolNames[ID].name();
+}
+llvm::StringRef Symbol::qualifiedName() const {
+ return getMappingPerLang(Language)->SymbolNames[ID].qualifiedName();
+}
+std::optional<Symbol> Symbol::named(llvm::StringRef Scope, llvm::StringRef Name,
+ Lang L) {
ensureInitialized();
- if (NSSymbolMap *NSSymbols = NamespaceSymbols->lookup(Scope)) {
+
+ if (NSSymbolMap *NSSymbols =
+ getMappingPerLang(L)->NamespaceSymbols->lookup(Scope)) {
auto It = NSSymbols->find(Name);
if (It != NSSymbols->end())
- return Symbol(It->second);
+ return Symbol(It->second, L);
}
return std::nullopt;
}
-Header Symbol::header() const { return Header(SymbolHeaderIDs[ID]); }
+std::optional<Header> Symbol::header() const {
+ const auto& Headers = getMappingPerLang(Language)->SymbolHeaderIDs[ID];
+ if (Headers.empty())
+ return std::nullopt;
+ return Header(Headers.front(), Language);
+}
llvm::SmallVector<Header> Symbol::headers() const {
- return {header()}; // FIXME: multiple in case of ambiguity
+ llvm::SmallVector<Header> Results;
+ for (auto HeaderID : getMappingPerLang(Language)->SymbolHeaderIDs[ID])
+ Results.emplace_back(Header(HeaderID, Language));
+ return Results;
}
Recognizer::Recognizer() { ensureInitialized(); }
-NSSymbolMap *Recognizer::namespaceSymbols(const NamespaceDecl *D) {
- auto It = NamespaceCache.find(D);
+NSSymbolMap *Recognizer::namespaceSymbols(const DeclContext *DC, Lang L) {
+ if (DC->isTranslationUnit()) // global scope.
+ return getMappingPerLang(L)->NamespaceSymbols->lookup("");
+
+ auto It = NamespaceCache.find(DC);
if (It != NamespaceCache.end())
return It->second;
-
+ const NamespaceDecl *D = llvm::cast<NamespaceDecl>(DC);
NSSymbolMap *Result = [&]() -> NSSymbolMap * {
- if (D && D->isAnonymousNamespace())
+ if (D->isAnonymousNamespace())
return nullptr;
    // Print the namespace and its parents omitting inline scopes.
std::string Scope;
@@ -117,24 +239,35 @@ NSSymbolMap *Recognizer::namespaceSymbols(const NamespaceDecl *D) {
ND = llvm::dyn_cast_or_null<NamespaceDecl>(ND->getParent()))
if (!ND->isInlineNamespace() && !ND->isAnonymousNamespace())
Scope = ND->getName().str() + "::" + Scope;
- return NamespaceSymbols->lookup(Scope);
+ return getMappingPerLang(L)->NamespaceSymbols->lookup(Scope);
}();
NamespaceCache.try_emplace(D, Result);
return Result;
}
std::optional<Symbol> Recognizer::operator()(const Decl *D) {
+ Lang L;
+ if (D->getLangOpts().CPlusPlus) {
+ L = Lang::CXX;
+ } else if (D->getLangOpts().C11) {
+ L = Lang::C;
+ } else {
+ return std::nullopt; // not a supported language.
+ }
+
// If D is std::vector::iterator, `vector` is the outer symbol to look up.
// We keep all the candidate DCs as some may turn out to be anon enums.
// Do this resolution lazily as we may turn out not to have a std namespace.
llvm::SmallVector<const DeclContext *> IntermediateDecl;
const DeclContext *DC = D->getDeclContext();
- while (DC && !DC->isNamespace()) {
+ if (!DC) // The passed D is a TranslationUnitDecl!
+ return std::nullopt;
+ while (!DC->isNamespace() && !DC->isTranslationUnit()) {
if (NamedDecl::classofKind(DC->getDeclKind()))
IntermediateDecl.push_back(DC);
DC = DC->getParent();
}
- NSSymbolMap *Symbols = namespaceSymbols(cast_or_null<NamespaceDecl>(DC));
+ NSSymbolMap *Symbols = namespaceSymbols(DC, L);
if (!Symbols)
return std::nullopt;
@@ -157,7 +290,7 @@ std::optional<Symbol> Recognizer::operator()(const Decl *D) {
auto It = Symbols->find(Name);
if (It == Symbols->end())
return std::nullopt;
- return Symbol(It->second);
+ return Symbol(It->second, L);
}
} // namespace stdlib
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSpecialSymbolMap.inc b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSpecialSymbolMap.inc
new file mode 100644
index 000000000000..ae620a0b9958
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSpecialSymbolMap.inc
@@ -0,0 +1,722 @@
+//===-- StdSpecialSymbolMap.inc ---------------------------------*- C++ -*-===//
+//
+// This is a hand-curated list for C++ symbols that cannot be parsed/extracted
+// via the include-mapping tool (gen_std.py).
+//
+//===----------------------------------------------------------------------===//
+
+// Symbols that can be provided by any of the headers, ordered by the header
+// preference.
+// cppreference mentions the <locale> header is an alternative for these symbols,
+// but they are not per the standard.
+SYMBOL(consume_header, std::, <codecvt>)
+SYMBOL(generate_header, std::, <codecvt>)
+SYMBOL(little_endian, std::, <codecvt>)
+
+SYMBOL(mbstate_t, std::, <cwchar>)
+SYMBOL(mbstate_t, std::, <cuchar>)
+SYMBOL(size_t, std::, <cstddef>)
+SYMBOL(size_t, std::, <cstdlib>)
+SYMBOL(size_t, std::, <cstring>)
+SYMBOL(size_t, std::, <cwchar>)
+SYMBOL(size_t, std::, <cuchar>)
+SYMBOL(size_t, std::, <ctime>)
+SYMBOL(size_t, std::, <cstdio>)
+SYMBOL(size_t, None, <cstddef>)
+SYMBOL(size_t, None, <cstdlib>)
+SYMBOL(size_t, None, <cstring>)
+SYMBOL(size_t, None, <cwchar>)
+SYMBOL(size_t, None, <cuchar>)
+SYMBOL(size_t, None, <ctime>)
+SYMBOL(size_t, None, <cstdio>)
+SYMBOL(size_t, None, <stddef.h>)
+SYMBOL(size_t, None, <stdlib.h>)
+SYMBOL(size_t, None, <string.h>)
+SYMBOL(size_t, None, <wchar.h>)
+SYMBOL(size_t, None, <uchar.h>)
+SYMBOL(size_t, None, <time.h>)
+SYMBOL(size_t, None, <stdio.h>)
+SYMBOL(unwrap_ref_decay, std::, <type_traits>)
+SYMBOL(unwrap_ref_decay, std::, <functional>)
+SYMBOL(unwrap_reference, std::, <type_traits>)
+SYMBOL(unwrap_reference, std::, <functional>)
+SYMBOL(unwrap_ref_decay_t, std::, <type_traits>)
+SYMBOL(unwrap_ref_decay_t, std::, <functional>)
+SYMBOL(wint_t, std::, <cwctype>)
+SYMBOL(wint_t, std::, <cwchar>)
+SYMBOL(swap, std::, <utility>)
+SYMBOL(swap, std::, <algorithm>) // until C++11
+// C++ [string.view.synop 23.3.2]: The function templates defined in
+// [utility.swap] ... are available when <string_­view> is included.
+SYMBOL(swap, std::, <string_view>) // since C++17
+// C++ [tuple.helper 22.4.7]: In addition to being available via inclusion of
+// the <tuple> header, ... any of the headers <array>, <ranges>, or <utility>
+// are included.
+SYMBOL(tuple_size, std::, <tuple>)
+SYMBOL(tuple_size, std::, <array>)
+SYMBOL(tuple_size, std::, <ranges>)
+SYMBOL(tuple_size, std::, <utility>)
+SYMBOL(tuple_element, std::, <tuple>)
+SYMBOL(tuple_element, std::, <array>)
+SYMBOL(tuple_element, std::, <ranges>)
+SYMBOL(tuple_element, std::, <utility>)
+// C++ [iterator.range 25.7]: In addition to being available via inclusion of
+// the <iterator> header, the function templates in [iterator.range] are
+// available when any of the following headers are included: <array>, <deque>,
+// <forward_­list>, ... and <vector>.
+SYMBOL(begin, std::, <iterator>)
+SYMBOL(begin, std::, <array>)
+SYMBOL(begin, std::, <deque>)
+SYMBOL(begin, std::, <forward_list>)
+SYMBOL(begin, std::, <list>)
+SYMBOL(begin, std::, <map>)
+SYMBOL(begin, std::, <regex>)
+SYMBOL(begin, std::, <set>)
+SYMBOL(begin, std::, <span>)
+SYMBOL(begin, std::, <string>)
+SYMBOL(begin, std::, <string_view>)
+SYMBOL(begin, std::, <unordered_map>)
+SYMBOL(begin, std::, <unordered_set>)
+SYMBOL(begin, std::, <vector>)
+SYMBOL(cbegin, std::, <iterator>)
+SYMBOL(cbegin, std::, <array>)
+SYMBOL(cbegin, std::, <deque>)
+SYMBOL(cbegin, std::, <forward_list>)
+SYMBOL(cbegin, std::, <list>)
+SYMBOL(cbegin, std::, <map>)
+SYMBOL(cbegin, std::, <regex>)
+SYMBOL(cbegin, std::, <set>)
+SYMBOL(cbegin, std::, <span>)
+SYMBOL(cbegin, std::, <string>)
+SYMBOL(cbegin, std::, <string_view>)
+SYMBOL(cbegin, std::, <unordered_map>)
+SYMBOL(cbegin, std::, <unordered_set>)
+SYMBOL(cbegin, std::, <vector>)
+SYMBOL(cend, std::, <iterator>)
+SYMBOL(cend, std::, <array>)
+SYMBOL(cend, std::, <deque>)
+SYMBOL(cend, std::, <forward_list>)
+SYMBOL(cend, std::, <list>)
+SYMBOL(cend, std::, <map>)
+SYMBOL(cend, std::, <regex>)
+SYMBOL(cend, std::, <set>)
+SYMBOL(cend, std::, <span>)
+SYMBOL(cend, std::, <string>)
+SYMBOL(cend, std::, <string_view>)
+SYMBOL(cend, std::, <unordered_map>)
+SYMBOL(cend, std::, <unordered_set>)
+SYMBOL(cend, std::, <vector>)
+SYMBOL(crbegin, std::, <iterator>)
+SYMBOL(crbegin, std::, <array>)
+SYMBOL(crbegin, std::, <deque>)
+SYMBOL(crbegin, std::, <forward_list>)
+SYMBOL(crbegin, std::, <list>)
+SYMBOL(crbegin, std::, <map>)
+SYMBOL(crbegin, std::, <regex>)
+SYMBOL(crbegin, std::, <set>)
+SYMBOL(crbegin, std::, <span>)
+SYMBOL(crbegin, std::, <string>)
+SYMBOL(crbegin, std::, <string_view>)
+SYMBOL(crbegin, std::, <unordered_map>)
+SYMBOL(crbegin, std::, <unordered_set>)
+SYMBOL(crbegin, std::, <vector>)
+SYMBOL(crend, std::, <iterator>)
+SYMBOL(crend, std::, <array>)
+SYMBOL(crend, std::, <deque>)
+SYMBOL(crend, std::, <forward_list>)
+SYMBOL(crend, std::, <list>)
+SYMBOL(crend, std::, <map>)
+SYMBOL(crend, std::, <regex>)
+SYMBOL(crend, std::, <set>)
+SYMBOL(crend, std::, <span>)
+SYMBOL(crend, std::, <string>)
+SYMBOL(crend, std::, <string_view>)
+SYMBOL(crend, std::, <unordered_map>)
+SYMBOL(crend, std::, <unordered_set>)
+SYMBOL(crend, std::, <vector>)
+SYMBOL(data, std::, <iterator>)
+SYMBOL(data, std::, <array>)
+SYMBOL(data, std::, <deque>)
+SYMBOL(data, std::, <forward_list>)
+SYMBOL(data, std::, <list>)
+SYMBOL(data, std::, <map>)
+SYMBOL(data, std::, <regex>)
+SYMBOL(data, std::, <set>)
+SYMBOL(data, std::, <span>)
+SYMBOL(data, std::, <string>)
+SYMBOL(data, std::, <string_view>)
+SYMBOL(data, std::, <unordered_map>)
+SYMBOL(data, std::, <unordered_set>)
+SYMBOL(data, std::, <vector>)
+SYMBOL(empty, std::, <iterator>)
+SYMBOL(empty, std::, <array>)
+SYMBOL(empty, std::, <deque>)
+SYMBOL(empty, std::, <forward_list>)
+SYMBOL(empty, std::, <list>)
+SYMBOL(empty, std::, <map>)
+SYMBOL(empty, std::, <regex>)
+SYMBOL(empty, std::, <set>)
+SYMBOL(empty, std::, <span>)
+SYMBOL(empty, std::, <string>)
+SYMBOL(empty, std::, <string_view>)
+SYMBOL(empty, std::, <unordered_map>)
+SYMBOL(empty, std::, <unordered_set>)
+SYMBOL(empty, std::, <vector>)
+SYMBOL(end, std::, <iterator>)
+SYMBOL(end, std::, <array>)
+SYMBOL(end, std::, <deque>)
+SYMBOL(end, std::, <forward_list>)
+SYMBOL(end, std::, <list>)
+SYMBOL(end, std::, <map>)
+SYMBOL(end, std::, <regex>)
+SYMBOL(end, std::, <set>)
+SYMBOL(end, std::, <span>)
+SYMBOL(end, std::, <string>)
+SYMBOL(end, std::, <string_view>)
+SYMBOL(end, std::, <unordered_map>)
+SYMBOL(end, std::, <unordered_set>)
+SYMBOL(end, std::, <vector>)
+SYMBOL(rbegin, std::, <iterator>)
+SYMBOL(rbegin, std::, <array>)
+SYMBOL(rbegin, std::, <deque>)
+SYMBOL(rbegin, std::, <forward_list>)
+SYMBOL(rbegin, std::, <list>)
+SYMBOL(rbegin, std::, <map>)
+SYMBOL(rbegin, std::, <regex>)
+SYMBOL(rbegin, std::, <set>)
+SYMBOL(rbegin, std::, <span>)
+SYMBOL(rbegin, std::, <string>)
+SYMBOL(rbegin, std::, <string_view>)
+SYMBOL(rbegin, std::, <unordered_map>)
+SYMBOL(rbegin, std::, <unordered_set>)
+SYMBOL(rbegin, std::, <vector>)
+SYMBOL(rend, std::, <iterator>)
+SYMBOL(rend, std::, <array>)
+SYMBOL(rend, std::, <deque>)
+SYMBOL(rend, std::, <forward_list>)
+SYMBOL(rend, std::, <list>)
+SYMBOL(rend, std::, <map>)
+SYMBOL(rend, std::, <regex>)
+SYMBOL(rend, std::, <set>)
+SYMBOL(rend, std::, <span>)
+SYMBOL(rend, std::, <string>)
+SYMBOL(rend, std::, <string_view>)
+SYMBOL(rend, std::, <unordered_map>)
+SYMBOL(rend, std::, <unordered_set>)
+SYMBOL(rend, std::, <vector>)
+SYMBOL(size, std::, <iterator>)
+SYMBOL(size, std::, <array>)
+SYMBOL(size, std::, <deque>)
+SYMBOL(size, std::, <forward_list>)
+SYMBOL(size, std::, <list>)
+SYMBOL(size, std::, <map>)
+SYMBOL(size, std::, <regex>)
+SYMBOL(size, std::, <set>)
+SYMBOL(size, std::, <span>)
+SYMBOL(size, std::, <string>)
+SYMBOL(size, std::, <string_view>)
+SYMBOL(size, std::, <unordered_map>)
+SYMBOL(size, std::, <unordered_set>)
+SYMBOL(size, std::, <vector>)
+SYMBOL(ssize, std::, <iterator>)
+SYMBOL(ssize, std::, <array>)
+SYMBOL(ssize, std::, <deque>)
+SYMBOL(ssize, std::, <forward_list>)
+SYMBOL(ssize, std::, <list>)
+SYMBOL(ssize, std::, <map>)
+SYMBOL(ssize, std::, <regex>)
+SYMBOL(ssize, std::, <set>)
+SYMBOL(ssize, std::, <span>)
+SYMBOL(ssize, std::, <string>)
+SYMBOL(ssize, std::, <string_view>)
+SYMBOL(ssize, std::, <unordered_map>)
+SYMBOL(ssize, std::, <unordered_set>)
+SYMBOL(ssize, std::, <vector>)
+
+// Add headers for generic integer-type abs.
+// Ignore other variants (std::complex, std::valarray, std::intmax_t)
+SYMBOL(abs, std::, <cstdlib>)
+SYMBOL(abs, std::, <cmath>)
+SYMBOL(abs, None, <cstdlib>)
+SYMBOL(abs, None, <stdlib.h>)
+SYMBOL(abs, None, <cmath>)
+SYMBOL(abs, None, <math.h>)
+
+// Only add headers for the generic atomic template.
+// Ignore variants (std::weak_ptr, std::shared_ptr).
+SYMBOL(atomic, std::, <atomic>)
+// atomic_* family symbols. <stdatomic.h> is for C compatibility.
+SYMBOL(atomic_bool, std::, <atomic>)
+SYMBOL(atomic_bool, None, <stdatomic.h>)
+SYMBOL(atomic_char, std::, <atomic>)
+SYMBOL(atomic_char, None, <stdatomic.h>)
+SYMBOL(atomic_char16_t, std::, <atomic>)
+SYMBOL(atomic_char16_t, None, <stdatomic.h>)
+SYMBOL(atomic_char32_t, std::, <atomic>)
+SYMBOL(atomic_char32_t, None, <stdatomic.h>)
+SYMBOL(atomic_char8_t, std::, <atomic>)
+SYMBOL(atomic_char8_t, None, <stdatomic.h>)
+SYMBOL(atomic_int, std::, <atomic>)
+SYMBOL(atomic_int, None, <stdatomic.h>)
+SYMBOL(atomic_int16_t, std::, <atomic>)
+SYMBOL(atomic_int16_t, None, <stdatomic.h>)
+SYMBOL(atomic_int32_t, std::, <atomic>)
+SYMBOL(atomic_int32_t, None, <stdatomic.h>)
+SYMBOL(atomic_int64_t, std::, <atomic>)
+SYMBOL(atomic_int64_t, None, <stdatomic.h>)
+SYMBOL(atomic_int8_t, std::, <atomic>)
+SYMBOL(atomic_int8_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_fast16_t, std::, <atomic>)
+SYMBOL(atomic_int_fast16_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_fast32_t, std::, <atomic>)
+SYMBOL(atomic_int_fast32_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_fast64_t, std::, <atomic>)
+SYMBOL(atomic_int_fast64_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_fast8_t, std::, <atomic>)
+SYMBOL(atomic_int_fast8_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_least16_t, std::, <atomic>)
+SYMBOL(atomic_int_least16_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_least32_t, std::, <atomic>)
+SYMBOL(atomic_int_least32_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_least64_t, std::, <atomic>)
+SYMBOL(atomic_int_least64_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_least8_t, std::, <atomic>)
+SYMBOL(atomic_int_least8_t, None, <stdatomic.h>)
+SYMBOL(atomic_intmax_t, std::, <atomic>)
+SYMBOL(atomic_intmax_t, None, <stdatomic.h>)
+SYMBOL(atomic_intptr_t, std::, <atomic>)
+SYMBOL(atomic_intptr_t, None, <stdatomic.h>)
+SYMBOL(atomic_llong, std::, <atomic>)
+SYMBOL(atomic_llong, None, <stdatomic.h>)
+SYMBOL(atomic_long, std::, <atomic>)
+SYMBOL(atomic_long, None, <stdatomic.h>)
+SYMBOL(atomic_ptrdiff_t, std::, <atomic>)
+SYMBOL(atomic_ptrdiff_t, None, <stdatomic.h>)
+SYMBOL(atomic_schar, std::, <atomic>)
+SYMBOL(atomic_schar, None, <stdatomic.h>)
+SYMBOL(atomic_short, std::, <atomic>)
+SYMBOL(atomic_short, None, <stdatomic.h>)
+SYMBOL(atomic_signed_lock_free, std::, <atomic>)
+SYMBOL(atomic_signed_lock_free, None, <stdatomic.h>)
+SYMBOL(atomic_size_t, std::, <atomic>)
+SYMBOL(atomic_size_t, None, <stdatomic.h>)
+SYMBOL(atomic_uchar, std::, <atomic>)
+SYMBOL(atomic_uchar, None, <stdatomic.h>)
+SYMBOL(atomic_uint, std::, <atomic>)
+SYMBOL(atomic_uint, None, <stdatomic.h>)
+SYMBOL(atomic_uint16_t, std::, <atomic>)
+SYMBOL(atomic_uint16_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint32_t, std::, <atomic>)
+SYMBOL(atomic_uint32_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint64_t, std::, <atomic>)
+SYMBOL(atomic_uint64_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint8_t, std::, <atomic>)
+SYMBOL(atomic_uint8_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_fast16_t, std::, <atomic>)
+SYMBOL(atomic_uint_fast16_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_fast32_t, std::, <atomic>)
+SYMBOL(atomic_uint_fast32_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_fast64_t, std::, <atomic>)
+SYMBOL(atomic_uint_fast64_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_fast8_t, std::, <atomic>)
+SYMBOL(atomic_uint_fast8_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_least16_t, std::, <atomic>)
+SYMBOL(atomic_uint_least16_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_least32_t, std::, <atomic>)
+SYMBOL(atomic_uint_least32_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_least64_t, std::, <atomic>)
+SYMBOL(atomic_uint_least64_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_least8_t, std::, <atomic>)
+SYMBOL(atomic_uint_least8_t, None, <stdatomic.h>)
+SYMBOL(atomic_uintmax_t, std::, <atomic>)
+SYMBOL(atomic_uintmax_t, None, <stdatomic.h>)
+SYMBOL(atomic_uintptr_t, std::, <atomic>)
+SYMBOL(atomic_uintptr_t, None, <stdatomic.h>)
+SYMBOL(atomic_ullong, std::, <atomic>)
+SYMBOL(atomic_ullong, None, <stdatomic.h>)
+SYMBOL(atomic_ulong, std::, <atomic>)
+SYMBOL(atomic_ulong, None, <stdatomic.h>)
+SYMBOL(atomic_unsigned_lock_free, std::, <atomic>)
+SYMBOL(atomic_unsigned_lock_free, None, <stdatomic.h>)
+SYMBOL(atomic_ushort, std::, <atomic>)
+SYMBOL(atomic_ushort, None, <stdatomic.h>)
+SYMBOL(atomic_wchar_t, std::, <atomic>)
+SYMBOL(atomic_wchar_t, None, <stdatomic.h>)
+
+// std::get has a few variants for different types (tuple, array, pair etc)
+// which is tricky to disambiguate without type information.
+// Don't set any header for it, as it comes with the type header.
+SYMBOL(get, std::, /*no headers*/)
+// Similarly make_error_{code,condition} also have different overloads (errc,
+// io_errc, future_errc) and each of them are provided by relevant headers
+// providing the type.
+SYMBOL(make_error_code, std::, /*no headers*/)
+SYMBOL(make_error_condition, std::, /*no headers*/)
+
+// cppreference symbol index page was missing these symbols.
+// Remove them when the cppreference offline archive catches up.
+SYMBOL(index_sequence, std::, <utility>)
+SYMBOL(index_sequence_for, std::, <utility>)
+SYMBOL(make_index_sequence, std::, <utility>)
+SYMBOL(make_integer_sequence, std::, <utility>)
+
+// The std::placeholders symbols (_1, ..., _N) are listed in the cppreference
+// placeholder.html page, but the index only contains a single entry with the
+// text "_1, _2, ..., _N", which is not handled by the script.
+// N is an implementation-defined number (10 for libc++; 29 for libstdc++).
+SYMBOL(_1, std::placeholders::, <functional>)
+SYMBOL(_2, std::placeholders::, <functional>)
+SYMBOL(_3, std::placeholders::, <functional>)
+SYMBOL(_4, std::placeholders::, <functional>)
+SYMBOL(_5, std::placeholders::, <functional>)
+SYMBOL(_6, std::placeholders::, <functional>)
+SYMBOL(_7, std::placeholders::, <functional>)
+SYMBOL(_8, std::placeholders::, <functional>)
+SYMBOL(_9, std::placeholders::, <functional>)
+SYMBOL(_10, std::placeholders::, <functional>)
+SYMBOL(_11, std::placeholders::, <functional>)
+SYMBOL(_12, std::placeholders::, <functional>)
+SYMBOL(_13, std::placeholders::, <functional>)
+SYMBOL(_14, std::placeholders::, <functional>)
+SYMBOL(_15, std::placeholders::, <functional>)
+SYMBOL(_16, std::placeholders::, <functional>)
+SYMBOL(_17, std::placeholders::, <functional>)
+SYMBOL(_18, std::placeholders::, <functional>)
+SYMBOL(_19, std::placeholders::, <functional>)
+SYMBOL(_20, std::placeholders::, <functional>)
+SYMBOL(_21, std::placeholders::, <functional>)
+SYMBOL(_22, std::placeholders::, <functional>)
+SYMBOL(_23, std::placeholders::, <functional>)
+SYMBOL(_24, std::placeholders::, <functional>)
+SYMBOL(_25, std::placeholders::, <functional>)
+SYMBOL(_26, std::placeholders::, <functional>)
+SYMBOL(_27, std::placeholders::, <functional>)
+SYMBOL(_28, std::placeholders::, <functional>)
+SYMBOL(_29, std::placeholders::, <functional>)
+
+// Macros
+SYMBOL(NULL, None, <cstddef>)
+SYMBOL(NULL, None, <stddef.h>)
+SYMBOL(NULL, None, <cstdlib>)
+SYMBOL(NULL, None, <stdlib.h>)
+SYMBOL(NULL, None, <cstring>)
+SYMBOL(NULL, None, <string.h>)
+SYMBOL(NULL, None, <cwchar>)
+SYMBOL(NULL, None, <wchar.h>)
+SYMBOL(NULL, None, <ctime>)
+SYMBOL(NULL, None, <time.h>)
+SYMBOL(NULL, None, <clocale>)
+SYMBOL(NULL, None, <locale.h>)
+SYMBOL(NULL, None, <cstdio>)
+SYMBOL(NULL, None, <stdio.h>)
+
+// These are macros that are not spelled out in the page linked from the index.
+// Extracted from https://en.cppreference.com/w/cpp/header/cinttypes
+SYMBOL(PRId8, None, <cinttypes>)
+SYMBOL(PRId8, None, <inttypes.h>)
+SYMBOL(PRId16, None, <cinttypes>)
+SYMBOL(PRId16, None, <inttypes.h>)
+SYMBOL(PRId32, None, <cinttypes>)
+SYMBOL(PRId32, None, <inttypes.h>)
+SYMBOL(PRId64, None, <cinttypes>)
+SYMBOL(PRId64, None, <inttypes.h>)
+SYMBOL(PRIdLEAST8, None, <cinttypes>)
+SYMBOL(PRIdLEAST8, None, <inttypes.h>)
+SYMBOL(PRIdLEAST16, None, <cinttypes>)
+SYMBOL(PRIdLEAST16, None, <inttypes.h>)
+SYMBOL(PRIdLEAST32, None, <cinttypes>)
+SYMBOL(PRIdLEAST32, None, <inttypes.h>)
+SYMBOL(PRIdLEAST64, None, <cinttypes>)
+SYMBOL(PRIdLEAST64, None, <inttypes.h>)
+SYMBOL(PRIdFAST8, None, <cinttypes>)
+SYMBOL(PRIdFAST8, None, <inttypes.h>)
+SYMBOL(PRIdFAST16, None, <cinttypes>)
+SYMBOL(PRIdFAST16, None, <inttypes.h>)
+SYMBOL(PRIdFAST32, None, <cinttypes>)
+SYMBOL(PRIdFAST32, None, <inttypes.h>)
+SYMBOL(PRIdFAST64, None, <cinttypes>)
+SYMBOL(PRIdFAST64, None, <inttypes.h>)
+SYMBOL(PRIdMAX, None, <cinttypes>)
+SYMBOL(PRIdMAX, None, <inttypes.h>)
+SYMBOL(PRIdPTR, None, <cinttypes>)
+SYMBOL(PRIdPTR, None, <inttypes.h>)
+SYMBOL(PRIi8, None, <cinttypes>)
+SYMBOL(PRIi8, None, <inttypes.h>)
+SYMBOL(PRIi16, None, <cinttypes>)
+SYMBOL(PRIi16, None, <inttypes.h>)
+SYMBOL(PRIi32, None, <cinttypes>)
+SYMBOL(PRIi32, None, <inttypes.h>)
+SYMBOL(PRIi64, None, <cinttypes>)
+SYMBOL(PRIi64, None, <inttypes.h>)
+SYMBOL(PRIiLEAST8, None, <cinttypes>)
+SYMBOL(PRIiLEAST8, None, <inttypes.h>)
+SYMBOL(PRIiLEAST16, None, <cinttypes>)
+SYMBOL(PRIiLEAST16, None, <inttypes.h>)
+SYMBOL(PRIiLEAST32, None, <cinttypes>)
+SYMBOL(PRIiLEAST32, None, <inttypes.h>)
+SYMBOL(PRIiLEAST64, None, <cinttypes>)
+SYMBOL(PRIiLEAST64, None, <inttypes.h>)
+SYMBOL(PRIiFAST8, None, <cinttypes>)
+SYMBOL(PRIiFAST8, None, <inttypes.h>)
+SYMBOL(PRIiFAST16, None, <cinttypes>)
+SYMBOL(PRIiFAST16, None, <inttypes.h>)
+SYMBOL(PRIiFAST32, None, <cinttypes>)
+SYMBOL(PRIiFAST32, None, <inttypes.h>)
+SYMBOL(PRIiFAST64, None, <cinttypes>)
+SYMBOL(PRIiFAST64, None, <inttypes.h>)
+SYMBOL(PRIiMAX, None, <cinttypes>)
+SYMBOL(PRIiMAX, None, <inttypes.h>)
+SYMBOL(PRIiPTR, None, <cinttypes>)
+SYMBOL(PRIiPTR, None, <inttypes.h>)
+SYMBOL(PRIu8, None, <cinttypes>)
+SYMBOL(PRIu8, None, <inttypes.h>)
+SYMBOL(PRIu16, None, <cinttypes>)
+SYMBOL(PRIu16, None, <inttypes.h>)
+SYMBOL(PRIu32, None, <cinttypes>)
+SYMBOL(PRIu32, None, <inttypes.h>)
+SYMBOL(PRIu64, None, <cinttypes>)
+SYMBOL(PRIu64, None, <inttypes.h>)
+SYMBOL(PRIuLEAST8, None, <cinttypes>)
+SYMBOL(PRIuLEAST8, None, <inttypes.h>)
+SYMBOL(PRIuLEAST16, None, <cinttypes>)
+SYMBOL(PRIuLEAST16, None, <inttypes.h>)
+SYMBOL(PRIuLEAST32, None, <cinttypes>)
+SYMBOL(PRIuLEAST32, None, <inttypes.h>)
+SYMBOL(PRIuLEAST64, None, <cinttypes>)
+SYMBOL(PRIuLEAST64, None, <inttypes.h>)
+SYMBOL(PRIuFAST8, None, <cinttypes>)
+SYMBOL(PRIuFAST8, None, <inttypes.h>)
+SYMBOL(PRIuFAST16, None, <cinttypes>)
+SYMBOL(PRIuFAST16, None, <inttypes.h>)
+SYMBOL(PRIuFAST32, None, <cinttypes>)
+SYMBOL(PRIuFAST32, None, <inttypes.h>)
+SYMBOL(PRIuFAST64, None, <cinttypes>)
+SYMBOL(PRIuFAST64, None, <inttypes.h>)
+SYMBOL(PRIuMAX, None, <cinttypes>)
+SYMBOL(PRIuMAX, None, <inttypes.h>)
+SYMBOL(PRIuPTR, None, <cinttypes>)
+SYMBOL(PRIuPTR, None, <inttypes.h>)
+SYMBOL(PRIo8, None, <cinttypes>)
+SYMBOL(PRIo8, None, <inttypes.h>)
+SYMBOL(PRIo16, None, <cinttypes>)
+SYMBOL(PRIo16, None, <inttypes.h>)
+SYMBOL(PRIo32, None, <cinttypes>)
+SYMBOL(PRIo32, None, <inttypes.h>)
+SYMBOL(PRIo64, None, <cinttypes>)
+SYMBOL(PRIo64, None, <inttypes.h>)
+SYMBOL(PRIoLEAST8, None, <cinttypes>)
+SYMBOL(PRIoLEAST8, None, <inttypes.h>)
+SYMBOL(PRIoLEAST16, None, <cinttypes>)
+SYMBOL(PRIoLEAST16, None, <inttypes.h>)
+SYMBOL(PRIoLEAST32, None, <cinttypes>)
+SYMBOL(PRIoLEAST32, None, <inttypes.h>)
+SYMBOL(PRIoLEAST64, None, <cinttypes>)
+SYMBOL(PRIoLEAST64, None, <inttypes.h>)
+SYMBOL(PRIoFAST8, None, <cinttypes>)
+SYMBOL(PRIoFAST8, None, <inttypes.h>)
+SYMBOL(PRIoFAST16, None, <cinttypes>)
+SYMBOL(PRIoFAST16, None, <inttypes.h>)
+SYMBOL(PRIoFAST32, None, <cinttypes>)
+SYMBOL(PRIoFAST32, None, <inttypes.h>)
+SYMBOL(PRIoFAST64, None, <cinttypes>)
+SYMBOL(PRIoFAST64, None, <inttypes.h>)
+SYMBOL(PRIoMAX, None, <cinttypes>)
+SYMBOL(PRIoMAX, None, <inttypes.h>)
+SYMBOL(PRIoPTR, None, <cinttypes>)
+SYMBOL(PRIoPTR, None, <inttypes.h>)
+SYMBOL(PRIx8, None, <cinttypes>)
+SYMBOL(PRIx8, None, <inttypes.h>)
+SYMBOL(PRIx16, None, <cinttypes>)
+SYMBOL(PRIx16, None, <inttypes.h>)
+SYMBOL(PRIx32, None, <cinttypes>)
+SYMBOL(PRIx32, None, <inttypes.h>)
+SYMBOL(PRIx64, None, <cinttypes>)
+SYMBOL(PRIx64, None, <inttypes.h>)
+SYMBOL(PRIxLEAST8, None, <cinttypes>)
+SYMBOL(PRIxLEAST8, None, <inttypes.h>)
+SYMBOL(PRIxLEAST16, None, <cinttypes>)
+SYMBOL(PRIxLEAST16, None, <inttypes.h>)
+SYMBOL(PRIxLEAST32, None, <cinttypes>)
+SYMBOL(PRIxLEAST32, None, <inttypes.h>)
+SYMBOL(PRIxLEAST64, None, <cinttypes>)
+SYMBOL(PRIxLEAST64, None, <inttypes.h>)
+SYMBOL(PRIxFAST8, None, <cinttypes>)
+SYMBOL(PRIxFAST8, None, <inttypes.h>)
+SYMBOL(PRIxFAST16, None, <cinttypes>)
+SYMBOL(PRIxFAST16, None, <inttypes.h>)
+SYMBOL(PRIxFAST32, None, <cinttypes>)
+SYMBOL(PRIxFAST32, None, <inttypes.h>)
+SYMBOL(PRIxFAST64, None, <cinttypes>)
+SYMBOL(PRIxFAST64, None, <inttypes.h>)
+SYMBOL(PRIxMAX, None, <cinttypes>)
+SYMBOL(PRIxMAX, None, <inttypes.h>)
+SYMBOL(PRIxPTR, None, <cinttypes>)
+SYMBOL(PRIxPTR, None, <inttypes.h>)
+SYMBOL(PRIX8, None, <cinttypes>)
+SYMBOL(PRIX8, None, <inttypes.h>)
+SYMBOL(PRIX16, None, <cinttypes>)
+SYMBOL(PRIX16, None, <inttypes.h>)
+SYMBOL(PRIX32, None, <cinttypes>)
+SYMBOL(PRIX32, None, <inttypes.h>)
+SYMBOL(PRIX64, None, <cinttypes>)
+SYMBOL(PRIX64, None, <inttypes.h>)
+SYMBOL(PRIXLEAST8, None, <cinttypes>)
+SYMBOL(PRIXLEAST8, None, <inttypes.h>)
+SYMBOL(PRIXLEAST16, None, <cinttypes>)
+SYMBOL(PRIXLEAST16, None, <inttypes.h>)
+SYMBOL(PRIXLEAST32, None, <cinttypes>)
+SYMBOL(PRIXLEAST32, None, <inttypes.h>)
+SYMBOL(PRIXLEAST64, None, <cinttypes>)
+SYMBOL(PRIXLEAST64, None, <inttypes.h>)
+SYMBOL(PRIXFAST8, None, <cinttypes>)
+SYMBOL(PRIXFAST8, None, <inttypes.h>)
+SYMBOL(PRIXFAST16, None, <cinttypes>)
+SYMBOL(PRIXFAST16, None, <inttypes.h>)
+SYMBOL(PRIXFAST32, None, <cinttypes>)
+SYMBOL(PRIXFAST32, None, <inttypes.h>)
+SYMBOL(PRIXFAST64, None, <cinttypes>)
+SYMBOL(PRIXFAST64, None, <inttypes.h>)
+SYMBOL(PRIXMAX, None, <cinttypes>)
+SYMBOL(PRIXMAX, None, <inttypes.h>)
+SYMBOL(PRIXPTR, None, <cinttypes>)
+SYMBOL(PRIXPTR, None, <inttypes.h>)
+SYMBOL(SCNd8, None, <cinttypes>)
+SYMBOL(SCNd8, None, <inttypes.h>)
+SYMBOL(SCNd16, None, <cinttypes>)
+SYMBOL(SCNd16, None, <inttypes.h>)
+SYMBOL(SCNd32, None, <cinttypes>)
+SYMBOL(SCNd32, None, <inttypes.h>)
+SYMBOL(SCNd64, None, <cinttypes>)
+SYMBOL(SCNd64, None, <inttypes.h>)
+SYMBOL(SCNdLEAST8, None, <cinttypes>)
+SYMBOL(SCNdLEAST8, None, <inttypes.h>)
+SYMBOL(SCNdLEAST16, None, <cinttypes>)
+SYMBOL(SCNdLEAST16, None, <inttypes.h>)
+SYMBOL(SCNdLEAST32, None, <cinttypes>)
+SYMBOL(SCNdLEAST32, None, <inttypes.h>)
+SYMBOL(SCNdLEAST64, None, <cinttypes>)
+SYMBOL(SCNdLEAST64, None, <inttypes.h>)
+SYMBOL(SCNdFAST8, None, <cinttypes>)
+SYMBOL(SCNdFAST8, None, <inttypes.h>)
+SYMBOL(SCNdFAST16, None, <cinttypes>)
+SYMBOL(SCNdFAST16, None, <inttypes.h>)
+SYMBOL(SCNdFAST32, None, <cinttypes>)
+SYMBOL(SCNdFAST32, None, <inttypes.h>)
+SYMBOL(SCNdFAST64, None, <cinttypes>)
+SYMBOL(SCNdFAST64, None, <inttypes.h>)
+SYMBOL(SCNdMAX, None, <cinttypes>)
+SYMBOL(SCNdMAX, None, <inttypes.h>)
+SYMBOL(SCNdPTR, None, <cinttypes>)
+SYMBOL(SCNdPTR, None, <inttypes.h>)
+SYMBOL(SCNi8, None, <cinttypes>)
+SYMBOL(SCNi8, None, <inttypes.h>)
+SYMBOL(SCNi16, None, <cinttypes>)
+SYMBOL(SCNi16, None, <inttypes.h>)
+SYMBOL(SCNi32, None, <cinttypes>)
+SYMBOL(SCNi32, None, <inttypes.h>)
+SYMBOL(SCNi64, None, <cinttypes>)
+SYMBOL(SCNi64, None, <inttypes.h>)
+SYMBOL(SCNiLEAST8, None, <cinttypes>)
+SYMBOL(SCNiLEAST8, None, <inttypes.h>)
+SYMBOL(SCNiLEAST16, None, <cinttypes>)
+SYMBOL(SCNiLEAST16, None, <inttypes.h>)
+SYMBOL(SCNiLEAST32, None, <cinttypes>)
+SYMBOL(SCNiLEAST32, None, <inttypes.h>)
+SYMBOL(SCNiLEAST64, None, <cinttypes>)
+SYMBOL(SCNiLEAST64, None, <inttypes.h>)
+SYMBOL(SCNiFAST8, None, <cinttypes>)
+SYMBOL(SCNiFAST8, None, <inttypes.h>)
+SYMBOL(SCNiFAST16, None, <cinttypes>)
+SYMBOL(SCNiFAST16, None, <inttypes.h>)
+SYMBOL(SCNiFAST32, None, <cinttypes>)
+SYMBOL(SCNiFAST32, None, <inttypes.h>)
+SYMBOL(SCNiFAST64, None, <cinttypes>)
+SYMBOL(SCNiFAST64, None, <inttypes.h>)
+SYMBOL(SCNiMAX, None, <cinttypes>)
+SYMBOL(SCNiMAX, None, <inttypes.h>)
+SYMBOL(SCNiPTR, None, <cinttypes>)
+SYMBOL(SCNiPTR, None, <inttypes.h>)
+SYMBOL(SCNu8, None, <cinttypes>)
+SYMBOL(SCNu8, None, <inttypes.h>)
+SYMBOL(SCNu16, None, <cinttypes>)
+SYMBOL(SCNu16, None, <inttypes.h>)
+SYMBOL(SCNu32, None, <cinttypes>)
+SYMBOL(SCNu32, None, <inttypes.h>)
+SYMBOL(SCNu64, None, <cinttypes>)
+SYMBOL(SCNu64, None, <inttypes.h>)
+SYMBOL(SCNuLEAST8, None, <cinttypes>)
+SYMBOL(SCNuLEAST8, None, <inttypes.h>)
+SYMBOL(SCNuLEAST16, None, <cinttypes>)
+SYMBOL(SCNuLEAST16, None, <inttypes.h>)
+SYMBOL(SCNuLEAST32, None, <cinttypes>)
+SYMBOL(SCNuLEAST32, None, <inttypes.h>)
+SYMBOL(SCNuLEAST64, None, <cinttypes>)
+SYMBOL(SCNuLEAST64, None, <inttypes.h>)
+SYMBOL(SCNuFAST8, None, <cinttypes>)
+SYMBOL(SCNuFAST8, None, <inttypes.h>)
+SYMBOL(SCNuFAST16, None, <cinttypes>)
+SYMBOL(SCNuFAST16, None, <inttypes.h>)
+SYMBOL(SCNuFAST32, None, <cinttypes>)
+SYMBOL(SCNuFAST32, None, <inttypes.h>)
+SYMBOL(SCNuFAST64, None, <cinttypes>)
+SYMBOL(SCNuFAST64, None, <inttypes.h>)
+SYMBOL(SCNuMAX, None, <cinttypes>)
+SYMBOL(SCNuMAX, None, <inttypes.h>)
+SYMBOL(SCNuPTR, None, <cinttypes>)
+SYMBOL(SCNuPTR, None, <inttypes.h>)
+SYMBOL(SCNo8, None, <cinttypes>)
+SYMBOL(SCNo8, None, <inttypes.h>)
+SYMBOL(SCNo16, None, <cinttypes>)
+SYMBOL(SCNo16, None, <inttypes.h>)
+SYMBOL(SCNo32, None, <cinttypes>)
+SYMBOL(SCNo32, None, <inttypes.h>)
+SYMBOL(SCNo64, None, <cinttypes>)
+SYMBOL(SCNo64, None, <inttypes.h>)
+SYMBOL(SCNoLEAST8, None, <cinttypes>)
+SYMBOL(SCNoLEAST8, None, <inttypes.h>)
+SYMBOL(SCNoLEAST16, None, <cinttypes>)
+SYMBOL(SCNoLEAST16, None, <inttypes.h>)
+SYMBOL(SCNoLEAST32, None, <cinttypes>)
+SYMBOL(SCNoLEAST32, None, <inttypes.h>)
+SYMBOL(SCNoLEAST64, None, <cinttypes>)
+SYMBOL(SCNoLEAST64, None, <inttypes.h>)
+SYMBOL(SCNoFAST8, None, <cinttypes>)
+SYMBOL(SCNoFAST8, None, <inttypes.h>)
+SYMBOL(SCNoFAST16, None, <cinttypes>)
+SYMBOL(SCNoFAST16, None, <inttypes.h>)
+SYMBOL(SCNoFAST32, None, <cinttypes>)
+SYMBOL(SCNoFAST32, None, <inttypes.h>)
+SYMBOL(SCNoFAST64, None, <cinttypes>)
+SYMBOL(SCNoFAST64, None, <inttypes.h>)
+SYMBOL(SCNoMAX, None, <cinttypes>)
+SYMBOL(SCNoMAX, None, <inttypes.h>)
+SYMBOL(SCNoPTR, None, <cinttypes>)
+SYMBOL(SCNoPTR, None, <inttypes.h>)
+SYMBOL(SCNx8, None, <cinttypes>)
+SYMBOL(SCNx8, None, <inttypes.h>)
+SYMBOL(SCNx16, None, <cinttypes>)
+SYMBOL(SCNx16, None, <inttypes.h>)
+SYMBOL(SCNx32, None, <cinttypes>)
+SYMBOL(SCNx32, None, <inttypes.h>)
+SYMBOL(SCNx64, None, <cinttypes>)
+SYMBOL(SCNx64, None, <inttypes.h>)
+SYMBOL(SCNxLEAST8, None, <cinttypes>)
+SYMBOL(SCNxLEAST8, None, <inttypes.h>)
+SYMBOL(SCNxLEAST16, None, <cinttypes>)
+SYMBOL(SCNxLEAST16, None, <inttypes.h>)
+SYMBOL(SCNxLEAST32, None, <cinttypes>)
+SYMBOL(SCNxLEAST32, None, <inttypes.h>)
+SYMBOL(SCNxLEAST64, None, <cinttypes>)
+SYMBOL(SCNxLEAST64, None, <inttypes.h>)
+SYMBOL(SCNxFAST8, None, <cinttypes>)
+SYMBOL(SCNxFAST8, None, <inttypes.h>)
+SYMBOL(SCNxFAST16, None, <cinttypes>)
+SYMBOL(SCNxFAST16, None, <inttypes.h>)
+SYMBOL(SCNxFAST32, None, <cinttypes>)
+SYMBOL(SCNxFAST32, None, <inttypes.h>)
+SYMBOL(SCNxFAST64, None, <cinttypes>)
+SYMBOL(SCNxFAST64, None, <inttypes.h>)
+SYMBOL(SCNxMAX, None, <cinttypes>)
+SYMBOL(SCNxMAX, None, <inttypes.h>)
+SYMBOL(SCNxPTR, None, <cinttypes>)
+SYMBOL(SCNxPTR, None, <inttypes.h>)
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSymbolMap.inc b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSymbolMap.inc
new file mode 100644
index 000000000000..a08ec11e77a4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSymbolMap.inc
@@ -0,0 +1,3819 @@
+//===-- gen_std.py generated file -------------------------------*- C++ -*-===//
+//
+// Used to build a lookup table (qualified names => include headers) for CPP
+// Standard Library symbols.
+//
+// This file was generated automatically by
+// clang/tools/include-mapping/gen_std.py, DO NOT EDIT!
+//
+// Generated from cppreference offline HTML book (modified on 2022-07-30).
+//===----------------------------------------------------------------------===//
+
+SYMBOL(ATOMIC_BOOL_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_CHAR16_T_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_CHAR32_T_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_CHAR8_T_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_CHAR_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_FLAG_INIT, None, <atomic>)
+SYMBOL(ATOMIC_INT_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_LLONG_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_LONG_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_POINTER_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_SHORT_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_VAR_INIT, None, <atomic>)
+SYMBOL(ATOMIC_WCHAR_T_LOCK_FREE, None, <atomic>)
+SYMBOL(BUFSIZ, None, <cstdio>)
+SYMBOL(BUFSIZ, None, <stdio.h>)
+SYMBOL(CHAR_BIT, None, <climits>)
+SYMBOL(CHAR_BIT, None, <limits.h>)
+SYMBOL(CHAR_MAX, None, <climits>)
+SYMBOL(CHAR_MAX, None, <limits.h>)
+SYMBOL(CHAR_MIN, None, <climits>)
+SYMBOL(CHAR_MIN, None, <limits.h>)
+SYMBOL(CLOCKS_PER_SEC, None, <ctime>)
+SYMBOL(CLOCKS_PER_SEC, None, <time.h>)
+SYMBOL(DBL_DECIMAL_DIG, None, <cfloat>)
+SYMBOL(DBL_DECIMAL_DIG, None, <float.h>)
+SYMBOL(DBL_DIG, None, <cfloat>)
+SYMBOL(DBL_DIG, None, <float.h>)
+SYMBOL(DBL_EPSILON, None, <cfloat>)
+SYMBOL(DBL_EPSILON, None, <float.h>)
+SYMBOL(DBL_HAS_SUBNORM, None, <cfloat>)
+SYMBOL(DBL_HAS_SUBNORM, None, <float.h>)
+SYMBOL(DBL_MANT_DIG, None, <cfloat>)
+SYMBOL(DBL_MANT_DIG, None, <float.h>)
+SYMBOL(DBL_MAX, None, <cfloat>)
+SYMBOL(DBL_MAX, None, <float.h>)
+SYMBOL(DBL_MAX_10_EXP, None, <cfloat>)
+SYMBOL(DBL_MAX_10_EXP, None, <float.h>)
+SYMBOL(DBL_MAX_EXP, None, <cfloat>)
+SYMBOL(DBL_MAX_EXP, None, <float.h>)
+SYMBOL(DBL_MIN, None, <cfloat>)
+SYMBOL(DBL_MIN, None, <float.h>)
+SYMBOL(DBL_MIN_10_EXP, None, <cfloat>)
+SYMBOL(DBL_MIN_10_EXP, None, <float.h>)
+SYMBOL(DBL_MIN_EXP, None, <cfloat>)
+SYMBOL(DBL_MIN_EXP, None, <float.h>)
+SYMBOL(DBL_TRUE_MIN, None, <cfloat>)
+SYMBOL(DBL_TRUE_MIN, None, <float.h>)
+SYMBOL(DECIMAL_DIG, None, <cfloat>)
+SYMBOL(DECIMAL_DIG, None, <float.h>)
+SYMBOL(E2BIG, None, <cerrno>)
+SYMBOL(E2BIG, None, <errno.h>)
+SYMBOL(EACCES, None, <cerrno>)
+SYMBOL(EACCES, None, <errno.h>)
+SYMBOL(EADDRINUSE, None, <cerrno>)
+SYMBOL(EADDRINUSE, None, <errno.h>)
+SYMBOL(EADDRNOTAVAIL, None, <cerrno>)
+SYMBOL(EADDRNOTAVAIL, None, <errno.h>)
+SYMBOL(EAFNOSUPPORT, None, <cerrno>)
+SYMBOL(EAFNOSUPPORT, None, <errno.h>)
+SYMBOL(EAGAIN, None, <cerrno>)
+SYMBOL(EAGAIN, None, <errno.h>)
+SYMBOL(EALREADY, None, <cerrno>)
+SYMBOL(EALREADY, None, <errno.h>)
+SYMBOL(EBADF, None, <cerrno>)
+SYMBOL(EBADF, None, <errno.h>)
+SYMBOL(EBADMSG, None, <cerrno>)
+SYMBOL(EBADMSG, None, <errno.h>)
+SYMBOL(EBUSY, None, <cerrno>)
+SYMBOL(EBUSY, None, <errno.h>)
+SYMBOL(ECANCELED, None, <cerrno>)
+SYMBOL(ECANCELED, None, <errno.h>)
+SYMBOL(ECHILD, None, <cerrno>)
+SYMBOL(ECHILD, None, <errno.h>)
+SYMBOL(ECONNABORTED, None, <cerrno>)
+SYMBOL(ECONNABORTED, None, <errno.h>)
+SYMBOL(ECONNREFUSED, None, <cerrno>)
+SYMBOL(ECONNREFUSED, None, <errno.h>)
+SYMBOL(ECONNRESET, None, <cerrno>)
+SYMBOL(ECONNRESET, None, <errno.h>)
+SYMBOL(EDEADLK, None, <cerrno>)
+SYMBOL(EDEADLK, None, <errno.h>)
+SYMBOL(EDESTADDRREQ, None, <cerrno>)
+SYMBOL(EDESTADDRREQ, None, <errno.h>)
+SYMBOL(EDOM, None, <cerrno>)
+SYMBOL(EDOM, None, <errno.h>)
+SYMBOL(EEXIST, None, <cerrno>)
+SYMBOL(EEXIST, None, <errno.h>)
+SYMBOL(EFAULT, None, <cerrno>)
+SYMBOL(EFAULT, None, <errno.h>)
+SYMBOL(EFBIG, None, <cerrno>)
+SYMBOL(EFBIG, None, <errno.h>)
+SYMBOL(EHOSTUNREACH, None, <cerrno>)
+SYMBOL(EHOSTUNREACH, None, <errno.h>)
+SYMBOL(EIDRM, None, <cerrno>)
+SYMBOL(EIDRM, None, <errno.h>)
+SYMBOL(EILSEQ, None, <cerrno>)
+SYMBOL(EILSEQ, None, <errno.h>)
+SYMBOL(EINPROGRESS, None, <cerrno>)
+SYMBOL(EINPROGRESS, None, <errno.h>)
+SYMBOL(EINTR, None, <cerrno>)
+SYMBOL(EINTR, None, <errno.h>)
+SYMBOL(EINVAL, None, <cerrno>)
+SYMBOL(EINVAL, None, <errno.h>)
+SYMBOL(EIO, None, <cerrno>)
+SYMBOL(EIO, None, <errno.h>)
+SYMBOL(EISCONN, None, <cerrno>)
+SYMBOL(EISCONN, None, <errno.h>)
+SYMBOL(EISDIR, None, <cerrno>)
+SYMBOL(EISDIR, None, <errno.h>)
+SYMBOL(ELOOP, None, <cerrno>)
+SYMBOL(ELOOP, None, <errno.h>)
+SYMBOL(EMFILE, None, <cerrno>)
+SYMBOL(EMFILE, None, <errno.h>)
+SYMBOL(EMLINK, None, <cerrno>)
+SYMBOL(EMLINK, None, <errno.h>)
+SYMBOL(EMSGSIZE, None, <cerrno>)
+SYMBOL(EMSGSIZE, None, <errno.h>)
+SYMBOL(ENAMETOOLONG, None, <cerrno>)
+SYMBOL(ENAMETOOLONG, None, <errno.h>)
+SYMBOL(ENETDOWN, None, <cerrno>)
+SYMBOL(ENETDOWN, None, <errno.h>)
+SYMBOL(ENETRESET, None, <cerrno>)
+SYMBOL(ENETRESET, None, <errno.h>)
+SYMBOL(ENETUNREACH, None, <cerrno>)
+SYMBOL(ENETUNREACH, None, <errno.h>)
+SYMBOL(ENFILE, None, <cerrno>)
+SYMBOL(ENFILE, None, <errno.h>)
+SYMBOL(ENOBUFS, None, <cerrno>)
+SYMBOL(ENOBUFS, None, <errno.h>)
+SYMBOL(ENODATA, None, <cerrno>)
+SYMBOL(ENODATA, None, <errno.h>)
+SYMBOL(ENODEV, None, <cerrno>)
+SYMBOL(ENODEV, None, <errno.h>)
+SYMBOL(ENOENT, None, <cerrno>)
+SYMBOL(ENOENT, None, <errno.h>)
+SYMBOL(ENOEXEC, None, <cerrno>)
+SYMBOL(ENOEXEC, None, <errno.h>)
+SYMBOL(ENOLCK, None, <cerrno>)
+SYMBOL(ENOLCK, None, <errno.h>)
+SYMBOL(ENOLINK, None, <cerrno>)
+SYMBOL(ENOLINK, None, <errno.h>)
+SYMBOL(ENOMEM, None, <cerrno>)
+SYMBOL(ENOMEM, None, <errno.h>)
+SYMBOL(ENOMSG, None, <cerrno>)
+SYMBOL(ENOMSG, None, <errno.h>)
+SYMBOL(ENOPROTOOPT, None, <cerrno>)
+SYMBOL(ENOPROTOOPT, None, <errno.h>)
+SYMBOL(ENOSPC, None, <cerrno>)
+SYMBOL(ENOSPC, None, <errno.h>)
+SYMBOL(ENOSR, None, <cerrno>)
+SYMBOL(ENOSR, None, <errno.h>)
+SYMBOL(ENOSTR, None, <cerrno>)
+SYMBOL(ENOSTR, None, <errno.h>)
+SYMBOL(ENOSYS, None, <cerrno>)
+SYMBOL(ENOSYS, None, <errno.h>)
+SYMBOL(ENOTCONN, None, <cerrno>)
+SYMBOL(ENOTCONN, None, <errno.h>)
+SYMBOL(ENOTDIR, None, <cerrno>)
+SYMBOL(ENOTDIR, None, <errno.h>)
+SYMBOL(ENOTEMPTY, None, <cerrno>)
+SYMBOL(ENOTEMPTY, None, <errno.h>)
+SYMBOL(ENOTRECOVERABLE, None, <cerrno>)
+SYMBOL(ENOTRECOVERABLE, None, <errno.h>)
+SYMBOL(ENOTSOCK, None, <cerrno>)
+SYMBOL(ENOTSOCK, None, <errno.h>)
+SYMBOL(ENOTSUP, None, <cerrno>)
+SYMBOL(ENOTSUP, None, <errno.h>)
+SYMBOL(ENOTTY, None, <cerrno>)
+SYMBOL(ENOTTY, None, <errno.h>)
+SYMBOL(ENXIO, None, <cerrno>)
+SYMBOL(ENXIO, None, <errno.h>)
+SYMBOL(EOF, None, <cstdio>)
+SYMBOL(EOF, None, <stdio.h>)
+SYMBOL(EOPNOTSUPP, None, <cerrno>)
+SYMBOL(EOPNOTSUPP, None, <errno.h>)
+SYMBOL(EOVERFLOW, None, <cerrno>)
+SYMBOL(EOVERFLOW, None, <errno.h>)
+SYMBOL(EOWNERDEAD, None, <cerrno>)
+SYMBOL(EOWNERDEAD, None, <errno.h>)
+SYMBOL(EPERM, None, <cerrno>)
+SYMBOL(EPERM, None, <errno.h>)
+SYMBOL(EPIPE, None, <cerrno>)
+SYMBOL(EPIPE, None, <errno.h>)
+SYMBOL(EPROTO, None, <cerrno>)
+SYMBOL(EPROTO, None, <errno.h>)
+SYMBOL(EPROTONOSUPPORT, None, <cerrno>)
+SYMBOL(EPROTONOSUPPORT, None, <errno.h>)
+SYMBOL(EPROTOTYPE, None, <cerrno>)
+SYMBOL(EPROTOTYPE, None, <errno.h>)
+SYMBOL(ERANGE, None, <cerrno>)
+SYMBOL(ERANGE, None, <errno.h>)
+SYMBOL(EROFS, None, <cerrno>)
+SYMBOL(EROFS, None, <errno.h>)
+SYMBOL(ESPIPE, None, <cerrno>)
+SYMBOL(ESPIPE, None, <errno.h>)
+SYMBOL(ESRCH, None, <cerrno>)
+SYMBOL(ESRCH, None, <errno.h>)
+SYMBOL(ETIME, None, <cerrno>)
+SYMBOL(ETIME, None, <errno.h>)
+SYMBOL(ETIMEDOUT, None, <cerrno>)
+SYMBOL(ETIMEDOUT, None, <errno.h>)
+SYMBOL(ETXTBSY, None, <cerrno>)
+SYMBOL(ETXTBSY, None, <errno.h>)
+SYMBOL(EWOULDBLOCK, None, <cerrno>)
+SYMBOL(EWOULDBLOCK, None, <errno.h>)
+SYMBOL(EXDEV, None, <cerrno>)
+SYMBOL(EXDEV, None, <errno.h>)
+SYMBOL(EXIT_FAILURE, None, <cstdlib>)
+SYMBOL(EXIT_FAILURE, None, <stdlib.h>)
+SYMBOL(EXIT_SUCCESS, None, <cstdlib>)
+SYMBOL(EXIT_SUCCESS, None, <stdlib.h>)
+SYMBOL(FE_ALL_EXCEPT, None, <cfenv>)
+SYMBOL(FE_ALL_EXCEPT, None, <fenv.h>)
+SYMBOL(FE_DFL_ENV, None, <cfenv>)
+SYMBOL(FE_DFL_ENV, None, <fenv.h>)
+SYMBOL(FE_DIVBYZERO, None, <cfenv>)
+SYMBOL(FE_DIVBYZERO, None, <fenv.h>)
+SYMBOL(FE_DOWNWARD, None, <cfenv>)
+SYMBOL(FE_DOWNWARD, None, <fenv.h>)
+SYMBOL(FE_INEXACT, None, <cfenv>)
+SYMBOL(FE_INEXACT, None, <fenv.h>)
+SYMBOL(FE_INVALID, None, <cfenv>)
+SYMBOL(FE_INVALID, None, <fenv.h>)
+SYMBOL(FE_OVERFLOW, None, <cfenv>)
+SYMBOL(FE_OVERFLOW, None, <fenv.h>)
+SYMBOL(FE_TONEAREST, None, <cfenv>)
+SYMBOL(FE_TONEAREST, None, <fenv.h>)
+SYMBOL(FE_TOWARDZERO, None, <cfenv>)
+SYMBOL(FE_TOWARDZERO, None, <fenv.h>)
+SYMBOL(FE_UNDERFLOW, None, <cfenv>)
+SYMBOL(FE_UNDERFLOW, None, <fenv.h>)
+SYMBOL(FE_UPWARD, None, <cfenv>)
+SYMBOL(FE_UPWARD, None, <fenv.h>)
+SYMBOL(FILENAME_MAX, None, <cstdio>)
+SYMBOL(FILENAME_MAX, None, <stdio.h>)
+SYMBOL(FLT_DECIMAL_DIG, None, <cfloat>)
+SYMBOL(FLT_DECIMAL_DIG, None, <float.h>)
+SYMBOL(FLT_DIG, None, <cfloat>)
+SYMBOL(FLT_DIG, None, <float.h>)
+SYMBOL(FLT_EPSILON, None, <cfloat>)
+SYMBOL(FLT_EPSILON, None, <float.h>)
+SYMBOL(FLT_EVAL_METHOD, None, <cfloat>)
+SYMBOL(FLT_EVAL_METHOD, None, <float.h>)
+SYMBOL(FLT_HAS_SUBNORM, None, <cfloat>)
+SYMBOL(FLT_HAS_SUBNORM, None, <float.h>)
+SYMBOL(FLT_MANT_DIG, None, <cfloat>)
+SYMBOL(FLT_MANT_DIG, None, <float.h>)
+SYMBOL(FLT_MAX, None, <cfloat>)
+SYMBOL(FLT_MAX, None, <float.h>)
+SYMBOL(FLT_MAX_10_EXP, None, <cfloat>)
+SYMBOL(FLT_MAX_10_EXP, None, <float.h>)
+SYMBOL(FLT_MAX_EXP, None, <cfloat>)
+SYMBOL(FLT_MAX_EXP, None, <float.h>)
+SYMBOL(FLT_MIN, None, <cfloat>)
+SYMBOL(FLT_MIN, None, <float.h>)
+SYMBOL(FLT_MIN_10_EXP, None, <cfloat>)
+SYMBOL(FLT_MIN_10_EXP, None, <float.h>)
+SYMBOL(FLT_MIN_EXP, None, <cfloat>)
+SYMBOL(FLT_MIN_EXP, None, <float.h>)
+SYMBOL(FLT_RADIX, None, <cfloat>)
+SYMBOL(FLT_RADIX, None, <float.h>)
+SYMBOL(FLT_ROUNDS, None, <cfloat>)
+SYMBOL(FLT_ROUNDS, None, <float.h>)
+SYMBOL(FLT_TRUE_MIN, None, <cfloat>)
+SYMBOL(FLT_TRUE_MIN, None, <float.h>)
+SYMBOL(FOPEN_MAX, None, <cstdio>)
+SYMBOL(FOPEN_MAX, None, <stdio.h>)
+SYMBOL(FP_FAST_FMA, None, <cmath>)
+SYMBOL(FP_FAST_FMA, None, <math.h>)
+SYMBOL(FP_FAST_FMAF, None, <cmath>)
+SYMBOL(FP_FAST_FMAF, None, <math.h>)
+SYMBOL(FP_FAST_FMAL, None, <cmath>)
+SYMBOL(FP_FAST_FMAL, None, <math.h>)
+SYMBOL(FP_ILOGB0, None, <cmath>)
+SYMBOL(FP_ILOGB0, None, <math.h>)
+SYMBOL(FP_ILOGBNAN, None, <cmath>)
+SYMBOL(FP_ILOGBNAN, None, <math.h>)
+SYMBOL(FP_INFINITE, None, <cmath>)
+SYMBOL(FP_INFINITE, None, <math.h>)
+SYMBOL(FP_NAN, None, <cmath>)
+SYMBOL(FP_NAN, None, <math.h>)
+SYMBOL(FP_NORMAL, None, <cmath>)
+SYMBOL(FP_NORMAL, None, <math.h>)
+SYMBOL(FP_SUBNORMAL, None, <cmath>)
+SYMBOL(FP_SUBNORMAL, None, <math.h>)
+SYMBOL(FP_ZERO, None, <cmath>)
+SYMBOL(FP_ZERO, None, <math.h>)
+SYMBOL(HUGE_VAL, None, <cmath>)
+SYMBOL(HUGE_VAL, None, <math.h>)
+SYMBOL(HUGE_VALF, None, <cmath>)
+SYMBOL(HUGE_VALF, None, <math.h>)
+SYMBOL(HUGE_VALL, None, <cmath>)
+SYMBOL(HUGE_VALL, None, <math.h>)
+SYMBOL(INFINITY, None, <cmath>)
+SYMBOL(INFINITY, None, <math.h>)
+SYMBOL(INT16_MAX, None, <cstdint>)
+SYMBOL(INT16_MAX, None, <stdint.h>)
+SYMBOL(INT16_MIN, None, <cstdint>)
+SYMBOL(INT16_MIN, None, <stdint.h>)
+SYMBOL(INT32_MAX, None, <cstdint>)
+SYMBOL(INT32_MAX, None, <stdint.h>)
+SYMBOL(INT32_MIN, None, <cstdint>)
+SYMBOL(INT32_MIN, None, <stdint.h>)
+SYMBOL(INT64_MAX, None, <cstdint>)
+SYMBOL(INT64_MAX, None, <stdint.h>)
+SYMBOL(INT64_MIN, None, <cstdint>)
+SYMBOL(INT64_MIN, None, <stdint.h>)
+SYMBOL(INT8_MAX, None, <cstdint>)
+SYMBOL(INT8_MAX, None, <stdint.h>)
+SYMBOL(INT8_MIN, None, <cstdint>)
+SYMBOL(INT8_MIN, None, <stdint.h>)
+SYMBOL(INTMAX_MAX, None, <cstdint>)
+SYMBOL(INTMAX_MAX, None, <stdint.h>)
+SYMBOL(INTMAX_MIN, None, <cstdint>)
+SYMBOL(INTMAX_MIN, None, <stdint.h>)
+SYMBOL(INTPTR_MAX, None, <cstdint>)
+SYMBOL(INTPTR_MAX, None, <stdint.h>)
+SYMBOL(INTPTR_MIN, None, <cstdint>)
+SYMBOL(INTPTR_MIN, None, <stdint.h>)
+SYMBOL(INT_FAST16_MAX, None, <cstdint>)
+SYMBOL(INT_FAST16_MAX, None, <stdint.h>)
+SYMBOL(INT_FAST16_MIN, None, <cstdint>)
+SYMBOL(INT_FAST16_MIN, None, <stdint.h>)
+SYMBOL(INT_FAST32_MAX, None, <cstdint>)
+SYMBOL(INT_FAST32_MAX, None, <stdint.h>)
+SYMBOL(INT_FAST32_MIN, None, <cstdint>)
+SYMBOL(INT_FAST32_MIN, None, <stdint.h>)
+SYMBOL(INT_FAST64_MAX, None, <cstdint>)
+SYMBOL(INT_FAST64_MAX, None, <stdint.h>)
+SYMBOL(INT_FAST64_MIN, None, <cstdint>)
+SYMBOL(INT_FAST64_MIN, None, <stdint.h>)
+SYMBOL(INT_FAST8_MAX, None, <cstdint>)
+SYMBOL(INT_FAST8_MAX, None, <stdint.h>)
+SYMBOL(INT_FAST8_MIN, None, <cstdint>)
+SYMBOL(INT_FAST8_MIN, None, <stdint.h>)
+SYMBOL(INT_LEAST16_MAX, None, <cstdint>)
+SYMBOL(INT_LEAST16_MAX, None, <stdint.h>)
+SYMBOL(INT_LEAST16_MIN, None, <cstdint>)
+SYMBOL(INT_LEAST16_MIN, None, <stdint.h>)
+SYMBOL(INT_LEAST32_MAX, None, <cstdint>)
+SYMBOL(INT_LEAST32_MAX, None, <stdint.h>)
+SYMBOL(INT_LEAST32_MIN, None, <cstdint>)
+SYMBOL(INT_LEAST32_MIN, None, <stdint.h>)
+SYMBOL(INT_LEAST64_MAX, None, <cstdint>)
+SYMBOL(INT_LEAST64_MAX, None, <stdint.h>)
+SYMBOL(INT_LEAST64_MIN, None, <cstdint>)
+SYMBOL(INT_LEAST64_MIN, None, <stdint.h>)
+SYMBOL(INT_LEAST8_MAX, None, <cstdint>)
+SYMBOL(INT_LEAST8_MAX, None, <stdint.h>)
+SYMBOL(INT_LEAST8_MIN, None, <cstdint>)
+SYMBOL(INT_LEAST8_MIN, None, <stdint.h>)
+SYMBOL(INT_MAX, None, <climits>)
+SYMBOL(INT_MAX, None, <limits.h>)
+SYMBOL(INT_MIN, None, <climits>)
+SYMBOL(INT_MIN, None, <limits.h>)
+SYMBOL(LC_ALL, None, <clocale>)
+SYMBOL(LC_ALL, None, <locale.h>)
+SYMBOL(LC_COLLATE, None, <clocale>)
+SYMBOL(LC_COLLATE, None, <locale.h>)
+SYMBOL(LC_CTYPE, None, <clocale>)
+SYMBOL(LC_CTYPE, None, <locale.h>)
+SYMBOL(LC_MONETARY, None, <clocale>)
+SYMBOL(LC_MONETARY, None, <locale.h>)
+SYMBOL(LC_NUMERIC, None, <clocale>)
+SYMBOL(LC_NUMERIC, None, <locale.h>)
+SYMBOL(LC_TIME, None, <clocale>)
+SYMBOL(LC_TIME, None, <locale.h>)
+SYMBOL(LDBL_DECIMAL_DIG, None, <cfloat>)
+SYMBOL(LDBL_DECIMAL_DIG, None, <float.h>)
+SYMBOL(LDBL_DIG, None, <cfloat>)
+SYMBOL(LDBL_DIG, None, <float.h>)
+SYMBOL(LDBL_EPSILON, None, <cfloat>)
+SYMBOL(LDBL_EPSILON, None, <float.h>)
+SYMBOL(LDBL_HAS_SUBNORM, None, <cfloat>)
+SYMBOL(LDBL_HAS_SUBNORM, None, <float.h>)
+SYMBOL(LDBL_MANT_DIG, None, <cfloat>)
+SYMBOL(LDBL_MANT_DIG, None, <float.h>)
+SYMBOL(LDBL_MAX, None, <cfloat>)
+SYMBOL(LDBL_MAX, None, <float.h>)
+SYMBOL(LDBL_MAX_10_EXP, None, <cfloat>)
+SYMBOL(LDBL_MAX_10_EXP, None, <float.h>)
+SYMBOL(LDBL_MAX_EXP, None, <cfloat>)
+SYMBOL(LDBL_MAX_EXP, None, <float.h>)
+SYMBOL(LDBL_MIN, None, <cfloat>)
+SYMBOL(LDBL_MIN, None, <float.h>)
+SYMBOL(LDBL_MIN_10_EXP, None, <cfloat>)
+SYMBOL(LDBL_MIN_10_EXP, None, <float.h>)
+SYMBOL(LDBL_MIN_EXP, None, <cfloat>)
+SYMBOL(LDBL_MIN_EXP, None, <float.h>)
+SYMBOL(LDBL_TRUE_MIN, None, <cfloat>)
+SYMBOL(LDBL_TRUE_MIN, None, <float.h>)
+SYMBOL(LLONG_MAX, None, <climits>)
+SYMBOL(LLONG_MAX, None, <limits.h>)
+SYMBOL(LLONG_MIN, None, <climits>)
+SYMBOL(LLONG_MIN, None, <limits.h>)
+SYMBOL(LONG_MAX, None, <climits>)
+SYMBOL(LONG_MAX, None, <limits.h>)
+SYMBOL(LONG_MIN, None, <climits>)
+SYMBOL(LONG_MIN, None, <limits.h>)
+SYMBOL(L_tmpnam, None, <cstdio>)
+SYMBOL(L_tmpnam, None, <stdio.h>)
+SYMBOL(MATH_ERREXCEPT, None, <cmath>)
+SYMBOL(MATH_ERREXCEPT, None, <math.h>)
+SYMBOL(MATH_ERRNO, None, <cmath>)
+SYMBOL(MATH_ERRNO, None, <math.h>)
+SYMBOL(MB_CUR_MAX, None, <cstdlib>)
+SYMBOL(MB_CUR_MAX, None, <stdlib.h>)
+SYMBOL(MB_LEN_MAX, None, <climits>)
+SYMBOL(MB_LEN_MAX, None, <limits.h>)
+SYMBOL(NAN, None, <cmath>)
+SYMBOL(NAN, None, <math.h>)
+SYMBOL(ONCE_FLAG_INIT, None, <mutex>)
+SYMBOL(PTRDIFF_MAX, None, <cstdint>)
+SYMBOL(PTRDIFF_MAX, None, <stdint.h>)
+SYMBOL(PTRDIFF_MIN, None, <cstdint>)
+SYMBOL(PTRDIFF_MIN, None, <stdint.h>)
+SYMBOL(RAND_MAX, None, <cstdlib>)
+SYMBOL(RAND_MAX, None, <stdlib.h>)
+SYMBOL(SCHAR_MAX, None, <climits>)
+SYMBOL(SCHAR_MAX, None, <limits.h>)
+SYMBOL(SCHAR_MIN, None, <climits>)
+SYMBOL(SCHAR_MIN, None, <limits.h>)
+SYMBOL(SEEK_CUR, None, <cstdio>)
+SYMBOL(SEEK_CUR, None, <stdio.h>)
+SYMBOL(SEEK_END, None, <cstdio>)
+SYMBOL(SEEK_END, None, <stdio.h>)
+SYMBOL(SEEK_SET, None, <cstdio>)
+SYMBOL(SEEK_SET, None, <stdio.h>)
+SYMBOL(SHRT_MAX, None, <climits>)
+SYMBOL(SHRT_MAX, None, <limits.h>)
+SYMBOL(SHRT_MIN, None, <climits>)
+SYMBOL(SHRT_MIN, None, <limits.h>)
+SYMBOL(SIGABRT, None, <csignal>)
+SYMBOL(SIGABRT, None, <signal.h>)
+SYMBOL(SIGFPE, None, <csignal>)
+SYMBOL(SIGFPE, None, <signal.h>)
+SYMBOL(SIGILL, None, <csignal>)
+SYMBOL(SIGILL, None, <signal.h>)
+SYMBOL(SIGINT, None, <csignal>)
+SYMBOL(SIGINT, None, <signal.h>)
+SYMBOL(SIGSEGV, None, <csignal>)
+SYMBOL(SIGSEGV, None, <signal.h>)
+SYMBOL(SIGTERM, None, <csignal>)
+SYMBOL(SIGTERM, None, <signal.h>)
+SYMBOL(SIG_ATOMIC_MAX, None, <cstdint>)
+SYMBOL(SIG_ATOMIC_MAX, None, <stdint.h>)
+SYMBOL(SIG_ATOMIC_MIN, None, <cstdint>)
+SYMBOL(SIG_ATOMIC_MIN, None, <stdint.h>)
+SYMBOL(SIG_DFL, None, <csignal>)
+SYMBOL(SIG_DFL, None, <signal.h>)
+SYMBOL(SIG_ERR, None, <csignal>)
+SYMBOL(SIG_ERR, None, <signal.h>)
+SYMBOL(SIG_IGN, None, <csignal>)
+SYMBOL(SIG_IGN, None, <signal.h>)
+SYMBOL(SIZE_MAX, None, <cstdint>)
+SYMBOL(SIZE_MAX, None, <stdint.h>)
+SYMBOL(TIME_UTC, None, <ctime>)
+SYMBOL(TIME_UTC, None, <time.h>)
+SYMBOL(TMP_MAX, None, <cstdio>)
+SYMBOL(TMP_MAX, None, <stdio.h>)
+SYMBOL(UCHAR_MAX, None, <climits>)
+SYMBOL(UCHAR_MAX, None, <limits.h>)
+SYMBOL(UINT16_MAX, None, <cstdint>)
+SYMBOL(UINT16_MAX, None, <stdint.h>)
+SYMBOL(UINT32_MAX, None, <cstdint>)
+SYMBOL(UINT32_MAX, None, <stdint.h>)
+SYMBOL(UINT64_MAX, None, <cstdint>)
+SYMBOL(UINT64_MAX, None, <stdint.h>)
+SYMBOL(UINT8_MAX, None, <cstdint>)
+SYMBOL(UINT8_MAX, None, <stdint.h>)
+SYMBOL(UINTMAX_MAX, None, <cstdint>)
+SYMBOL(UINTMAX_MAX, None, <stdint.h>)
+SYMBOL(UINTPTR_MAX, None, <cstdint>)
+SYMBOL(UINTPTR_MAX, None, <stdint.h>)
+SYMBOL(UINT_FAST16_MAX, None, <cstdint>)
+SYMBOL(UINT_FAST16_MAX, None, <stdint.h>)
+SYMBOL(UINT_FAST32_MAX, None, <cstdint>)
+SYMBOL(UINT_FAST32_MAX, None, <stdint.h>)
+SYMBOL(UINT_FAST64_MAX, None, <cstdint>)
+SYMBOL(UINT_FAST64_MAX, None, <stdint.h>)
+SYMBOL(UINT_FAST8_MAX, None, <cstdint>)
+SYMBOL(UINT_FAST8_MAX, None, <stdint.h>)
+SYMBOL(UINT_LEAST16_MAX, None, <cstdint>)
+SYMBOL(UINT_LEAST16_MAX, None, <stdint.h>)
+SYMBOL(UINT_LEAST32_MAX, None, <cstdint>)
+SYMBOL(UINT_LEAST32_MAX, None, <stdint.h>)
+SYMBOL(UINT_LEAST64_MAX, None, <cstdint>)
+SYMBOL(UINT_LEAST64_MAX, None, <stdint.h>)
+SYMBOL(UINT_LEAST8_MAX, None, <cstdint>)
+SYMBOL(UINT_LEAST8_MAX, None, <stdint.h>)
+SYMBOL(UINT_MAX, None, <climits>)
+SYMBOL(UINT_MAX, None, <limits.h>)
+SYMBOL(ULLONG_MAX, None, <climits>)
+SYMBOL(ULLONG_MAX, None, <limits.h>)
+SYMBOL(ULONG_MAX, None, <climits>)
+SYMBOL(ULONG_MAX, None, <limits.h>)
+SYMBOL(USHRT_MAX, None, <climits>)
+SYMBOL(USHRT_MAX, None, <limits.h>)
+SYMBOL(WEOF, None, <cwchar>)
+SYMBOL(WEOF, None, <wchar.h>)
+SYMBOL(WINT_MAX, None, <cstdint>)
+SYMBOL(WINT_MAX, None, <stdint.h>)
+SYMBOL(WINT_MIN, None, <cstdint>)
+SYMBOL(WINT_MIN, None, <stdint.h>)
+SYMBOL(_IOFBF, None, <cstdio>)
+SYMBOL(_IOFBF, None, <stdio.h>)
+SYMBOL(_IOLBF, None, <cstdio>)
+SYMBOL(_IOLBF, None, <stdio.h>)
+SYMBOL(_IONBF, None, <cstdio>)
+SYMBOL(_IONBF, None, <stdio.h>)
+SYMBOL(assert, None, <cassert>)
+SYMBOL(assert, None, <assert.h>)
+SYMBOL(errno, None, <cerrno>)
+SYMBOL(errno, None, <errno.h>)
+SYMBOL(math_errhandling, None, <cmath>)
+SYMBOL(math_errhandling, None, <math.h>)
+SYMBOL(offsetof, None, <cstddef>)
+SYMBOL(offsetof, None, <stddef.h>)
+SYMBOL(setjmp, None, <csetjmp>)
+SYMBOL(setjmp, None, <setjmp.h>)
+SYMBOL(stderr, None, <cstdio>)
+SYMBOL(stderr, None, <stdio.h>)
+SYMBOL(stdin, None, <cstdio>)
+SYMBOL(stdin, None, <stdio.h>)
+SYMBOL(stdout, None, <cstdio>)
+SYMBOL(stdout, None, <stdio.h>)
+SYMBOL(va_arg, None, <cstdarg>)
+SYMBOL(va_arg, None, <stdarg.h>)
+SYMBOL(va_copy, None, <cstdarg>)
+SYMBOL(va_copy, None, <stdarg.h>)
+SYMBOL(va_end, None, <cstdarg>)
+SYMBOL(va_end, None, <stdarg.h>)
+SYMBOL(va_start, None, <cstdarg>)
+SYMBOL(va_start, None, <stdarg.h>)
+SYMBOL(FILE, std::, <cstdio>)
+SYMBOL(FILE, None, <cstdio>)
+SYMBOL(FILE, None, <stdio.h>)
+SYMBOL(_Exit, std::, <cstdlib>)
+SYMBOL(_Exit, None, <cstdlib>)
+SYMBOL(_Exit, None, <stdlib.h>)
+SYMBOL(accumulate, std::, <numeric>)
+SYMBOL(acos, std::, <cmath>)
+SYMBOL(acos, None, <cmath>)
+SYMBOL(acos, None, <math.h>)
+SYMBOL(acosf, std::, <cmath>)
+SYMBOL(acosf, None, <cmath>)
+SYMBOL(acosf, None, <math.h>)
+SYMBOL(acosh, std::, <cmath>)
+SYMBOL(acosh, None, <cmath>)
+SYMBOL(acosh, None, <math.h>)
+SYMBOL(acoshf, std::, <cmath>)
+SYMBOL(acoshf, None, <cmath>)
+SYMBOL(acoshf, None, <math.h>)
+SYMBOL(acoshl, std::, <cmath>)
+SYMBOL(acoshl, None, <cmath>)
+SYMBOL(acoshl, None, <math.h>)
+SYMBOL(acosl, std::, <cmath>)
+SYMBOL(acosl, None, <cmath>)
+SYMBOL(acosl, None, <math.h>)
+SYMBOL(add_const, std::, <type_traits>)
+SYMBOL(add_const_t, std::, <type_traits>)
+SYMBOL(add_cv, std::, <type_traits>)
+SYMBOL(add_cv_t, std::, <type_traits>)
+SYMBOL(add_lvalue_reference, std::, <type_traits>)
+SYMBOL(add_lvalue_reference_t, std::, <type_traits>)
+SYMBOL(add_pointer, std::, <type_traits>)
+SYMBOL(add_pointer_t, std::, <type_traits>)
+SYMBOL(add_rvalue_reference, std::, <type_traits>)
+SYMBOL(add_rvalue_reference_t, std::, <type_traits>)
+SYMBOL(add_volatile, std::, <type_traits>)
+SYMBOL(add_volatile_t, std::, <type_traits>)
+SYMBOL(addressof, std::, <memory>)
+SYMBOL(adjacent_difference, std::, <numeric>)
+SYMBOL(adjacent_find, std::, <algorithm>)
+SYMBOL(adopt_lock, std::, <mutex>)
+SYMBOL(adopt_lock_t, std::, <mutex>)
+SYMBOL(advance, std::, <iterator>)
+SYMBOL(align, std::, <memory>)
+SYMBOL(align_val_t, std::, <new>)
+SYMBOL(aligned_alloc, std::, <cstdlib>)
+SYMBOL(aligned_alloc, None, <cstdlib>)
+SYMBOL(aligned_alloc, None, <stdlib.h>)
+SYMBOL(aligned_storage, std::, <type_traits>)
+SYMBOL(aligned_storage_t, std::, <type_traits>)
+SYMBOL(aligned_union, std::, <type_traits>)
+SYMBOL(aligned_union_t, std::, <type_traits>)
+SYMBOL(alignment_of, std::, <type_traits>)
+SYMBOL(alignment_of_v, std::, <type_traits>)
+SYMBOL(all_of, std::, <algorithm>)
+SYMBOL(allocate_at_least, std::, <memory>)
+SYMBOL(allocate_shared, std::, <memory>)
+SYMBOL(allocate_shared_for_overwrite, std::, <memory>)
+SYMBOL(allocation_result, std::, <memory>)
+SYMBOL(allocator, std::, <memory>)
+SYMBOL(allocator_arg, std::, <memory>)
+SYMBOL(allocator_arg_t, std::, <memory>)
+SYMBOL(allocator_traits, std::, <memory>)
+SYMBOL(any, std::, <any>)
+SYMBOL(any_of, std::, <algorithm>)
+SYMBOL(apply, std::, <tuple>)
+SYMBOL(arg, std::, <complex>)
+SYMBOL(array, std::, <array>)
+SYMBOL(as_bytes, std::, <span>)
+SYMBOL(as_const, std::, <utility>)
+SYMBOL(as_writable_bytes, std::, <span>)
+SYMBOL(asctime, std::, <ctime>)
+SYMBOL(asctime, None, <ctime>)
+SYMBOL(asctime, None, <time.h>)
+SYMBOL(asin, std::, <cmath>)
+SYMBOL(asin, None, <cmath>)
+SYMBOL(asin, None, <math.h>)
+SYMBOL(asinf, std::, <cmath>)
+SYMBOL(asinf, None, <cmath>)
+SYMBOL(asinf, None, <math.h>)
+SYMBOL(asinh, std::, <cmath>)
+SYMBOL(asinh, None, <cmath>)
+SYMBOL(asinh, None, <math.h>)
+SYMBOL(asinhf, std::, <cmath>)
+SYMBOL(asinhf, None, <cmath>)
+SYMBOL(asinhf, None, <math.h>)
+SYMBOL(asinhl, std::, <cmath>)
+SYMBOL(asinhl, None, <cmath>)
+SYMBOL(asinhl, None, <math.h>)
+SYMBOL(asinl, std::, <cmath>)
+SYMBOL(asinl, None, <cmath>)
+SYMBOL(asinl, None, <math.h>)
+SYMBOL(assignable_from, std::, <concepts>)
+SYMBOL(assoc_laguerre, std::, <cmath>)
+SYMBOL(assoc_laguerref, std::, <cmath>)
+SYMBOL(assoc_laguerrel, std::, <cmath>)
+SYMBOL(assoc_legendre, std::, <cmath>)
+SYMBOL(assoc_legendref, std::, <cmath>)
+SYMBOL(assoc_legendrel, std::, <cmath>)
+SYMBOL(assume_aligned, std::, <memory>)
+SYMBOL(async, std::, <future>)
+SYMBOL(at_quick_exit, std::, <cstdlib>)
+SYMBOL(at_quick_exit, None, <cstdlib>)
+SYMBOL(at_quick_exit, None, <stdlib.h>)
+SYMBOL(atan, std::, <cmath>)
+SYMBOL(atan, None, <cmath>)
+SYMBOL(atan, None, <math.h>)
+SYMBOL(atan2, std::, <cmath>)
+SYMBOL(atan2, None, <cmath>)
+SYMBOL(atan2, None, <math.h>)
+SYMBOL(atan2f, std::, <cmath>)
+SYMBOL(atan2f, None, <cmath>)
+SYMBOL(atan2f, None, <math.h>)
+SYMBOL(atan2l, std::, <cmath>)
+SYMBOL(atan2l, None, <cmath>)
+SYMBOL(atan2l, None, <math.h>)
+SYMBOL(atanf, std::, <cmath>)
+SYMBOL(atanf, None, <cmath>)
+SYMBOL(atanf, None, <math.h>)
+SYMBOL(atanh, std::, <cmath>)
+SYMBOL(atanh, None, <cmath>)
+SYMBOL(atanh, None, <math.h>)
+SYMBOL(atanhf, std::, <cmath>)
+SYMBOL(atanhf, None, <cmath>)
+SYMBOL(atanhf, None, <math.h>)
+SYMBOL(atanhl, std::, <cmath>)
+SYMBOL(atanhl, None, <cmath>)
+SYMBOL(atanhl, None, <math.h>)
+SYMBOL(atanl, std::, <cmath>)
+SYMBOL(atanl, None, <cmath>)
+SYMBOL(atanl, None, <math.h>)
+SYMBOL(atexit, std::, <cstdlib>)
+SYMBOL(atexit, None, <cstdlib>)
+SYMBOL(atexit, None, <stdlib.h>)
+SYMBOL(atof, std::, <cstdlib>)
+SYMBOL(atof, None, <cstdlib>)
+SYMBOL(atof, None, <stdlib.h>)
+SYMBOL(atoi, std::, <cstdlib>)
+SYMBOL(atoi, None, <cstdlib>)
+SYMBOL(atoi, None, <stdlib.h>)
+SYMBOL(atol, std::, <cstdlib>)
+SYMBOL(atol, None, <cstdlib>)
+SYMBOL(atol, None, <stdlib.h>)
+SYMBOL(atoll, std::, <cstdlib>)
+SYMBOL(atoll, None, <cstdlib>)
+SYMBOL(atoll, None, <stdlib.h>)
+SYMBOL(atomic_compare_exchange_strong, std::, <atomic>)
+SYMBOL(atomic_compare_exchange_strong_explicit, std::, <atomic>)
+SYMBOL(atomic_compare_exchange_weak, std::, <atomic>)
+SYMBOL(atomic_compare_exchange_weak_explicit, std::, <atomic>)
+SYMBOL(atomic_exchange, std::, <atomic>)
+SYMBOL(atomic_exchange_explicit, std::, <atomic>)
+SYMBOL(atomic_fetch_add, std::, <atomic>)
+SYMBOL(atomic_fetch_add_explicit, std::, <atomic>)
+SYMBOL(atomic_fetch_and, std::, <atomic>)
+SYMBOL(atomic_fetch_and_explicit, std::, <atomic>)
+SYMBOL(atomic_fetch_or, std::, <atomic>)
+SYMBOL(atomic_fetch_or_explicit, std::, <atomic>)
+SYMBOL(atomic_fetch_sub, std::, <atomic>)
+SYMBOL(atomic_fetch_sub_explicit, std::, <atomic>)
+SYMBOL(atomic_fetch_xor, std::, <atomic>)
+SYMBOL(atomic_fetch_xor_explicit, std::, <atomic>)
+SYMBOL(atomic_flag, std::, <atomic>)
+SYMBOL(atomic_flag_clear, std::, <atomic>)
+SYMBOL(atomic_flag_clear_explicit, std::, <atomic>)
+SYMBOL(atomic_flag_notify_all, std::, <atomic>)
+SYMBOL(atomic_flag_notify_one, std::, <atomic>)
+SYMBOL(atomic_flag_test, std::, <atomic>)
+SYMBOL(atomic_flag_test_and_set, std::, <atomic>)
+SYMBOL(atomic_flag_test_and_set_explicit, std::, <atomic>)
+SYMBOL(atomic_flag_test_explicit, std::, <atomic>)
+SYMBOL(atomic_flag_wait, std::, <atomic>)
+SYMBOL(atomic_flag_wait_explicit, std::, <atomic>)
+SYMBOL(atomic_init, std::, <atomic>)
+SYMBOL(atomic_is_lock_free, std::, <atomic>)
+SYMBOL(atomic_load, std::, <atomic>)
+SYMBOL(atomic_load_explicit, std::, <atomic>)
+SYMBOL(atomic_notify_all, std::, <atomic>)
+SYMBOL(atomic_notify_one, std::, <atomic>)
+SYMBOL(atomic_ref, std::, <atomic>)
+SYMBOL(atomic_signal_fence, std::, <atomic>)
+SYMBOL(atomic_store, std::, <atomic>)
+SYMBOL(atomic_store_explicit, std::, <atomic>)
+SYMBOL(atomic_thread_fence, std::, <atomic>)
+SYMBOL(atomic_wait, std::, <atomic>)
+SYMBOL(atomic_wait_explicit, std::, <atomic>)
+SYMBOL(atto, std::, <ratio>)
+SYMBOL(auto_ptr, std::, <memory>)
+SYMBOL(back_insert_iterator, std::, <iterator>)
+SYMBOL(back_inserter, std::, <iterator>)
+SYMBOL(bad_alloc, std::, <new>)
+SYMBOL(bad_any_cast, std::, <any>)
+SYMBOL(bad_array_new_length, std::, <new>)
+SYMBOL(bad_cast, std::, <typeinfo>)
+SYMBOL(bad_exception, std::, <exception>)
+SYMBOL(bad_function_call, std::, <functional>)
+SYMBOL(bad_optional_access, std::, <optional>)
+SYMBOL(bad_typeid, std::, <typeinfo>)
+SYMBOL(bad_variant_access, std::, <variant>)
+SYMBOL(bad_weak_ptr, std::, <memory>)
+SYMBOL(barrier, std::, <barrier>)
+SYMBOL(basic_common_reference, std::, <type_traits>)
+SYMBOL(basic_filebuf, std::, <fstream>)
+SYMBOL(basic_filebuf, std::, <iosfwd>)
+SYMBOL(basic_format_arg, std::, <format>)
+SYMBOL(basic_format_args, std::, <format>)
+SYMBOL(basic_format_context, std::, <format>)
+SYMBOL(basic_format_parse_context, std::, <format>)
+SYMBOL(basic_fstream, std::, <fstream>)
+SYMBOL(basic_fstream, std::, <iosfwd>)
+SYMBOL(basic_ifstream, std::, <fstream>)
+SYMBOL(basic_ifstream, std::, <iosfwd>)
+SYMBOL(basic_ios, std::, <ios>)
+SYMBOL(basic_ios, std::, <iostream>)
+SYMBOL(basic_ios, std::, <iosfwd>)
+SYMBOL(basic_iostream, std::, <istream>)
+SYMBOL(basic_iostream, std::, <iostream>)
+SYMBOL(basic_iostream, std::, <iosfwd>)
+SYMBOL(basic_ispanstream, std::, <spanstream>)
+SYMBOL(basic_ispanstream, std::, <iosfwd>)
+SYMBOL(basic_istream, std::, <istream>)
+SYMBOL(basic_istream, std::, <iostream>)
+SYMBOL(basic_istream, std::, <iosfwd>)
+SYMBOL(basic_istringstream, std::, <sstream>)
+SYMBOL(basic_istringstream, std::, <iosfwd>)
+SYMBOL(basic_ofstream, std::, <fstream>)
+SYMBOL(basic_ofstream, std::, <iosfwd>)
+SYMBOL(basic_ospanstream, std::, <spanstream>)
+SYMBOL(basic_ospanstream, std::, <iosfwd>)
+SYMBOL(basic_ostream, std::, <ostream>)
+SYMBOL(basic_ostream, std::, <iostream>)
+SYMBOL(basic_ostream, std::, <iosfwd>)
+SYMBOL(basic_ostringstream, std::, <sstream>)
+SYMBOL(basic_ostringstream, std::, <iosfwd>)
+SYMBOL(basic_osyncstream, std::, <syncstream>)
+SYMBOL(basic_osyncstream, std::, <iosfwd>)
+SYMBOL(basic_regex, std::, <regex>)
+SYMBOL(basic_spanbuf, std::, <spanstream>)
+SYMBOL(basic_spanbuf, std::, <iosfwd>)
+SYMBOL(basic_spanstream, std::, <spanstream>)
+SYMBOL(basic_spanstream, std::, <iosfwd>)
+SYMBOL(basic_stacktrace, std::, <stacktrace>)
+SYMBOL(basic_streambuf, std::, <streambuf>)
+SYMBOL(basic_streambuf, std::, <iostream>)
+SYMBOL(basic_streambuf, std::, <iosfwd>)
+SYMBOL(basic_string, std::, <string>)
+SYMBOL(basic_string_view, std::, <string_view>)
+SYMBOL(basic_stringbuf, std::, <sstream>)
+SYMBOL(basic_stringbuf, std::, <iosfwd>)
+SYMBOL(basic_stringstream, std::, <sstream>)
+SYMBOL(basic_stringstream, std::, <iosfwd>)
+SYMBOL(basic_syncbuf, std::, <syncstream>)
+SYMBOL(basic_syncbuf, std::, <iosfwd>)
+SYMBOL(bernoulli_distribution, std::, <random>)
+SYMBOL(beta, std::, <cmath>)
+SYMBOL(betaf, std::, <cmath>)
+SYMBOL(betal, std::, <cmath>)
+SYMBOL(bidirectional_iterator, std::, <iterator>)
+SYMBOL(bidirectional_iterator_tag, std::, <iterator>)
+SYMBOL(binary_function, std::, <functional>)
+SYMBOL(binary_negate, std::, <functional>)
+SYMBOL(binary_search, std::, <algorithm>)
+SYMBOL(binary_semaphore, std::, <semaphore>)
+SYMBOL(bind, std::, <functional>)
+SYMBOL(bind1st, std::, <functional>)
+SYMBOL(bind2nd, std::, <functional>)
+SYMBOL(bind_back, std::, <functional>)
+SYMBOL(bind_front, std::, <functional>)
+SYMBOL(binder1st, std::, <functional>)
+SYMBOL(binder2nd, std::, <functional>)
+SYMBOL(binomial_distribution, std::, <random>)
+SYMBOL(bit_and, std::, <functional>)
+SYMBOL(bit_cast, std::, <bit>)
+SYMBOL(bit_ceil, std::, <bit>)
+SYMBOL(bit_floor, std::, <bit>)
+SYMBOL(bit_not, std::, <functional>)
+SYMBOL(bit_or, std::, <functional>)
+SYMBOL(bit_width, std::, <bit>)
+SYMBOL(bit_xor, std::, <functional>)
+SYMBOL(bitset, std::, <bitset>)
+SYMBOL(bool_constant, std::, <type_traits>)
+SYMBOL(boolalpha, std::, <ios>)
+SYMBOL(boolalpha, std::, <iostream>)
+SYMBOL(boyer_moore_horspool_searcher, std::, <functional>)
+SYMBOL(boyer_moore_searcher, std::, <functional>)
+SYMBOL(bsearch, std::, <cstdlib>)
+SYMBOL(bsearch, None, <cstdlib>)
+SYMBOL(bsearch, None, <stdlib.h>)
+SYMBOL(btowc, std::, <cwchar>)
+SYMBOL(btowc, None, <cwchar>)
+SYMBOL(btowc, None, <wchar.h>)
+SYMBOL(byte, std::, <cstddef>)
+SYMBOL(byteswap, std::, <bit>)
+SYMBOL(c16rtomb, std::, <cuchar>)
+SYMBOL(c16rtomb, None, <cuchar>)
+SYMBOL(c16rtomb, None, <uchar.h>)
+SYMBOL(c32rtomb, std::, <cuchar>)
+SYMBOL(c32rtomb, None, <cuchar>)
+SYMBOL(c32rtomb, None, <uchar.h>)
+SYMBOL(c8rtomb, std::, <cuchar>)
+SYMBOL(c8rtomb, None, <cuchar>)
+SYMBOL(c8rtomb, None, <uchar.h>)
+SYMBOL(call_once, std::, <mutex>)
+SYMBOL(calloc, std::, <cstdlib>)
+SYMBOL(calloc, None, <cstdlib>)
+SYMBOL(calloc, None, <stdlib.h>)
+SYMBOL(cauchy_distribution, std::, <random>)
+SYMBOL(cbrt, std::, <cmath>)
+SYMBOL(cbrt, None, <cmath>)
+SYMBOL(cbrt, None, <math.h>)
+SYMBOL(cbrtf, std::, <cmath>)
+SYMBOL(cbrtf, None, <cmath>)
+SYMBOL(cbrtf, None, <math.h>)
+SYMBOL(cbrtl, std::, <cmath>)
+SYMBOL(cbrtl, None, <cmath>)
+SYMBOL(cbrtl, None, <math.h>)
+SYMBOL(ceil, std::, <cmath>)
+SYMBOL(ceil, None, <cmath>)
+SYMBOL(ceil, None, <math.h>)
+SYMBOL(ceilf, std::, <cmath>)
+SYMBOL(ceilf, None, <cmath>)
+SYMBOL(ceilf, None, <math.h>)
+SYMBOL(ceill, std::, <cmath>)
+SYMBOL(ceill, None, <cmath>)
+SYMBOL(ceill, None, <math.h>)
+SYMBOL(centi, std::, <ratio>)
+SYMBOL(cerr, std::, <iostream>)
+SYMBOL(char_traits, std::, <string>)
+SYMBOL(chars_format, std::, <charconv>)
+SYMBOL(chi_squared_distribution, std::, <random>)
+SYMBOL(cin, std::, <iostream>)
+SYMBOL(clamp, std::, <algorithm>)
+SYMBOL(clearerr, std::, <cstdio>)
+SYMBOL(clearerr, None, <cstdio>)
+SYMBOL(clearerr, None, <stdio.h>)
+SYMBOL(clock, std::, <ctime>)
+SYMBOL(clock, None, <ctime>)
+SYMBOL(clock, None, <time.h>)
+SYMBOL(clock_t, std::, <ctime>)
+SYMBOL(clock_t, None, <ctime>)
+SYMBOL(clock_t, None, <time.h>)
+SYMBOL(clog, std::, <iostream>)
+SYMBOL(cmatch, std::, <regex>)
+SYMBOL(cmp_equal, std::, <utility>)
+SYMBOL(cmp_greater, std::, <utility>)
+SYMBOL(cmp_greater_equal, std::, <utility>)
+SYMBOL(cmp_less, std::, <utility>)
+SYMBOL(cmp_less_equal, std::, <utility>)
+SYMBOL(cmp_not_equal, std::, <utility>)
+SYMBOL(codecvt, std::, <locale>)
+SYMBOL(codecvt_base, std::, <locale>)
+SYMBOL(codecvt_byname, std::, <locale>)
+SYMBOL(codecvt_mode, std::, <codecvt>)
+SYMBOL(codecvt_utf16, std::, <codecvt>)
+SYMBOL(codecvt_utf8, std::, <codecvt>)
+SYMBOL(codecvt_utf8_utf16, std::, <codecvt>)
+SYMBOL(collate, std::, <locale>)
+SYMBOL(collate_byname, std::, <locale>)
+SYMBOL(common_comparison_category, std::, <compare>)
+SYMBOL(common_comparison_category_t, std::, <compare>)
+SYMBOL(common_iterator, std::, <iterator>)
+SYMBOL(common_reference, std::, <type_traits>)
+SYMBOL(common_reference_t, std::, <type_traits>)
+SYMBOL(common_reference_with, std::, <concepts>)
+SYMBOL(common_type, std::, <type_traits>)
+SYMBOL(common_type_t, std::, <type_traits>)
+SYMBOL(common_with, std::, <concepts>)
+SYMBOL(comp_ellint_1, std::, <cmath>)
+SYMBOL(comp_ellint_1f, std::, <cmath>)
+SYMBOL(comp_ellint_1l, std::, <cmath>)
+SYMBOL(comp_ellint_2, std::, <cmath>)
+SYMBOL(comp_ellint_2f, std::, <cmath>)
+SYMBOL(comp_ellint_2l, std::, <cmath>)
+SYMBOL(comp_ellint_3, std::, <cmath>)
+SYMBOL(comp_ellint_3f, std::, <cmath>)
+SYMBOL(comp_ellint_3l, std::, <cmath>)
+SYMBOL(compare_partial_order_fallback, std::, <compare>)
+SYMBOL(compare_strong_order_fallback, std::, <compare>)
+SYMBOL(compare_three_way_result, std::, <compare>)
+SYMBOL(compare_three_way_result_t, std::, <compare>)
+SYMBOL(compare_weak_order_fallback, std::, <compare>)
+SYMBOL(complex, std::, <complex>)
+SYMBOL(condition_variable, std::, <condition_variable>)
+SYMBOL(condition_variable_any, std::, <condition_variable>)
+SYMBOL(conditional, std::, <type_traits>)
+SYMBOL(conditional_t, std::, <type_traits>)
+SYMBOL(conj, std::, <complex>)
+SYMBOL(conjunction, std::, <type_traits>)
+SYMBOL(conjunction_v, std::, <type_traits>)
+SYMBOL(const_mem_fun1_ref_t, std::, <functional>)
+SYMBOL(const_mem_fun1_t, std::, <functional>)
+SYMBOL(const_mem_fun_ref_t, std::, <functional>)
+SYMBOL(const_mem_fun_t, std::, <functional>)
+SYMBOL(const_pointer_cast, std::, <memory>)
+SYMBOL(construct_at, std::, <memory>)
+SYMBOL(constructible_from, std::, <concepts>)
+SYMBOL(contiguous_iterator, std::, <iterator>)
+SYMBOL(contiguous_iterator_tag, std::, <iterator>)
+SYMBOL(convertible_to, std::, <concepts>)
+SYMBOL(copy, std::, <algorithm>)
+SYMBOL(copy_backward, std::, <algorithm>)
+SYMBOL(copy_constructible, std::, <concepts>)
+SYMBOL(copy_if, std::, <algorithm>)
+SYMBOL(copy_n, std::, <algorithm>)
+SYMBOL(copyable, std::, <concepts>)
+SYMBOL(copysign, std::, <cmath>)
+SYMBOL(copysign, None, <cmath>)
+SYMBOL(copysign, None, <math.h>)
+SYMBOL(copysignf, std::, <cmath>)
+SYMBOL(copysignf, None, <cmath>)
+SYMBOL(copysignf, None, <math.h>)
+SYMBOL(copysignl, std::, <cmath>)
+SYMBOL(copysignl, None, <cmath>)
+SYMBOL(copysignl, None, <math.h>)
+SYMBOL(coroutine_handle, std::, <coroutine>)
+SYMBOL(coroutine_traits, std::, <coroutine>)
+SYMBOL(cos, std::, <cmath>)
+SYMBOL(cos, None, <cmath>)
+SYMBOL(cos, None, <math.h>)
+SYMBOL(cosf, std::, <cmath>)
+SYMBOL(cosf, None, <cmath>)
+SYMBOL(cosf, None, <math.h>)
+SYMBOL(cosh, std::, <cmath>)
+SYMBOL(cosh, None, <cmath>)
+SYMBOL(cosh, None, <math.h>)
+SYMBOL(coshf, std::, <cmath>)
+SYMBOL(coshf, None, <cmath>)
+SYMBOL(coshf, None, <math.h>)
+SYMBOL(coshl, std::, <cmath>)
+SYMBOL(coshl, None, <cmath>)
+SYMBOL(coshl, None, <math.h>)
+SYMBOL(cosl, std::, <cmath>)
+SYMBOL(cosl, None, <cmath>)
+SYMBOL(cosl, None, <math.h>)
+SYMBOL(count, std::, <algorithm>)
+SYMBOL(count_if, std::, <algorithm>)
+SYMBOL(counted_iterator, std::, <iterator>)
+SYMBOL(counting_semaphore, std::, <semaphore>)
+SYMBOL(countl_one, std::, <bit>)
+SYMBOL(countl_zero, std::, <bit>)
+SYMBOL(countr_one, std::, <bit>)
+SYMBOL(countr_zero, std::, <bit>)
+SYMBOL(cout, std::, <iostream>)
+SYMBOL(cref, std::, <functional>)
+SYMBOL(cregex_iterator, std::, <regex>)
+SYMBOL(cregex_token_iterator, std::, <regex>)
+SYMBOL(csub_match, std::, <regex>)
+SYMBOL(ctime, std::, <ctime>)
+SYMBOL(ctime, None, <ctime>)
+SYMBOL(ctime, None, <time.h>)
+SYMBOL(ctype, std::, <locale>)
+SYMBOL(ctype_base, std::, <locale>)
+SYMBOL(ctype_byname, std::, <locale>)
+SYMBOL(current_exception, std::, <exception>)
+SYMBOL(cv_status, std::, <condition_variable>)
+SYMBOL(cyl_bessel_i, std::, <cmath>)
+SYMBOL(cyl_bessel_if, std::, <cmath>)
+SYMBOL(cyl_bessel_il, std::, <cmath>)
+SYMBOL(cyl_bessel_j, std::, <cmath>)
+SYMBOL(cyl_bessel_jf, std::, <cmath>)
+SYMBOL(cyl_bessel_jl, std::, <cmath>)
+SYMBOL(cyl_bessel_k, std::, <cmath>)
+SYMBOL(cyl_bessel_kf, std::, <cmath>)
+SYMBOL(cyl_bessel_kl, std::, <cmath>)
+SYMBOL(cyl_neumann, std::, <cmath>)
+SYMBOL(cyl_neumannf, std::, <cmath>)
+SYMBOL(cyl_neumannl, std::, <cmath>)
+SYMBOL(dec, std::, <ios>)
+SYMBOL(dec, std::, <iostream>)
+SYMBOL(deca, std::, <ratio>)
+SYMBOL(decay, std::, <type_traits>)
+SYMBOL(decay_t, std::, <type_traits>)
+SYMBOL(deci, std::, <ratio>)
+SYMBOL(declare_no_pointers, std::, <memory>)
+SYMBOL(declare_reachable, std::, <memory>)
+SYMBOL(declval, std::, <utility>)
+SYMBOL(default_delete, std::, <memory>)
+SYMBOL(default_initializable, std::, <concepts>)
+SYMBOL(default_random_engine, std::, <random>)
+SYMBOL(default_searcher, std::, <functional>)
+SYMBOL(default_sentinel, std::, <iterator>)
+SYMBOL(default_sentinel_t, std::, <iterator>)
+SYMBOL(defaultfloat, std::, <ios>)
+SYMBOL(defaultfloat, std::, <iostream>)
+SYMBOL(defer_lock, std::, <mutex>)
+SYMBOL(defer_lock_t, std::, <mutex>)
+SYMBOL(denorm_absent, std::, <limits>)
+SYMBOL(denorm_indeterminate, std::, <limits>)
+SYMBOL(denorm_present, std::, <limits>)
+SYMBOL(deque, std::, <deque>)
+SYMBOL(derived_from, std::, <concepts>)
+SYMBOL(destroy, std::, <memory>)
+SYMBOL(destroy_at, std::, <memory>)
+SYMBOL(destroy_n, std::, <memory>)
+SYMBOL(destroying_delete, std::, <new>)
+SYMBOL(destroying_delete_t, std::, <new>)
+SYMBOL(destructible, std::, <concepts>)
+SYMBOL(difftime, std::, <ctime>)
+SYMBOL(difftime, None, <ctime>)
+SYMBOL(difftime, None, <time.h>)
+SYMBOL(disable_sized_sentinel_for, std::, <iterator>)
+SYMBOL(discard_block_engine, std::, <random>)
+SYMBOL(discrete_distribution, std::, <random>)
+SYMBOL(disjunction, std::, <type_traits>)
+SYMBOL(disjunction_v, std::, <type_traits>)
+SYMBOL(distance, std::, <iterator>)
+SYMBOL(div_t, std::, <cstdlib>)
+SYMBOL(div_t, None, <cstdlib>)
+SYMBOL(div_t, None, <stdlib.h>)
+SYMBOL(divides, std::, <functional>)
+SYMBOL(domain_error, std::, <stdexcept>)
+SYMBOL(double_t, std::, <cmath>)
+SYMBOL(double_t, None, <cmath>)
+SYMBOL(double_t, None, <math.h>)
+SYMBOL(dynamic_extent, std::, <span>)
+SYMBOL(dynamic_pointer_cast, std::, <memory>)
+SYMBOL(ellint_1, std::, <cmath>)
+SYMBOL(ellint_1f, std::, <cmath>)
+SYMBOL(ellint_1l, std::, <cmath>)
+SYMBOL(ellint_2, std::, <cmath>)
+SYMBOL(ellint_2f, std::, <cmath>)
+SYMBOL(ellint_2l, std::, <cmath>)
+SYMBOL(ellint_3, std::, <cmath>)
+SYMBOL(ellint_3f, std::, <cmath>)
+SYMBOL(ellint_3l, std::, <cmath>)
+SYMBOL(emit_on_flush, std::, <ostream>)
+SYMBOL(emit_on_flush, std::, <iostream>)
+SYMBOL(enable_if, std::, <type_traits>)
+SYMBOL(enable_if_t, std::, <type_traits>)
+SYMBOL(enable_shared_from_this, std::, <memory>)
+SYMBOL(endian, std::, <bit>)
+SYMBOL(endl, std::, <ostream>)
+SYMBOL(endl, std::, <iostream>)
+SYMBOL(ends, std::, <ostream>)
+SYMBOL(ends, std::, <iostream>)
+SYMBOL(equal, std::, <algorithm>)
+SYMBOL(equal_range, std::, <algorithm>)
+SYMBOL(equal_to, std::, <functional>)
+SYMBOL(equality_comparable, std::, <concepts>)
+SYMBOL(equality_comparable_with, std::, <concepts>)
+SYMBOL(equivalence_relation, std::, <concepts>)
+SYMBOL(erase, std::, <vector>)
+SYMBOL(erase_if, std::, <vector>)
+SYMBOL(erf, std::, <cmath>)
+SYMBOL(erf, None, <cmath>)
+SYMBOL(erf, None, <math.h>)
+SYMBOL(erfc, std::, <cmath>)
+SYMBOL(erfc, None, <cmath>)
+SYMBOL(erfc, None, <math.h>)
+SYMBOL(erfcf, std::, <cmath>)
+SYMBOL(erfcf, None, <cmath>)
+SYMBOL(erfcf, None, <math.h>)
+SYMBOL(erfcl, std::, <cmath>)
+SYMBOL(erfcl, None, <cmath>)
+SYMBOL(erfcl, None, <math.h>)
+SYMBOL(erff, std::, <cmath>)
+SYMBOL(erff, None, <cmath>)
+SYMBOL(erff, None, <math.h>)
+SYMBOL(erfl, std::, <cmath>)
+SYMBOL(erfl, None, <cmath>)
+SYMBOL(erfl, None, <math.h>)
+SYMBOL(errc, std::, <system_error>)
+SYMBOL(error_category, std::, <system_error>)
+SYMBOL(error_code, std::, <system_error>)
+SYMBOL(error_condition, std::, <system_error>)
+SYMBOL(exa, std::, <ratio>)
+SYMBOL(exception, std::, <exception>)
+SYMBOL(exception_ptr, std::, <exception>)
+SYMBOL(exchange, std::, <utility>)
+SYMBOL(exclusive_scan, std::, <numeric>)
+SYMBOL(exit, std::, <cstdlib>)
+SYMBOL(exit, None, <cstdlib>)
+SYMBOL(exit, None, <stdlib.h>)
+SYMBOL(exp, std::, <cmath>)
+SYMBOL(exp, None, <cmath>)
+SYMBOL(exp, None, <math.h>)
+SYMBOL(exp2, std::, <cmath>)
+SYMBOL(exp2, None, <cmath>)
+SYMBOL(exp2, None, <math.h>)
+SYMBOL(exp2f, std::, <cmath>)
+SYMBOL(exp2f, None, <cmath>)
+SYMBOL(exp2f, None, <math.h>)
+SYMBOL(exp2l, std::, <cmath>)
+SYMBOL(exp2l, None, <cmath>)
+SYMBOL(exp2l, None, <math.h>)
+SYMBOL(expf, std::, <cmath>)
+SYMBOL(expf, None, <cmath>)
+SYMBOL(expf, None, <math.h>)
+SYMBOL(expint, std::, <cmath>)
+SYMBOL(expintf, std::, <cmath>)
+SYMBOL(expintl, std::, <cmath>)
+SYMBOL(expl, std::, <cmath>)
+SYMBOL(expl, None, <cmath>)
+SYMBOL(expl, None, <math.h>)
+SYMBOL(expm1, std::, <cmath>)
+SYMBOL(expm1, None, <cmath>)
+SYMBOL(expm1, None, <math.h>)
+SYMBOL(expm1f, std::, <cmath>)
+SYMBOL(expm1f, None, <cmath>)
+SYMBOL(expm1f, None, <math.h>)
+SYMBOL(expm1l, std::, <cmath>)
+SYMBOL(expm1l, None, <cmath>)
+SYMBOL(expm1l, None, <math.h>)
+SYMBOL(exponential_distribution, std::, <random>)
+SYMBOL(extent, std::, <type_traits>)
+SYMBOL(extent_v, std::, <type_traits>)
+SYMBOL(extreme_value_distribution, std::, <random>)
+SYMBOL(fabs, std::, <cmath>)
+SYMBOL(fabs, None, <cmath>)
+SYMBOL(fabs, None, <math.h>)
+SYMBOL(fabsf, std::, <cmath>)
+SYMBOL(fabsf, None, <cmath>)
+SYMBOL(fabsf, None, <math.h>)
+SYMBOL(fabsl, std::, <cmath>)
+SYMBOL(fabsl, None, <cmath>)
+SYMBOL(fabsl, None, <math.h>)
+SYMBOL(false_type, std::, <type_traits>)
+SYMBOL(fclose, std::, <cstdio>)
+SYMBOL(fclose, None, <cstdio>)
+SYMBOL(fclose, None, <stdio.h>)
+SYMBOL(fdim, std::, <cmath>)
+SYMBOL(fdim, None, <cmath>)
+SYMBOL(fdim, None, <math.h>)
+SYMBOL(fdimf, std::, <cmath>)
+SYMBOL(fdimf, None, <cmath>)
+SYMBOL(fdimf, None, <math.h>)
+SYMBOL(fdiml, std::, <cmath>)
+SYMBOL(fdiml, None, <cmath>)
+SYMBOL(fdiml, None, <math.h>)
+SYMBOL(feclearexcept, std::, <cfenv>)
+SYMBOL(feclearexcept, None, <cfenv>)
+SYMBOL(feclearexcept, None, <fenv.h>)
+SYMBOL(fegetenv, std::, <cfenv>)
+SYMBOL(fegetenv, None, <cfenv>)
+SYMBOL(fegetenv, None, <fenv.h>)
+SYMBOL(fegetexceptflag, std::, <cfenv>)
+SYMBOL(fegetexceptflag, None, <cfenv>)
+SYMBOL(fegetexceptflag, None, <fenv.h>)
+SYMBOL(fegetround, std::, <cfenv>)
+SYMBOL(fegetround, None, <cfenv>)
+SYMBOL(fegetround, None, <fenv.h>)
+SYMBOL(feholdexcept, std::, <cfenv>)
+SYMBOL(feholdexcept, None, <cfenv>)
+SYMBOL(feholdexcept, None, <fenv.h>)
+SYMBOL(femto, std::, <ratio>)
+SYMBOL(fenv_t, std::, <cfenv>)
+SYMBOL(fenv_t, None, <cfenv>)
+SYMBOL(fenv_t, None, <fenv.h>)
+SYMBOL(feof, std::, <cstdio>)
+SYMBOL(feof, None, <cstdio>)
+SYMBOL(feof, None, <stdio.h>)
+SYMBOL(feraiseexcept, std::, <cfenv>)
+SYMBOL(feraiseexcept, None, <cfenv>)
+SYMBOL(feraiseexcept, None, <fenv.h>)
+SYMBOL(ferror, std::, <cstdio>)
+SYMBOL(ferror, None, <cstdio>)
+SYMBOL(ferror, None, <stdio.h>)
+SYMBOL(fesetenv, std::, <cfenv>)
+SYMBOL(fesetenv, None, <cfenv>)
+SYMBOL(fesetenv, None, <fenv.h>)
+SYMBOL(fesetexceptflag, std::, <cfenv>)
+SYMBOL(fesetexceptflag, None, <cfenv>)
+SYMBOL(fesetexceptflag, None, <fenv.h>)
+SYMBOL(fesetround, std::, <cfenv>)
+SYMBOL(fesetround, None, <cfenv>)
+SYMBOL(fesetround, None, <fenv.h>)
+SYMBOL(fetestexcept, std::, <cfenv>)
+SYMBOL(fetestexcept, None, <cfenv>)
+SYMBOL(fetestexcept, None, <fenv.h>)
+SYMBOL(feupdateenv, std::, <cfenv>)
+SYMBOL(feupdateenv, None, <cfenv>)
+SYMBOL(feupdateenv, None, <fenv.h>)
+SYMBOL(fexcept_t, std::, <cfenv>)
+SYMBOL(fexcept_t, None, <cfenv>)
+SYMBOL(fexcept_t, None, <fenv.h>)
+SYMBOL(fflush, std::, <cstdio>)
+SYMBOL(fflush, None, <cstdio>)
+SYMBOL(fflush, None, <stdio.h>)
+SYMBOL(fgetc, std::, <cstdio>)
+SYMBOL(fgetc, None, <cstdio>)
+SYMBOL(fgetc, None, <stdio.h>)
+SYMBOL(fgetpos, std::, <cstdio>)
+SYMBOL(fgetpos, None, <cstdio>)
+SYMBOL(fgetpos, None, <stdio.h>)
+SYMBOL(fgets, std::, <cstdio>)
+SYMBOL(fgets, None, <cstdio>)
+SYMBOL(fgets, None, <stdio.h>)
+SYMBOL(fgetwc, std::, <cwchar>)
+SYMBOL(fgetwc, None, <cwchar>)
+SYMBOL(fgetwc, None, <wchar.h>)
+SYMBOL(fgetws, std::, <cwchar>)
+SYMBOL(fgetws, None, <cwchar>)
+SYMBOL(fgetws, None, <wchar.h>)
+SYMBOL(filebuf, std::, <streambuf>)
+SYMBOL(filebuf, std::, <iostream>)
+SYMBOL(filebuf, std::, <iosfwd>)
+SYMBOL(fill, std::, <algorithm>)
+SYMBOL(fill_n, std::, <algorithm>)
+SYMBOL(find, std::, <algorithm>)
+SYMBOL(find_end, std::, <algorithm>)
+SYMBOL(find_first_of, std::, <algorithm>)
+SYMBOL(find_if, std::, <algorithm>)
+SYMBOL(find_if_not, std::, <algorithm>)
+SYMBOL(fisher_f_distribution, std::, <random>)
+SYMBOL(fixed, std::, <ios>)
+SYMBOL(fixed, std::, <iostream>)
+SYMBOL(float_denorm_style, std::, <limits>)
+SYMBOL(float_round_style, std::, <limits>)
+SYMBOL(float_t, std::, <cmath>)
+SYMBOL(float_t, None, <cmath>)
+SYMBOL(float_t, None, <math.h>)
+SYMBOL(floating_point, std::, <concepts>)
+SYMBOL(floor, std::, <cmath>)
+SYMBOL(floor, None, <cmath>)
+SYMBOL(floor, None, <math.h>)
+SYMBOL(floorf, std::, <cmath>)
+SYMBOL(floorf, None, <cmath>)
+SYMBOL(floorf, None, <math.h>)
+SYMBOL(floorl, std::, <cmath>)
+SYMBOL(floorl, None, <cmath>)
+SYMBOL(floorl, None, <math.h>)
+SYMBOL(flush, std::, <ostream>)
+SYMBOL(flush, std::, <iostream>)
+SYMBOL(flush_emit, std::, <ostream>)
+SYMBOL(flush_emit, std::, <iostream>)
+SYMBOL(fma, std::, <cmath>)
+SYMBOL(fma, None, <cmath>)
+SYMBOL(fma, None, <math.h>)
+SYMBOL(fmaf, std::, <cmath>)
+SYMBOL(fmaf, None, <cmath>)
+SYMBOL(fmaf, None, <math.h>)
+SYMBOL(fmal, std::, <cmath>)
+SYMBOL(fmal, None, <cmath>)
+SYMBOL(fmal, None, <math.h>)
+SYMBOL(fmax, std::, <cmath>)
+SYMBOL(fmax, None, <cmath>)
+SYMBOL(fmax, None, <math.h>)
+SYMBOL(fmaxf, std::, <cmath>)
+SYMBOL(fmaxf, None, <cmath>)
+SYMBOL(fmaxf, None, <math.h>)
+SYMBOL(fmaxl, std::, <cmath>)
+SYMBOL(fmaxl, None, <cmath>)
+SYMBOL(fmaxl, None, <math.h>)
+SYMBOL(fmin, std::, <cmath>)
+SYMBOL(fmin, None, <cmath>)
+SYMBOL(fmin, None, <math.h>)
+SYMBOL(fminf, std::, <cmath>)
+SYMBOL(fminf, None, <cmath>)
+SYMBOL(fminf, None, <math.h>)
+SYMBOL(fminl, std::, <cmath>)
+SYMBOL(fminl, None, <cmath>)
+SYMBOL(fminl, None, <math.h>)
+SYMBOL(fmod, std::, <cmath>)
+SYMBOL(fmod, None, <cmath>)
+SYMBOL(fmod, None, <math.h>)
+SYMBOL(fmodf, std::, <cmath>)
+SYMBOL(fmodf, None, <cmath>)
+SYMBOL(fmodf, None, <math.h>)
+SYMBOL(fmodl, std::, <cmath>)
+SYMBOL(fmodl, None, <cmath>)
+SYMBOL(fmodl, None, <math.h>)
+SYMBOL(fopen, std::, <cstdio>)
+SYMBOL(fopen, None, <cstdio>)
+SYMBOL(fopen, None, <stdio.h>)
+SYMBOL(for_each, std::, <algorithm>)
+SYMBOL(for_each_n, std::, <algorithm>)
+SYMBOL(format, std::, <format>)
+SYMBOL(format_args, std::, <format>)
+SYMBOL(format_context, std::, <format>)
+SYMBOL(format_error, std::, <format>)
+SYMBOL(format_parse_context, std::, <format>)
+SYMBOL(format_to, std::, <format>)
+SYMBOL(format_to_n, std::, <format>)
+SYMBOL(format_to_n_result, std::, <format>)
+SYMBOL(formatted_size, std::, <format>)
+SYMBOL(formatter, std::, <format>)
+SYMBOL(forward, std::, <utility>)
+SYMBOL(forward_as_tuple, std::, <tuple>)
+SYMBOL(forward_iterator, std::, <iterator>)
+SYMBOL(forward_iterator_tag, std::, <iterator>)
+SYMBOL(forward_like, std::, <utility>)
+SYMBOL(forward_list, std::, <forward_list>)
+SYMBOL(fpclassify, std::, <cmath>)
+SYMBOL(fpclassify, None, <cmath>)
+SYMBOL(fpclassify, None, <math.h>)
+SYMBOL(fpos, std::, <ios>)
+SYMBOL(fpos, std::, <iostream>)
+SYMBOL(fpos, std::, <iosfwd>)
+SYMBOL(fpos_t, std::, <cstdio>)
+SYMBOL(fpos_t, None, <cstdio>)
+SYMBOL(fpos_t, None, <stdio.h>)
+SYMBOL(fprintf, std::, <cstdio>)
+SYMBOL(fprintf, None, <cstdio>)
+SYMBOL(fprintf, None, <stdio.h>)
+SYMBOL(fputc, std::, <cstdio>)
+SYMBOL(fputc, None, <cstdio>)
+SYMBOL(fputc, None, <stdio.h>)
+SYMBOL(fputs, std::, <cstdio>)
+SYMBOL(fputs, None, <cstdio>)
+SYMBOL(fputs, None, <stdio.h>)
+SYMBOL(fputwc, std::, <cwchar>)
+SYMBOL(fputwc, None, <cwchar>)
+SYMBOL(fputwc, None, <wchar.h>)
+SYMBOL(fputws, std::, <cwchar>)
+SYMBOL(fputws, None, <cwchar>)
+SYMBOL(fputws, None, <wchar.h>)
+SYMBOL(fread, std::, <cstdio>)
+SYMBOL(fread, None, <cstdio>)
+SYMBOL(fread, None, <stdio.h>)
+SYMBOL(free, std::, <cstdlib>)
+SYMBOL(free, None, <cstdlib>)
+SYMBOL(free, None, <stdlib.h>)
+SYMBOL(freopen, std::, <cstdio>)
+SYMBOL(freopen, None, <cstdio>)
+SYMBOL(freopen, None, <stdio.h>)
+SYMBOL(frexp, std::, <cmath>)
+SYMBOL(frexp, None, <cmath>)
+SYMBOL(frexp, None, <math.h>)
+SYMBOL(frexpf, std::, <cmath>)
+SYMBOL(frexpf, None, <cmath>)
+SYMBOL(frexpf, None, <math.h>)
+SYMBOL(frexpl, std::, <cmath>)
+SYMBOL(frexpl, None, <cmath>)
+SYMBOL(frexpl, None, <math.h>)
+SYMBOL(from_chars, std::, <charconv>)
+SYMBOL(from_chars_result, std::, <charconv>)
+SYMBOL(from_range, std::, <ranges>)
+SYMBOL(from_range_t, std::, <ranges>)
+SYMBOL(front_insert_iterator, std::, <iterator>)
+SYMBOL(front_inserter, std::, <iterator>)
+SYMBOL(fscanf, std::, <cstdio>)
+SYMBOL(fscanf, None, <cstdio>)
+SYMBOL(fscanf, None, <stdio.h>)
+SYMBOL(fseek, std::, <cstdio>)
+SYMBOL(fseek, None, <cstdio>)
+SYMBOL(fseek, None, <stdio.h>)
+SYMBOL(fsetpos, std::, <cstdio>)
+SYMBOL(fsetpos, None, <cstdio>)
+SYMBOL(fsetpos, None, <stdio.h>)
+SYMBOL(fstream, std::, <fstream>)
+SYMBOL(fstream, std::, <iosfwd>)
+SYMBOL(ftell, std::, <cstdio>)
+SYMBOL(ftell, None, <cstdio>)
+SYMBOL(ftell, None, <stdio.h>)
+SYMBOL(function, std::, <functional>)
+SYMBOL(future, std::, <future>)
+SYMBOL(future_category, std::, <future>)
+SYMBOL(future_errc, std::, <future>)
+SYMBOL(future_error, std::, <future>)
+SYMBOL(future_status, std::, <future>)
+SYMBOL(fwide, std::, <cwchar>)
+SYMBOL(fwide, None, <cwchar>)
+SYMBOL(fwide, None, <wchar.h>)
+SYMBOL(fwprintf, std::, <cwchar>)
+SYMBOL(fwprintf, None, <cwchar>)
+SYMBOL(fwprintf, None, <wchar.h>)
+SYMBOL(fwrite, std::, <cstdio>)
+SYMBOL(fwrite, None, <cstdio>)
+SYMBOL(fwrite, None, <stdio.h>)
+SYMBOL(fwscanf, std::, <cwchar>)
+SYMBOL(fwscanf, None, <cwchar>)
+SYMBOL(fwscanf, None, <wchar.h>)
+SYMBOL(gamma_distribution, std::, <random>)
+SYMBOL(gcd, std::, <numeric>)
+SYMBOL(generate, std::, <algorithm>)
+SYMBOL(generate_canonical, std::, <random>)
+SYMBOL(generate_n, std::, <algorithm>)
+SYMBOL(generic_category, std::, <system_error>)
+SYMBOL(geometric_distribution, std::, <random>)
+SYMBOL(get_deleter, std::, <memory>)
+SYMBOL(get_if, std::, <variant>)
+SYMBOL(get_money, std::, <iomanip>)
+SYMBOL(get_new_handler, std::, <new>)
+SYMBOL(get_pointer_safety, std::, <memory>)
+SYMBOL(get_temporary_buffer, std::, <memory>)
+SYMBOL(get_terminate, std::, <exception>)
+SYMBOL(get_time, std::, <iomanip>)
+SYMBOL(get_unexpected, std::, <exception>)
+SYMBOL(getc, std::, <cstdio>)
+SYMBOL(getc, None, <cstdio>)
+SYMBOL(getc, None, <stdio.h>)
+SYMBOL(getchar, std::, <cstdio>)
+SYMBOL(getchar, None, <cstdio>)
+SYMBOL(getchar, None, <stdio.h>)
+SYMBOL(getenv, std::, <cstdlib>)
+SYMBOL(getenv, None, <cstdlib>)
+SYMBOL(getenv, None, <stdlib.h>)
+SYMBOL(getline, std::, <string>)
+SYMBOL(gets, std::, <cstdio>)
+SYMBOL(gets, None, <cstdio>)
+SYMBOL(gets, None, <stdio.h>)
+SYMBOL(getwc, std::, <cwchar>)
+SYMBOL(getwc, None, <cwchar>)
+SYMBOL(getwc, None, <wchar.h>)
+SYMBOL(getwchar, std::, <cwchar>)
+SYMBOL(getwchar, None, <cwchar>)
+SYMBOL(getwchar, None, <wchar.h>)
+SYMBOL(giga, std::, <ratio>)
+SYMBOL(gmtime, std::, <ctime>)
+SYMBOL(gmtime, None, <ctime>)
+SYMBOL(gmtime, None, <time.h>)
+SYMBOL(greater, std::, <functional>)
+SYMBOL(greater_equal, std::, <functional>)
+SYMBOL(gslice, std::, <valarray>)
+SYMBOL(gslice_array, std::, <valarray>)
+SYMBOL(hardware_constructive_interference_size, std::, <new>)
+SYMBOL(hardware_destructive_interference_size, std::, <new>)
+SYMBOL(has_facet, std::, <locale>)
+SYMBOL(has_single_bit, std::, <bit>)
+SYMBOL(has_unique_object_representations, std::, <type_traits>)
+SYMBOL(has_unique_object_representations_v, std::, <type_traits>)
+SYMBOL(has_virtual_destructor, std::, <type_traits>)
+SYMBOL(has_virtual_destructor_v, std::, <type_traits>)
+SYMBOL(hash, std::, <functional>)
+SYMBOL(hecto, std::, <ratio>)
+SYMBOL(hermite, std::, <cmath>)
+SYMBOL(hermitef, std::, <cmath>)
+SYMBOL(hermitel, std::, <cmath>)
+SYMBOL(hex, std::, <ios>)
+SYMBOL(hex, std::, <iostream>)
+SYMBOL(hexfloat, std::, <ios>)
+SYMBOL(hexfloat, std::, <iostream>)
+SYMBOL(holds_alternative, std::, <variant>)
+SYMBOL(hypot, std::, <cmath>)
+SYMBOL(hypot, None, <cmath>)
+SYMBOL(hypot, None, <math.h>)
+SYMBOL(hypotf, std::, <cmath>)
+SYMBOL(hypotf, None, <cmath>)
+SYMBOL(hypotf, None, <math.h>)
+SYMBOL(hypotl, std::, <cmath>)
+SYMBOL(hypotl, None, <cmath>)
+SYMBOL(hypotl, None, <math.h>)
+SYMBOL(identity, std::, <functional>)
+SYMBOL(ifstream, std::, <fstream>)
+SYMBOL(ifstream, std::, <iosfwd>)
+SYMBOL(ignore, std::, <tuple>)
+SYMBOL(ilogb, std::, <cmath>)
+SYMBOL(ilogb, None, <cmath>)
+SYMBOL(ilogb, None, <math.h>)
+SYMBOL(ilogbf, std::, <cmath>)
+SYMBOL(ilogbf, None, <cmath>)
+SYMBOL(ilogbf, None, <math.h>)
+SYMBOL(ilogbl, std::, <cmath>)
+SYMBOL(ilogbl, None, <cmath>)
+SYMBOL(ilogbl, None, <math.h>)
+SYMBOL(imag, std::, <complex>)
+SYMBOL(imaxabs, std::, <cinttypes>)
+SYMBOL(imaxabs, None, <cinttypes>)
+SYMBOL(imaxabs, None, <inttypes.h>)
+SYMBOL(imaxdiv, std::, <cinttypes>)
+SYMBOL(imaxdiv, None, <cinttypes>)
+SYMBOL(imaxdiv, None, <inttypes.h>)
+SYMBOL(imaxdiv_t, std::, <cinttypes>)
+SYMBOL(imaxdiv_t, None, <cinttypes>)
+SYMBOL(imaxdiv_t, None, <inttypes.h>)
+SYMBOL(in_place, std::, <utility>)
+SYMBOL(in_place_index, std::, <utility>)
+SYMBOL(in_place_index_t, std::, <utility>)
+SYMBOL(in_place_t, std::, <utility>)
+SYMBOL(in_place_type, std::, <utility>)
+SYMBOL(in_place_type_t, std::, <utility>)
+SYMBOL(in_range, std::, <utility>)
+SYMBOL(includes, std::, <algorithm>)
+SYMBOL(inclusive_scan, std::, <numeric>)
+SYMBOL(incrementable, std::, <iterator>)
+SYMBOL(incrementable_traits, std::, <iterator>)
+SYMBOL(independent_bits_engine, std::, <random>)
+SYMBOL(indirect_array, std::, <valarray>)
+SYMBOL(indirect_binary_predicate, std::, <iterator>)
+SYMBOL(indirect_equivalence_relation, std::, <iterator>)
+SYMBOL(indirect_result_t, std::, <iterator>)
+SYMBOL(indirect_strict_weak_order, std::, <iterator>)
+SYMBOL(indirect_unary_predicate, std::, <iterator>)
+SYMBOL(indirectly_comparable, std::, <iterator>)
+SYMBOL(indirectly_copyable, std::, <iterator>)
+SYMBOL(indirectly_copyable_storable, std::, <iterator>)
+SYMBOL(indirectly_movable, std::, <iterator>)
+SYMBOL(indirectly_movable_storable, std::, <iterator>)
+SYMBOL(indirectly_readable, std::, <iterator>)
+SYMBOL(indirectly_readable_traits, std::, <iterator>)
+SYMBOL(indirectly_regular_unary_invocable, std::, <iterator>)
+SYMBOL(indirectly_swappable, std::, <iterator>)
+SYMBOL(indirectly_unary_invocable, std::, <iterator>)
+SYMBOL(indirectly_writable, std::, <iterator>)
+SYMBOL(initializer_list, std::, <initializer_list>)
+SYMBOL(inner_product, std::, <numeric>)
+SYMBOL(inout_ptr, std::, <memory>)
+SYMBOL(inout_ptr_t, std::, <memory>)
+SYMBOL(inplace_merge, std::, <algorithm>)
+SYMBOL(input_iterator, std::, <iterator>)
+SYMBOL(input_iterator_tag, std::, <iterator>)
+SYMBOL(input_or_output_iterator, std::, <iterator>)
+SYMBOL(insert_iterator, std::, <iterator>)
+SYMBOL(inserter, std::, <iterator>)
+SYMBOL(int16_t, std::, <cstdint>)
+SYMBOL(int16_t, None, <cstdint>)
+SYMBOL(int16_t, None, <stdint.h>)
+SYMBOL(int32_t, std::, <cstdint>)
+SYMBOL(int32_t, None, <cstdint>)
+SYMBOL(int32_t, None, <stdint.h>)
+SYMBOL(int64_t, std::, <cstdint>)
+SYMBOL(int64_t, None, <cstdint>)
+SYMBOL(int64_t, None, <stdint.h>)
+SYMBOL(int8_t, std::, <cstdint>)
+SYMBOL(int8_t, None, <cstdint>)
+SYMBOL(int8_t, None, <stdint.h>)
+SYMBOL(int_fast16_t, std::, <cstdint>)
+SYMBOL(int_fast16_t, None, <cstdint>)
+SYMBOL(int_fast16_t, None, <stdint.h>)
+SYMBOL(int_fast32_t, std::, <cstdint>)
+SYMBOL(int_fast32_t, None, <cstdint>)
+SYMBOL(int_fast32_t, None, <stdint.h>)
+SYMBOL(int_fast64_t, std::, <cstdint>)
+SYMBOL(int_fast64_t, None, <cstdint>)
+SYMBOL(int_fast64_t, None, <stdint.h>)
+SYMBOL(int_fast8_t, std::, <cstdint>)
+SYMBOL(int_fast8_t, None, <cstdint>)
+SYMBOL(int_fast8_t, None, <stdint.h>)
+SYMBOL(int_least16_t, std::, <cstdint>)
+SYMBOL(int_least16_t, None, <cstdint>)
+SYMBOL(int_least16_t, None, <stdint.h>)
+SYMBOL(int_least32_t, std::, <cstdint>)
+SYMBOL(int_least32_t, None, <cstdint>)
+SYMBOL(int_least32_t, None, <stdint.h>)
+SYMBOL(int_least64_t, std::, <cstdint>)
+SYMBOL(int_least64_t, None, <cstdint>)
+SYMBOL(int_least64_t, None, <stdint.h>)
+SYMBOL(int_least8_t, std::, <cstdint>)
+SYMBOL(int_least8_t, None, <cstdint>)
+SYMBOL(int_least8_t, None, <stdint.h>)
+SYMBOL(integer_sequence, std::, <utility>)
+SYMBOL(integral, std::, <concepts>)
+SYMBOL(integral_constant, std::, <type_traits>)
+SYMBOL(internal, std::, <ios>)
+SYMBOL(internal, std::, <iostream>)
+SYMBOL(intmax_t, std::, <cstdint>)
+SYMBOL(intmax_t, None, <cstdint>)
+SYMBOL(intmax_t, None, <stdint.h>)
+SYMBOL(intptr_t, std::, <cstdint>)
+SYMBOL(intptr_t, None, <cstdint>)
+SYMBOL(intptr_t, None, <stdint.h>)
+SYMBOL(invalid_argument, std::, <stdexcept>)
+SYMBOL(invocable, std::, <concepts>)
+SYMBOL(invoke, std::, <functional>)
+SYMBOL(invoke_r, std::, <functional>)
+SYMBOL(invoke_result, std::, <type_traits>)
+SYMBOL(invoke_result_t, std::, <type_traits>)
+SYMBOL(io_errc, std::, <ios>)
+SYMBOL(io_errc, std::, <iostream>)
+SYMBOL(io_state, std::, <ios>)
+SYMBOL(io_state, std::, <iostream>)
+SYMBOL(ios, std::, <ios>)
+SYMBOL(ios, std::, <iostream>)
+SYMBOL(ios, std::, <iosfwd>)
+SYMBOL(ios_base, std::, <ios>)
+SYMBOL(ios_base, std::, <iostream>)
+SYMBOL(iostream, std::, <istream>)
+SYMBOL(iostream, std::, <iostream>)
+SYMBOL(iostream, std::, <iosfwd>)
+SYMBOL(iostream_category, std::, <ios>)
+SYMBOL(iostream_category, std::, <iostream>)
+SYMBOL(iota, std::, <numeric>)
+SYMBOL(is_abstract, std::, <type_traits>)
+SYMBOL(is_abstract_v, std::, <type_traits>)
+SYMBOL(is_aggregate, std::, <type_traits>)
+SYMBOL(is_aggregate_v, std::, <type_traits>)
+SYMBOL(is_arithmetic, std::, <type_traits>)
+SYMBOL(is_arithmetic_v, std::, <type_traits>)
+SYMBOL(is_array, std::, <type_traits>)
+SYMBOL(is_array_v, std::, <type_traits>)
+SYMBOL(is_assignable, std::, <type_traits>)
+SYMBOL(is_assignable_v, std::, <type_traits>)
+SYMBOL(is_base_of, std::, <type_traits>)
+SYMBOL(is_base_of_v, std::, <type_traits>)
+SYMBOL(is_bind_expression, std::, <functional>)
+SYMBOL(is_bind_expression_v, std::, <functional>)
+SYMBOL(is_bounded_array, std::, <type_traits>)
+SYMBOL(is_bounded_array_v, std::, <type_traits>)
+SYMBOL(is_class, std::, <type_traits>)
+SYMBOL(is_class_v, std::, <type_traits>)
+SYMBOL(is_compound, std::, <type_traits>)
+SYMBOL(is_compound_v, std::, <type_traits>)
+SYMBOL(is_const, std::, <type_traits>)
+SYMBOL(is_const_v, std::, <type_traits>)
+SYMBOL(is_constant_evaluated, std::, <type_traits>)
+SYMBOL(is_constructible, std::, <type_traits>)
+SYMBOL(is_constructible_v, std::, <type_traits>)
+SYMBOL(is_convertible, std::, <type_traits>)
+SYMBOL(is_convertible_v, std::, <type_traits>)
+SYMBOL(is_copy_assignable, std::, <type_traits>)
+SYMBOL(is_copy_assignable_v, std::, <type_traits>)
+SYMBOL(is_copy_constructible, std::, <type_traits>)
+SYMBOL(is_copy_constructible_v, std::, <type_traits>)
+SYMBOL(is_corresponding_member, std::, <type_traits>)
+SYMBOL(is_default_constructible, std::, <type_traits>)
+SYMBOL(is_default_constructible_v, std::, <type_traits>)
+SYMBOL(is_destructible, std::, <type_traits>)
+SYMBOL(is_destructible_v, std::, <type_traits>)
+SYMBOL(is_empty, std::, <type_traits>)
+SYMBOL(is_empty_v, std::, <type_traits>)
+SYMBOL(is_enum, std::, <type_traits>)
+SYMBOL(is_enum_v, std::, <type_traits>)
+SYMBOL(is_eq, std::, <compare>)
+SYMBOL(is_error_code_enum, std::, <system_error>)
+SYMBOL(is_error_condition_enum, std::, <system_error>)
+SYMBOL(is_error_condition_enum_v, std::, <system_error>)
+SYMBOL(is_execution_policy, std::, <execution>)
+SYMBOL(is_execution_policy_v, std::, <execution>)
+SYMBOL(is_final, std::, <type_traits>)
+SYMBOL(is_final_v, std::, <type_traits>)
+SYMBOL(is_floating_point, std::, <type_traits>)
+SYMBOL(is_floating_point_v, std::, <type_traits>)
+SYMBOL(is_function, std::, <type_traits>)
+SYMBOL(is_function_v, std::, <type_traits>)
+SYMBOL(is_fundamental, std::, <type_traits>)
+SYMBOL(is_fundamental_v, std::, <type_traits>)
+SYMBOL(is_gt, std::, <compare>)
+SYMBOL(is_gteq, std::, <compare>)
+SYMBOL(is_heap, std::, <algorithm>)
+SYMBOL(is_heap_until, std::, <algorithm>)
+SYMBOL(is_integral, std::, <type_traits>)
+SYMBOL(is_integral_v, std::, <type_traits>)
+SYMBOL(is_invocable, std::, <type_traits>)
+SYMBOL(is_invocable_r, std::, <type_traits>)
+SYMBOL(is_invocable_r_v, std::, <type_traits>)
+SYMBOL(is_invocable_v, std::, <type_traits>)
+SYMBOL(is_layout_compatible, std::, <type_traits>)
+SYMBOL(is_layout_compatible_v, std::, <type_traits>)
+SYMBOL(is_literal_type, std::, <type_traits>)
+SYMBOL(is_literal_type_v, std::, <type_traits>)
+SYMBOL(is_lt, std::, <compare>)
+SYMBOL(is_lteq, std::, <compare>)
+SYMBOL(is_lvalue_reference, std::, <type_traits>)
+SYMBOL(is_lvalue_reference_v, std::, <type_traits>)
+SYMBOL(is_member_function_pointer, std::, <type_traits>)
+SYMBOL(is_member_function_pointer_v, std::, <type_traits>)
+SYMBOL(is_member_object_pointer, std::, <type_traits>)
+SYMBOL(is_member_object_pointer_v, std::, <type_traits>)
+SYMBOL(is_member_pointer, std::, <type_traits>)
+SYMBOL(is_member_pointer_v, std::, <type_traits>)
+SYMBOL(is_move_assignable, std::, <type_traits>)
+SYMBOL(is_move_assignable_v, std::, <type_traits>)
+SYMBOL(is_move_constructible, std::, <type_traits>)
+SYMBOL(is_move_constructible_v, std::, <type_traits>)
+SYMBOL(is_neq, std::, <compare>)
+SYMBOL(is_nothrow_assignable, std::, <type_traits>)
+SYMBOL(is_nothrow_assignable_v, std::, <type_traits>)
+SYMBOL(is_nothrow_constructible, std::, <type_traits>)
+SYMBOL(is_nothrow_constructible_v, std::, <type_traits>)
+SYMBOL(is_nothrow_convertible, std::, <type_traits>)
+SYMBOL(is_nothrow_convertible_v, std::, <type_traits>)
+SYMBOL(is_nothrow_copy_assignable, std::, <type_traits>)
+SYMBOL(is_nothrow_copy_assignable_v, std::, <type_traits>)
+SYMBOL(is_nothrow_copy_constructible, std::, <type_traits>)
+SYMBOL(is_nothrow_copy_constructible_v, std::, <type_traits>)
+SYMBOL(is_nothrow_default_constructible, std::, <type_traits>)
+SYMBOL(is_nothrow_default_constructible_v, std::, <type_traits>)
+SYMBOL(is_nothrow_destructible, std::, <type_traits>)
+SYMBOL(is_nothrow_destructible_v, std::, <type_traits>)
+SYMBOL(is_nothrow_invocable, std::, <type_traits>)
+SYMBOL(is_nothrow_invocable_r, std::, <type_traits>)
+SYMBOL(is_nothrow_invocable_r_v, std::, <type_traits>)
+SYMBOL(is_nothrow_invocable_v, std::, <type_traits>)
+SYMBOL(is_nothrow_move_assignable, std::, <type_traits>)
+SYMBOL(is_nothrow_move_assignable_v, std::, <type_traits>)
+SYMBOL(is_nothrow_move_constructible, std::, <type_traits>)
+SYMBOL(is_nothrow_move_constructible_v, std::, <type_traits>)
+SYMBOL(is_nothrow_swappable, std::, <type_traits>)
+SYMBOL(is_nothrow_swappable_v, std::, <type_traits>)
+SYMBOL(is_nothrow_swappable_with, std::, <type_traits>)
+SYMBOL(is_nothrow_swappable_with_v, std::, <type_traits>)
+SYMBOL(is_null_pointer, std::, <type_traits>)
+SYMBOL(is_null_pointer_v, std::, <type_traits>)
+SYMBOL(is_object, std::, <type_traits>)
+SYMBOL(is_object_v, std::, <type_traits>)
+SYMBOL(is_partitioned, std::, <algorithm>)
+SYMBOL(is_permutation, std::, <algorithm>)
+SYMBOL(is_placeholder, std::, <functional>)
+SYMBOL(is_placeholder_v, std::, <functional>)
+SYMBOL(is_pod, std::, <type_traits>)
+SYMBOL(is_pod_v, std::, <type_traits>)
+SYMBOL(is_pointer, std::, <type_traits>)
+SYMBOL(is_pointer_interconvertible_base_of, std::, <type_traits>)
+SYMBOL(is_pointer_interconvertible_base_of_v, std::, <type_traits>)
+SYMBOL(is_pointer_interconvertible_with_class, std::, <type_traits>)
+SYMBOL(is_pointer_v, std::, <type_traits>)
+SYMBOL(is_polymorphic, std::, <type_traits>)
+SYMBOL(is_polymorphic_v, std::, <type_traits>)
+SYMBOL(is_reference, std::, <type_traits>)
+SYMBOL(is_reference_v, std::, <type_traits>)
+SYMBOL(is_rvalue_reference, std::, <type_traits>)
+SYMBOL(is_rvalue_reference_v, std::, <type_traits>)
+SYMBOL(is_same, std::, <type_traits>)
+SYMBOL(is_same_v, std::, <type_traits>)
+SYMBOL(is_scalar, std::, <type_traits>)
+SYMBOL(is_scalar_v, std::, <type_traits>)
+SYMBOL(is_scoped_enum, std::, <type_traits>)
+SYMBOL(is_scoped_enum_v, std::, <type_traits>)
+SYMBOL(is_signed, std::, <type_traits>)
+SYMBOL(is_signed_v, std::, <type_traits>)
+SYMBOL(is_sorted, std::, <algorithm>)
+SYMBOL(is_sorted_until, std::, <algorithm>)
+SYMBOL(is_standard_layout, std::, <type_traits>)
+SYMBOL(is_standard_layout_v, std::, <type_traits>)
+SYMBOL(is_swappable, std::, <type_traits>)
+SYMBOL(is_swappable_v, std::, <type_traits>)
+SYMBOL(is_swappable_with, std::, <type_traits>)
+SYMBOL(is_swappable_with_v, std::, <type_traits>)
+SYMBOL(is_trivial, std::, <type_traits>)
+SYMBOL(is_trivial_v, std::, <type_traits>)
+SYMBOL(is_trivially_assignable, std::, <type_traits>)
+SYMBOL(is_trivially_assignable_v, std::, <type_traits>)
+SYMBOL(is_trivially_constructible, std::, <type_traits>)
+SYMBOL(is_trivially_constructible_v, std::, <type_traits>)
+SYMBOL(is_trivially_copy_assignable, std::, <type_traits>)
+SYMBOL(is_trivially_copy_assignable_v, std::, <type_traits>)
+SYMBOL(is_trivially_copy_constructible, std::, <type_traits>)
+SYMBOL(is_trivially_copy_constructible_v, std::, <type_traits>)
+SYMBOL(is_trivially_copyable, std::, <type_traits>)
+SYMBOL(is_trivially_copyable_v, std::, <type_traits>)
+SYMBOL(is_trivially_default_constructible, std::, <type_traits>)
+SYMBOL(is_trivially_default_constructible_v, std::, <type_traits>)
+SYMBOL(is_trivially_destructible, std::, <type_traits>)
+SYMBOL(is_trivially_destructible_v, std::, <type_traits>)
+SYMBOL(is_trivially_move_assignable, std::, <type_traits>)
+SYMBOL(is_trivially_move_assignable_v, std::, <type_traits>)
+SYMBOL(is_trivially_move_constructible, std::, <type_traits>)
+SYMBOL(is_trivially_move_constructible_v, std::, <type_traits>)
+SYMBOL(is_unbounded_array, std::, <type_traits>)
+SYMBOL(is_unbounded_array_v, std::, <type_traits>)
+SYMBOL(is_union, std::, <type_traits>)
+SYMBOL(is_union_v, std::, <type_traits>)
+SYMBOL(is_unsigned, std::, <type_traits>)
+SYMBOL(is_unsigned_v, std::, <type_traits>)
+SYMBOL(is_void, std::, <type_traits>)
+SYMBOL(is_void_v, std::, <type_traits>)
+SYMBOL(is_volatile, std::, <type_traits>)
+SYMBOL(is_volatile_v, std::, <type_traits>)
+SYMBOL(isalnum, std::, <cctype>)
+SYMBOL(isalnum, None, <cctype>)
+SYMBOL(isalnum, None, <ctype.h>)
+SYMBOL(isalpha, std::, <cctype>)
+SYMBOL(isalpha, None, <cctype>)
+SYMBOL(isalpha, None, <ctype.h>)
+SYMBOL(isblank, std::, <cctype>)
+SYMBOL(isblank, None, <cctype>)
+SYMBOL(isblank, None, <ctype.h>)
+SYMBOL(iscntrl, std::, <cctype>)
+SYMBOL(iscntrl, None, <cctype>)
+SYMBOL(iscntrl, None, <ctype.h>)
+SYMBOL(isdigit, std::, <cctype>)
+SYMBOL(isdigit, None, <cctype>)
+SYMBOL(isdigit, None, <ctype.h>)
+SYMBOL(isfinite, std::, <cmath>)
+SYMBOL(isfinite, None, <cmath>)
+SYMBOL(isfinite, None, <math.h>)
+SYMBOL(isgraph, std::, <cctype>)
+SYMBOL(isgraph, None, <cctype>)
+SYMBOL(isgraph, None, <ctype.h>)
+SYMBOL(isgreater, std::, <cmath>)
+SYMBOL(isgreater, None, <cmath>)
+SYMBOL(isgreater, None, <math.h>)
+SYMBOL(isgreaterequal, std::, <cmath>)
+SYMBOL(isgreaterequal, None, <cmath>)
+SYMBOL(isgreaterequal, None, <math.h>)
+SYMBOL(isinf, std::, <cmath>)
+SYMBOL(isinf, None, <cmath>)
+SYMBOL(isinf, None, <math.h>)
+SYMBOL(isless, std::, <cmath>)
+SYMBOL(isless, None, <cmath>)
+SYMBOL(isless, None, <math.h>)
+SYMBOL(islessequal, std::, <cmath>)
+SYMBOL(islessequal, None, <cmath>)
+SYMBOL(islessequal, None, <math.h>)
+SYMBOL(islessgreater, std::, <cmath>)
+SYMBOL(islessgreater, None, <cmath>)
+SYMBOL(islessgreater, None, <math.h>)
+SYMBOL(islower, std::, <cctype>)
+SYMBOL(islower, None, <cctype>)
+SYMBOL(islower, None, <ctype.h>)
+SYMBOL(isnan, std::, <cmath>)
+SYMBOL(isnan, None, <cmath>)
+SYMBOL(isnan, None, <math.h>)
+SYMBOL(isnormal, std::, <cmath>)
+SYMBOL(isnormal, None, <cmath>)
+SYMBOL(isnormal, None, <math.h>)
+SYMBOL(ispanstream, std::, <spanstream>)
+SYMBOL(ispanstream, std::, <iosfwd>)
+SYMBOL(isprint, std::, <cctype>)
+SYMBOL(isprint, None, <cctype>)
+SYMBOL(isprint, None, <ctype.h>)
+SYMBOL(ispunct, std::, <cctype>)
+SYMBOL(ispunct, None, <cctype>)
+SYMBOL(ispunct, None, <ctype.h>)
+SYMBOL(isspace, std::, <cctype>)
+SYMBOL(isspace, None, <cctype>)
+SYMBOL(isspace, None, <ctype.h>)
+SYMBOL(istream, std::, <istream>)
+SYMBOL(istream, std::, <iostream>)
+SYMBOL(istream, std::, <iosfwd>)
+SYMBOL(istream_iterator, std::, <iterator>)
+SYMBOL(istreambuf_iterator, std::, <iterator>)
+SYMBOL(istreambuf_iterator, std::, <iosfwd>)
+SYMBOL(istringstream, std::, <sstream>)
+SYMBOL(istringstream, std::, <iosfwd>)
+SYMBOL(istrstream, std::, <strstream>)
+SYMBOL(isunordered, std::, <cmath>)
+SYMBOL(isunordered, None, <cmath>)
+SYMBOL(isunordered, None, <math.h>)
+SYMBOL(isupper, std::, <cctype>)
+SYMBOL(isupper, None, <cctype>)
+SYMBOL(isupper, None, <ctype.h>)
+SYMBOL(iswalnum, std::, <cwctype>)
+SYMBOL(iswalnum, None, <cwctype>)
+SYMBOL(iswalnum, None, <wctype.h>)
+SYMBOL(iswalpha, std::, <cwctype>)
+SYMBOL(iswalpha, None, <cwctype>)
+SYMBOL(iswalpha, None, <wctype.h>)
+SYMBOL(iswblank, std::, <cwctype>)
+SYMBOL(iswblank, None, <cwctype>)
+SYMBOL(iswblank, None, <wctype.h>)
+SYMBOL(iswcntrl, std::, <cwctype>)
+SYMBOL(iswcntrl, None, <cwctype>)
+SYMBOL(iswcntrl, None, <wctype.h>)
+SYMBOL(iswctype, std::, <cwctype>)
+SYMBOL(iswctype, None, <cwctype>)
+SYMBOL(iswctype, None, <wctype.h>)
+SYMBOL(iswdigit, std::, <cwctype>)
+SYMBOL(iswdigit, None, <cwctype>)
+SYMBOL(iswdigit, None, <wctype.h>)
+SYMBOL(iswgraph, std::, <cwctype>)
+SYMBOL(iswgraph, None, <cwctype>)
+SYMBOL(iswgraph, None, <wctype.h>)
+SYMBOL(iswlower, std::, <cwctype>)
+SYMBOL(iswlower, None, <cwctype>)
+SYMBOL(iswlower, None, <wctype.h>)
+SYMBOL(iswprint, std::, <cwctype>)
+SYMBOL(iswprint, None, <cwctype>)
+SYMBOL(iswprint, None, <wctype.h>)
+SYMBOL(iswpunct, std::, <cwctype>)
+SYMBOL(iswpunct, None, <cwctype>)
+SYMBOL(iswpunct, None, <wctype.h>)
+SYMBOL(iswspace, std::, <cwctype>)
+SYMBOL(iswspace, None, <cwctype>)
+SYMBOL(iswspace, None, <wctype.h>)
+SYMBOL(iswupper, std::, <cwctype>)
+SYMBOL(iswupper, None, <cwctype>)
+SYMBOL(iswupper, None, <wctype.h>)
+SYMBOL(iswxdigit, std::, <cwctype>)
+SYMBOL(iswxdigit, None, <cwctype>)
+SYMBOL(iswxdigit, None, <wctype.h>)
+SYMBOL(isxdigit, std::, <cctype>)
+SYMBOL(isxdigit, None, <cctype>)
+SYMBOL(isxdigit, None, <ctype.h>)
+SYMBOL(iter_common_reference_t, std::, <iterator>)
+SYMBOL(iter_const_reference_t, std::, <iterator>)
+SYMBOL(iter_difference_t, std::, <iterator>)
+SYMBOL(iter_reference_t, std::, <iterator>)
+SYMBOL(iter_rvalue_reference_t, std::, <iterator>)
+SYMBOL(iter_swap, std::, <algorithm>)
+SYMBOL(iter_value_t, std::, <iterator>)
+SYMBOL(iterator, std::, <iterator>)
+SYMBOL(iterator_traits, std::, <iterator>)
+SYMBOL(jmp_buf, std::, <csetjmp>)
+SYMBOL(jmp_buf, None, <csetjmp>)
+SYMBOL(jmp_buf, None, <setjmp.h>)
+SYMBOL(jthread, std::, <thread>)
+SYMBOL(kill_dependency, std::, <atomic>)
+SYMBOL(kilo, std::, <ratio>)
+SYMBOL(knuth_b, std::, <random>)
+SYMBOL(labs, std::, <cstdlib>)
+SYMBOL(labs, None, <cstdlib>)
+SYMBOL(labs, None, <stdlib.h>)
+SYMBOL(laguerre, std::, <cmath>)
+SYMBOL(laguerref, std::, <cmath>)
+SYMBOL(laguerrel, std::, <cmath>)
+SYMBOL(latch, std::, <latch>)
+SYMBOL(launch, std::, <future>)
+SYMBOL(launder, std::, <new>)
+SYMBOL(lcm, std::, <numeric>)
+SYMBOL(lconv, std::, <clocale>)
+SYMBOL(lconv, None, <clocale>)
+SYMBOL(lconv, None, <locale.h>)
+SYMBOL(ldexp, std::, <cmath>)
+SYMBOL(ldexp, None, <cmath>)
+SYMBOL(ldexp, None, <math.h>)
+SYMBOL(ldexpf, std::, <cmath>)
+SYMBOL(ldexpf, None, <cmath>)
+SYMBOL(ldexpf, None, <math.h>)
+SYMBOL(ldexpl, std::, <cmath>)
+SYMBOL(ldexpl, None, <cmath>)
+SYMBOL(ldexpl, None, <math.h>)
+SYMBOL(ldiv, std::, <cstdlib>)
+SYMBOL(ldiv, None, <cstdlib>)
+SYMBOL(ldiv, None, <stdlib.h>)
+SYMBOL(ldiv_t, std::, <cstdlib>)
+SYMBOL(ldiv_t, None, <cstdlib>)
+SYMBOL(ldiv_t, None, <stdlib.h>)
+SYMBOL(left, std::, <ios>)
+SYMBOL(left, std::, <iostream>)
+SYMBOL(legendre, std::, <cmath>)
+SYMBOL(legendref, std::, <cmath>)
+SYMBOL(legendrel, std::, <cmath>)
+SYMBOL(length_error, std::, <stdexcept>)
+SYMBOL(lerp, std::, <cmath>)
+SYMBOL(less, std::, <functional>)
+SYMBOL(less_equal, std::, <functional>)
+SYMBOL(lexicographical_compare, std::, <algorithm>)
+SYMBOL(lexicographical_compare_three_way, std::, <algorithm>)
+SYMBOL(lgamma, std::, <cmath>)
+SYMBOL(lgamma, None, <cmath>)
+SYMBOL(lgamma, None, <math.h>)
+SYMBOL(lgammaf, std::, <cmath>)
+SYMBOL(lgammaf, None, <cmath>)
+SYMBOL(lgammaf, None, <math.h>)
+SYMBOL(lgammal, std::, <cmath>)
+SYMBOL(lgammal, None, <cmath>)
+SYMBOL(lgammal, None, <math.h>)
+SYMBOL(linear_congruential_engine, std::, <random>)
+SYMBOL(list, std::, <list>)
+SYMBOL(llabs, std::, <cstdlib>)
+SYMBOL(llabs, None, <cstdlib>)
+SYMBOL(llabs, None, <stdlib.h>)
+SYMBOL(lldiv, std::, <cstdlib>)
+SYMBOL(lldiv, None, <cstdlib>)
+SYMBOL(lldiv, None, <stdlib.h>)
+SYMBOL(lldiv_t, std::, <cstdlib>)
+SYMBOL(lldiv_t, None, <cstdlib>)
+SYMBOL(lldiv_t, None, <stdlib.h>)
+SYMBOL(llrint, std::, <cmath>)
+SYMBOL(llrint, None, <cmath>)
+SYMBOL(llrint, None, <math.h>)
+SYMBOL(llrintf, std::, <cmath>)
+SYMBOL(llrintf, None, <cmath>)
+SYMBOL(llrintf, None, <math.h>)
+SYMBOL(llrintl, std::, <cmath>)
+SYMBOL(llrintl, None, <cmath>)
+SYMBOL(llrintl, None, <math.h>)
+SYMBOL(llround, std::, <cmath>)
+SYMBOL(llround, None, <cmath>)
+SYMBOL(llround, None, <math.h>)
+SYMBOL(llroundf, std::, <cmath>)
+SYMBOL(llroundf, None, <cmath>)
+SYMBOL(llroundf, None, <math.h>)
+SYMBOL(llroundl, std::, <cmath>)
+SYMBOL(llroundl, None, <cmath>)
+SYMBOL(llroundl, None, <math.h>)
+SYMBOL(locale, std::, <locale>)
+SYMBOL(localeconv, std::, <clocale>)
+SYMBOL(localeconv, None, <clocale>)
+SYMBOL(localeconv, None, <locale.h>)
+SYMBOL(localtime, std::, <ctime>)
+SYMBOL(localtime, None, <ctime>)
+SYMBOL(localtime, None, <time.h>)
+SYMBOL(lock, std::, <mutex>)
+SYMBOL(lock_guard, std::, <mutex>)
+SYMBOL(log, std::, <cmath>)
+SYMBOL(log, None, <cmath>)
+SYMBOL(log, None, <math.h>)
+SYMBOL(log10, std::, <cmath>)
+SYMBOL(log10, None, <cmath>)
+SYMBOL(log10, None, <math.h>)
+SYMBOL(log10f, std::, <cmath>)
+SYMBOL(log10f, None, <cmath>)
+SYMBOL(log10f, None, <math.h>)
+SYMBOL(log10l, std::, <cmath>)
+SYMBOL(log10l, None, <cmath>)
+SYMBOL(log10l, None, <math.h>)
+SYMBOL(log1p, std::, <cmath>)
+SYMBOL(log1p, None, <cmath>)
+SYMBOL(log1p, None, <math.h>)
+SYMBOL(log1pf, std::, <cmath>)
+SYMBOL(log1pf, None, <cmath>)
+SYMBOL(log1pf, None, <math.h>)
+SYMBOL(log1pl, std::, <cmath>)
+SYMBOL(log1pl, None, <cmath>)
+SYMBOL(log1pl, None, <math.h>)
+SYMBOL(log2, std::, <cmath>)
+SYMBOL(log2, None, <cmath>)
+SYMBOL(log2, None, <math.h>)
+SYMBOL(log2f, std::, <cmath>)
+SYMBOL(log2f, None, <cmath>)
+SYMBOL(log2f, None, <math.h>)
+SYMBOL(log2l, std::, <cmath>)
+SYMBOL(log2l, None, <cmath>)
+SYMBOL(log2l, None, <math.h>)
+SYMBOL(logb, std::, <cmath>)
+SYMBOL(logb, None, <cmath>)
+SYMBOL(logb, None, <math.h>)
+SYMBOL(logbf, std::, <cmath>)
+SYMBOL(logbf, None, <cmath>)
+SYMBOL(logbf, None, <math.h>)
+SYMBOL(logbl, std::, <cmath>)
+SYMBOL(logbl, None, <cmath>)
+SYMBOL(logbl, None, <math.h>)
+SYMBOL(logf, std::, <cmath>)
+SYMBOL(logf, None, <cmath>)
+SYMBOL(logf, None, <math.h>)
+SYMBOL(logic_error, std::, <stdexcept>)
+SYMBOL(logical_and, std::, <functional>)
+SYMBOL(logical_not, std::, <functional>)
+SYMBOL(logical_or, std::, <functional>)
+SYMBOL(logl, std::, <cmath>)
+SYMBOL(logl, None, <cmath>)
+SYMBOL(logl, None, <math.h>)
+SYMBOL(lognormal_distribution, std::, <random>)
+SYMBOL(longjmp, std::, <csetjmp>)
+SYMBOL(longjmp, None, <csetjmp>)
+SYMBOL(longjmp, None, <setjmp.h>)
+SYMBOL(lower_bound, std::, <algorithm>)
+SYMBOL(lrint, std::, <cmath>)
+SYMBOL(lrint, None, <cmath>)
+SYMBOL(lrint, None, <math.h>)
+SYMBOL(lrintf, std::, <cmath>)
+SYMBOL(lrintf, None, <cmath>)
+SYMBOL(lrintf, None, <math.h>)
+SYMBOL(lrintl, std::, <cmath>)
+SYMBOL(lrintl, None, <cmath>)
+SYMBOL(lrintl, None, <math.h>)
+SYMBOL(lround, std::, <cmath>)
+SYMBOL(lround, None, <cmath>)
+SYMBOL(lround, None, <math.h>)
+SYMBOL(lroundf, std::, <cmath>)
+SYMBOL(lroundf, None, <cmath>)
+SYMBOL(lroundf, None, <math.h>)
+SYMBOL(lroundl, std::, <cmath>)
+SYMBOL(lroundl, None, <cmath>)
+SYMBOL(lroundl, None, <math.h>)
+SYMBOL(make_exception_ptr, std::, <exception>)
+SYMBOL(make_format_args, std::, <format>)
+SYMBOL(make_from_tuple, std::, <tuple>)
+SYMBOL(make_heap, std::, <algorithm>)
+SYMBOL(make_move_iterator, std::, <iterator>)
+SYMBOL(make_obj_using_allocator, std::, <memory>)
+SYMBOL(make_optional, std::, <optional>)
+SYMBOL(make_pair, std::, <utility>)
+SYMBOL(make_reverse_iterator, std::, <iterator>)
+SYMBOL(make_shared, std::, <memory>)
+SYMBOL(make_shared_for_overwrite, std::, <memory>)
+SYMBOL(make_signed, std::, <type_traits>)
+SYMBOL(make_signed_t, std::, <type_traits>)
+SYMBOL(make_tuple, std::, <tuple>)
+SYMBOL(make_unique, std::, <memory>)
+SYMBOL(make_unique_for_overwrite, std::, <memory>)
+SYMBOL(make_unsigned, std::, <type_traits>)
+SYMBOL(make_unsigned_t, std::, <type_traits>)
+SYMBOL(make_wformat_args, std::, <format>)
+SYMBOL(malloc, std::, <cstdlib>)
+SYMBOL(malloc, None, <cstdlib>)
+SYMBOL(malloc, None, <stdlib.h>)
+SYMBOL(map, std::, <map>)
+SYMBOL(mask_array, std::, <valarray>)
+SYMBOL(match_results, std::, <regex>)
+SYMBOL(max, std::, <algorithm>)
+SYMBOL(max_align_t, std::, <cstddef>)
+SYMBOL(max_align_t, None, <cstddef>)
+SYMBOL(max_align_t, None, <stddef.h>)
+SYMBOL(max_element, std::, <algorithm>)
+SYMBOL(mblen, std::, <cstdlib>)
+SYMBOL(mblen, None, <cstdlib>)
+SYMBOL(mblen, None, <stdlib.h>)
+SYMBOL(mbrlen, std::, <cwchar>)
+SYMBOL(mbrlen, None, <cwchar>)
+SYMBOL(mbrlen, None, <wchar.h>)
+SYMBOL(mbrtoc16, std::, <cuchar>)
+SYMBOL(mbrtoc16, None, <cuchar>)
+SYMBOL(mbrtoc16, None, <uchar.h>)
+SYMBOL(mbrtoc32, std::, <cuchar>)
+SYMBOL(mbrtoc32, None, <cuchar>)
+SYMBOL(mbrtoc32, None, <uchar.h>)
+SYMBOL(mbrtoc8, std::, <cuchar>)
+SYMBOL(mbrtoc8, None, <cuchar>)
+SYMBOL(mbrtoc8, None, <uchar.h>)
+SYMBOL(mbrtowc, std::, <cwchar>)
+SYMBOL(mbrtowc, None, <cwchar>)
+SYMBOL(mbrtowc, None, <wchar.h>)
+SYMBOL(mbsinit, std::, <cwchar>)
+SYMBOL(mbsinit, None, <cwchar>)
+SYMBOL(mbsinit, None, <wchar.h>)
+SYMBOL(mbsrtowcs, std::, <cwchar>)
+SYMBOL(mbsrtowcs, None, <cwchar>)
+SYMBOL(mbsrtowcs, None, <wchar.h>)
+SYMBOL(mbstowcs, std::, <cstdlib>)
+SYMBOL(mbstowcs, None, <cstdlib>)
+SYMBOL(mbstowcs, None, <stdlib.h>)
+SYMBOL(mbtowc, std::, <cstdlib>)
+SYMBOL(mbtowc, None, <cstdlib>)
+SYMBOL(mbtowc, None, <stdlib.h>)
+SYMBOL(mega, std::, <ratio>)
+SYMBOL(mem_fn, std::, <functional>)
+SYMBOL(mem_fun, std::, <functional>)
+SYMBOL(mem_fun1_ref_t, std::, <functional>)
+SYMBOL(mem_fun1_t, std::, <functional>)
+SYMBOL(mem_fun_ref, std::, <functional>)
+SYMBOL(mem_fun_ref_t, std::, <functional>)
+SYMBOL(mem_fun_t, std::, <functional>)
+SYMBOL(memchr, std::, <cstring>)
+SYMBOL(memchr, None, <cstring>)
+SYMBOL(memchr, None, <string.h>)
+SYMBOL(memcmp, std::, <cstring>)
+SYMBOL(memcmp, None, <cstring>)
+SYMBOL(memcmp, None, <string.h>)
+SYMBOL(memcpy, std::, <cstring>)
+SYMBOL(memcpy, None, <cstring>)
+SYMBOL(memcpy, None, <string.h>)
+SYMBOL(memmove, std::, <cstring>)
+SYMBOL(memmove, None, <cstring>)
+SYMBOL(memmove, None, <string.h>)
+SYMBOL(memory_order, std::, <atomic>)
+SYMBOL(memory_order_acq_rel, std::, <atomic>)
+SYMBOL(memory_order_acquire, std::, <atomic>)
+SYMBOL(memory_order_consume, std::, <atomic>)
+SYMBOL(memory_order_relaxed, std::, <atomic>)
+SYMBOL(memory_order_release, std::, <atomic>)
+SYMBOL(memory_order_seq_cst, std::, <atomic>)
+SYMBOL(memset, std::, <cstring>)
+SYMBOL(memset, None, <cstring>)
+SYMBOL(memset, None, <string.h>)
+SYMBOL(merge, std::, <algorithm>)
+SYMBOL(mergeable, std::, <iterator>)
+SYMBOL(mersenne_twister_engine, std::, <random>)
+SYMBOL(messages, std::, <locale>)
+SYMBOL(messages_base, std::, <locale>)
+SYMBOL(messages_byname, std::, <locale>)
+SYMBOL(micro, std::, <ratio>)
+SYMBOL(midpoint, std::, <numeric>)
+SYMBOL(milli, std::, <ratio>)
+SYMBOL(min, std::, <algorithm>)
+SYMBOL(min_element, std::, <algorithm>)
+SYMBOL(minmax, std::, <algorithm>)
+SYMBOL(minmax_element, std::, <algorithm>)
+SYMBOL(minstd_rand, std::, <random>)
+SYMBOL(minstd_rand0, std::, <random>)
+SYMBOL(minus, std::, <functional>)
+SYMBOL(mismatch, std::, <algorithm>)
+SYMBOL(mktime, std::, <ctime>)
+SYMBOL(mktime, None, <ctime>)
+SYMBOL(mktime, None, <time.h>)
+SYMBOL(modf, std::, <cmath>)
+SYMBOL(modf, None, <cmath>)
+SYMBOL(modf, None, <math.h>)
+SYMBOL(modff, std::, <cmath>)
+SYMBOL(modff, None, <cmath>)
+SYMBOL(modff, None, <math.h>)
+SYMBOL(modfl, std::, <cmath>)
+SYMBOL(modfl, None, <cmath>)
+SYMBOL(modfl, None, <math.h>)
+SYMBOL(modulus, std::, <functional>)
+SYMBOL(money_base, std::, <locale>)
+SYMBOL(money_get, std::, <locale>)
+SYMBOL(money_put, std::, <locale>)
+SYMBOL(moneypunct, std::, <locale>)
+SYMBOL(moneypunct_byname, std::, <locale>)
+SYMBOL(monostate, std::, <variant>)
+SYMBOL(movable, std::, <concepts>)
+SYMBOL(move_backward, std::, <algorithm>)
+SYMBOL(move_constructible, std::, <concepts>)
+SYMBOL(move_if_noexcept, std::, <utility>)
+SYMBOL(move_iterator, std::, <iterator>)
+SYMBOL(move_only_function, std::, <functional>)
+SYMBOL(move_sentinel, std::, <iterator>)
+SYMBOL(mt19937, std::, <random>)
+SYMBOL(mt19937_64, std::, <random>)
+SYMBOL(multimap, std::, <map>)
+SYMBOL(multiplies, std::, <functional>)
+SYMBOL(multiset, std::, <set>)
+SYMBOL(mutex, std::, <mutex>)
+SYMBOL(nan, std::, <cmath>)
+SYMBOL(nan, None, <cmath>)
+SYMBOL(nan, None, <math.h>)
+SYMBOL(nanf, std::, <cmath>)
+SYMBOL(nanf, None, <cmath>)
+SYMBOL(nanf, None, <math.h>)
+SYMBOL(nanl, std::, <cmath>)
+SYMBOL(nanl, None, <cmath>)
+SYMBOL(nanl, None, <math.h>)
+SYMBOL(nano, std::, <ratio>)
+SYMBOL(nearbyint, std::, <cmath>)
+SYMBOL(nearbyint, None, <cmath>)
+SYMBOL(nearbyint, None, <math.h>)
+SYMBOL(nearbyintf, std::, <cmath>)
+SYMBOL(nearbyintf, None, <cmath>)
+SYMBOL(nearbyintf, None, <math.h>)
+SYMBOL(nearbyintl, std::, <cmath>)
+SYMBOL(nearbyintl, None, <cmath>)
+SYMBOL(nearbyintl, None, <math.h>)
+SYMBOL(negate, std::, <functional>)
+SYMBOL(negation, std::, <type_traits>)
+SYMBOL(negation_v, std::, <type_traits>)
+SYMBOL(negative_binomial_distribution, std::, <random>)
+SYMBOL(nested_exception, std::, <exception>)
+SYMBOL(new_handler, std::, <new>)
+SYMBOL(next, std::, <iterator>)
+SYMBOL(next_permutation, std::, <algorithm>)
+SYMBOL(nextafter, std::, <cmath>)
+SYMBOL(nextafter, None, <cmath>)
+SYMBOL(nextafter, None, <math.h>)
+SYMBOL(nextafterf, std::, <cmath>)
+SYMBOL(nextafterf, None, <cmath>)
+SYMBOL(nextafterf, None, <math.h>)
+SYMBOL(nextafterl, std::, <cmath>)
+SYMBOL(nextafterl, None, <cmath>)
+SYMBOL(nextafterl, None, <math.h>)
+SYMBOL(nexttoward, std::, <cmath>)
+SYMBOL(nexttoward, None, <cmath>)
+SYMBOL(nexttoward, None, <math.h>)
+SYMBOL(nexttowardf, std::, <cmath>)
+SYMBOL(nexttowardf, None, <cmath>)
+SYMBOL(nexttowardf, None, <math.h>)
+SYMBOL(nexttowardl, std::, <cmath>)
+SYMBOL(nexttowardl, None, <cmath>)
+SYMBOL(nexttowardl, None, <math.h>)
+SYMBOL(noboolalpha, std::, <ios>)
+SYMBOL(noboolalpha, std::, <iostream>)
+SYMBOL(noemit_on_flush, std::, <ostream>)
+SYMBOL(noemit_on_flush, std::, <iostream>)
+SYMBOL(none_of, std::, <algorithm>)
+SYMBOL(noop_coroutine, std::, <coroutine>)
+SYMBOL(noop_coroutine_handle, std::, <coroutine>)
+SYMBOL(noop_coroutine_promise, std::, <coroutine>)
+SYMBOL(norm, std::, <complex>)
+SYMBOL(normal_distribution, std::, <random>)
+SYMBOL(noshowbase, std::, <ios>)
+SYMBOL(noshowbase, std::, <iostream>)
+SYMBOL(noshowpoint, std::, <ios>)
+SYMBOL(noshowpoint, std::, <iostream>)
+SYMBOL(noshowpos, std::, <ios>)
+SYMBOL(noshowpos, std::, <iostream>)
+SYMBOL(noskipws, std::, <ios>)
+SYMBOL(noskipws, std::, <iostream>)
+SYMBOL(nostopstate, std::, <stop_token>)
+SYMBOL(nostopstate_t, std::, <stop_token>)
+SYMBOL(not1, std::, <functional>)
+SYMBOL(not2, std::, <functional>)
+SYMBOL(not_equal_to, std::, <functional>)
+SYMBOL(not_fn, std::, <functional>)
+SYMBOL(nothrow, std::, <new>)
+SYMBOL(nothrow_t, std::, <new>)
+SYMBOL(notify_all_at_thread_exit, std::, <condition_variable>)
+SYMBOL(nounitbuf, std::, <ios>)
+SYMBOL(nounitbuf, std::, <iostream>)
+SYMBOL(nouppercase, std::, <ios>)
+SYMBOL(nouppercase, std::, <iostream>)
+SYMBOL(nth_element, std::, <algorithm>)
+SYMBOL(nullopt, std::, <optional>)
+SYMBOL(nullopt_t, std::, <optional>)
+SYMBOL(nullptr_t, std::, <cstddef>)
+SYMBOL(nullptr_t, None, <cstddef>)
+SYMBOL(nullptr_t, None, <stddef.h>)
+SYMBOL(num_get, std::, <locale>)
+SYMBOL(num_put, std::, <locale>)
+SYMBOL(numeric_limits, std::, <limits>)
+SYMBOL(numpunct, std::, <locale>)
+SYMBOL(numpunct_byname, std::, <locale>)
+SYMBOL(oct, std::, <ios>)
+SYMBOL(oct, std::, <iostream>)
+SYMBOL(ofstream, std::, <fstream>)
+SYMBOL(ofstream, std::, <iosfwd>)
+SYMBOL(once_flag, std::, <mutex>)
+SYMBOL(open_mode, std::, <ios>)
+SYMBOL(open_mode, std::, <iostream>)
+SYMBOL(optional, std::, <optional>)
+SYMBOL(ospanstream, std::, <spanstream>)
+SYMBOL(ospanstream, std::, <iosfwd>)
+SYMBOL(ostream, std::, <ostream>)
+SYMBOL(ostream, std::, <iostream>)
+SYMBOL(ostream, std::, <iosfwd>)
+SYMBOL(ostream_iterator, std::, <iterator>)
+SYMBOL(ostreambuf_iterator, std::, <iterator>)
+SYMBOL(ostreambuf_iterator, std::, <iosfwd>)
+SYMBOL(ostringstream, std::, <sstream>)
+SYMBOL(ostringstream, std::, <iosfwd>)
+SYMBOL(ostrstream, std::, <strstream>)
+SYMBOL(osyncstream, std::, <syncstream>)
+SYMBOL(osyncstream, std::, <iosfwd>)
+SYMBOL(out_of_range, std::, <stdexcept>)
+SYMBOL(out_ptr, std::, <memory>)
+SYMBOL(out_ptr_t, std::, <memory>)
+SYMBOL(output_iterator, std::, <iterator>)
+SYMBOL(output_iterator_tag, std::, <iterator>)
+SYMBOL(overflow_error, std::, <stdexcept>)
+SYMBOL(owner_less, std::, <memory>)
+SYMBOL(packaged_task, std::, <future>)
+SYMBOL(pair, std::, <utility>)
+SYMBOL(partial_order, std::, <compare>)
+SYMBOL(partial_ordering, std::, <compare>)
+SYMBOL(partial_sort, std::, <algorithm>)
+SYMBOL(partial_sort_copy, std::, <algorithm>)
+SYMBOL(partial_sum, std::, <numeric>)
+SYMBOL(partition, std::, <algorithm>)
+SYMBOL(partition_copy, std::, <algorithm>)
+SYMBOL(partition_point, std::, <algorithm>)
+SYMBOL(permutable, std::, <iterator>)
+SYMBOL(perror, std::, <cstdio>)
+SYMBOL(perror, None, <cstdio>)
+SYMBOL(perror, None, <stdio.h>)
+SYMBOL(peta, std::, <ratio>)
+SYMBOL(pico, std::, <ratio>)
+SYMBOL(piecewise_constant_distribution, std::, <random>)
+SYMBOL(piecewise_construct, std::, <utility>)
+SYMBOL(piecewise_construct_t, std::, <utility>)
+SYMBOL(piecewise_linear_distribution, std::, <random>)
+SYMBOL(plus, std::, <functional>)
+SYMBOL(pointer_safety, std::, <memory>)
+SYMBOL(pointer_traits, std::, <memory>)
+SYMBOL(poisson_distribution, std::, <random>)
+SYMBOL(polar, std::, <complex>)
+SYMBOL(pop_heap, std::, <algorithm>)
+SYMBOL(popcount, std::, <bit>)
+SYMBOL(pow, std::, <cmath>)
+SYMBOL(pow, None, <cmath>)
+SYMBOL(pow, None, <math.h>)
+SYMBOL(powf, std::, <cmath>)
+SYMBOL(powf, None, <cmath>)
+SYMBOL(powf, None, <math.h>)
+SYMBOL(powl, std::, <cmath>)
+SYMBOL(powl, None, <cmath>)
+SYMBOL(powl, None, <math.h>)
+SYMBOL(predicate, std::, <concepts>)
+SYMBOL(preferred, std::, <memory>)
+SYMBOL(prev, std::, <iterator>)
+SYMBOL(prev_permutation, std::, <algorithm>)
+SYMBOL(printf, std::, <cstdio>)
+SYMBOL(printf, None, <cstdio>)
+SYMBOL(printf, None, <stdio.h>)
+SYMBOL(priority_queue, std::, <queue>)
+SYMBOL(proj, std::, <complex>)
+SYMBOL(projected, std::, <iterator>)
+SYMBOL(promise, std::, <future>)
+SYMBOL(ptr_fun, std::, <functional>)
+SYMBOL(ptrdiff_t, std::, <cstddef>)
+SYMBOL(ptrdiff_t, None, <cstddef>)
+SYMBOL(ptrdiff_t, None, <stddef.h>)
+SYMBOL(push_heap, std::, <algorithm>)
+SYMBOL(put_money, std::, <iomanip>)
+SYMBOL(put_time, std::, <iomanip>)
+SYMBOL(putc, std::, <cstdio>)
+SYMBOL(putc, None, <cstdio>)
+SYMBOL(putc, None, <stdio.h>)
+SYMBOL(putchar, std::, <cstdio>)
+SYMBOL(putchar, None, <cstdio>)
+SYMBOL(putchar, None, <stdio.h>)
+SYMBOL(puts, std::, <cstdio>)
+SYMBOL(puts, None, <cstdio>)
+SYMBOL(puts, None, <stdio.h>)
+SYMBOL(putwc, std::, <cwchar>)
+SYMBOL(putwc, None, <cwchar>)
+SYMBOL(putwc, None, <wchar.h>)
+SYMBOL(putwchar, std::, <cwchar>)
+SYMBOL(putwchar, None, <cwchar>)
+SYMBOL(putwchar, None, <wchar.h>)
+SYMBOL(qsort, std::, <cstdlib>)
+SYMBOL(qsort, None, <cstdlib>)
+SYMBOL(qsort, None, <stdlib.h>)
+SYMBOL(queue, std::, <queue>)
+SYMBOL(quick_exit, std::, <cstdlib>)
+SYMBOL(quick_exit, None, <cstdlib>)
+SYMBOL(quick_exit, None, <stdlib.h>)
+SYMBOL(quoted, std::, <iomanip>)
+SYMBOL(raise, std::, <csignal>)
+SYMBOL(raise, None, <csignal>)
+SYMBOL(raise, None, <signal.h>)
+SYMBOL(rand, std::, <cstdlib>)
+SYMBOL(rand, None, <cstdlib>)
+SYMBOL(rand, None, <stdlib.h>)
+SYMBOL(random_access_iterator, std::, <iterator>)
+SYMBOL(random_access_iterator_tag, std::, <iterator>)
+SYMBOL(random_device, std::, <random>)
+SYMBOL(random_shuffle, std::, <algorithm>)
+SYMBOL(range_error, std::, <stdexcept>)
+SYMBOL(rank, std::, <type_traits>)
+SYMBOL(rank_v, std::, <type_traits>)
+SYMBOL(ranlux24, std::, <random>)
+SYMBOL(ranlux24_base, std::, <random>)
+SYMBOL(ranlux48, std::, <random>)
+SYMBOL(ranlux48_base, std::, <random>)
+SYMBOL(ratio, std::, <ratio>)
+SYMBOL(ratio_add, std::, <ratio>)
+SYMBOL(ratio_divide, std::, <ratio>)
+SYMBOL(ratio_equal, std::, <ratio>)
+SYMBOL(ratio_equal_v, std::, <ratio>)
+SYMBOL(ratio_greater, std::, <ratio>)
+SYMBOL(ratio_greater_equal, std::, <ratio>)
+SYMBOL(ratio_greater_equal_v, std::, <ratio>)
+SYMBOL(ratio_greater_v, std::, <ratio>)
+SYMBOL(ratio_less, std::, <ratio>)
+SYMBOL(ratio_less_equal, std::, <ratio>)
+SYMBOL(ratio_less_equal_v, std::, <ratio>)
+SYMBOL(ratio_less_v, std::, <ratio>)
+SYMBOL(ratio_multiply, std::, <ratio>)
+SYMBOL(ratio_not_equal, std::, <ratio>)
+SYMBOL(ratio_not_equal_v, std::, <ratio>)
+SYMBOL(ratio_subtract, std::, <ratio>)
+SYMBOL(raw_storage_iterator, std::, <memory>)
+SYMBOL(real, std::, <complex>)
+SYMBOL(realloc, std::, <cstdlib>)
+SYMBOL(realloc, None, <cstdlib>)
+SYMBOL(realloc, None, <stdlib.h>)
+SYMBOL(recursive_mutex, std::, <mutex>)
+SYMBOL(recursive_timed_mutex, std::, <mutex>)
+SYMBOL(reduce, std::, <numeric>)
+SYMBOL(ref, std::, <functional>)
+SYMBOL(reference_wrapper, std::, <functional>)
+SYMBOL(regex, std::, <regex>)
+SYMBOL(regex_error, std::, <regex>)
+SYMBOL(regex_iterator, std::, <regex>)
+SYMBOL(regex_match, std::, <regex>)
+SYMBOL(regex_replace, std::, <regex>)
+SYMBOL(regex_search, std::, <regex>)
+SYMBOL(regex_token_iterator, std::, <regex>)
+SYMBOL(regex_traits, std::, <regex>)
+SYMBOL(regular, std::, <concepts>)
+SYMBOL(regular_invocable, std::, <concepts>)
+SYMBOL(reinterpret_pointer_cast, std::, <memory>)
+SYMBOL(relation, std::, <concepts>)
+SYMBOL(remainder, std::, <cmath>)
+SYMBOL(remainder, None, <cmath>)
+SYMBOL(remainder, None, <math.h>)
+SYMBOL(remainderf, std::, <cmath>)
+SYMBOL(remainderf, None, <cmath>)
+SYMBOL(remainderf, None, <math.h>)
+SYMBOL(remainderl, std::, <cmath>)
+SYMBOL(remainderl, None, <cmath>)
+SYMBOL(remainderl, None, <math.h>)
+SYMBOL(remove_all_extents, std::, <type_traits>)
+SYMBOL(remove_all_extents_t, std::, <type_traits>)
+SYMBOL(remove_const, std::, <type_traits>)
+SYMBOL(remove_const_t, std::, <type_traits>)
+SYMBOL(remove_copy, std::, <algorithm>)
+SYMBOL(remove_copy_if, std::, <algorithm>)
+SYMBOL(remove_cv, std::, <type_traits>)
+SYMBOL(remove_cv_t, std::, <type_traits>)
+SYMBOL(remove_cvref, std::, <type_traits>)
+SYMBOL(remove_cvref_t, std::, <type_traits>)
+SYMBOL(remove_extent, std::, <type_traits>)
+SYMBOL(remove_extent_t, std::, <type_traits>)
+SYMBOL(remove_if, std::, <algorithm>)
+SYMBOL(remove_pointer, std::, <type_traits>)
+SYMBOL(remove_pointer_t, std::, <type_traits>)
+SYMBOL(remove_reference, std::, <type_traits>)
+SYMBOL(remove_reference_t, std::, <type_traits>)
+SYMBOL(remove_volatile, std::, <type_traits>)
+SYMBOL(remove_volatile_t, std::, <type_traits>)
+SYMBOL(remquo, std::, <cmath>)
+SYMBOL(remquo, None, <cmath>)
+SYMBOL(remquo, None, <math.h>)
+SYMBOL(remquof, std::, <cmath>)
+SYMBOL(remquof, None, <cmath>)
+SYMBOL(remquof, None, <math.h>)
+SYMBOL(remquol, std::, <cmath>)
+SYMBOL(remquol, None, <cmath>)
+SYMBOL(remquol, None, <math.h>)
+SYMBOL(rename, std::, <cstdio>)
+SYMBOL(rename, None, <cstdio>)
+SYMBOL(rename, None, <stdio.h>)
+SYMBOL(replace, std::, <algorithm>)
+SYMBOL(replace_copy, std::, <algorithm>)
+SYMBOL(replace_copy_if, std::, <algorithm>)
+SYMBOL(replace_if, std::, <algorithm>)
+SYMBOL(resetiosflags, std::, <iomanip>)
+SYMBOL(result_of, std::, <type_traits>)
+SYMBOL(result_of_t, std::, <type_traits>)
+SYMBOL(rethrow_exception, std::, <exception>)
+SYMBOL(rethrow_if_nested, std::, <exception>)
+SYMBOL(return_temporary_buffer, std::, <memory>)
+SYMBOL(reverse, std::, <algorithm>)
+SYMBOL(reverse_copy, std::, <algorithm>)
+SYMBOL(reverse_iterator, std::, <iterator>)
+SYMBOL(rewind, std::, <cstdio>)
+SYMBOL(rewind, None, <cstdio>)
+SYMBOL(rewind, None, <stdio.h>)
+SYMBOL(riemann_zeta, std::, <cmath>)
+SYMBOL(riemann_zetaf, std::, <cmath>)
+SYMBOL(riemann_zetal, std::, <cmath>)
+SYMBOL(right, std::, <ios>)
+SYMBOL(right, std::, <iostream>)
+SYMBOL(rint, std::, <cmath>)
+SYMBOL(rint, None, <cmath>)
+SYMBOL(rint, None, <math.h>)
+SYMBOL(rintf, std::, <cmath>)
+SYMBOL(rintf, None, <cmath>)
+SYMBOL(rintf, None, <math.h>)
+SYMBOL(rintl, std::, <cmath>)
+SYMBOL(rintl, None, <cmath>)
+SYMBOL(rintl, None, <math.h>)
+SYMBOL(rotate, std::, <algorithm>)
+SYMBOL(rotate_copy, std::, <algorithm>)
+SYMBOL(rotl, std::, <bit>)
+SYMBOL(rotr, std::, <bit>)
+SYMBOL(round, std::, <cmath>)
+SYMBOL(round, None, <cmath>)
+SYMBOL(round, None, <math.h>)
+SYMBOL(round_indeterminate, std::, <limits>)
+SYMBOL(round_to_nearest, std::, <limits>)
+SYMBOL(round_toward_infinity, std::, <limits>)
+SYMBOL(round_toward_neg_infinity, std::, <limits>)
+SYMBOL(round_toward_zero, std::, <limits>)
+SYMBOL(roundf, std::, <cmath>)
+SYMBOL(roundf, None, <cmath>)
+SYMBOL(roundf, None, <math.h>)
+SYMBOL(roundl, std::, <cmath>)
+SYMBOL(roundl, None, <cmath>)
+SYMBOL(roundl, None, <math.h>)
+SYMBOL(runtime_error, std::, <stdexcept>)
+SYMBOL(same_as, std::, <concepts>)
+SYMBOL(sample, std::, <algorithm>)
+SYMBOL(scalbln, std::, <cmath>)
+SYMBOL(scalbln, None, <cmath>)
+SYMBOL(scalbln, None, <math.h>)
+SYMBOL(scalblnf, std::, <cmath>)
+SYMBOL(scalblnf, None, <cmath>)
+SYMBOL(scalblnf, None, <math.h>)
+SYMBOL(scalblnl, std::, <cmath>)
+SYMBOL(scalblnl, None, <cmath>)
+SYMBOL(scalblnl, None, <math.h>)
+SYMBOL(scalbn, std::, <cmath>)
+SYMBOL(scalbn, None, <cmath>)
+SYMBOL(scalbn, None, <math.h>)
+SYMBOL(scalbnf, std::, <cmath>)
+SYMBOL(scalbnf, None, <cmath>)
+SYMBOL(scalbnf, None, <math.h>)
+SYMBOL(scalbnl, std::, <cmath>)
+SYMBOL(scalbnl, None, <cmath>)
+SYMBOL(scalbnl, None, <math.h>)
+SYMBOL(scanf, std::, <cstdio>)
+SYMBOL(scanf, None, <cstdio>)
+SYMBOL(scanf, None, <stdio.h>)
+SYMBOL(scientific, std::, <ios>)
+SYMBOL(scientific, std::, <iostream>)
+SYMBOL(scoped_allocator_adaptor, std::, <scoped_allocator>)
+SYMBOL(scoped_lock, std::, <mutex>)
+SYMBOL(search, std::, <algorithm>)
+SYMBOL(search_n, std::, <algorithm>)
+SYMBOL(seed_seq, std::, <random>)
+SYMBOL(seek_dir, std::, <ios>)
+SYMBOL(seek_dir, std::, <iostream>)
+SYMBOL(semiregular, std::, <concepts>)
+SYMBOL(sentinel_for, std::, <iterator>)
+SYMBOL(set, std::, <set>)
+SYMBOL(set_difference, std::, <algorithm>)
+SYMBOL(set_intersection, std::, <algorithm>)
+SYMBOL(set_new_handler, std::, <new>)
+SYMBOL(set_symmetric_difference, std::, <algorithm>)
+SYMBOL(set_terminate, std::, <exception>)
+SYMBOL(set_unexpected, std::, <exception>)
+SYMBOL(set_union, std::, <algorithm>)
+SYMBOL(setbase, std::, <iomanip>)
+SYMBOL(setbuf, std::, <cstdio>)
+SYMBOL(setbuf, None, <cstdio>)
+SYMBOL(setbuf, None, <stdio.h>)
+SYMBOL(setfill, std::, <iomanip>)
+SYMBOL(setiosflags, std::, <iomanip>)
+SYMBOL(setlocale, std::, <clocale>)
+SYMBOL(setlocale, None, <clocale>)
+SYMBOL(setlocale, None, <locale.h>)
+SYMBOL(setprecision, std::, <iomanip>)
+SYMBOL(setvbuf, std::, <cstdio>)
+SYMBOL(setvbuf, None, <cstdio>)
+SYMBOL(setvbuf, None, <stdio.h>)
+SYMBOL(setw, std::, <iomanip>)
+SYMBOL(shared_future, std::, <future>)
+SYMBOL(shared_lock, std::, <shared_mutex>)
+SYMBOL(shared_mutex, std::, <shared_mutex>)
+SYMBOL(shared_ptr, std::, <memory>)
+SYMBOL(shared_timed_mutex, std::, <shared_mutex>)
+SYMBOL(shift_left, std::, <algorithm>)
+SYMBOL(shift_right, std::, <algorithm>)
+SYMBOL(showbase, std::, <ios>)
+SYMBOL(showbase, std::, <iostream>)
+SYMBOL(showpoint, std::, <ios>)
+SYMBOL(showpoint, std::, <iostream>)
+SYMBOL(showpos, std::, <ios>)
+SYMBOL(showpos, std::, <iostream>)
+SYMBOL(shuffle, std::, <algorithm>)
+SYMBOL(shuffle_order_engine, std::, <random>)
+SYMBOL(sig_atomic_t, std::, <csignal>)
+SYMBOL(sig_atomic_t, None, <csignal>)
+SYMBOL(sig_atomic_t, None, <signal.h>)
+SYMBOL(signal, std::, <csignal>)
+SYMBOL(signal, None, <csignal>)
+SYMBOL(signal, None, <signal.h>)
+SYMBOL(signbit, std::, <cmath>)
+SYMBOL(signbit, None, <cmath>)
+SYMBOL(signbit, None, <math.h>)
+SYMBOL(signed_integral, std::, <concepts>)
+SYMBOL(sin, std::, <cmath>)
+SYMBOL(sin, None, <cmath>)
+SYMBOL(sin, None, <math.h>)
+SYMBOL(sinf, std::, <cmath>)
+SYMBOL(sinf, None, <cmath>)
+SYMBOL(sinf, None, <math.h>)
+SYMBOL(sinh, std::, <cmath>)
+SYMBOL(sinh, None, <cmath>)
+SYMBOL(sinh, None, <math.h>)
+SYMBOL(sinhf, std::, <cmath>)
+SYMBOL(sinhf, None, <cmath>)
+SYMBOL(sinhf, None, <math.h>)
+SYMBOL(sinhl, std::, <cmath>)
+SYMBOL(sinhl, None, <cmath>)
+SYMBOL(sinhl, None, <math.h>)
+SYMBOL(sinl, std::, <cmath>)
+SYMBOL(sinl, None, <cmath>)
+SYMBOL(sinl, None, <math.h>)
+SYMBOL(sized_sentinel_for, std::, <iterator>)
+SYMBOL(skipws, std::, <ios>)
+SYMBOL(skipws, std::, <iostream>)
+SYMBOL(slice, std::, <valarray>)
+SYMBOL(slice_array, std::, <valarray>)
+SYMBOL(smatch, std::, <regex>)
+SYMBOL(snprintf, std::, <cstdio>)
+SYMBOL(snprintf, None, <cstdio>)
+SYMBOL(snprintf, None, <stdio.h>)
+SYMBOL(sort, std::, <algorithm>)
+SYMBOL(sort_heap, std::, <algorithm>)
+SYMBOL(sortable, std::, <iterator>)
+SYMBOL(source_location, std::, <source_location>)
+SYMBOL(span, std::, <span>)
+SYMBOL(spanbuf, std::, <spanstream>)
+SYMBOL(spanbuf, std::, <iosfwd>)
+SYMBOL(spanstream, std::, <spanstream>)
+SYMBOL(spanstream, std::, <iosfwd>)
+SYMBOL(sph_bessel, std::, <cmath>)
+SYMBOL(sph_bessel, None, <cmath>)
+SYMBOL(sph_bessel, None, <math.h>)
+SYMBOL(sph_besself, std::, <cmath>)
+SYMBOL(sph_besself, None, <cmath>)
+SYMBOL(sph_besself, None, <math.h>)
+SYMBOL(sph_bessell, std::, <cmath>)
+SYMBOL(sph_bessell, None, <cmath>)
+SYMBOL(sph_bessell, None, <math.h>)
+SYMBOL(sph_legendre, std::, <cmath>)
+SYMBOL(sph_legendref, std::, <cmath>)
+SYMBOL(sph_legendrel, std::, <cmath>)
+SYMBOL(sph_neumann, std::, <cmath>)
+SYMBOL(sph_neumannf, std::, <cmath>)
+SYMBOL(sph_neumannl, std::, <cmath>)
+SYMBOL(sprintf, std::, <cstdio>)
+SYMBOL(sprintf, None, <cstdio>)
+SYMBOL(sprintf, None, <stdio.h>)
+SYMBOL(sqrt, std::, <cmath>)
+SYMBOL(sqrt, None, <cmath>)
+SYMBOL(sqrt, None, <math.h>)
+SYMBOL(sqrtf, std::, <cmath>)
+SYMBOL(sqrtf, None, <cmath>)
+SYMBOL(sqrtf, None, <math.h>)
+SYMBOL(sqrtl, std::, <cmath>)
+SYMBOL(sqrtl, None, <cmath>)
+SYMBOL(sqrtl, None, <math.h>)
+SYMBOL(srand, std::, <cstdlib>)
+SYMBOL(srand, None, <cstdlib>)
+SYMBOL(srand, None, <stdlib.h>)
+SYMBOL(sregex_iterator, std::, <regex>)
+SYMBOL(sregex_token_iterator, std::, <regex>)
+SYMBOL(sscanf, std::, <cstdio>)
+SYMBOL(sscanf, None, <cstdio>)
+SYMBOL(sscanf, None, <stdio.h>)
+SYMBOL(ssub_match, std::, <regex>)
+SYMBOL(stable_partition, std::, <algorithm>)
+SYMBOL(stable_sort, std::, <algorithm>)
+SYMBOL(stack, std::, <stack>)
+SYMBOL(stacktrace, std::, <stacktrace>)
+SYMBOL(stacktrace_entry, std::, <stacktrace>)
+SYMBOL(static_pointer_cast, std::, <memory>)
+SYMBOL(stod, std::, <string>)
+SYMBOL(stof, std::, <string>)
+SYMBOL(stoi, std::, <string>)
+SYMBOL(stol, std::, <string>)
+SYMBOL(stold, std::, <string>)
+SYMBOL(stoll, std::, <string>)
+SYMBOL(stop_callback, std::, <stop_token>)
+SYMBOL(stop_source, std::, <stop_token>)
+SYMBOL(stop_token, std::, <stop_token>)
+SYMBOL(stoul, std::, <string>)
+SYMBOL(stoull, std::, <string>)
+SYMBOL(strcat, std::, <cstring>)
+SYMBOL(strcat, None, <cstring>)
+SYMBOL(strcat, None, <string.h>)
+SYMBOL(strchr, std::, <cstring>)
+SYMBOL(strchr, None, <cstring>)
+SYMBOL(strchr, None, <string.h>)
+SYMBOL(strcmp, std::, <cstring>)
+SYMBOL(strcmp, None, <cstring>)
+SYMBOL(strcmp, None, <string.h>)
+SYMBOL(strcoll, std::, <cstring>)
+SYMBOL(strcoll, None, <cstring>)
+SYMBOL(strcoll, None, <string.h>)
+SYMBOL(strcpy, std::, <cstring>)
+SYMBOL(strcpy, None, <cstring>)
+SYMBOL(strcpy, None, <string.h>)
+SYMBOL(strcspn, std::, <cstring>)
+SYMBOL(strcspn, None, <cstring>)
+SYMBOL(strcspn, None, <string.h>)
+SYMBOL(streambuf, std::, <streambuf>)
+SYMBOL(streambuf, std::, <iostream>)
+SYMBOL(streambuf, std::, <iosfwd>)
+SYMBOL(streamoff, std::, <ios>)
+SYMBOL(streamoff, std::, <iostream>)
+SYMBOL(streampos, std::, <iosfwd>)
+SYMBOL(streampos, std::, <iosfwd>)
+SYMBOL(streamsize, std::, <ios>)
+SYMBOL(streamsize, std::, <iostream>)
+SYMBOL(strerror, std::, <cstring>)
+SYMBOL(strerror, None, <cstring>)
+SYMBOL(strerror, None, <string.h>)
+SYMBOL(strftime, std::, <ctime>)
+SYMBOL(strftime, None, <ctime>)
+SYMBOL(strftime, None, <time.h>)
+SYMBOL(strict, std::, <memory>)
+SYMBOL(strict_weak_order, std::, <concepts>)
+SYMBOL(string, std::, <string>)
+SYMBOL(string_view, std::, <string_view>)
+SYMBOL(stringbuf, std::, <sstream>)
+SYMBOL(stringbuf, std::, <iosfwd>)
+SYMBOL(stringstream, std::, <sstream>)
+SYMBOL(stringstream, std::, <iosfwd>)
+SYMBOL(strlen, std::, <cstring>)
+SYMBOL(strlen, None, <cstring>)
+SYMBOL(strlen, None, <string.h>)
+SYMBOL(strncat, std::, <cstring>)
+SYMBOL(strncat, None, <cstring>)
+SYMBOL(strncat, None, <string.h>)
+SYMBOL(strncmp, std::, <cstring>)
+SYMBOL(strncmp, None, <cstring>)
+SYMBOL(strncmp, None, <string.h>)
+SYMBOL(strncpy, std::, <cstring>)
+SYMBOL(strncpy, None, <cstring>)
+SYMBOL(strncpy, None, <string.h>)
+SYMBOL(strong_order, std::, <compare>)
+SYMBOL(strong_ordering, std::, <compare>)
+SYMBOL(strpbrk, std::, <cstring>)
+SYMBOL(strpbrk, None, <cstring>)
+SYMBOL(strpbrk, None, <string.h>)
+SYMBOL(strrchr, std::, <cstring>)
+SYMBOL(strrchr, None, <cstring>)
+SYMBOL(strrchr, None, <string.h>)
+SYMBOL(strspn, std::, <cstring>)
+SYMBOL(strspn, None, <cstring>)
+SYMBOL(strspn, None, <string.h>)
+SYMBOL(strstr, std::, <cstring>)
+SYMBOL(strstr, None, <cstring>)
+SYMBOL(strstr, None, <string.h>)
+SYMBOL(strstream, std::, <strstream>)
+SYMBOL(strstreambuf, std::, <strstream>)
+SYMBOL(strtod, std::, <cstdlib>)
+SYMBOL(strtod, None, <cstdlib>)
+SYMBOL(strtod, None, <stdlib.h>)
+SYMBOL(strtof, std::, <cstdlib>)
+SYMBOL(strtof, None, <cstdlib>)
+SYMBOL(strtof, None, <stdlib.h>)
+SYMBOL(strtoimax, std::, <cinttypes>)
+SYMBOL(strtoimax, None, <cinttypes>)
+SYMBOL(strtoimax, None, <inttypes.h>)
+SYMBOL(strtok, std::, <cstring>)
+SYMBOL(strtok, None, <cstring>)
+SYMBOL(strtok, None, <string.h>)
+SYMBOL(strtol, std::, <cstdlib>)
+SYMBOL(strtol, None, <cstdlib>)
+SYMBOL(strtol, None, <stdlib.h>)
+SYMBOL(strtold, std::, <cstdlib>)
+SYMBOL(strtold, None, <cstdlib>)
+SYMBOL(strtold, None, <stdlib.h>)
+SYMBOL(strtoll, std::, <cstdlib>)
+SYMBOL(strtoll, None, <cstdlib>)
+SYMBOL(strtoll, None, <stdlib.h>)
+SYMBOL(strtoul, std::, <cstdlib>)
+SYMBOL(strtoul, None, <cstdlib>)
+SYMBOL(strtoul, None, <stdlib.h>)
+SYMBOL(strtoull, std::, <cstdlib>)
+SYMBOL(strtoull, None, <cstdlib>)
+SYMBOL(strtoull, None, <stdlib.h>)
+SYMBOL(strtoumax, std::, <cinttypes>)
+SYMBOL(strtoumax, None, <cinttypes>)
+SYMBOL(strtoumax, None, <inttypes.h>)
+SYMBOL(strxfrm, std::, <cstring>)
+SYMBOL(strxfrm, None, <cstring>)
+SYMBOL(strxfrm, None, <string.h>)
+SYMBOL(student_t_distribution, std::, <random>)
+SYMBOL(sub_match, std::, <regex>)
+SYMBOL(subtract_with_carry_engine, std::, <random>)
+SYMBOL(suspend_always, std::, <coroutine>)
+SYMBOL(suspend_never, std::, <coroutine>)
+SYMBOL(swap_ranges, std::, <algorithm>)
+SYMBOL(swappable, std::, <concepts>)
+SYMBOL(swappable_with, std::, <concepts>)
+SYMBOL(swprintf, std::, <cwchar>)
+SYMBOL(swprintf, None, <cwchar>)
+SYMBOL(swprintf, None, <wchar.h>)
+SYMBOL(swscanf, std::, <cwchar>)
+SYMBOL(swscanf, None, <cwchar>)
+SYMBOL(swscanf, None, <wchar.h>)
+SYMBOL(syncbuf, std::, <syncstream>)
+SYMBOL(syncbuf, std::, <iosfwd>)
+SYMBOL(system, std::, <cstdlib>)
+SYMBOL(system, None, <cstdlib>)
+SYMBOL(system, None, <stdlib.h>)
+SYMBOL(system_category, std::, <system_error>)
+SYMBOL(system_error, std::, <system_error>)
+SYMBOL(tan, std::, <cmath>)
+SYMBOL(tan, None, <cmath>)
+SYMBOL(tan, None, <math.h>)
+SYMBOL(tanf, std::, <cmath>)
+SYMBOL(tanf, None, <cmath>)
+SYMBOL(tanf, None, <math.h>)
+SYMBOL(tanh, std::, <cmath>)
+SYMBOL(tanh, None, <cmath>)
+SYMBOL(tanh, None, <math.h>)
+SYMBOL(tanhf, std::, <cmath>)
+SYMBOL(tanhf, None, <cmath>)
+SYMBOL(tanhf, None, <math.h>)
+SYMBOL(tanhl, std::, <cmath>)
+SYMBOL(tanhl, None, <cmath>)
+SYMBOL(tanhl, None, <math.h>)
+SYMBOL(tanl, std::, <cmath>)
+SYMBOL(tanl, None, <cmath>)
+SYMBOL(tanl, None, <math.h>)
+SYMBOL(tera, std::, <ratio>)
+SYMBOL(terminate, std::, <exception>)
+SYMBOL(terminate_handler, std::, <exception>)
+SYMBOL(tgamma, std::, <cmath>)
+SYMBOL(tgamma, None, <cmath>)
+SYMBOL(tgamma, None, <math.h>)
+SYMBOL(tgammaf, std::, <cmath>)
+SYMBOL(tgammaf, None, <cmath>)
+SYMBOL(tgammaf, None, <math.h>)
+SYMBOL(tgammal, std::, <cmath>)
+SYMBOL(tgammal, None, <cmath>)
+SYMBOL(tgammal, None, <math.h>)
+SYMBOL(thread, std::, <thread>)
+SYMBOL(three_way_comparable, std::, <compare>)
+SYMBOL(three_way_comparable_with, std::, <compare>)
+SYMBOL(throw_with_nested, std::, <exception>)
+SYMBOL(tie, std::, <tuple>)
+SYMBOL(time, std::, <ctime>)
+SYMBOL(time, None, <ctime>)
+SYMBOL(time, None, <time.h>)
+SYMBOL(time_base, std::, <locale>)
+SYMBOL(time_get, std::, <locale>)
+SYMBOL(time_get_byname, std::, <locale>)
+SYMBOL(time_put, std::, <locale>)
+SYMBOL(time_put_byname, std::, <locale>)
+SYMBOL(time_t, std::, <ctime>)
+SYMBOL(time_t, None, <ctime>)
+SYMBOL(time_t, None, <time.h>)
+SYMBOL(timed_mutex, std::, <mutex>)
+SYMBOL(timespec, std::, <ctime>)
+SYMBOL(timespec, None, <ctime>)
+SYMBOL(timespec, None, <time.h>)
+SYMBOL(timespec_get, std::, <ctime>)
+SYMBOL(timespec_get, None, <ctime>)
+SYMBOL(timespec_get, None, <time.h>)
+SYMBOL(tm, std::, <ctime>)
+SYMBOL(tm, None, <ctime>)
+SYMBOL(tm, None, <time.h>)
+SYMBOL(tmpfile, std::, <cstdio>)
+SYMBOL(tmpfile, None, <cstdio>)
+SYMBOL(tmpfile, None, <stdio.h>)
+SYMBOL(tmpnam, std::, <cstdio>)
+SYMBOL(tmpnam, None, <cstdio>)
+SYMBOL(tmpnam, None, <stdio.h>)
+SYMBOL(to_address, std::, <memory>)
+SYMBOL(to_array, std::, <array>)
+SYMBOL(to_chars, std::, <charconv>)
+SYMBOL(to_chars_result, std::, <charconv>)
+SYMBOL(to_integer, std::, <cstddef>)
+SYMBOL(to_integer, None, <cstddef>)
+SYMBOL(to_integer, None, <stddef.h>)
+SYMBOL(to_string, std::, <string>)
+SYMBOL(to_underlying, std::, <utility>)
+SYMBOL(to_wstring, std::, <string>)
+SYMBOL(tolower, std::, <cctype>)
+SYMBOL(tolower, None, <cctype>)
+SYMBOL(tolower, None, <ctype.h>)
+SYMBOL(totally_ordered, std::, <concepts>)
+SYMBOL(totally_ordered_with, std::, <concepts>)
+SYMBOL(toupper, std::, <cctype>)
+SYMBOL(toupper, None, <cctype>)
+SYMBOL(toupper, None, <ctype.h>)
+SYMBOL(towctrans, std::, <cwctype>)
+SYMBOL(towctrans, None, <cwctype>)
+SYMBOL(towctrans, None, <wctype.h>)
+SYMBOL(towlower, std::, <cwctype>)
+SYMBOL(towlower, None, <cwctype>)
+SYMBOL(towlower, None, <wctype.h>)
+SYMBOL(towupper, std::, <cwctype>)
+SYMBOL(towupper, None, <cwctype>)
+SYMBOL(towupper, None, <wctype.h>)
+SYMBOL(transform, std::, <algorithm>)
+SYMBOL(transform_exclusive_scan, std::, <numeric>)
+SYMBOL(transform_inclusive_scan, std::, <numeric>)
+SYMBOL(transform_reduce, std::, <numeric>)
+SYMBOL(true_type, std::, <type_traits>)
+SYMBOL(trunc, std::, <cmath>)
+SYMBOL(trunc, None, <cmath>)
+SYMBOL(trunc, None, <math.h>)
+SYMBOL(truncf, std::, <cmath>)
+SYMBOL(truncf, None, <cmath>)
+SYMBOL(truncf, None, <math.h>)
+SYMBOL(truncl, std::, <cmath>)
+SYMBOL(truncl, None, <cmath>)
+SYMBOL(truncl, None, <math.h>)
+SYMBOL(try_lock, std::, <mutex>)
+SYMBOL(try_to_lock, std::, <mutex>)
+SYMBOL(try_to_lock_t, std::, <mutex>)
+SYMBOL(tuple, std::, <tuple>)
+SYMBOL(tuple_cat, std::, <tuple>)
+SYMBOL(tuple_element_t, std::, <tuple>)
+SYMBOL(tuple_size_v, std::, <tuple>)
+SYMBOL(type_identity, std::, <type_traits>)
+SYMBOL(type_identity_t, std::, <type_traits>)
+SYMBOL(type_index, std::, <typeindex>)
+SYMBOL(type_info, std::, <typeinfo>)
+SYMBOL(u16streampos, std::, <iosfwd>)
+SYMBOL(u16streampos, std::, <iosfwd>)
+SYMBOL(u16string, std::, <string>)
+SYMBOL(u16string_view, std::, <string_view>)
+SYMBOL(u32streampos, std::, <iosfwd>)
+SYMBOL(u32streampos, std::, <iosfwd>)
+SYMBOL(u32string, std::, <string>)
+SYMBOL(u32string_view, std::, <string_view>)
+SYMBOL(u8streampos, std::, <iosfwd>)
+SYMBOL(u8streampos, std::, <iosfwd>)
+SYMBOL(u8string, std::, <string>)
+SYMBOL(u8string_view, std::, <string_view>)
+SYMBOL(uint16_t, std::, <cstdint>)
+SYMBOL(uint16_t, None, <cstdint>)
+SYMBOL(uint16_t, None, <stdint.h>)
+SYMBOL(uint32_t, std::, <cstdint>)
+SYMBOL(uint32_t, None, <cstdint>)
+SYMBOL(uint32_t, None, <stdint.h>)
+SYMBOL(uint64_t, std::, <cstdint>)
+SYMBOL(uint64_t, None, <cstdint>)
+SYMBOL(uint64_t, None, <stdint.h>)
+SYMBOL(uint8_t, std::, <cstdint>)
+SYMBOL(uint8_t, None, <cstdint>)
+SYMBOL(uint8_t, None, <stdint.h>)
+SYMBOL(uint_fast16_t, std::, <cstdint>)
+SYMBOL(uint_fast16_t, None, <cstdint>)
+SYMBOL(uint_fast16_t, None, <stdint.h>)
+SYMBOL(uint_fast32_t, std::, <cstdint>)
+SYMBOL(uint_fast32_t, None, <cstdint>)
+SYMBOL(uint_fast32_t, None, <stdint.h>)
+SYMBOL(uint_fast64_t, std::, <cstdint>)
+SYMBOL(uint_fast64_t, None, <cstdint>)
+SYMBOL(uint_fast64_t, None, <stdint.h>)
+SYMBOL(uint_fast8_t, std::, <cstdint>)
+SYMBOL(uint_fast8_t, None, <cstdint>)
+SYMBOL(uint_fast8_t, None, <stdint.h>)
+SYMBOL(uint_least16_t, std::, <cstdint>)
+SYMBOL(uint_least16_t, None, <cstdint>)
+SYMBOL(uint_least16_t, None, <stdint.h>)
+SYMBOL(uint_least32_t, std::, <cstdint>)
+SYMBOL(uint_least32_t, None, <cstdint>)
+SYMBOL(uint_least32_t, None, <stdint.h>)
+SYMBOL(uint_least64_t, std::, <cstdint>)
+SYMBOL(uint_least64_t, None, <cstdint>)
+SYMBOL(uint_least64_t, None, <stdint.h>)
+SYMBOL(uint_least8_t, std::, <cstdint>)
+SYMBOL(uint_least8_t, None, <cstdint>)
+SYMBOL(uint_least8_t, None, <stdint.h>)
+SYMBOL(uintmax_t, std::, <cstdint>)
+SYMBOL(uintmax_t, None, <cstdint>)
+SYMBOL(uintmax_t, None, <stdint.h>)
+SYMBOL(uintptr_t, std::, <cstdint>)
+SYMBOL(uintptr_t, None, <cstdint>)
+SYMBOL(uintptr_t, None, <stdint.h>)
+SYMBOL(unary_function, std::, <functional>)
+SYMBOL(unary_negate, std::, <functional>)
+SYMBOL(uncaught_exception, std::, <exception>)
+SYMBOL(uncaught_exceptions, std::, <exception>)
+SYMBOL(undeclare_no_pointers, std::, <memory>)
+SYMBOL(undeclare_reachable, std::, <memory>)
+SYMBOL(underflow_error, std::, <stdexcept>)
+SYMBOL(underlying_type, std::, <type_traits>)
+SYMBOL(underlying_type_t, std::, <type_traits>)
+SYMBOL(unexpected_handler, std::, <exception>)
+SYMBOL(ungetc, std::, <cstdio>)
+SYMBOL(ungetc, None, <cstdio>)
+SYMBOL(ungetc, None, <stdio.h>)
+SYMBOL(ungetwc, std::, <cwchar>)
+SYMBOL(ungetwc, None, <cwchar>)
+SYMBOL(ungetwc, None, <wchar.h>)
+SYMBOL(uniform_int_distribution, std::, <random>)
+SYMBOL(uniform_random_bit_generator, std::, <random>)
+SYMBOL(uniform_real_distribution, std::, <random>)
+SYMBOL(uninitialized_construct_using_allocator, std::, <memory>)
+SYMBOL(uninitialized_copy, std::, <memory>)
+SYMBOL(uninitialized_copy_n, std::, <memory>)
+SYMBOL(uninitialized_default_construct, std::, <memory>)
+SYMBOL(uninitialized_default_construct_n, std::, <memory>)
+SYMBOL(uninitialized_fill, std::, <memory>)
+SYMBOL(uninitialized_fill_n, std::, <memory>)
+SYMBOL(uninitialized_move, std::, <memory>)
+SYMBOL(uninitialized_move_n, std::, <memory>)
+SYMBOL(uninitialized_value_construct, std::, <memory>)
+SYMBOL(uninitialized_value_construct_n, std::, <memory>)
+SYMBOL(unique, std::, <algorithm>)
+SYMBOL(unique_copy, std::, <algorithm>)
+SYMBOL(unique_lock, std::, <mutex>)
+SYMBOL(unique_ptr, std::, <memory>)
+SYMBOL(unitbuf, std::, <ios>)
+SYMBOL(unitbuf, std::, <iostream>)
+SYMBOL(unordered_map, std::, <unordered_map>)
+SYMBOL(unordered_multimap, std::, <unordered_map>)
+SYMBOL(unordered_multiset, std::, <unordered_set>)
+SYMBOL(unordered_set, std::, <unordered_set>)
+SYMBOL(unreachable, std::, <utility>)
+SYMBOL(unreachable_sentinel, std::, <iterator>)
+SYMBOL(unreachable_sentinel_t, std::, <iterator>)
+SYMBOL(unsigned_integral, std::, <concepts>)
+SYMBOL(upper_bound, std::, <algorithm>)
+SYMBOL(uppercase, std::, <ios>)
+SYMBOL(uppercase, std::, <iostream>)
+SYMBOL(use_facet, std::, <locale>)
+SYMBOL(uses_allocator, std::, <memory>)
+SYMBOL(uses_allocator_construction_args, std::, <memory>)
+SYMBOL(uses_allocator_v, std::, <memory>)
+SYMBOL(va_list, std::, <cstdarg>)
+SYMBOL(va_list, None, <cstdarg>)
+SYMBOL(va_list, None, <stdarg.h>)
+SYMBOL(valarray, std::, <valarray>)
+SYMBOL(variant, std::, <variant>)
+SYMBOL(variant_alternative, std::, <variant>)
+SYMBOL(variant_alternative_t, std::, <variant>)
+SYMBOL(variant_npos, std::, <variant>)
+SYMBOL(variant_size, std::, <variant>)
+SYMBOL(variant_size_v, std::, <variant>)
+SYMBOL(vector, std::, <vector>)
+SYMBOL(vformat, std::, <format>)
+SYMBOL(vformat_to, std::, <format>)
+SYMBOL(vfprintf, std::, <cstdio>)
+SYMBOL(vfprintf, None, <cstdio>)
+SYMBOL(vfprintf, None, <stdio.h>)
+SYMBOL(vfscanf, std::, <cstdio>)
+SYMBOL(vfscanf, None, <cstdio>)
+SYMBOL(vfscanf, None, <stdio.h>)
+SYMBOL(vfwprintf, std::, <cwchar>)
+SYMBOL(vfwprintf, None, <cwchar>)
+SYMBOL(vfwprintf, None, <wchar.h>)
+SYMBOL(vfwscanf, std::, <cwchar>)
+SYMBOL(vfwscanf, None, <cwchar>)
+SYMBOL(vfwscanf, None, <wchar.h>)
+SYMBOL(visit, std::, <variant>)
+SYMBOL(visit_format_arg, std::, <format>)
+SYMBOL(void_t, std::, <type_traits>)
+SYMBOL(vprintf, std::, <cstdio>)
+SYMBOL(vprintf, None, <cstdio>)
+SYMBOL(vprintf, None, <stdio.h>)
+SYMBOL(vscanf, std::, <cstdio>)
+SYMBOL(vscanf, None, <cstdio>)
+SYMBOL(vscanf, None, <stdio.h>)
+SYMBOL(vsnprintf, std::, <cstdio>)
+SYMBOL(vsnprintf, None, <cstdio>)
+SYMBOL(vsnprintf, None, <stdio.h>)
+SYMBOL(vsprintf, std::, <cstdio>)
+SYMBOL(vsprintf, None, <cstdio>)
+SYMBOL(vsprintf, None, <stdio.h>)
+SYMBOL(vsscanf, std::, <cstdio>)
+SYMBOL(vsscanf, None, <cstdio>)
+SYMBOL(vsscanf, None, <stdio.h>)
+SYMBOL(vswprintf, std::, <cwchar>)
+SYMBOL(vswprintf, None, <cwchar>)
+SYMBOL(vswprintf, None, <wchar.h>)
+SYMBOL(vswscanf, std::, <cwchar>)
+SYMBOL(vswscanf, None, <cwchar>)
+SYMBOL(vswscanf, None, <wchar.h>)
+SYMBOL(vwprintf, std::, <cwchar>)
+SYMBOL(vwprintf, None, <cwchar>)
+SYMBOL(vwprintf, None, <wchar.h>)
+SYMBOL(vwscanf, std::, <cwchar>)
+SYMBOL(vwscanf, None, <cwchar>)
+SYMBOL(vwscanf, None, <wchar.h>)
+SYMBOL(wbuffer_convert, std::, <locale>)
+SYMBOL(wcerr, std::, <iostream>)
+SYMBOL(wcin, std::, <iostream>)
+SYMBOL(wclog, std::, <iostream>)
+SYMBOL(wcmatch, std::, <regex>)
+SYMBOL(wcout, std::, <iostream>)
+SYMBOL(wcregex_iterator, std::, <regex>)
+SYMBOL(wcregex_token_iterator, std::, <regex>)
+SYMBOL(wcrtomb, std::, <cwchar>)
+SYMBOL(wcrtomb, None, <cwchar>)
+SYMBOL(wcrtomb, None, <wchar.h>)
+SYMBOL(wcscat, std::, <cwchar>)
+SYMBOL(wcscat, None, <cwchar>)
+SYMBOL(wcscat, None, <wchar.h>)
+SYMBOL(wcschr, std::, <cwchar>)
+SYMBOL(wcschr, None, <cwchar>)
+SYMBOL(wcschr, None, <wchar.h>)
+SYMBOL(wcscmp, std::, <cwchar>)
+SYMBOL(wcscmp, None, <cwchar>)
+SYMBOL(wcscmp, None, <wchar.h>)
+SYMBOL(wcscoll, std::, <cwchar>)
+SYMBOL(wcscoll, None, <cwchar>)
+SYMBOL(wcscoll, None, <wchar.h>)
+SYMBOL(wcscpy, std::, <cwchar>)
+SYMBOL(wcscpy, None, <cwchar>)
+SYMBOL(wcscpy, None, <wchar.h>)
+SYMBOL(wcscspn, std::, <cwchar>)
+SYMBOL(wcscspn, None, <cwchar>)
+SYMBOL(wcscspn, None, <wchar.h>)
+SYMBOL(wcsftime, std::, <cwchar>)
+SYMBOL(wcsftime, None, <cwchar>)
+SYMBOL(wcsftime, None, <wchar.h>)
+SYMBOL(wcslen, std::, <cwchar>)
+SYMBOL(wcslen, None, <cwchar>)
+SYMBOL(wcslen, None, <wchar.h>)
+SYMBOL(wcsncat, std::, <cwchar>)
+SYMBOL(wcsncat, None, <cwchar>)
+SYMBOL(wcsncat, None, <wchar.h>)
+SYMBOL(wcsncmp, std::, <cwchar>)
+SYMBOL(wcsncmp, None, <cwchar>)
+SYMBOL(wcsncmp, None, <wchar.h>)
+SYMBOL(wcsncpy, std::, <cwchar>)
+SYMBOL(wcsncpy, None, <cwchar>)
+SYMBOL(wcsncpy, None, <wchar.h>)
+SYMBOL(wcspbrk, std::, <cwchar>)
+SYMBOL(wcspbrk, None, <cwchar>)
+SYMBOL(wcspbrk, None, <wchar.h>)
+SYMBOL(wcsrchr, std::, <cwchar>)
+SYMBOL(wcsrchr, None, <cwchar>)
+SYMBOL(wcsrchr, None, <wchar.h>)
+SYMBOL(wcsrtombs, std::, <cwchar>)
+SYMBOL(wcsrtombs, None, <cwchar>)
+SYMBOL(wcsrtombs, None, <wchar.h>)
+SYMBOL(wcsspn, std::, <cwchar>)
+SYMBOL(wcsspn, None, <cwchar>)
+SYMBOL(wcsspn, None, <wchar.h>)
+SYMBOL(wcsstr, std::, <cwchar>)
+SYMBOL(wcsstr, None, <cwchar>)
+SYMBOL(wcsstr, None, <wchar.h>)
+SYMBOL(wcstod, std::, <cwchar>)
+SYMBOL(wcstod, None, <cwchar>)
+SYMBOL(wcstod, None, <wchar.h>)
+SYMBOL(wcstof, std::, <cwchar>)
+SYMBOL(wcstof, None, <cwchar>)
+SYMBOL(wcstof, None, <wchar.h>)
+SYMBOL(wcstoimax, std::, <cinttypes>)
+SYMBOL(wcstoimax, None, <cinttypes>)
+SYMBOL(wcstoimax, None, <inttypes.h>)
+SYMBOL(wcstok, std::, <cwchar>)
+SYMBOL(wcstok, None, <cwchar>)
+SYMBOL(wcstok, None, <wchar.h>)
+SYMBOL(wcstol, std::, <cwchar>)
+SYMBOL(wcstol, None, <cwchar>)
+SYMBOL(wcstol, None, <wchar.h>)
+SYMBOL(wcstold, std::, <cwchar>)
+SYMBOL(wcstold, None, <cwchar>)
+SYMBOL(wcstold, None, <wchar.h>)
+SYMBOL(wcstoll, std::, <cwchar>)
+SYMBOL(wcstoll, None, <cwchar>)
+SYMBOL(wcstoll, None, <wchar.h>)
+SYMBOL(wcstombs, std::, <cstdlib>)
+SYMBOL(wcstombs, None, <cstdlib>)
+SYMBOL(wcstombs, None, <stdlib.h>)
+SYMBOL(wcstoul, std::, <cwchar>)
+SYMBOL(wcstoul, None, <cwchar>)
+SYMBOL(wcstoul, None, <wchar.h>)
+SYMBOL(wcstoull, std::, <cwchar>)
+SYMBOL(wcstoull, None, <cwchar>)
+SYMBOL(wcstoull, None, <wchar.h>)
+SYMBOL(wcstoumax, std::, <cinttypes>)
+SYMBOL(wcstoumax, None, <cinttypes>)
+SYMBOL(wcstoumax, None, <inttypes.h>)
+SYMBOL(wcsub_match, std::, <regex>)
+SYMBOL(wcsxfrm, std::, <cwchar>)
+SYMBOL(wcsxfrm, None, <cwchar>)
+SYMBOL(wcsxfrm, None, <wchar.h>)
+SYMBOL(wctob, std::, <cwchar>)
+SYMBOL(wctob, None, <cwchar>)
+SYMBOL(wctob, None, <wchar.h>)
+SYMBOL(wctomb, std::, <cstdlib>)
+SYMBOL(wctomb, None, <cstdlib>)
+SYMBOL(wctomb, None, <stdlib.h>)
+SYMBOL(wctrans, std::, <cwctype>)
+SYMBOL(wctrans, None, <cwctype>)
+SYMBOL(wctrans, None, <wctype.h>)
+SYMBOL(wctrans_t, std::, <cwctype>)
+SYMBOL(wctrans_t, None, <cwctype>)
+SYMBOL(wctrans_t, None, <wctype.h>)
+SYMBOL(wctype, std::, <cwctype>)
+SYMBOL(wctype, None, <cwctype>)
+SYMBOL(wctype, None, <wctype.h>)
+SYMBOL(wctype_t, std::, <cwctype>)
+SYMBOL(wctype_t, None, <cwctype>)
+SYMBOL(wctype_t, None, <wctype.h>)
+SYMBOL(weak_order, std::, <compare>)
+SYMBOL(weak_ordering, std::, <compare>)
+SYMBOL(weak_ptr, std::, <memory>)
+SYMBOL(weakly_incrementable, std::, <iterator>)
+SYMBOL(weibull_distribution, std::, <random>)
+SYMBOL(wfilebuf, std::, <streambuf>)
+SYMBOL(wfilebuf, std::, <iostream>)
+SYMBOL(wfilebuf, std::, <iosfwd>)
+SYMBOL(wformat_args, std::, <format>)
+SYMBOL(wformat_context, std::, <format>)
+SYMBOL(wformat_parse_context, std::, <format>)
+SYMBOL(wfstream, std::, <fstream>)
+SYMBOL(wfstream, std::, <iosfwd>)
+SYMBOL(wifstream, std::, <fstream>)
+SYMBOL(wifstream, std::, <iosfwd>)
+SYMBOL(wios, std::, <ios>)
+SYMBOL(wios, std::, <iostream>)
+SYMBOL(wios, std::, <iosfwd>)
+SYMBOL(wiostream, std::, <istream>)
+SYMBOL(wiostream, std::, <iostream>)
+SYMBOL(wiostream, std::, <iosfwd>)
+SYMBOL(wispanstream, std::, <spanstream>)
+SYMBOL(wispanstream, std::, <iosfwd>)
+SYMBOL(wistream, std::, <istream>)
+SYMBOL(wistream, std::, <iostream>)
+SYMBOL(wistream, std::, <iosfwd>)
+SYMBOL(wistringstream, std::, <sstream>)
+SYMBOL(wistringstream, std::, <iosfwd>)
+SYMBOL(wmemchr, std::, <cwchar>)
+SYMBOL(wmemchr, None, <cwchar>)
+SYMBOL(wmemchr, None, <wchar.h>)
+SYMBOL(wmemcmp, std::, <cwchar>)
+SYMBOL(wmemcmp, None, <cwchar>)
+SYMBOL(wmemcmp, None, <wchar.h>)
+SYMBOL(wmemcpy, std::, <cwchar>)
+SYMBOL(wmemcpy, None, <cwchar>)
+SYMBOL(wmemcpy, None, <wchar.h>)
+SYMBOL(wmemmove, std::, <cwchar>)
+SYMBOL(wmemmove, None, <cwchar>)
+SYMBOL(wmemmove, None, <wchar.h>)
+SYMBOL(wmemset, std::, <cwchar>)
+SYMBOL(wmemset, None, <cwchar>)
+SYMBOL(wmemset, None, <wchar.h>)
+SYMBOL(wofstream, std::, <fstream>)
+SYMBOL(wofstream, std::, <iosfwd>)
+SYMBOL(wospanstream, std::, <spanstream>)
+SYMBOL(wospanstream, std::, <iosfwd>)
+SYMBOL(wostream, std::, <ostream>)
+SYMBOL(wostream, std::, <iostream>)
+SYMBOL(wostream, std::, <iosfwd>)
+SYMBOL(wostringstream, std::, <sstream>)
+SYMBOL(wostringstream, std::, <iosfwd>)
+SYMBOL(wosyncstream, std::, <syncstream>)
+SYMBOL(wosyncstream, std::, <iosfwd>)
+SYMBOL(wprintf, std::, <cwchar>)
+SYMBOL(wprintf, None, <cwchar>)
+SYMBOL(wprintf, None, <wchar.h>)
+SYMBOL(wregex, std::, <regex>)
+SYMBOL(ws, std::, <istream>)
+SYMBOL(ws, std::, <iostream>)
+SYMBOL(wscanf, std::, <cwchar>)
+SYMBOL(wscanf, None, <cwchar>)
+SYMBOL(wscanf, None, <wchar.h>)
+SYMBOL(wsmatch, std::, <regex>)
+SYMBOL(wspanbuf, std::, <spanstream>)
+SYMBOL(wspanbuf, std::, <iosfwd>)
+SYMBOL(wspanstream, std::, <spanstream>)
+SYMBOL(wspanstream, std::, <iosfwd>)
+SYMBOL(wsregex_iterator, std::, <regex>)
+SYMBOL(wsregex_token_iterator, std::, <regex>)
+SYMBOL(wssub_match, std::, <regex>)
+SYMBOL(wstreambuf, std::, <streambuf>)
+SYMBOL(wstreambuf, std::, <iostream>)
+SYMBOL(wstreambuf, std::, <iosfwd>)
+SYMBOL(wstreampos, std::, <iosfwd>)
+SYMBOL(wstreampos, std::, <iosfwd>)
+SYMBOL(wstring, std::, <string>)
+SYMBOL(wstring_convert, std::, <locale>)
+SYMBOL(wstring_view, std::, <string_view>)
+SYMBOL(wstringbuf, std::, <sstream>)
+SYMBOL(wstringbuf, std::, <iosfwd>)
+SYMBOL(wstringstream, std::, <sstream>)
+SYMBOL(wstringstream, std::, <iosfwd>)
+SYMBOL(wsyncbuf, std::, <syncstream>)
+SYMBOL(wsyncbuf, std::, <iosfwd>)
+SYMBOL(yocto, std::, <ratio>)
+SYMBOL(yotta, std::, <ratio>)
+SYMBOL(zepto, std::, <ratio>)
+SYMBOL(zetta, std::, <ratio>)
+SYMBOL(April, std::chrono::, <chrono>)
+SYMBOL(August, std::chrono::, <chrono>)
+SYMBOL(December, std::chrono::, <chrono>)
+SYMBOL(February, std::chrono::, <chrono>)
+SYMBOL(Friday, std::chrono::, <chrono>)
+SYMBOL(January, std::chrono::, <chrono>)
+SYMBOL(July, std::chrono::, <chrono>)
+SYMBOL(June, std::chrono::, <chrono>)
+SYMBOL(March, std::chrono::, <chrono>)
+SYMBOL(May, std::chrono::, <chrono>)
+SYMBOL(Monday, std::chrono::, <chrono>)
+SYMBOL(November, std::chrono::, <chrono>)
+SYMBOL(October, std::chrono::, <chrono>)
+SYMBOL(Saturday, std::chrono::, <chrono>)
+SYMBOL(September, std::chrono::, <chrono>)
+SYMBOL(Sunday, std::chrono::, <chrono>)
+SYMBOL(Thursday, std::chrono::, <chrono>)
+SYMBOL(Tuesday, std::chrono::, <chrono>)
+SYMBOL(Wednesday, std::chrono::, <chrono>)
+SYMBOL(abs, std::chrono::, <chrono>)
+SYMBOL(ambiguous_local_time, std::chrono::, <chrono>)
+SYMBOL(choose, std::chrono::, <chrono>)
+SYMBOL(clock_cast, std::chrono::, <chrono>)
+SYMBOL(clock_time_conversion, std::chrono::, <chrono>)
+SYMBOL(current_zone, std::chrono::, <chrono>)
+SYMBOL(day, std::chrono::, <chrono>)
+SYMBOL(duration, std::chrono::, <chrono>)
+SYMBOL(duration_cast, std::chrono::, <chrono>)
+SYMBOL(duration_values, std::chrono::, <chrono>)
+SYMBOL(file_clock, std::chrono::, <chrono>)
+SYMBOL(file_seconds, std::chrono::, <chrono>)
+SYMBOL(file_time, std::chrono::, <chrono>)
+SYMBOL(get_leap_second_info, std::chrono::, <chrono>)
+SYMBOL(gps_clock, std::chrono::, <chrono>)
+SYMBOL(gps_seconds, std::chrono::, <chrono>)
+SYMBOL(gps_time, std::chrono::, <chrono>)
+SYMBOL(hh_mm_ss, std::chrono::, <chrono>)
+SYMBOL(high_resolution_clock, std::chrono::, <chrono>)
+SYMBOL(hours, std::chrono::, <chrono>)
+SYMBOL(is_am, std::chrono::, <chrono>)
+SYMBOL(is_clock, std::chrono::, <chrono>)
+SYMBOL(is_clock_v, std::chrono::, <chrono>)
+SYMBOL(is_pm, std::chrono::, <chrono>)
+SYMBOL(last, std::chrono::, <chrono>)
+SYMBOL(last_spec, std::chrono::, <chrono>)
+SYMBOL(leap_second, std::chrono::, <chrono>)
+SYMBOL(leap_second_info, std::chrono::, <chrono>)
+SYMBOL(local_info, std::chrono::, <chrono>)
+SYMBOL(local_seconds, std::chrono::, <chrono>)
+SYMBOL(local_t, std::chrono::, <chrono>)
+SYMBOL(local_time, std::chrono::, <chrono>)
+SYMBOL(local_time_format, std::chrono::, <chrono>)
+SYMBOL(locate_zone, std::chrono::, <chrono>)
+SYMBOL(make12, std::chrono::, <chrono>)
+SYMBOL(make24, std::chrono::, <chrono>)
+SYMBOL(microseconds, std::chrono::, <chrono>)
+SYMBOL(milliseconds, std::chrono::, <chrono>)
+SYMBOL(minutes, std::chrono::, <chrono>)
+SYMBOL(month, std::chrono::, <chrono>)
+SYMBOL(month_day, std::chrono::, <chrono>)
+SYMBOL(month_day_last, std::chrono::, <chrono>)
+SYMBOL(month_weekday, std::chrono::, <chrono>)
+SYMBOL(month_weekday_last, std::chrono::, <chrono>)
+SYMBOL(nanoseconds, std::chrono::, <chrono>)
+SYMBOL(nonexistent_local_time, std::chrono::, <chrono>)
+SYMBOL(parse, std::chrono::, <chrono>)
+SYMBOL(seconds, std::chrono::, <chrono>)
+SYMBOL(steady_clock, std::chrono::, <chrono>)
+SYMBOL(sys_days, std::chrono::, <chrono>)
+SYMBOL(sys_info, std::chrono::, <chrono>)
+SYMBOL(sys_seconds, std::chrono::, <chrono>)
+SYMBOL(sys_time, std::chrono::, <chrono>)
+SYMBOL(system_clock, std::chrono::, <chrono>)
+SYMBOL(tai_clock, std::chrono::, <chrono>)
+SYMBOL(tai_seconds, std::chrono::, <chrono>)
+SYMBOL(tai_time, std::chrono::, <chrono>)
+SYMBOL(time_point, std::chrono::, <chrono>)
+SYMBOL(time_point_cast, std::chrono::, <chrono>)
+SYMBOL(time_zone, std::chrono::, <chrono>)
+SYMBOL(time_zone_link, std::chrono::, <chrono>)
+SYMBOL(treat_as_floating_point, std::chrono::, <chrono>)
+SYMBOL(treat_as_floating_point_v, std::chrono::, <chrono>)
+SYMBOL(tzdb, std::chrono::, <chrono>)
+SYMBOL(tzdb_list, std::chrono::, <chrono>)
+SYMBOL(utc_clock, std::chrono::, <chrono>)
+SYMBOL(utc_seconds, std::chrono::, <chrono>)
+SYMBOL(utc_time, std::chrono::, <chrono>)
+SYMBOL(weekday, std::chrono::, <chrono>)
+SYMBOL(weekday_indexed, std::chrono::, <chrono>)
+SYMBOL(weekday_last, std::chrono::, <chrono>)
+SYMBOL(year, std::chrono::, <chrono>)
+SYMBOL(year_month, std::chrono::, <chrono>)
+SYMBOL(year_month_day, std::chrono::, <chrono>)
+SYMBOL(year_month_day_last, std::chrono::, <chrono>)
+SYMBOL(year_month_weekday, std::chrono::, <chrono>)
+SYMBOL(year_month_weekday_last, std::chrono::, <chrono>)
+SYMBOL(zoned_seconds, std::chrono::, <chrono>)
+SYMBOL(zoned_time, std::chrono::, <chrono>)
+SYMBOL(zoned_traits, std::chrono::, <chrono>)
+SYMBOL(par, std::execution::, <execution>)
+SYMBOL(par_unseq, std::execution::, <execution>)
+SYMBOL(parallel_policy, std::execution::, <execution>)
+SYMBOL(parallel_unsequenced_policy, std::execution::, <execution>)
+SYMBOL(seq, std::execution::, <execution>)
+SYMBOL(sequenced_policy, std::execution::, <execution>)
+SYMBOL(unseq, std::execution::, <execution>)
+SYMBOL(unsequenced_policy, std::execution::, <execution>)
+SYMBOL(absolute, std::filesystem::, <filesystem>)
+SYMBOL(canonical, std::filesystem::, <filesystem>)
+SYMBOL(copy, std::filesystem::, <filesystem>)
+SYMBOL(copy_file, std::filesystem::, <filesystem>)
+SYMBOL(copy_options, std::filesystem::, <filesystem>)
+SYMBOL(copy_symlink, std::filesystem::, <filesystem>)
+SYMBOL(create_directories, std::filesystem::, <filesystem>)
+SYMBOL(create_directory, std::filesystem::, <filesystem>)
+SYMBOL(create_directory_symlink, std::filesystem::, <filesystem>)
+SYMBOL(create_hard_link, std::filesystem::, <filesystem>)
+SYMBOL(create_symlink, std::filesystem::, <filesystem>)
+SYMBOL(current_path, std::filesystem::, <filesystem>)
+SYMBOL(directory_entry, std::filesystem::, <filesystem>)
+SYMBOL(directory_iterator, std::filesystem::, <filesystem>)
+SYMBOL(directory_options, std::filesystem::, <filesystem>)
+SYMBOL(equivalent, std::filesystem::, <filesystem>)
+SYMBOL(exists, std::filesystem::, <filesystem>)
+SYMBOL(file_size, std::filesystem::, <filesystem>)
+SYMBOL(file_status, std::filesystem::, <filesystem>)
+SYMBOL(file_time_type, std::filesystem::, <filesystem>)
+SYMBOL(file_type, std::filesystem::, <filesystem>)
+SYMBOL(filesystem_error, std::filesystem::, <filesystem>)
+SYMBOL(hard_link_count, std::filesystem::, <filesystem>)
+SYMBOL(hash_value, std::filesystem::, <filesystem>)
+SYMBOL(is_block_file, std::filesystem::, <filesystem>)
+SYMBOL(is_character_file, std::filesystem::, <filesystem>)
+SYMBOL(is_directory, std::filesystem::, <filesystem>)
+SYMBOL(is_empty, std::filesystem::, <filesystem>)
+SYMBOL(is_fifo, std::filesystem::, <filesystem>)
+SYMBOL(is_other, std::filesystem::, <filesystem>)
+SYMBOL(is_regular_file, std::filesystem::, <filesystem>)
+SYMBOL(is_socket, std::filesystem::, <filesystem>)
+SYMBOL(is_symlink, std::filesystem::, <filesystem>)
+SYMBOL(last_write_time, std::filesystem::, <filesystem>)
+SYMBOL(path, std::filesystem::, <filesystem>)
+SYMBOL(perm_options, std::filesystem::, <filesystem>)
+SYMBOL(permissions, std::filesystem::, <filesystem>)
+SYMBOL(perms, std::filesystem::, <filesystem>)
+SYMBOL(proximate, std::filesystem::, <filesystem>)
+SYMBOL(read_symlink, std::filesystem::, <filesystem>)
+SYMBOL(recursive_directory_iterator, std::filesystem::, <filesystem>)
+SYMBOL(relative, std::filesystem::, <filesystem>)
+SYMBOL(remove, std::filesystem::, <filesystem>)
+SYMBOL(remove_all, std::filesystem::, <filesystem>)
+SYMBOL(rename, std::filesystem::, <filesystem>)
+SYMBOL(resize_file, std::filesystem::, <filesystem>)
+SYMBOL(space, std::filesystem::, <filesystem>)
+SYMBOL(space_info, std::filesystem::, <filesystem>)
+SYMBOL(status, std::filesystem::, <filesystem>)
+SYMBOL(status_known, std::filesystem::, <filesystem>)
+SYMBOL(symlink_status, std::filesystem::, <filesystem>)
+SYMBOL(temp_directory_path, std::filesystem::, <filesystem>)
+SYMBOL(u8path, std::filesystem::, <filesystem>)
+SYMBOL(weakly_canonical, std::filesystem::, <filesystem>)
+SYMBOL(e, std::numbers::, <numbers>)
+SYMBOL(e_v, std::numbers::, <numbers>)
+SYMBOL(egamma, std::numbers::, <numbers>)
+SYMBOL(egamma_v, std::numbers::, <numbers>)
+SYMBOL(inv_pi, std::numbers::, <numbers>)
+SYMBOL(inv_pi_v, std::numbers::, <numbers>)
+SYMBOL(inv_sqrt3, std::numbers::, <numbers>)
+SYMBOL(inv_sqrt3_v, std::numbers::, <numbers>)
+SYMBOL(inv_sqrtpi, std::numbers::, <numbers>)
+SYMBOL(inv_sqrtpi_v, std::numbers::, <numbers>)
+SYMBOL(ln10, std::numbers::, <numbers>)
+SYMBOL(ln10_v, std::numbers::, <numbers>)
+SYMBOL(ln2, std::numbers::, <numbers>)
+SYMBOL(ln2_v, std::numbers::, <numbers>)
+SYMBOL(log10e, std::numbers::, <numbers>)
+SYMBOL(log10e_v, std::numbers::, <numbers>)
+SYMBOL(log2e, std::numbers::, <numbers>)
+SYMBOL(log2e_v, std::numbers::, <numbers>)
+SYMBOL(phi, std::numbers::, <numbers>)
+SYMBOL(phi_v, std::numbers::, <numbers>)
+SYMBOL(pi, std::numbers::, <numbers>)
+SYMBOL(pi_v, std::numbers::, <numbers>)
+SYMBOL(sqrt2, std::numbers::, <numbers>)
+SYMBOL(sqrt2_v, std::numbers::, <numbers>)
+SYMBOL(sqrt3, std::numbers::, <numbers>)
+SYMBOL(sqrt3_v, std::numbers::, <numbers>)
+SYMBOL(basic_string, std::pmr::, <string>)
+SYMBOL(cmatch, std::pmr::, <regex>)
+SYMBOL(deque, std::pmr::, <deque>)
+SYMBOL(forward_list, std::pmr::, <forward_list>)
+SYMBOL(get_default_resource, std::pmr::, <memory_resource>)
+SYMBOL(list, std::pmr::, <list>)
+SYMBOL(map, std::pmr::, <map>)
+SYMBOL(match_results, std::pmr::, <regex>)
+SYMBOL(memory_resource, std::pmr::, <memory_resource>)
+SYMBOL(monotonic_buffer_resource, std::pmr::, <memory_resource>)
+SYMBOL(multimap, std::pmr::, <map>)
+SYMBOL(multiset, std::pmr::, <set>)
+SYMBOL(new_delete_resource, std::pmr::, <memory_resource>)
+SYMBOL(null_memory_resource, std::pmr::, <memory_resource>)
+SYMBOL(polymorphic_allocator, std::pmr::, <memory_resource>)
+SYMBOL(pool_options, std::pmr::, <memory_resource>)
+SYMBOL(set, std::pmr::, <set>)
+SYMBOL(set_default_resource, std::pmr::, <memory_resource>)
+SYMBOL(smatch, std::pmr::, <regex>)
+SYMBOL(stacktrace, std::pmr::, <stacktrace>)
+SYMBOL(string, std::pmr::, <string>)
+SYMBOL(synchronized_pool_resource, std::pmr::, <memory_resource>)
+SYMBOL(u16string, std::pmr::, <string>)
+SYMBOL(u32string, std::pmr::, <string>)
+SYMBOL(u8string, std::pmr::, <string>)
+SYMBOL(unordered_map, std::pmr::, <unordered_map>)
+SYMBOL(unordered_multimap, std::pmr::, <unordered_map>)
+SYMBOL(unordered_multiset, std::pmr::, <unordered_set>)
+SYMBOL(unordered_set, std::pmr::, <unordered_set>)
+SYMBOL(unsynchronized_pool_resource, std::pmr::, <memory_resource>)
+SYMBOL(vector, std::pmr::, <vector>)
+SYMBOL(wcmatch, std::pmr::, <regex>)
+SYMBOL(wsmatch, std::pmr::, <regex>)
+SYMBOL(wstring, std::pmr::, <string>)
+SYMBOL(adjacent_find, std::ranges::, <algorithm>)
+SYMBOL(advance, std::ranges::, <iterator>)
+SYMBOL(all_of, std::ranges::, <algorithm>)
+SYMBOL(any_of, std::ranges::, <algorithm>)
+SYMBOL(as_const_view, std::ranges::, <ranges>)
+SYMBOL(as_rvalue_view, std::ranges::, <ranges>)
+SYMBOL(basic_istream_view, std::ranges::, <ranges>)
+SYMBOL(begin, std::ranges::, <ranges>)
+SYMBOL(bidirectional_range, std::ranges::, <ranges>)
+SYMBOL(binary_transform_result, std::ranges::, <algorithm>)
+SYMBOL(borrowed_iterator_t, std::ranges::, <ranges>)
+SYMBOL(borrowed_range, std::ranges::, <ranges>)
+SYMBOL(borrowed_subrange_t, std::ranges::, <ranges>)
+SYMBOL(cbegin, std::ranges::, <ranges>)
+SYMBOL(cdata, std::ranges::, <ranges>)
+SYMBOL(cend, std::ranges::, <ranges>)
+SYMBOL(clamp, std::ranges::, <algorithm>)
+SYMBOL(common_range, std::ranges::, <ranges>)
+SYMBOL(common_view, std::ranges::, <ranges>)
+SYMBOL(const_iterator_t, std::ranges::, <ranges>)
+SYMBOL(constant_range, std::ranges::, <ranges>)
+SYMBOL(construct_at, std::ranges::, <memory>)
+SYMBOL(contains, std::ranges::, <algorithm>)
+SYMBOL(contains_subrange, std::ranges::, <algorithm>)
+SYMBOL(contiguous_range, std::ranges::, <ranges>)
+SYMBOL(copy, std::ranges::, <algorithm>)
+SYMBOL(copy_backward, std::ranges::, <algorithm>)
+SYMBOL(copy_backward_result, std::ranges::, <algorithm>)
+SYMBOL(copy_if, std::ranges::, <algorithm>)
+SYMBOL(copy_if_result, std::ranges::, <algorithm>)
+SYMBOL(copy_n, std::ranges::, <algorithm>)
+SYMBOL(copy_n_result, std::ranges::, <algorithm>)
+SYMBOL(copy_result, std::ranges::, <algorithm>)
+SYMBOL(count, std::ranges::, <algorithm>)
+SYMBOL(count_if, std::ranges::, <algorithm>)
+SYMBOL(crbegin, std::ranges::, <ranges>)
+SYMBOL(crend, std::ranges::, <ranges>)
+SYMBOL(dangling, std::ranges::, <ranges>)
+SYMBOL(data, std::ranges::, <ranges>)
+SYMBOL(destroy, std::ranges::, <memory>)
+SYMBOL(destroy_at, std::ranges::, <memory>)
+SYMBOL(destroy_n, std::ranges::, <memory>)
+SYMBOL(disable_sized_range, std::ranges::, <ranges>)
+SYMBOL(distance, std::ranges::, <iterator>)
+SYMBOL(drop_view, std::ranges::, <ranges>)
+SYMBOL(drop_while_view, std::ranges::, <ranges>)
+SYMBOL(elements_view, std::ranges::, <ranges>)
+SYMBOL(empty, std::ranges::, <ranges>)
+SYMBOL(empty_view, std::ranges::, <ranges>)
+SYMBOL(enable_borrowed_range, std::ranges::, <ranges>)
+SYMBOL(enable_view, std::ranges::, <ranges>)
+SYMBOL(end, std::ranges::, <ranges>)
+SYMBOL(ends_with, std::ranges::, <algorithm>)
+SYMBOL(equal, std::ranges::, <algorithm>)
+SYMBOL(equal_to, std::ranges::, <functional>)
+SYMBOL(fill, std::ranges::, <algorithm>)
+SYMBOL(fill_n, std::ranges::, <algorithm>)
+SYMBOL(filter_view, std::ranges::, <ranges>)
+SYMBOL(find, std::ranges::, <algorithm>)
+SYMBOL(find_end, std::ranges::, <algorithm>)
+SYMBOL(find_first_of, std::ranges::, <algorithm>)
+SYMBOL(find_if, std::ranges::, <algorithm>)
+SYMBOL(find_if_not, std::ranges::, <algorithm>)
+SYMBOL(find_last, std::ranges::, <algorithm>)
+SYMBOL(find_last_if, std::ranges::, <algorithm>)
+SYMBOL(find_last_if_not, std::ranges::, <algorithm>)
+SYMBOL(for_each, std::ranges::, <algorithm>)
+SYMBOL(for_each_n, std::ranges::, <algorithm>)
+SYMBOL(for_each_n_result, std::ranges::, <algorithm>)
+SYMBOL(for_each_result, std::ranges::, <algorithm>)
+SYMBOL(forward_range, std::ranges::, <ranges>)
+SYMBOL(generate, std::ranges::, <algorithm>)
+SYMBOL(generate_n, std::ranges::, <algorithm>)
+SYMBOL(greater, std::ranges::, <functional>)
+SYMBOL(greater_equal, std::ranges::, <functional>)
+SYMBOL(in_found_result, std::ranges::, <algorithm>)
+SYMBOL(in_fun_result, std::ranges::, <algorithm>)
+SYMBOL(in_in_out_result, std::ranges::, <algorithm>)
+SYMBOL(in_in_result, std::ranges::, <algorithm>)
+SYMBOL(in_out_out_result, std::ranges::, <algorithm>)
+SYMBOL(in_out_result, std::ranges::, <algorithm>)
+SYMBOL(in_value_result, std::ranges::, <algorithm>)
+SYMBOL(includes, std::ranges::, <algorithm>)
+SYMBOL(inplace_merge, std::ranges::, <algorithm>)
+SYMBOL(input_range, std::ranges::, <ranges>)
+SYMBOL(iota, std::ranges::, <numeric>)
+SYMBOL(iota_result, std::ranges::, <numeric>)
+SYMBOL(iota_view, std::ranges::, <ranges>)
+SYMBOL(is_heap, std::ranges::, <algorithm>)
+SYMBOL(is_heap_until, std::ranges::, <algorithm>)
+SYMBOL(is_partitioned, std::ranges::, <algorithm>)
+SYMBOL(is_permutation, std::ranges::, <algorithm>)
+SYMBOL(is_sorted, std::ranges::, <algorithm>)
+SYMBOL(is_sorted_until, std::ranges::, <algorithm>)
+SYMBOL(istream_view, std::ranges::, <ranges>)
+SYMBOL(iter_move, std::ranges::, <iterator>)
+SYMBOL(iter_swap, std::ranges::, <iterator>)
+SYMBOL(iterator_t, std::ranges::, <ranges>)
+SYMBOL(join_view, std::ranges::, <ranges>)
+SYMBOL(join_with_view, std::ranges::, <ranges>)
+SYMBOL(keys_view, std::ranges::, <ranges>)
+SYMBOL(lazy_split_view, std::ranges::, <ranges>)
+SYMBOL(less, std::ranges::, <functional>)
+SYMBOL(less_equal, std::ranges::, <functional>)
+SYMBOL(lexicographical_compare, std::ranges::, <algorithm>)
+SYMBOL(make_heap, std::ranges::, <algorithm>)
+SYMBOL(max, std::ranges::, <algorithm>)
+SYMBOL(max_element, std::ranges::, <algorithm>)
+SYMBOL(merge, std::ranges::, <algorithm>)
+SYMBOL(merge_result, std::ranges::, <algorithm>)
+SYMBOL(min, std::ranges::, <algorithm>)
+SYMBOL(min_element, std::ranges::, <algorithm>)
+SYMBOL(min_max_result, std::ranges::, <algorithm>)
+SYMBOL(minmax, std::ranges::, <algorithm>)
+SYMBOL(minmax_element, std::ranges::, <algorithm>)
+SYMBOL(minmax_element_result, std::ranges::, <algorithm>)
+SYMBOL(minmax_result, std::ranges::, <algorithm>)
+SYMBOL(mismatch, std::ranges::, <algorithm>)
+SYMBOL(mismatch_result, std::ranges::, <algorithm>)
+SYMBOL(move, std::ranges::, <algorithm>)
+SYMBOL(move_backward, std::ranges::, <algorithm>)
+SYMBOL(move_backward_result, std::ranges::, <algorithm>)
+SYMBOL(move_result, std::ranges::, <algorithm>)
+SYMBOL(next, std::ranges::, <iterator>)
+SYMBOL(next_permutation, std::ranges::, <algorithm>)
+SYMBOL(next_permutation_result, std::ranges::, <algorithm>)
+SYMBOL(none_of, std::ranges::, <algorithm>)
+SYMBOL(not_equal_to, std::ranges::, <functional>)
+SYMBOL(nth_element, std::ranges::, <algorithm>)
+SYMBOL(out_value_result, std::ranges::, <algorithm>)
+SYMBOL(output_range, std::ranges::, <ranges>)
+SYMBOL(owning_view, std::ranges::, <ranges>)
+SYMBOL(partial_sort, std::ranges::, <algorithm>)
+SYMBOL(partial_sort_copy, std::ranges::, <algorithm>)
+SYMBOL(partial_sort_copy_result, std::ranges::, <algorithm>)
+SYMBOL(partition, std::ranges::, <algorithm>)
+SYMBOL(partition_copy, std::ranges::, <algorithm>)
+SYMBOL(partition_copy_result, std::ranges::, <algorithm>)
+SYMBOL(partition_point, std::ranges::, <algorithm>)
+SYMBOL(pop_heap, std::ranges::, <algorithm>)
+SYMBOL(prev, std::ranges::, <iterator>)
+SYMBOL(prev_permutation, std::ranges::, <algorithm>)
+SYMBOL(prev_permutation_result, std::ranges::, <algorithm>)
+SYMBOL(push_heap, std::ranges::, <algorithm>)
+SYMBOL(random_access_range, std::ranges::, <ranges>)
+SYMBOL(range, std::ranges::, <ranges>)
+SYMBOL(range_const_reference_t, std::ranges::, <ranges>)
+SYMBOL(range_difference_t, std::ranges::, <ranges>)
+SYMBOL(range_reference_t, std::ranges::, <ranges>)
+SYMBOL(range_rvalue_reference_t, std::ranges::, <ranges>)
+SYMBOL(range_size_t, std::ranges::, <ranges>)
+SYMBOL(range_value_t, std::ranges::, <ranges>)
+SYMBOL(rbegin, std::ranges::, <ranges>)
+SYMBOL(ref_view, std::ranges::, <ranges>)
+SYMBOL(remove, std::ranges::, <algorithm>)
+SYMBOL(remove_copy, std::ranges::, <algorithm>)
+SYMBOL(remove_copy_if, std::ranges::, <algorithm>)
+SYMBOL(remove_copy_if_result, std::ranges::, <algorithm>)
+SYMBOL(remove_copy_result, std::ranges::, <algorithm>)
+SYMBOL(remove_if, std::ranges::, <algorithm>)
+SYMBOL(rend, std::ranges::, <ranges>)
+SYMBOL(replace, std::ranges::, <algorithm>)
+SYMBOL(replace_copy, std::ranges::, <algorithm>)
+SYMBOL(replace_copy_if, std::ranges::, <algorithm>)
+SYMBOL(replace_copy_if_result, std::ranges::, <algorithm>)
+SYMBOL(replace_copy_result, std::ranges::, <algorithm>)
+SYMBOL(replace_if, std::ranges::, <algorithm>)
+SYMBOL(reverse, std::ranges::, <algorithm>)
+SYMBOL(reverse_copy, std::ranges::, <algorithm>)
+SYMBOL(reverse_copy_result, std::ranges::, <algorithm>)
+SYMBOL(reverse_view, std::ranges::, <ranges>)
+SYMBOL(rotate, std::ranges::, <algorithm>)
+SYMBOL(rotate_copy, std::ranges::, <algorithm>)
+SYMBOL(rotate_copy_result, std::ranges::, <algorithm>)
+SYMBOL(sample, std::ranges::, <algorithm>)
+SYMBOL(search, std::ranges::, <algorithm>)
+SYMBOL(search_n, std::ranges::, <algorithm>)
+SYMBOL(sentinel_t, std::ranges::, <ranges>)
+SYMBOL(set_difference, std::ranges::, <algorithm>)
+SYMBOL(set_difference_result, std::ranges::, <algorithm>)
+SYMBOL(set_intersection, std::ranges::, <algorithm>)
+SYMBOL(set_intersection_result, std::ranges::, <algorithm>)
+SYMBOL(set_symmetric_difference, std::ranges::, <algorithm>)
+SYMBOL(set_symmetric_difference_result, std::ranges::, <algorithm>)
+SYMBOL(set_union, std::ranges::, <algorithm>)
+SYMBOL(set_union_result, std::ranges::, <algorithm>)
+SYMBOL(shift_left, std::ranges::, <algorithm>)
+SYMBOL(shift_right, std::ranges::, <algorithm>)
+SYMBOL(shuffle, std::ranges::, <algorithm>)
+SYMBOL(single_view, std::ranges::, <ranges>)
+SYMBOL(size, std::ranges::, <ranges>)
+SYMBOL(sized_range, std::ranges::, <ranges>)
+SYMBOL(sort, std::ranges::, <algorithm>)
+SYMBOL(sort_heap, std::ranges::, <algorithm>)
+SYMBOL(split_view, std::ranges::, <ranges>)
+SYMBOL(ssize, std::ranges::, <ranges>)
+SYMBOL(stable_partition, std::ranges::, <algorithm>)
+SYMBOL(stable_sort, std::ranges::, <algorithm>)
+SYMBOL(starts_with, std::ranges::, <algorithm>)
+SYMBOL(subrange, std::ranges::, <ranges>)
+SYMBOL(subrange_kind, std::ranges::, <ranges>)
+SYMBOL(swap, std::ranges::, <concepts>)
+SYMBOL(swap_ranges, std::ranges::, <algorithm>)
+SYMBOL(swap_ranges_result, std::ranges::, <algorithm>)
+SYMBOL(take_view, std::ranges::, <ranges>)
+SYMBOL(take_while_view, std::ranges::, <ranges>)
+SYMBOL(to, std::ranges::, <ranges>)
+SYMBOL(transform, std::ranges::, <algorithm>)
+SYMBOL(transform_view, std::ranges::, <ranges>)
+SYMBOL(unary_transform_result, std::ranges::, <algorithm>)
+SYMBOL(uninitialized_copy, std::ranges::, <memory>)
+SYMBOL(uninitialized_copy_n, std::ranges::, <memory>)
+SYMBOL(uninitialized_copy_n_result, std::ranges::, <memory>)
+SYMBOL(uninitialized_copy_result, std::ranges::, <memory>)
+SYMBOL(uninitialized_default_construct, std::ranges::, <memory>)
+SYMBOL(uninitialized_default_construct_n, std::ranges::, <memory>)
+SYMBOL(uninitialized_fill, std::ranges::, <memory>)
+SYMBOL(uninitialized_fill_n, std::ranges::, <memory>)
+SYMBOL(uninitialized_move, std::ranges::, <memory>)
+SYMBOL(uninitialized_move_n, std::ranges::, <memory>)
+SYMBOL(uninitialized_move_n_result, std::ranges::, <memory>)
+SYMBOL(uninitialized_move_result, std::ranges::, <memory>)
+SYMBOL(uninitialized_value_construct, std::ranges::, <memory>)
+SYMBOL(uninitialized_value_construct_n, std::ranges::, <memory>)
+SYMBOL(unique, std::ranges::, <algorithm>)
+SYMBOL(unique_copy, std::ranges::, <algorithm>)
+SYMBOL(unique_copy_result, std::ranges::, <algorithm>)
+SYMBOL(values_view, std::ranges::, <ranges>)
+SYMBOL(view, std::ranges::, <ranges>)
+SYMBOL(view_base, std::ranges::, <ranges>)
+SYMBOL(view_interface, std::ranges::, <ranges>)
+SYMBOL(viewable_range, std::ranges::, <ranges>)
+SYMBOL(wistream_view, std::ranges::, <ranges>)
+SYMBOL(zip_transform_view, std::ranges::, <ranges>)
+SYMBOL(zip_view, std::ranges::, <ranges>)
+SYMBOL(ECMAScript, std::regex_constants::, <regex>)
+SYMBOL(awk, std::regex_constants::, <regex>)
+SYMBOL(basic, std::regex_constants::, <regex>)
+SYMBOL(collate, std::regex_constants::, <regex>)
+SYMBOL(egrep, std::regex_constants::, <regex>)
+SYMBOL(error_backref, std::regex_constants::, <regex>)
+SYMBOL(error_badbrace, std::regex_constants::, <regex>)
+SYMBOL(error_badrepeat, std::regex_constants::, <regex>)
+SYMBOL(error_brace, std::regex_constants::, <regex>)
+SYMBOL(error_brack, std::regex_constants::, <regex>)
+SYMBOL(error_collate, std::regex_constants::, <regex>)
+SYMBOL(error_complexity, std::regex_constants::, <regex>)
+SYMBOL(error_ctype, std::regex_constants::, <regex>)
+SYMBOL(error_escape, std::regex_constants::, <regex>)
+SYMBOL(error_paren, std::regex_constants::, <regex>)
+SYMBOL(error_range, std::regex_constants::, <regex>)
+SYMBOL(error_space, std::regex_constants::, <regex>)
+SYMBOL(error_stack, std::regex_constants::, <regex>)
+SYMBOL(error_type, std::regex_constants::, <regex>)
+SYMBOL(extended, std::regex_constants::, <regex>)
+SYMBOL(format_default, std::regex_constants::, <regex>)
+SYMBOL(format_first_only, std::regex_constants::, <regex>)
+SYMBOL(format_no_copy, std::regex_constants::, <regex>)
+SYMBOL(format_sed, std::regex_constants::, <regex>)
+SYMBOL(grep, std::regex_constants::, <regex>)
+SYMBOL(icase, std::regex_constants::, <regex>)
+SYMBOL(match_any, std::regex_constants::, <regex>)
+SYMBOL(match_continuous, std::regex_constants::, <regex>)
+SYMBOL(match_default, std::regex_constants::, <regex>)
+SYMBOL(match_flag_type, std::regex_constants::, <regex>)
+SYMBOL(match_not_bol, std::regex_constants::, <regex>)
+SYMBOL(match_not_bow, std::regex_constants::, <regex>)
+SYMBOL(match_not_eol, std::regex_constants::, <regex>)
+SYMBOL(match_not_eow, std::regex_constants::, <regex>)
+SYMBOL(match_not_null, std::regex_constants::, <regex>)
+SYMBOL(match_prev_avail, std::regex_constants::, <regex>)
+SYMBOL(multiline, std::regex_constants::, <regex>)
+SYMBOL(nosubs, std::regex_constants::, <regex>)
+SYMBOL(optimize, std::regex_constants::, <regex>)
+SYMBOL(syntax_option_type, std::regex_constants::, <regex>)
+SYMBOL(get_id, std::this_thread::, <thread>)
+SYMBOL(sleep_for, std::this_thread::, <thread>)
+SYMBOL(sleep_until, std::this_thread::, <thread>)
+SYMBOL(yield, std::this_thread::, <thread>)
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdTsSymbolMap.inc b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdTsSymbolMap.inc
new file mode 100644
index 000000000000..2733cb3f2ec4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdTsSymbolMap.inc
@@ -0,0 +1,52 @@
+// These are derived from N4100[fs.filesystem.synopsis], final draft for
+// experimental filesystem.
+SYMBOL(absolute, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(canonical, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(copy, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(copy_file, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(copy_options, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(copy_symlink, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(create_directories, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(create_directory, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(create_directory_symlink, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(create_hard_link, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(create_symlink, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(current_path, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(directory_entry, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(directory_iterator, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(directory_options, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(equivalent, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(exists, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(file_size, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(file_status, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(file_time_type, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(file_type, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(filesystem_error, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(hard_link_count, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_block_file, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_character_file, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_directory, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_empty, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_fifo, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_other, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_regular_file, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_socket, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_symlink, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(last_write_time, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(path, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(permissions, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(perms, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(read_symlink, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(recursive_directory_iterator, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(remove, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(remove_all, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(rename, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(resize_file, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(space, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(space_info, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(status, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(status_known, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(symlink_status, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(system_complete, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(temp_directory_path, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(u8path, std::experimental::filesystem::, <experimental/filesystem>)
diff --git a/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp
index cdda587d0925..a77686996879 100644
--- a/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp
@@ -19,18 +19,18 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
#include <cassert>
#include <memory>
#include <optional>
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
index aecfffcbef1f..9cdeeec0574b 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
@@ -228,16 +228,17 @@ public:
bool VisitDesignatedInitExpr(const DesignatedInitExpr *E) {
for (const DesignatedInitExpr::Designator &D : E->designators()) {
- if (D.isFieldDesignator() && D.getField()) {
- const FieldDecl *Decl = D.getField();
- if (isInUSRSet(Decl)) {
- auto StartLoc = D.getFieldLoc();
- auto EndLoc = D.getFieldLoc();
- RenameInfos.push_back({StartLoc, EndLoc,
- /*FromDecl=*/nullptr,
- /*Context=*/nullptr,
- /*Specifier=*/nullptr,
- /*IgnorePrefixQualifiers=*/true});
+ if (D.isFieldDesignator()) {
+ if (const FieldDecl *Decl = D.getFieldDecl()) {
+ if (isInUSRSet(Decl)) {
+ auto StartLoc = D.getFieldLoc();
+ auto EndLoc = D.getFieldLoc();
+ RenameInfos.push_back({StartLoc, EndLoc,
+ /*FromDecl=*/nullptr,
+ /*Context=*/nullptr,
+ /*Specifier=*/nullptr,
+ /*IgnorePrefixQualifiers=*/true});
+ }
}
}
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
index b13dc9ef4aee..9c2f470e985f 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
@@ -103,66 +103,13 @@ SourceRange spelledForExpandedSlow(SourceLocation First, SourceLocation Last,
// The token `a` is wrapped in 4 arg-expansions, we only want to unwrap 2.
// We distinguish them by whether the macro expands into the target file.
// Fortunately, the target file ones will always appear first.
- auto &ExpMacro =
- SM.getSLocEntry(SM.getFileID(ExpFirst.getExpansionLocStart()))
- .getExpansion();
- if (ExpMacro.getExpansionLocStart().isMacroID())
+ auto ExpFileID = SM.getFileID(ExpFirst.getExpansionLocStart());
+ if (ExpFileID == TargetFile)
break;
// Replace each endpoint with its spelling inside the macro arg.
// (This is getImmediateSpellingLoc without repeating lookups).
First = ExpFirst.getSpellingLoc().getLocWithOffset(DecFirst.second);
Last = ExpLast.getSpellingLoc().getLocWithOffset(DecLast.second);
-
- // Now: how do we adjust the previous/next bounds? Three cases:
- // A) If they are also part of the same macro arg, we translate them too.
- // This will ensure that we don't select any macros nested within the
- // macro arg that cover extra tokens. Critical case:
- // #define ID(X) X
- // ID(prev target) // selecting 'target' succeeds
- // #define LARGE ID(prev target)
- // LARGE // selecting 'target' fails.
- // B) They are not in the macro at all, then their expansion range is a
- // sibling to it, and we can safely substitute that.
- // #define PREV prev
- // #define ID(X) X
- // PREV ID(target) // selecting 'target' succeeds.
- // #define LARGE PREV ID(target)
- // LARGE // selecting 'target' fails.
- // C) They are in a different arg of this macro, or the macro body.
- // Now selecting the whole macro arg is fine, but the whole macro is not.
- // Model this by setting using the edge of the macro call as the bound.
- // #define ID2(X, Y) X Y
- // ID2(prev, target) // selecting 'target' succeeds
- // #define LARGE ID2(prev, target)
- // LARGE // selecting 'target' fails
- auto AdjustBound = [&](SourceLocation &Bound) {
- if (Bound.isInvalid() || !Bound.isMacroID()) // Non-macro must be case B.
- return;
- auto DecBound = SM.getDecomposedLoc(Bound);
- auto &ExpBound = SM.getSLocEntry(DecBound.first).getExpansion();
- if (ExpBound.isMacroArgExpansion() &&
- ExpBound.getExpansionLocStart() == ExpFirst.getExpansionLocStart()) {
- // Case A: translate to (spelling) loc within the macro arg.
- Bound = ExpBound.getSpellingLoc().getLocWithOffset(DecBound.second);
- return;
- }
- while (Bound.isMacroID()) {
- SourceRange Exp = SM.getImmediateExpansionRange(Bound).getAsRange();
- if (Exp.getBegin() == ExpMacro.getExpansionLocStart()) {
- // Case B: bounds become the macro call itself.
- Bound = (&Bound == &Prev) ? Exp.getBegin() : Exp.getEnd();
- return;
- }
- // Either case C, or expansion location will later find case B.
- // We choose the upper bound for Prev and the lower one for Next:
- // ID(prev) target ID(next)
- // ^ ^
- // new-prev new-next
- Bound = (&Bound == &Prev) ? Exp.getEnd() : Exp.getBegin();
- }
- };
- AdjustBound(Prev);
- AdjustBound(Next);
}
// In all remaining cases we need the full containing macros.
@@ -170,9 +117,10 @@ SourceRange spelledForExpandedSlow(SourceLocation First, SourceLocation Last,
SourceRange Candidate =
SM.getExpansionRange(SourceRange(First, Last)).getAsRange();
auto DecFirst = SM.getDecomposedExpansionLoc(Candidate.getBegin());
- auto DecLast = SM.getDecomposedLoc(Candidate.getEnd());
+ auto DecLast = SM.getDecomposedExpansionLoc(Candidate.getEnd());
// Can end up in the wrong file due to bad input or token-pasting shenanigans.
- if (Candidate.isInvalid() || DecFirst.first != TargetFile || DecLast.first != TargetFile)
+ if (Candidate.isInvalid() || DecFirst.first != TargetFile ||
+ DecLast.first != TargetFile)
return SourceRange();
// Check bounds, which may still be inside macros.
if (Prev.isValid()) {
@@ -986,7 +934,7 @@ std::string TokenBuffer::dumpForTests() const {
OS << "\n";
std::vector<FileID> Keys;
- for (auto F : Files)
+ for (const auto &F : Files)
Keys.push_back(F.first);
llvm::sort(Keys);
diff --git a/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp b/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp
index 8966c12ef7c1..46a784e44b93 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp
@@ -43,14 +43,15 @@
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
#include <cassert>
#include <cstring>
#include <memory>
@@ -299,6 +300,31 @@ void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
}
}
+void addExpandedResponseFiles(std::vector<std::string> &CommandLine,
+ llvm::StringRef WorkingDir,
+ llvm::cl::TokenizerCallback Tokenizer,
+ llvm::vfs::FileSystem &FS) {
+ bool SeenRSPFile = false;
+ llvm::SmallVector<const char *, 20> Argv;
+ Argv.reserve(CommandLine.size());
+ for (auto &Arg : CommandLine) {
+ Argv.push_back(Arg.c_str());
+ if (!Arg.empty())
+ SeenRSPFile |= Arg.front() == '@';
+ }
+ if (!SeenRSPFile)
+ return;
+ llvm::BumpPtrAllocator Alloc;
+ llvm::cl::ExpansionContext ECtx(Alloc, Tokenizer);
+ llvm::Error Err =
+ ECtx.setVFS(&FS).setCurrentDir(WorkingDir).expandResponseFiles(Argv);
+ if (Err)
+ llvm::errs() << Err;
+ // Don't assign directly, Argv aliases CommandLine.
+ std::vector<std::string> ExpandedArgv(Argv.begin(), Argv.end());
+ CommandLine = std::move(ExpandedArgv);
+}
+
} // namespace tooling
} // namespace clang
@@ -516,13 +542,11 @@ int ClangTool::run(ToolAction *Action) {
// Remember the working directory in case we need to restore it.
std::string InitialWorkingDir;
- if (RestoreCWD) {
- if (auto CWD = OverlayFileSystem->getCurrentWorkingDirectory()) {
- InitialWorkingDir = std::move(*CWD);
- } else {
- llvm::errs() << "Could not get working directory: "
- << CWD.getError().message() << "\n";
- }
+ if (auto CWD = OverlayFileSystem->getCurrentWorkingDirectory()) {
+ InitialWorkingDir = std::move(*CWD);
+ } else {
+ llvm::errs() << "Could not get working directory: "
+ << CWD.getError().message() << "\n";
}
for (llvm::StringRef File : AbsolutePaths) {
@@ -636,10 +660,6 @@ int ClangTool::buildASTs(std::vector<std::unique_ptr<ASTUnit>> &ASTs) {
return run(&Action);
}
-void ClangTool::setRestoreWorkingDir(bool RestoreCWD) {
- this->RestoreCWD = RestoreCWD;
-}
-
void ClangTool::setPrintErrorMessage(bool PrintErrorMessage) {
this->PrintErrorMessage = PrintErrorMessage;
}
@@ -684,7 +704,7 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
if (!Invocation.run())
return nullptr;
-
+
assert(ASTs.size() == 1);
return std::move(ASTs[0]);
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
index 2198aefddc9f..f2c1b6f8520a 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
@@ -327,7 +327,7 @@ public:
assert(containsNoNullStencils(CaseStencils) &&
"cases of selectBound may not be null");
}
- ~SelectBoundStencil() override{};
+ ~SelectBoundStencil() override {}
llvm::Error eval(const MatchFinder::MatchResult &match,
std::string *result) const override {
diff --git a/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArch.cpp b/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArch.cpp
index 2fdd398c9c67..7ae57b7877e1 100644
--- a/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArch.cpp
+++ b/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArch.cpp
@@ -6,126 +6,48 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements a tool for detecting name of AMDGPU installed in system
-// using HSA. This tool is used by AMDGPU OpenMP driver.
+// This file implements a tool for detecting name of AMDGPU installed in system.
+// This tool is used by AMDGPU OpenMP and HIP driver.
//
//===----------------------------------------------------------------------===//
-#include "llvm/Support/DynamicLibrary.h"
-#include "llvm/Support/Error.h"
-#include <memory>
-#include <string>
-#include <vector>
+#include "clang/Basic/Version.h"
+#include "llvm/Support/CommandLine.h"
-#if DYNAMIC_HSA
-typedef enum {
- HSA_STATUS_SUCCESS = 0x0,
-} hsa_status_t;
+using namespace llvm;
-typedef enum {
- HSA_DEVICE_TYPE_CPU = 0,
- HSA_DEVICE_TYPE_GPU = 1,
-} hsa_device_type_t;
+static cl::opt<bool> Help("h", cl::desc("Alias for -help"), cl::Hidden);
-typedef enum {
- HSA_AGENT_INFO_NAME = 0,
- HSA_AGENT_INFO_DEVICE = 17,
-} hsa_agent_info_t;
+// Mark all our options with this category.
+static cl::OptionCategory AMDGPUArchCategory("amdgpu-arch options");
-typedef struct hsa_agent_s {
- uint64_t handle;
-} hsa_agent_t;
-
-hsa_status_t (*hsa_init)();
-hsa_status_t (*hsa_shut_down)();
-hsa_status_t (*hsa_agent_get_info)(hsa_agent_t, hsa_agent_info_t, void *);
-hsa_status_t (*hsa_iterate_agents)(hsa_status_t (*)(hsa_agent_t, void *),
- void *);
-
-constexpr const char *DynamicHSAPath = "libhsa-runtime64.so";
-
-llvm::Error loadHSA() {
- std::string ErrMsg;
- auto DynlibHandle = std::make_unique<llvm::sys::DynamicLibrary>(
- llvm::sys::DynamicLibrary::getPermanentLibrary(DynamicHSAPath, &ErrMsg));
- if (!DynlibHandle->isValid()) {
- return llvm::createStringError(llvm::inconvertibleErrorCode(),
- "Failed to 'dlopen' %s", DynamicHSAPath);
- }
-#define DYNAMIC_INIT(SYMBOL) \
- { \
- void *SymbolPtr = DynlibHandle->getAddressOfSymbol(#SYMBOL); \
- if (!SymbolPtr) \
- return llvm::createStringError(llvm::inconvertibleErrorCode(), \
- "Failed to 'dlsym' " #SYMBOL); \
- SYMBOL = reinterpret_cast<decltype(SYMBOL)>(SymbolPtr); \
- }
- DYNAMIC_INIT(hsa_init);
- DYNAMIC_INIT(hsa_shut_down);
- DYNAMIC_INIT(hsa_agent_get_info);
- DYNAMIC_INIT(hsa_iterate_agents);
-#undef DYNAMIC_INIT
- return llvm::Error::success();
+static void PrintVersion(raw_ostream &OS) {
+ OS << clang::getClangToolFullVersion("amdgpu-arch") << '\n';
}
-#else
-#if defined(__has_include)
-#if __has_include("hsa/hsa.h")
-#include "hsa/hsa.h"
-#elif __has_include("hsa.h")
-#include "hsa.h"
-#endif
-#include "hsa/hsa.h"
-#endif
-
-llvm::Error loadHSA() { return llvm::Error::success(); }
-#endif
-
-static hsa_status_t iterateAgentsCallback(hsa_agent_t Agent, void *Data) {
- hsa_device_type_t DeviceType;
- hsa_status_t Status =
- hsa_agent_get_info(Agent, HSA_AGENT_INFO_DEVICE, &DeviceType);
-
- // continue only if device type if GPU
- if (Status != HSA_STATUS_SUCCESS || DeviceType != HSA_DEVICE_TYPE_GPU) {
- return Status;
- }
-
- std::vector<std::string> *GPUs =
- static_cast<std::vector<std::string> *>(Data);
- char GPUName[64];
- Status = hsa_agent_get_info(Agent, HSA_AGENT_INFO_NAME, GPUName);
- if (Status != HSA_STATUS_SUCCESS) {
- return Status;
- }
- GPUs->push_back(GPUName);
- return HSA_STATUS_SUCCESS;
-}
+int printGPUsByHSA();
+int printGPUsByHIP();
int main(int argc, char *argv[]) {
- // Attempt to load the HSA runtime.
- if (llvm::Error Err = loadHSA()) {
- logAllUnhandledErrors(std::move(Err), llvm::errs());
- return 1;
- }
-
- hsa_status_t Status = hsa_init();
- if (Status != HSA_STATUS_SUCCESS) {
- return 1;
- }
-
- std::vector<std::string> GPUs;
- Status = hsa_iterate_agents(iterateAgentsCallback, &GPUs);
- if (Status != HSA_STATUS_SUCCESS) {
- return 1;
+ cl::HideUnrelatedOptions(AMDGPUArchCategory);
+
+ cl::SetVersionPrinter(PrintVersion);
+ cl::ParseCommandLineOptions(
+ argc, argv,
+ "A tool to detect the presence of AMDGPU devices on the system. \n\n"
+ "The tool will output each detected GPU architecture separated by a\n"
+ "newline character. If multiple GPUs of the same architecture are found\n"
+ "a string will be printed for each\n");
+
+ if (Help) {
+ cl::PrintHelpMessage();
+ return 0;
}
- for (const auto &GPU : GPUs)
- printf("%s\n", GPU.c_str());
-
- if (GPUs.size() < 1)
- return 1;
+#ifndef _WIN32
+ if (!printGPUsByHSA())
+ return 0;
+#endif
- hsa_shut_down();
- return 0;
+ return printGPUsByHIP();
}
diff --git a/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp b/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp
new file mode 100644
index 000000000000..7c9071be0918
--- /dev/null
+++ b/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp
@@ -0,0 +1,96 @@
+//===- AMDGPUArch.cpp - list AMDGPU installed ----------*- C++ -*---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a tool for detecting name of AMDGPU installed in system
+// using HIP runtime. This tool is used by AMDGPU OpenMP and HIP driver.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+typedef struct {
+ char padding[396];
+ char gcnArchName[256];
+ char padding2[1024];
+} hipDeviceProp_t;
+
+typedef enum {
+ hipSuccess = 0,
+} hipError_t;
+
+typedef hipError_t (*hipGetDeviceCount_t)(int *);
+typedef hipError_t (*hipDeviceGet_t)(int *, int);
+typedef hipError_t (*hipGetDeviceProperties_t)(hipDeviceProp_t *, int);
+
+int printGPUsByHIP() {
+#ifdef _WIN32
+ constexpr const char *DynamicHIPPath = "amdhip64.dll";
+#else
+ constexpr const char *DynamicHIPPath = "libamdhip64.so";
+#endif
+
+ std::string ErrMsg;
+ auto DynlibHandle = std::make_unique<llvm::sys::DynamicLibrary>(
+ llvm::sys::DynamicLibrary::getPermanentLibrary(DynamicHIPPath, &ErrMsg));
+ if (!DynlibHandle->isValid()) {
+ llvm::errs() << "Failed to load " << DynamicHIPPath << ": " << ErrMsg
+ << '\n';
+ return 1;
+ }
+
+#define DYNAMIC_INIT_HIP(SYMBOL) \
+ { \
+ void *SymbolPtr = DynlibHandle->getAddressOfSymbol(#SYMBOL); \
+ if (!SymbolPtr) { \
+ llvm::errs() << "Failed to find symbol " << #SYMBOL << '\n'; \
+ return 1; \
+ } \
+ SYMBOL = reinterpret_cast<decltype(SYMBOL)>(SymbolPtr); \
+ }
+
+ hipGetDeviceCount_t hipGetDeviceCount;
+ hipDeviceGet_t hipDeviceGet;
+ hipGetDeviceProperties_t hipGetDeviceProperties;
+
+ DYNAMIC_INIT_HIP(hipGetDeviceCount);
+ DYNAMIC_INIT_HIP(hipDeviceGet);
+ DYNAMIC_INIT_HIP(hipGetDeviceProperties);
+
+#undef DYNAMIC_INIT_HIP
+
+ int deviceCount;
+ hipError_t err = hipGetDeviceCount(&deviceCount);
+ if (err != hipSuccess) {
+ llvm::errs() << "Failed to get device count\n";
+ return 1;
+ }
+
+ for (int i = 0; i < deviceCount; ++i) {
+ int deviceId;
+ err = hipDeviceGet(&deviceId, i);
+ if (err != hipSuccess) {
+ llvm::errs() << "Failed to get device id for ordinal " << i << '\n';
+ return 1;
+ }
+
+ hipDeviceProp_t prop;
+ err = hipGetDeviceProperties(&prop, deviceId);
+ if (err != hipSuccess) {
+ llvm::errs() << "Failed to get device properties for device " << deviceId
+ << '\n';
+ return 1;
+ }
+ llvm::outs() << prop.gcnArchName << '\n';
+ }
+
+ return 0;
+}
diff --git a/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp b/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp
new file mode 100644
index 000000000000..f82a4890f465
--- /dev/null
+++ b/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp
@@ -0,0 +1,122 @@
+//===- AMDGPUArchLinux.cpp - list AMDGPU installed ------*- C++ -*---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a tool for detecting name of AMDGPU installed in system
+// using HSA on Linux. This tool is used by AMDGPU OpenMP and HIP driver.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Version.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+#include <string>
+#include <vector>
+
+using namespace llvm;
+
+typedef enum {
+ HSA_STATUS_SUCCESS = 0x0,
+} hsa_status_t;
+
+typedef enum {
+ HSA_DEVICE_TYPE_CPU = 0,
+ HSA_DEVICE_TYPE_GPU = 1,
+} hsa_device_type_t;
+
+typedef enum {
+ HSA_AGENT_INFO_NAME = 0,
+ HSA_AGENT_INFO_DEVICE = 17,
+} hsa_agent_info_t;
+
+typedef struct hsa_agent_s {
+ uint64_t handle;
+} hsa_agent_t;
+
+hsa_status_t (*hsa_init)();
+hsa_status_t (*hsa_shut_down)();
+hsa_status_t (*hsa_agent_get_info)(hsa_agent_t, hsa_agent_info_t, void *);
+hsa_status_t (*hsa_iterate_agents)(hsa_status_t (*)(hsa_agent_t, void *),
+ void *);
+
+constexpr const char *DynamicHSAPath = "libhsa-runtime64.so";
+
+llvm::Error loadHSA() {
+ std::string ErrMsg;
+ auto DynlibHandle = std::make_unique<llvm::sys::DynamicLibrary>(
+ llvm::sys::DynamicLibrary::getPermanentLibrary(DynamicHSAPath, &ErrMsg));
+ if (!DynlibHandle->isValid()) {
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Failed to 'dlopen' %s", DynamicHSAPath);
+ }
+#define DYNAMIC_INIT(SYMBOL) \
+ { \
+ void *SymbolPtr = DynlibHandle->getAddressOfSymbol(#SYMBOL); \
+ if (!SymbolPtr) \
+ return llvm::createStringError(llvm::inconvertibleErrorCode(), \
+ "Failed to 'dlsym' " #SYMBOL); \
+ SYMBOL = reinterpret_cast<decltype(SYMBOL)>(SymbolPtr); \
+ }
+ DYNAMIC_INIT(hsa_init);
+ DYNAMIC_INIT(hsa_shut_down);
+ DYNAMIC_INIT(hsa_agent_get_info);
+ DYNAMIC_INIT(hsa_iterate_agents);
+#undef DYNAMIC_INIT
+ return llvm::Error::success();
+}
+
+static hsa_status_t iterateAgentsCallback(hsa_agent_t Agent, void *Data) {
+ hsa_device_type_t DeviceType;
+ hsa_status_t Status =
+ hsa_agent_get_info(Agent, HSA_AGENT_INFO_DEVICE, &DeviceType);
+
+ // continue only if device type if GPU
+ if (Status != HSA_STATUS_SUCCESS || DeviceType != HSA_DEVICE_TYPE_GPU) {
+ return Status;
+ }
+
+ std::vector<std::string> *GPUs =
+ static_cast<std::vector<std::string> *>(Data);
+ char GPUName[64];
+ Status = hsa_agent_get_info(Agent, HSA_AGENT_INFO_NAME, GPUName);
+ if (Status != HSA_STATUS_SUCCESS) {
+ return Status;
+ }
+ GPUs->push_back(GPUName);
+ return HSA_STATUS_SUCCESS;
+}
+
+int printGPUsByHSA() {
+ // Attempt to load the HSA runtime.
+ if (llvm::Error Err = loadHSA()) {
+ logAllUnhandledErrors(std::move(Err), llvm::errs());
+ return 1;
+ }
+
+ hsa_status_t Status = hsa_init();
+ if (Status != HSA_STATUS_SUCCESS) {
+ return 1;
+ }
+
+ std::vector<std::string> GPUs;
+ Status = hsa_iterate_agents(iterateAgentsCallback, &GPUs);
+ if (Status != HSA_STATUS_SUCCESS) {
+ return 1;
+ }
+
+ for (const auto &GPU : GPUs)
+ llvm::outs() << GPU << '\n';
+
+ if (GPUs.size() < 1)
+ return 1;
+
+ hsa_shut_down();
+ return 0;
+}
diff --git a/contrib/llvm-project/clang/tools/clang-repl/ClangRepl.cpp b/contrib/llvm-project/clang/tools/clang-repl/ClangRepl.cpp
index 401a31d34063..f46452d9e10d 100644
--- a/contrib/llvm-project/clang/tools/clang-repl/ClangRepl.cpp
+++ b/contrib/llvm-project/clang/tools/clang-repl/ClangRepl.cpp
@@ -20,9 +20,13 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ManagedStatic.h" // llvm_shutdown
#include "llvm/Support/Signals.h"
-#include "llvm/Support/TargetSelect.h" // llvm::Initialize*
+#include "llvm/Support/TargetSelect.h"
#include <optional>
+static llvm::cl::opt<bool> CudaEnabled("cuda", llvm::cl::Hidden);
+static llvm::cl::opt<std::string> CudaPath("cuda-path", llvm::cl::Hidden);
+static llvm::cl::opt<std::string> OffloadArch("offload-arch", llvm::cl::Hidden);
+
static llvm::cl::list<std::string>
ClangArgs("Xcc",
llvm::cl::desc("Argument to pass to the CompilerInvocation"),
@@ -76,8 +80,11 @@ int main(int argc, const char **argv) {
std::vector<const char *> ClangArgv(ClangArgs.size());
std::transform(ClangArgs.begin(), ClangArgs.end(), ClangArgv.begin(),
[](const std::string &s) -> const char * { return s.data(); });
- llvm::InitializeNativeTarget();
- llvm::InitializeNativeTargetAsmPrinter();
+ // Initialize all targets (required for device offloading)
+ llvm::InitializeAllTargetInfos();
+ llvm::InitializeAllTargets();
+ llvm::InitializeAllTargetMCs();
+ llvm::InitializeAllAsmPrinters();
if (OptHostSupportsJit) {
auto J = llvm::orc::LLJITBuilder().create();
@@ -90,9 +97,30 @@ int main(int argc, const char **argv) {
return 0;
}
+ clang::IncrementalCompilerBuilder CB;
+ CB.SetCompilerArgs(ClangArgv);
+
+ std::unique_ptr<clang::CompilerInstance> DeviceCI;
+ if (CudaEnabled) {
+ if (!CudaPath.empty())
+ CB.SetCudaSDK(CudaPath);
+
+ if (OffloadArch.empty()) {
+ OffloadArch = "sm_35";
+ }
+ CB.SetOffloadArch(OffloadArch);
+
+ DeviceCI = ExitOnErr(CB.CreateCudaDevice());
+ }
+
// FIXME: Investigate if we could use runToolOnCodeWithArgs from tooling. It
// can replace the boilerplate code for creation of the compiler instance.
- auto CI = ExitOnErr(clang::IncrementalCompilerBuilder::create(ClangArgv));
+ std::unique_ptr<clang::CompilerInstance> CI;
+ if (CudaEnabled) {
+ CI = ExitOnErr(CB.CreateCudaHost());
+ } else {
+ CI = ExitOnErr(CB.CreateCpp());
+ }
// Set an error handler, so that any LLVM backend diagnostics go through our
// error handler.
@@ -101,8 +129,23 @@ int main(int argc, const char **argv) {
// Load any requested plugins.
CI->LoadRequestedPlugins();
+ if (CudaEnabled)
+ DeviceCI->LoadRequestedPlugins();
+
+ std::unique_ptr<clang::Interpreter> Interp;
+ if (CudaEnabled) {
+ Interp = ExitOnErr(
+ clang::Interpreter::createWithCUDA(std::move(CI), std::move(DeviceCI)));
+
+ if (CudaPath.empty()) {
+ ExitOnErr(Interp->LoadDynamicLibrary("libcudart.so"));
+ } else {
+ auto CudaRuntimeLibPath = CudaPath + "/lib/libcudart.so";
+ ExitOnErr(Interp->LoadDynamicLibrary(CudaRuntimeLibPath.c_str()));
+ }
+ } else
+ Interp = ExitOnErr(clang::Interpreter::create(std::move(CI)));
- auto Interp = ExitOnErr(clang::Interpreter::create(std::move(CI)));
for (const std::string &input : OptInputs) {
if (auto Err = Interp->ParseAndExecute(input))
llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "error: ");
@@ -113,21 +156,38 @@ int main(int argc, const char **argv) {
if (OptInputs.empty()) {
llvm::LineEditor LE("clang-repl");
// FIXME: Add LE.setListCompleter
+ std::string Input;
while (std::optional<std::string> Line = LE.readLine()) {
- if (*Line == R"(%quit)")
+ llvm::StringRef L = *Line;
+ L = L.trim();
+ if (L.endswith("\\")) {
+ // FIXME: Support #ifdef X \ ...
+ Input += L.drop_back(1);
+ LE.setPrompt("clang-repl... ");
+ continue;
+ }
+
+ Input += L;
+
+ if (Input == R"(%quit)") {
break;
- if (*Line == R"(%undo)") {
+ } else if (Input == R"(%undo)") {
if (auto Err = Interp->Undo()) {
llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "error: ");
HasError = true;
}
- continue;
- }
-
- if (auto Err = Interp->ParseAndExecute(*Line)) {
+ } else if (Input.rfind("%lib ", 0) == 0) {
+ if (auto Err = Interp->LoadDynamicLibrary(Input.data() + 5)) {
+ llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "error: ");
+ HasError = true;
+ }
+ } else if (auto Err = Interp->ParseAndExecute(Input)) {
llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "error: ");
HasError = true;
}
+
+ Input = "";
+ LE.setPrompt("clang-repl> ");
}
}
diff --git a/contrib/llvm-project/clang/tools/driver/cc1_main.cpp b/contrib/llvm-project/clang/tools/driver/cc1_main.cpp
index c79306b6f7d5..9e7f8679b4cb 100644
--- a/contrib/llvm-project/clang/tools/driver/cc1_main.cpp
+++ b/contrib/llvm-project/clang/tools/driver/cc1_main.cpp
@@ -213,9 +213,7 @@ int cc1_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
bool Success = CompilerInvocation::CreateFromArgs(Clang->getInvocation(),
Argv, Diags, Argv0);
- if (Clang->getFrontendOpts().TimeTrace ||
- !Clang->getFrontendOpts().TimeTracePath.empty()) {
- Clang->getFrontendOpts().TimeTrace = 1;
+ if (!Clang->getFrontendOpts().TimeTracePath.empty()) {
llvm::timeTraceProfilerInitialize(
Clang->getFrontendOpts().TimeTraceGranularity, Argv0);
}
@@ -257,17 +255,21 @@ int cc1_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
llvm::TimerGroup::clearAll();
if (llvm::timeTraceProfilerEnabled()) {
- SmallString<128> Path(Clang->getFrontendOpts().OutputFile);
- llvm::sys::path::replace_extension(Path, "json");
- if (!Clang->getFrontendOpts().TimeTracePath.empty()) {
- // replace the suffix to '.json' directly
- SmallString<128> TracePath(Clang->getFrontendOpts().TimeTracePath);
- if (llvm::sys::fs::is_directory(TracePath))
- llvm::sys::path::append(TracePath, llvm::sys::path::filename(Path));
- Path.assign(TracePath);
- }
+ // It is possible that the compiler instance doesn't own a file manager here
+ // if we're compiling a module unit. Since the file manager are owned by AST
+ // when we're compiling a module unit. So the file manager may be invalid
+ // here.
+ //
+ // It should be fine to create file manager here since the file system
+ // options are stored in the compiler invocation and we can recreate the VFS
+ // from the compiler invocation.
+ if (!Clang->hasFileManager())
+ Clang->createFileManager(createVFSFromCompilerInvocation(
+ Clang->getInvocation(), Clang->getDiagnostics()));
+
if (auto profilerOutput = Clang->createOutputFile(
- Path.str(), /*Binary=*/false, /*RemoveFileOnSignal=*/false,
+ Clang->getFrontendOpts().TimeTracePath, /*Binary=*/false,
+ /*RemoveFileOnSignal=*/false,
/*useTemporary=*/false)) {
llvm::timeTraceProfilerWrite(*profilerOutput);
profilerOutput.reset();
diff --git a/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp b/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp
index f944113476fd..3c5926073f02 100644
--- a/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp
+++ b/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp
@@ -19,8 +19,8 @@
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Frontend/Utils.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -44,7 +44,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormattedStream.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
@@ -53,6 +52,8 @@
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
#include <memory>
#include <optional>
#include <system_error>
@@ -97,7 +98,7 @@ struct AssemblerInvocation {
std::string DwarfDebugFlags;
std::string DwarfDebugProducer;
std::string DebugCompilationDir;
- std::map<const std::string, const std::string> DebugPrefixMap;
+ llvm::SmallVector<std::pair<std::string, std::string>, 0> DebugPrefixMap;
llvm::DebugCompressionType CompressDebugSections =
llvm::DebugCompressionType::None;
std::string MainFileName;
@@ -142,6 +143,10 @@ struct AssemblerInvocation {
/// Whether to emit DWARF unwind info.
EmitDwarfUnwindType EmitDwarfUnwind;
+ // Whether to emit compact-unwind for non-canonical entries.
+ // Note: maybe overriden by other constraints.
+ unsigned EmitCompactUnwindNonCanonical : 1;
+
/// The name of the relocation model to use.
std::string RelocationModel;
@@ -181,6 +186,7 @@ public:
DwarfVersion = 0;
EmbedBitcode = 0;
EmitDwarfUnwind = EmitDwarfUnwindType::Default;
+ EmitCompactUnwindNonCanonical = false;
}
static bool CreateFromArgs(AssemblerInvocation &Res,
@@ -275,8 +281,7 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ)) {
auto Split = StringRef(Arg).split('=');
- Opts.DebugPrefixMap.insert(
- {std::string(Split.first), std::string(Split.second)});
+ Opts.DebugPrefixMap.emplace_back(Split.first, Split.second);
}
// Frontend Options
@@ -349,6 +354,9 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
.Case("default", EmitDwarfUnwindType::Default);
}
+ Opts.EmitCompactUnwindNonCanonical =
+ Args.hasArg(OPT_femit_compact_unwind_non_canonical);
+
Opts.AsSecureLogFile = Args.getLastArgValue(OPT_as_secure_log_file);
return Success;
@@ -384,8 +392,8 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts,
MemoryBuffer::getFileOrSTDIN(Opts.InputFile, /*IsText=*/true);
if (std::error_code EC = Buffer.getError()) {
- Error = EC.message();
- return Diags.Report(diag::err_fe_error_reading) << Opts.InputFile;
+ return Diags.Report(diag::err_fe_error_reading)
+ << Opts.InputFile << EC.message();
}
SourceMgr SrcMgr;
@@ -402,6 +410,7 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts,
MCTargetOptions MCOptions;
MCOptions.EmitDwarfUnwind = Opts.EmitDwarfUnwind;
+ MCOptions.EmitCompactUnwindNonCanonical = Opts.EmitCompactUnwindNonCanonical;
MCOptions.AsSecureLogFile = Opts.AsSecureLogFile;
std::unique_ptr<MCAsmInfo> MAI(
diff --git a/contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp b/contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp
index 9dbfc518add9..e97fa3d27756 100644
--- a/contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp
+++ b/contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp
@@ -18,11 +18,12 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
+#include "llvm/Support/LLVMDriver.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
#include <optional>
using namespace clang;
@@ -111,7 +112,8 @@ static std::string generateReproducerMetaInfo(const ClangInvocationInfo &Info) {
/// Generates a reproducer for a set of arguments from a specific invocation.
static std::optional<driver::Driver::CompilationDiagnosticReport>
generateReproducerForInvocationArguments(ArrayRef<const char *> Argv,
- const ClangInvocationInfo &Info) {
+ const ClangInvocationInfo &Info,
+ const llvm::ToolContext &ToolContext) {
using namespace driver;
auto TargetAndMode = ToolChain::getTargetAndModeFromProgramName(Argv[0]);
@@ -120,8 +122,11 @@ generateReproducerForInvocationArguments(ArrayRef<const char *> Argv,
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
DiagnosticsEngine Diags(DiagID, &*DiagOpts, new IgnoringDiagConsumer());
ProcessWarningOptions(Diags, *DiagOpts, /*ReportDiags=*/false);
- Driver TheDriver(Argv[0], llvm::sys::getDefaultTargetTriple(), Diags);
+ Driver TheDriver(ToolContext.Path, llvm::sys::getDefaultTargetTriple(),
+ Diags);
TheDriver.setTargetAndMode(TargetAndMode);
+ if (ToolContext.NeedsPrependArg)
+ TheDriver.setPrependArg(ToolContext.PrependArg);
std::unique_ptr<Compilation> C(TheDriver.BuildCompilation(Argv));
if (C && !C->containsError()) {
@@ -155,7 +160,8 @@ static void printReproducerInformation(
}
int cc1gen_reproducer_main(ArrayRef<const char *> Argv, const char *Argv0,
- void *MainAddr) {
+ void *MainAddr,
+ const llvm::ToolContext &ToolContext) {
if (Argv.size() < 1) {
llvm::errs() << "error: missing invocation file\n";
return 1;
@@ -182,7 +188,8 @@ int cc1gen_reproducer_main(ArrayRef<const char *> Argv, const char *Argv0,
std::string Path = GetExecutablePath(Argv0, /*CanonicalPrefixes=*/true);
DriverArgs[0] = Path.c_str();
std::optional<driver::Driver::CompilationDiagnosticReport> Report =
- generateReproducerForInvocationArguments(DriverArgs, InvocationInfo);
+ generateReproducerForInvocationArguments(DriverArgs, InvocationInfo,
+ ToolContext);
// Emit the information about the reproduce files to stdout.
int Result = 1;
diff --git a/contrib/llvm-project/clang/tools/driver/driver.cpp b/contrib/llvm-project/clang/tools/driver/driver.cpp
index d74741233653..471d0181ff08 100644
--- a/contrib/llvm-project/clang/tools/driver/driver.cpp
+++ b/contrib/llvm-project/clang/tools/driver/driver.cpp
@@ -36,8 +36,8 @@
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/InitLLVM.h"
+#include "llvm/Support/LLVMDriver.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Process.h"
@@ -48,6 +48,7 @@
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
#include <memory>
#include <optional>
#include <set>
@@ -210,7 +211,8 @@ extern int cc1_main(ArrayRef<const char *> Argv, const char *Argv0,
extern int cc1as_main(ArrayRef<const char *> Argv, const char *Argv0,
void *MainAddr);
extern int cc1gen_reproducer_main(ArrayRef<const char *> Argv,
- const char *Argv0, void *MainAddr);
+ const char *Argv0, void *MainAddr,
+ const llvm::ToolContext &);
static void insertTargetAndModeArgs(const ParsedClangName &NameParts,
SmallVectorImpl<const char *> &ArgVector,
@@ -305,6 +307,9 @@ static bool SetBackdoorDriverOutputsFromEnvVars(Driver &TheDriver) {
TheDriver.CCPrintProcessStats =
checkEnvVar<bool>("CC_PRINT_PROC_STAT", "CC_PRINT_PROC_STAT_FILE",
TheDriver.CCPrintStatReportFilename);
+ TheDriver.CCPrintInternalStats =
+ checkEnvVar<bool>("CC_PRINT_INTERNAL_STAT", "CC_PRINT_INTERNAL_STAT_FILE",
+ TheDriver.CCPrintInternalStatReportFilename);
return true;
}
@@ -341,7 +346,8 @@ static void SetInstallDir(SmallVectorImpl<const char *> &argv,
TheDriver.setInstalledDir(InstalledPathParent);
}
-static int ExecuteCC1Tool(SmallVectorImpl<const char *> &ArgV) {
+static int ExecuteCC1Tool(SmallVectorImpl<const char *> &ArgV,
+ const llvm::ToolContext &ToolContext) {
// If we call the cc1 tool from the clangDriver library (through
// Driver::CC1Main), we need to clean up the options usage count. The options
// are currently global, and they might have been used previously by the
@@ -362,14 +368,14 @@ static int ExecuteCC1Tool(SmallVectorImpl<const char *> &ArgV) {
return cc1as_main(ArrayRef(ArgV).slice(2), ArgV[0], GetExecutablePathVP);
if (Tool == "-cc1gen-reproducer")
return cc1gen_reproducer_main(ArrayRef(ArgV).slice(2), ArgV[0],
- GetExecutablePathVP);
+ GetExecutablePathVP, ToolContext);
// Reject unknown tools.
llvm::errs() << "error: unknown integrated tool '" << Tool << "'. "
<< "Valid tools include '-cc1' and '-cc1as'.\n";
return 1;
}
-int clang_main(int Argc, char **Argv) {
+int clang_main(int Argc, char **Argv, const llvm::ToolContext &ToolContext) {
noteBottomOfStack();
llvm::InitLLVM X(Argc, Argv);
llvm::setBugReportMsg("PLEASE submit a bug report to " BUG_REPORT_URL
@@ -385,55 +391,20 @@ int clang_main(int Argc, char **Argv) {
llvm::BumpPtrAllocator A;
llvm::StringSaver Saver(A);
- // Parse response files using the GNU syntax, unless we're in CL mode. There
- // are two ways to put clang in CL compatibility mode: Args[0] is either
- // clang-cl or cl, or --driver-mode=cl is on the command line. The normal
- // command line parsing can't happen until after response file parsing, so we
- // have to manually search for a --driver-mode=cl argument the hard way.
- // Finally, our -cc1 tools don't care which tokenization mode we use because
- // response files written by clang will tokenize the same way in either mode.
+ const char *ProgName =
+ ToolContext.NeedsPrependArg ? ToolContext.PrependArg : ToolContext.Path;
+
bool ClangCLMode =
- IsClangCL(getDriverMode(Args[0], llvm::ArrayRef(Args).slice(1)));
- enum { Default, POSIX, Windows } RSPQuoting = Default;
- for (const char *F : Args) {
- if (strcmp(F, "--rsp-quoting=posix") == 0)
- RSPQuoting = POSIX;
- else if (strcmp(F, "--rsp-quoting=windows") == 0)
- RSPQuoting = Windows;
- }
+ IsClangCL(getDriverMode(ProgName, llvm::ArrayRef(Args).slice(1)));
- // Determines whether we want nullptr markers in Args to indicate response
- // files end-of-lines. We only use this for the /LINK driver argument with
- // clang-cl.exe on Windows.
- bool MarkEOLs = ClangCLMode;
-
- llvm::cl::TokenizerCallback Tokenizer;
- if (RSPQuoting == Windows || (RSPQuoting == Default && ClangCLMode))
- Tokenizer = &llvm::cl::TokenizeWindowsCommandLine;
- else
- Tokenizer = &llvm::cl::TokenizeGNUCommandLine;
-
- if (MarkEOLs && Args.size() > 1 && StringRef(Args[1]).startswith("-cc1"))
- MarkEOLs = false;
- llvm::cl::ExpansionContext ECtx(A, Tokenizer);
- ECtx.setMarkEOLs(MarkEOLs);
- if (llvm::Error Err = ECtx.expandResponseFiles(Args)) {
+ if (llvm::Error Err = expandResponseFiles(Args, ClangCLMode, A)) {
llvm::errs() << toString(std::move(Err)) << '\n';
return 1;
}
- // Handle -cc1 integrated tools, even if -cc1 was expanded from a response
- // file.
- auto FirstArg = llvm::find_if(llvm::drop_begin(Args),
- [](const char *A) { return A != nullptr; });
- if (FirstArg != Args.end() && StringRef(*FirstArg).startswith("-cc1")) {
- // If -cc1 came from a response file, remove the EOL sentinels.
- if (MarkEOLs) {
- auto newEnd = std::remove(Args.begin(), Args.end(), nullptr);
- Args.resize(newEnd - Args.begin());
- }
- return ExecuteCC1Tool(Args);
- }
+ // Handle -cc1 integrated tools.
+ if (Args.size() >= 2 && StringRef(Args[1]).startswith("-cc1"))
+ return ExecuteCC1Tool(Args, ToolContext);
// Handle options that need handling before the real command line parsing in
// Driver::BuildCompilation()
@@ -479,7 +450,7 @@ int clang_main(int Argc, char **Argv) {
ApplyQAOverride(Args, OverrideStr, SavedStrings);
}
- std::string Path = GetExecutablePath(Args[0], CanonicalPrefixes);
+ std::string Path = GetExecutablePath(ToolContext.Path, CanonicalPrefixes);
// Whether the cc1 tool should be called inside the current process, or if we
// should spawn a new clang subprocess (old behavior).
@@ -497,7 +468,7 @@ int clang_main(int Argc, char **Argv) {
TextDiagnosticPrinter *DiagClient
= new TextDiagnosticPrinter(llvm::errs(), &*DiagOpts);
- FixupDiagPrefixExeName(DiagClient, Path);
+ FixupDiagPrefixExeName(DiagClient, ProgName);
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
@@ -515,8 +486,15 @@ int clang_main(int Argc, char **Argv) {
Driver TheDriver(Path, llvm::sys::getDefaultTargetTriple(), Diags);
SetInstallDir(Args, TheDriver, CanonicalPrefixes);
- auto TargetAndMode = ToolChain::getTargetAndModeFromProgramName(Args[0]);
+ auto TargetAndMode = ToolChain::getTargetAndModeFromProgramName(ProgName);
TheDriver.setTargetAndMode(TargetAndMode);
+ // If -canonical-prefixes is set, GetExecutablePath will have resolved Path
+ // to the llvm driver binary, not clang. In this case, we need to use
+ // PrependArg which should be clang-*. Checking just CanonicalPrefixes is
+ // safe even in the normal case because PrependArg will be null so
+ // setPrependArg will be a no-op.
+ if (ToolContext.NeedsPrependArg || CanonicalPrefixes)
+ TheDriver.setPrependArg(ToolContext.PrependArg);
insertTargetAndModeArgs(TargetAndMode, Args, SavedStrings);
@@ -524,7 +502,9 @@ int clang_main(int Argc, char **Argv) {
return 1;
if (!UseNewCC1Process) {
- TheDriver.CC1Main = &ExecuteCC1Tool;
+ TheDriver.CC1Main = [ToolContext](SmallVectorImpl<const char *> &ArgV) {
+ return ExecuteCC1Tool(ArgV, ToolContext);
+ };
// Ensure the CC1Command actually catches cc1 crashes
llvm::CrashRecoveryContext::Enable();
}
diff --git a/contrib/llvm-project/clang/tools/nvptx-arch/NVPTXArch.cpp b/contrib/llvm-project/clang/tools/nvptx-arch/NVPTXArch.cpp
index 91723324c28e..71a48657576e 100644
--- a/contrib/llvm-project/clang/tools/nvptx-arch/NVPTXArch.cpp
+++ b/contrib/llvm-project/clang/tools/nvptx-arch/NVPTXArch.cpp
@@ -11,13 +11,25 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/Version.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <cstdio>
#include <memory>
-#if DYNAMIC_CUDA
+using namespace llvm;
+
+static cl::opt<bool> Help("h", cl::desc("Alias for -help"), cl::Hidden);
+
+static void PrintVersion(raw_ostream &OS) {
+ OS << clang::getClangToolFullVersion("nvptx-arch") << '\n';
+}
+// Mark all our options with this category, everything else (except for -version
+// and -help) will be hidden.
+static cl::OptionCategory NVPTXArchCategory("nvptx-arch options");
+
typedef enum cudaError_enum {
CUDA_SUCCESS = 0,
CUDA_ERROR_NO_DEVICE = 100,
@@ -36,7 +48,7 @@ CUresult (*cuGetErrorString)(CUresult, const char **);
CUresult (*cuDeviceGet)(CUdevice *, int);
CUresult (*cuDeviceGetAttribute)(int *, CUdevice_attribute, CUdevice);
-constexpr const char *DynamicCudaPath = "libcuda.so";
+constexpr const char *DynamicCudaPath = "libcuda.so.1";
llvm::Error loadCUDA() {
std::string ErrMsg;
@@ -62,12 +74,6 @@ llvm::Error loadCUDA() {
#undef DYNAMIC_INIT
return llvm::Error::success();
}
-#else
-
-#include "cuda.h"
-llvm::Error loadCUDA() { return llvm::Error::success(); }
-
-#endif
static int handleError(CUresult Err) {
const char *ErrStr = nullptr;
@@ -79,6 +85,21 @@ static int handleError(CUresult Err) {
}
int main(int argc, char *argv[]) {
+ cl::HideUnrelatedOptions(NVPTXArchCategory);
+
+ cl::SetVersionPrinter(PrintVersion);
+ cl::ParseCommandLineOptions(
+ argc, argv,
+ "A tool to detect the presence of NVIDIA devices on the system. \n\n"
+ "The tool will output each detected GPU architecture separated by a\n"
+ "newline character. If multiple GPUs of the same architecture are found\n"
+ "a string will be printed for each\n");
+
+ if (Help) {
+ cl::PrintHelpMessage();
+ return 0;
+ }
+
// Attempt to load the NVPTX driver runtime.
if (llvm::Error Err = loadCUDA()) {
logAllUnhandledErrors(std::move(Err), llvm::errs());
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp
index de608c780f98..8ef728f86c6b 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -51,14 +51,18 @@ namespace {
class FlattenedSpelling {
std::string V, N, NS;
bool K = false;
+ const Record &OriginalSpelling;
public:
FlattenedSpelling(const std::string &Variety, const std::string &Name,
- const std::string &Namespace, bool KnownToGCC) :
- V(Variety), N(Name), NS(Namespace), K(KnownToGCC) {}
+ const std::string &Namespace, bool KnownToGCC,
+ const Record &OriginalSpelling)
+ : V(Variety), N(Name), NS(Namespace), K(KnownToGCC),
+ OriginalSpelling(OriginalSpelling) {}
explicit FlattenedSpelling(const Record &Spelling)
: V(std::string(Spelling.getValueAsString("Variety"))),
- N(std::string(Spelling.getValueAsString("Name"))) {
+ N(std::string(Spelling.getValueAsString("Name"))),
+ OriginalSpelling(Spelling) {
assert(V != "GCC" && V != "Clang" &&
"Given a GCC spelling, which means this hasn't been flattened!");
if (V == "CXX11" || V == "C2x" || V == "Pragma")
@@ -69,6 +73,7 @@ public:
const std::string &name() const { return N; }
const std::string &nameSpace() const { return NS; }
bool knownToGCC() const { return K; }
+ const Record &getSpellingRecord() const { return OriginalSpelling; }
};
} // end anonymous namespace
@@ -82,15 +87,15 @@ GetFlattenedSpellings(const Record &Attr) {
StringRef Variety = Spelling->getValueAsString("Variety");
StringRef Name = Spelling->getValueAsString("Name");
if (Variety == "GCC") {
- Ret.emplace_back("GNU", std::string(Name), "", true);
- Ret.emplace_back("CXX11", std::string(Name), "gnu", true);
+ Ret.emplace_back("GNU", std::string(Name), "", true, *Spelling);
+ Ret.emplace_back("CXX11", std::string(Name), "gnu", true, *Spelling);
if (Spelling->getValueAsBit("AllowInC"))
- Ret.emplace_back("C2x", std::string(Name), "gnu", true);
+ Ret.emplace_back("C2x", std::string(Name), "gnu", true, *Spelling);
} else if (Variety == "Clang") {
- Ret.emplace_back("GNU", std::string(Name), "", false);
- Ret.emplace_back("CXX11", std::string(Name), "clang", false);
+ Ret.emplace_back("GNU", std::string(Name), "", false, *Spelling);
+ Ret.emplace_back("CXX11", std::string(Name), "clang", false, *Spelling);
if (Spelling->getValueAsBit("AllowInC"))
- Ret.emplace_back("C2x", std::string(Name), "clang", false);
+ Ret.emplace_back("C2x", std::string(Name), "clang", false, *Spelling);
} else
Ret.push_back(FlattenedSpelling(*Spelling));
}
@@ -503,6 +508,16 @@ namespace {
OS << " assert(!is" << getLowerName() << "Expr);\n";
OS << " return " << getLowerName() << "Type;\n";
OS << " }";
+
+ OS << " std::optional<unsigned> getCached" << getUpperName()
+ << "Value() const {\n";
+ OS << " return " << getLowerName() << "Cache;\n";
+ OS << " }";
+
+ OS << " void setCached" << getUpperName()
+ << "Value(unsigned AlignVal) {\n";
+ OS << " " << getLowerName() << "Cache = AlignVal;\n";
+ OS << " }";
}
void writeAccessorDefinitions(raw_ostream &OS) const override {
@@ -525,21 +540,6 @@ namespace {
OS << " return " << getLowerName()
<< "Type->getType()->containsErrors();\n";
OS << "}\n";
-
- // FIXME: Do not do the calculation here
- // FIXME: Handle types correctly
- // A null pointer means maximum alignment
- OS << "unsigned " << getAttrName() << "Attr::get" << getUpperName()
- << "(ASTContext &Ctx) const {\n";
- OS << " assert(!is" << getUpperName() << "Dependent());\n";
- OS << " if (is" << getLowerName() << "Expr)\n";
- OS << " return " << getLowerName() << "Expr ? " << getLowerName()
- << "Expr->EvaluateKnownConstInt(Ctx).getZExtValue()"
- << " * Ctx.getCharWidth() : "
- << "Ctx.getTargetDefaultAlignForAttributeAligned();\n";
- OS << " else\n";
- OS << " return 0; // FIXME\n";
- OS << "}\n";
}
void writeASTVisitorTraversal(raw_ostream &OS) const override {
@@ -596,7 +596,8 @@ namespace {
OS << "union {\n";
OS << "Expr *" << getLowerName() << "Expr;\n";
OS << "TypeSourceInfo *" << getLowerName() << "Type;\n";
- OS << "};";
+ OS << "};\n";
+ OS << "std::optional<unsigned> " << getLowerName() << "Cache;\n";
}
void writePCHReadArgs(raw_ostream &OS) const override {
@@ -623,14 +624,21 @@ namespace {
}
std::string getIsOmitted() const override {
- return "!is" + getLowerName().str() + "Expr || !" + getLowerName().str()
- + "Expr";
+ return "!((is" + getLowerName().str() + "Expr && " +
+ getLowerName().str() + "Expr) || (!is" + getLowerName().str() +
+ "Expr && " + getLowerName().str() + "Type))";
}
void writeValue(raw_ostream &OS) const override {
OS << "\";\n";
- OS << " " << getLowerName()
+ OS << " if (is" << getLowerName() << "Expr && " << getLowerName()
+ << "Expr)";
+ OS << " " << getLowerName()
<< "Expr->printPretty(OS, nullptr, Policy);\n";
+ OS << " if (!is" << getLowerName() << "Expr && " << getLowerName()
+ << "Type)";
+ OS << " " << getLowerName()
+ << "Type->getType().print(OS, Policy);\n";
OS << " OS << \"";
}
@@ -2048,7 +2056,7 @@ bool PragmaClangAttributeSupport::isAttributedSupported(
for (const auto *Subject : Subjects) {
if (!isSupportedPragmaClangAttributeSubject(*Subject))
continue;
- if (SubjectsToRules.find(Subject) == SubjectsToRules.end())
+ if (!SubjectsToRules.contains(Subject))
return false;
HasAtLeastOneValidSubject = true;
}
@@ -2373,6 +2381,23 @@ static void emitClangAttrAcceptsExprPack(RecordKeeper &Records,
OS << "#endif // CLANG_ATTR_ACCEPTS_EXPR_PACK\n\n";
}
+static bool isRegularKeywordAttribute(const FlattenedSpelling &S) {
+ return (S.variety() == "Keyword" &&
+ !S.getSpellingRecord().getValueAsBit("HasOwnParseRules"));
+}
+
+static void emitFormInitializer(raw_ostream &OS,
+ const FlattenedSpelling &Spelling,
+ StringRef SpellingIndex) {
+ bool IsAlignas =
+ (Spelling.variety() == "Keyword" && Spelling.name() == "alignas");
+ OS << "{AttributeCommonInfo::AS_" << Spelling.variety() << ", "
+ << SpellingIndex << ", " << (IsAlignas ? "true" : "false")
+ << " /*IsAlignas*/, "
+ << (isRegularKeywordAttribute(Spelling) ? "true" : "false")
+ << " /*IsRegularKeywordAttribute*/}";
+}
+
static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
bool Header) {
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
@@ -2526,8 +2551,6 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
DelayedArgs->writeCtorParameters(OS);
}
OS << ", const AttributeCommonInfo &CommonInfo";
- if (Header && Implicit)
- OS << " = {SourceRange{}}";
OS << ")";
if (Header) {
OS << ";\n";
@@ -2537,6 +2560,7 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
OS << " {\n";
OS << " auto *A = new (Ctx) " << R.getName();
OS << "Attr(Ctx, CommonInfo";
+
if (!DelayedArgsOnly) {
for (auto const &ai : Args) {
if (ai->isFake() && !emitFake)
@@ -2587,11 +2611,13 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
OS << ", ";
DelayedArgs->writeCtorParameters(OS);
}
- OS << ", SourceRange Range, AttributeCommonInfo::Syntax Syntax";
- if (!ElideSpelling) {
- OS << ", " << R.getName() << "Attr::Spelling S";
+ OS << ", SourceRange Range";
+ if (Header)
+ OS << " = {}";
+ if (Spellings.size() > 1) {
+ OS << ", Spelling S";
if (Header)
- OS << " = static_cast<Spelling>(SpellingNotCalculated)";
+ OS << " = " << SemanticToSyntacticMap[0];
}
OS << ")";
if (Header) {
@@ -2607,9 +2633,31 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
else
OS << "NoSemaHandlerAttribute";
- OS << ", Syntax";
- if (!ElideSpelling)
- OS << ", S";
+ if (Spellings.size() == 0) {
+ OS << ", AttributeCommonInfo::Form::Implicit()";
+ } else if (Spellings.size() == 1) {
+ OS << ", ";
+ emitFormInitializer(OS, Spellings[0], "0");
+ } else {
+ OS << ", (\n";
+ std::set<std::string> Uniques;
+ unsigned Idx = 0;
+ for (auto I = Spellings.begin(), E = Spellings.end(); I != E;
+ ++I, ++Idx) {
+ const FlattenedSpelling &S = *I;
+ const auto &Name = SemanticToSyntacticMap[Idx];
+ if (Uniques.insert(Name).second) {
+ OS << " S == " << Name << " ? AttributeCommonInfo::Form";
+ emitFormInitializer(OS, S, Name);
+ OS << " :\n";
+ }
+ }
+ OS << " (llvm_unreachable(\"Unknown attribute spelling!\"), "
+ << " AttributeCommonInfo::Form";
+ emitFormInitializer(OS, Spellings[0], "0");
+ OS << "))";
+ }
+
OS << ");\n";
OS << " return Create";
if (Implicit)
@@ -3309,18 +3357,31 @@ static void GenerateHasAttrSpellingStringSwitch(
// C2x-style attributes have the same kind of version information
// associated with them. The unscoped attribute version information should
// be taken from the specification of the attribute in the C Standard.
+ //
+ // Clang-specific attributes have the same kind of version information
+ // associated with them. This version is typically the default value (1).
+ // These version values are clang-specific and should typically be
+ // incremented once the attribute changes its syntax and/or semantics in a
+ // a way that is impactful to the end user.
int Version = 1;
- if (Variety == "CXX11" || Variety == "C2x") {
- std::vector<Record *> Spellings = Attr->getValueAsListOfDefs("Spellings");
- for (const auto &Spelling : Spellings) {
- if (Spelling->getValueAsString("Variety") == Variety) {
- Version = static_cast<int>(Spelling->getValueAsInt("Version"));
- if (Scope.empty() && Version == 1)
- PrintError(Spelling->getLoc(), "Standard attributes must have "
- "valid version information.");
- break;
- }
+ std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(*Attr);
+ for (const auto &Spelling : Spellings) {
+ if (Spelling.variety() == Variety &&
+ (Spelling.nameSpace().empty() || Scope == Spelling.nameSpace())) {
+ Version = static_cast<int>(
+ Spelling.getSpellingRecord().getValueAsInt("Version"));
+ // Verify that explicitly specified CXX11 and C2x spellings (i.e.
+ // not inferred from Clang/GCC spellings) have a version that's
+ // different than the default (1).
+ bool RequiresValidVersion =
+ (Variety == "CXX11" || Variety == "C2x") &&
+ Spelling.getSpellingRecord().getValueAsString("Variety") == Variety;
+ if (RequiresValidVersion && Scope.empty() && Version == 1)
+ PrintError(Spelling.getSpellingRecord().getLoc(),
+ "Standard attributes must have "
+ "valid version information.");
+ break;
}
}
@@ -3333,18 +3394,14 @@ static void GenerateHasAttrSpellingStringSwitch(
// If this is the C++11 variety, also add in the LangOpts test.
if (Variety == "CXX11")
Test += " && LangOpts.CPlusPlus11";
- else if (Variety == "C2x")
- Test += " && LangOpts.DoubleSquareBracketAttributes";
} else if (Variety == "CXX11")
// C++11 mode should be checked against LangOpts, which is presumed to be
// present in the caller.
Test = "LangOpts.CPlusPlus11";
- else if (Variety == "C2x")
- Test = "LangOpts.DoubleSquareBracketAttributes";
- std::string TestStr =
- !Test.empty() ? Test + " ? " + llvm::itostr(Version) + " : 0" : "1";
- std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(*Attr);
+ std::string TestStr = !Test.empty()
+ ? Test + " ? " + llvm::itostr(Version) + " : 0"
+ : llvm::itostr(Version);
for (const auto &S : Spellings)
if (Variety.empty() || (Variety == S.variety() &&
(Scope.empty() || Scope == S.nameSpace())))
@@ -3353,6 +3410,26 @@ static void GenerateHasAttrSpellingStringSwitch(
OS << " .Default(0);\n";
}
+// Emits the list of tokens for regular keyword attributes.
+void EmitClangAttrTokenKinds(RecordKeeper &Records, raw_ostream &OS) {
+ emitSourceFileHeader("A list of tokens generated from the attribute"
+ " definitions",
+ OS);
+ // Assume for now that the same token is not used in multiple regular
+ // keyword attributes.
+ for (auto *R : Records.getAllDerivedDefinitions("Attr"))
+ for (const auto &S : GetFlattenedSpellings(*R))
+ if (isRegularKeywordAttribute(S)) {
+ if (!R->getValueAsListOfDefs("Args").empty())
+ PrintError(R->getLoc(),
+ "RegularKeyword attributes with arguments are not "
+ "yet supported");
+ OS << "KEYWORD_ATTRIBUTE("
+ << S.getSpellingRecord().getValueAsString("Name") << ")\n";
+ }
+ OS << "#undef KEYWORD_ATTRIBUTE\n";
+}
+
// Emits the list of spellings for attributes.
void EmitClangAttrHasAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
emitSourceFileHeader("Code to implement the __has_attribute logic", OS);
@@ -3426,6 +3503,10 @@ void EmitClangAttrHasAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
OS << "case AttributeCommonInfo::Syntax::AS_ContextSensitiveKeyword:\n";
OS << " llvm_unreachable(\"hasAttribute not supported for keyword\");\n";
OS << " return 0;\n";
+ OS << "case AttributeCommonInfo::Syntax::AS_Implicit:\n";
+ OS << " llvm_unreachable (\"hasAttribute not supported for "
+ "AS_Implicit\");\n";
+ OS << " return 0;\n";
OS << "}\n";
}
@@ -3797,7 +3878,8 @@ static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
OS << "bool diagAppertainsToDecl(Sema &S, const ParsedAttr &AL, ";
OS << "const Decl *D) const override {\n";
OS << " S.Diag(AL.getLoc(), diag::err_attribute_invalid_on_decl)\n";
- OS << " << AL << D->getLocation();\n";
+ OS << " << AL << AL.isRegularKeywordAttribute() << "
+ "D->getLocation();\n";
OS << " return false;\n";
OS << "}\n\n";
}
@@ -3826,7 +3908,7 @@ static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
OS << (Warn ? "warn_attribute_wrong_decl_type_str"
: "err_attribute_wrong_decl_type_str");
OS << ")\n";
- OS << " << Attr << ";
+ OS << " << Attr << Attr.isRegularKeywordAttribute() << ";
OS << CalculateDiagnostic(*SubjectObj) << ";\n";
OS << " return false;\n";
OS << " }\n";
@@ -3841,7 +3923,8 @@ static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
OS << "bool diagAppertainsToStmt(Sema &S, const ParsedAttr &AL, ";
OS << "const Stmt *St) const override {\n";
OS << " S.Diag(AL.getLoc(), diag::err_decl_attribute_invalid_on_stmt)\n";
- OS << " << AL << St->getBeginLoc();\n";
+ OS << " << AL << AL.isRegularKeywordAttribute() << "
+ "St->getBeginLoc();\n";
OS << " return false;\n";
OS << "}\n\n";
}
@@ -3860,7 +3943,7 @@ static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
OS << (Warn ? "warn_attribute_wrong_decl_type_str"
: "err_attribute_wrong_decl_type_str");
OS << ")\n";
- OS << " << Attr << ";
+ OS << " << Attr << Attr.isRegularKeywordAttribute() << ";
OS << CalculateDiagnostic(*SubjectObj) << ";\n";
OS << " return false;\n";
OS << " }\n";
@@ -3931,7 +4014,8 @@ static void GenerateMutualExclusionsChecks(const Record &Attr,
for (const std::string &A : DeclAttrs) {
OS << " if (const auto *A = D->getAttr<" << A << ">()) {\n";
OS << " S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)"
- << " << AL << A;\n";
+ << " << AL << A << (AL.isRegularKeywordAttribute() ||"
+ << " A->isRegularKeywordAttribute());\n";
OS << " S.Diag(A->getLocation(), diag::note_conflicting_attribute);";
OS << " \nreturn false;\n";
OS << " }\n";
@@ -3952,7 +4036,8 @@ static void GenerateMutualExclusionsChecks(const Record &Attr,
<< ">()) {\n";
MergeDeclOS << " S.Diag(First->getLocation(), "
<< "diag::err_attributes_are_not_compatible) << First << "
- << "Second;\n";
+ << "Second << (First->isRegularKeywordAttribute() || "
+ << "Second->isRegularKeywordAttribute());\n";
MergeDeclOS << " S.Diag(Second->getLocation(), "
<< "diag::note_conflicting_attribute);\n";
MergeDeclOS << " return false;\n";
@@ -3992,7 +4077,8 @@ static void GenerateMutualExclusionsChecks(const Record &Attr,
MergeStmtOS << " if (Iter != C.end()) {\n";
MergeStmtOS << " S.Diag((*Iter)->getLocation(), "
<< "diag::err_attributes_are_not_compatible) << *Iter << "
- << "Second;\n";
+ << "Second << ((*Iter)->isRegularKeywordAttribute() || "
+ << "Second->isRegularKeywordAttribute());\n";
MergeStmtOS << " S.Diag(Second->getLocation(), "
<< "diag::note_conflicting_attribute);\n";
MergeStmtOS << " return false;\n";
@@ -4204,7 +4290,7 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
// Generate all of the custom appertainsTo functions that the attributes
// will be using.
- for (auto I : Attrs) {
+ for (const auto &I : Attrs) {
const Record &Attr = *I.second;
if (Attr.isValueUnset("Subjects"))
continue;
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
index 0b553254682c..d412db62576e 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -653,6 +653,14 @@ private:
Root(O.Root) {
O.Root = nullptr;
}
+ // The move assignment operator is defined as deleted pending further
+ // motivation.
+ DiagText &operator=(DiagText &&) = delete;
+
+ // The copy constrcutor and copy assignment operator is defined as deleted
+ // pending further motivation.
+ DiagText(const DiagText &) = delete;
+ DiagText &operator=(const DiagText &) = delete;
~DiagText() {
for (Piece *P : AllocatedPieces)
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangOpcodesEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangOpcodesEmitter.cpp
index aa012233c46e..db88c990d5f9 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangOpcodesEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangOpcodesEmitter.cpp
@@ -21,7 +21,7 @@ using namespace llvm;
namespace {
class ClangOpcodesEmitter {
RecordKeeper &Records;
- Record Root;
+ const Record Root;
unsigned NumTypes;
public:
@@ -34,33 +34,32 @@ public:
private:
/// Emits the opcode name for the opcode enum.
/// The name is obtained by concatenating the name with the list of types.
- void EmitEnum(raw_ostream &OS, StringRef N, Record *R);
+ void EmitEnum(raw_ostream &OS, StringRef N, const Record *R);
/// Emits the switch case and the invocation in the interpreter.
- void EmitInterp(raw_ostream &OS, StringRef N, Record *R);
+ void EmitInterp(raw_ostream &OS, StringRef N, const Record *R);
/// Emits the disassembler.
- void EmitDisasm(raw_ostream &OS, StringRef N, Record *R);
+ void EmitDisasm(raw_ostream &OS, StringRef N, const Record *R);
/// Emits the byte code emitter method.
- void EmitEmitter(raw_ostream &OS, StringRef N, Record *R);
+ void EmitEmitter(raw_ostream &OS, StringRef N, const Record *R);
/// Emits the prototype.
- void EmitProto(raw_ostream &OS, StringRef N, Record *R);
+ void EmitProto(raw_ostream &OS, StringRef N, const Record *R);
/// Emits the prototype to dispatch from a type.
- void EmitGroup(raw_ostream &OS, StringRef N, Record *R);
+ void EmitGroup(raw_ostream &OS, StringRef N, const Record *R);
/// Emits the evaluator method.
- void EmitEval(raw_ostream &OS, StringRef N, Record *R);
+ void EmitEval(raw_ostream &OS, StringRef N, const Record *R);
- void PrintTypes(raw_ostream &OS, ArrayRef<Record *> Types);
+ void PrintTypes(raw_ostream &OS, ArrayRef<const Record *> Types);
};
-void Enumerate(const Record *R,
- StringRef N,
- std::function<void(ArrayRef<Record *>, Twine)> &&F) {
- llvm::SmallVector<Record *, 2> TypePath;
+void Enumerate(const Record *R, StringRef N,
+ std::function<void(ArrayRef<const Record *>, Twine)> &&F) {
+ llvm::SmallVector<const Record *, 2> TypePath;
auto *Types = R->getValueAsListInit("Types");
std::function<void(size_t, const Twine &)> Rec;
@@ -102,67 +101,72 @@ void ClangOpcodesEmitter::run(raw_ostream &OS) {
}
}
-void ClangOpcodesEmitter::EmitEnum(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitEnum(raw_ostream &OS, StringRef N,
+ const Record *R) {
OS << "#ifdef GET_OPCODE_NAMES\n";
- Enumerate(R, N, [&OS](ArrayRef<Record *>, const Twine &ID) {
+ Enumerate(R, N, [&OS](ArrayRef<const Record *>, const Twine &ID) {
OS << "OP_" << ID << ",\n";
});
OS << "#endif\n";
}
-void ClangOpcodesEmitter::EmitInterp(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitInterp(raw_ostream &OS, StringRef N,
+ const Record *R) {
OS << "#ifdef GET_INTERP\n";
- Enumerate(R, N, [this, R, &OS, &N](ArrayRef<Record *> TS, const Twine &ID) {
- bool CanReturn = R->getValueAsBit("CanReturn");
- bool ChangesPC = R->getValueAsBit("ChangesPC");
- auto Args = R->getValueAsListOfDefs("Args");
-
- OS << "case OP_" << ID << ": {\n";
-
- if (CanReturn)
- OS << " bool DoReturn = (S.Current == StartFrame);\n";
-
- // Emit calls to read arguments.
- for (size_t I = 0, N = Args.size(); I < N; ++I) {
- OS << " auto V" << I;
- OS << " = ";
- OS << "ReadArg<" << Args[I]->getValueAsString("Name") << ">(S, PC);\n";
- }
-
- // Emit a call to the template method and pass arguments.
- OS << " if (!" << N;
- PrintTypes(OS, TS);
- OS << "(S";
- if (ChangesPC)
- OS << ", PC";
- else
- OS << ", OpPC";
- if (CanReturn)
- OS << ", Result";
- for (size_t I = 0, N = Args.size(); I < N; ++I)
- OS << ", V" << I;
- OS << "))\n";
- OS << " return false;\n";
-
- // Bail out if interpreter returned.
- if (CanReturn) {
- OS << " if (!S.Current || S.Current->isRoot())\n";
- OS << " return true;\n";
-
- OS << " if (DoReturn)\n";
- OS << " return true;\n";
- }
-
- OS << " continue;\n";
- OS << "}\n";
- });
+ Enumerate(R, N,
+ [this, R, &OS, &N](ArrayRef<const Record *> TS, const Twine &ID) {
+ bool CanReturn = R->getValueAsBit("CanReturn");
+ bool ChangesPC = R->getValueAsBit("ChangesPC");
+ auto Args = R->getValueAsListOfDefs("Args");
+
+ OS << "case OP_" << ID << ": {\n";
+
+ if (CanReturn)
+ OS << " bool DoReturn = (S.Current == StartFrame);\n";
+
+ // Emit calls to read arguments.
+ for (size_t I = 0, N = Args.size(); I < N; ++I) {
+ OS << " auto V" << I;
+ OS << " = ";
+ OS << "ReadArg<" << Args[I]->getValueAsString("Name")
+ << ">(S, PC);\n";
+ }
+
+ // Emit a call to the template method and pass arguments.
+ OS << " if (!" << N;
+ PrintTypes(OS, TS);
+ OS << "(S";
+ if (ChangesPC)
+ OS << ", PC";
+ else
+ OS << ", OpPC";
+ if (CanReturn)
+ OS << ", Result";
+ for (size_t I = 0, N = Args.size(); I < N; ++I)
+ OS << ", V" << I;
+ OS << "))\n";
+ OS << " return false;\n";
+
+ // Bail out if interpreter returned.
+ if (CanReturn) {
+ OS << " if (!S.Current || S.Current->isRoot())\n";
+ OS << " return true;\n";
+
+ OS << " if (DoReturn)\n";
+ OS << " return true;\n";
+ }
+
+ OS << " continue;\n";
+ OS << "}\n";
+ });
OS << "#endif\n";
}
-void ClangOpcodesEmitter::EmitDisasm(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitDisasm(raw_ostream &OS, StringRef N,
+ const Record *R) {
OS << "#ifdef GET_DISASM\n";
- Enumerate(R, N, [R, &OS](ArrayRef<Record *>, const Twine &ID) {
+ Enumerate(R, N, [R, &OS](ArrayRef<const Record *>, const Twine &ID) {
OS << "case OP_" << ID << ":\n";
OS << " PrintName(\"" << ID << "\");\n";
OS << " OS << \"\\t\"";
@@ -178,12 +182,13 @@ void ClangOpcodesEmitter::EmitDisasm(raw_ostream &OS, StringRef N, Record *R) {
OS << "#endif\n";
}
-void ClangOpcodesEmitter::EmitEmitter(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitEmitter(raw_ostream &OS, StringRef N,
+ const Record *R) {
if (R->getValueAsBit("HasCustomLink"))
return;
OS << "#ifdef GET_LINK_IMPL\n";
- Enumerate(R, N, [R, &OS](ArrayRef<Record *>, const Twine &ID) {
+ Enumerate(R, N, [R, &OS](ArrayRef<const Record *>, const Twine &ID) {
auto Args = R->getValueAsListOfDefs("Args");
// Emit the list of arguments.
@@ -208,10 +213,11 @@ void ClangOpcodesEmitter::EmitEmitter(raw_ostream &OS, StringRef N, Record *R) {
OS << "#endif\n";
}
-void ClangOpcodesEmitter::EmitProto(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitProto(raw_ostream &OS, StringRef N,
+ const Record *R) {
OS << "#if defined(GET_EVAL_PROTO) || defined(GET_LINK_PROTO)\n";
auto Args = R->getValueAsListOfDefs("Args");
- Enumerate(R, N, [&OS, &Args](ArrayRef<Record *> TS, const Twine &ID) {
+ Enumerate(R, N, [&OS, &Args](ArrayRef<const Record *> TS, const Twine &ID) {
OS << "bool emit" << ID << "(";
for (auto *Arg : Args)
OS << Arg->getValueAsString("Name") << ", ";
@@ -239,16 +245,19 @@ void ClangOpcodesEmitter::EmitProto(raw_ostream &OS, StringRef N, Record *R) {
OS << "#endif\n";
}
-void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N,
+ const Record *R) {
if (!R->getValueAsBit("HasGroup"))
return;
auto *Types = R->getValueAsListInit("Types");
auto Args = R->getValueAsListOfDefs("Args");
+ Twine EmitFuncName = "emit" + N;
+
// Emit the prototype of the group emitter in the header.
OS << "#if defined(GET_EVAL_PROTO) || defined(GET_LINK_PROTO)\n";
- OS << "bool emit" << N << "(";
+ OS << "bool " << EmitFuncName << "(";
for (size_t I = 0, N = Types->size(); I < N; ++I)
OS << "PrimType, ";
for (auto *Arg : Args)
@@ -264,7 +273,7 @@ void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N, Record *R) {
OS << "#else\n";
OS << "ByteCodeEmitter\n";
OS << "#endif\n";
- OS << "::emit" << N << "(";
+ OS << "::" << EmitFuncName << "(";
for (size_t I = 0, N = Types->size(); I < N; ++I)
OS << "PrimType T" << I << ", ";
for (size_t I = 0, N = Args.size(); I < N; ++I)
@@ -272,8 +281,9 @@ void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N, Record *R) {
OS << "const SourceInfo &I) {\n";
std::function<void(size_t, const Twine &)> Rec;
- llvm::SmallVector<Record *, 2> TS;
- Rec = [this, &Rec, &OS, Types, &Args, R, &TS, N](size_t I, const Twine &ID) {
+ llvm::SmallVector<const Record *, 2> TS;
+ Rec = [this, &Rec, &OS, Types, &Args, R, &TS, N,
+ EmitFuncName](size_t I, const Twine &ID) {
if (I >= Types->size()) {
// Print a call to the emitter method.
// Custom evaluator methods dispatch to template methods.
@@ -309,7 +319,8 @@ void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N, Record *R) {
}
// Emit a default case if not all types are present.
if (Cases.size() < NumTypes)
- OS << " default: llvm_unreachable(\"invalid type\");\n";
+ OS << " default: llvm_unreachable(\"invalid type: " << EmitFuncName
+ << "\");\n";
OS << " }\n";
OS << " llvm_unreachable(\"invalid enum value\");\n";
} else {
@@ -322,34 +333,37 @@ void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N, Record *R) {
OS << "#endif\n";
}
-void ClangOpcodesEmitter::EmitEval(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitEval(raw_ostream &OS, StringRef N,
+ const Record *R) {
if (R->getValueAsBit("HasCustomEval"))
return;
OS << "#ifdef GET_EVAL_IMPL\n";
- Enumerate(R, N, [this, R, &N, &OS](ArrayRef<Record *> TS, const Twine &ID) {
- auto Args = R->getValueAsListOfDefs("Args");
-
- OS << "bool EvalEmitter::emit" << ID << "(";
- for (size_t I = 0, N = Args.size(); I < N; ++I)
- OS << Args[I]->getValueAsString("Name") << " A" << I << ", ";
- OS << "const SourceInfo &L) {\n";
- OS << " if (!isActive()) return true;\n";
- OS << " CurrentSource = L;\n";
-
- OS << " return " << N;
- PrintTypes(OS, TS);
- OS << "(S, OpPC";
- for (size_t I = 0, N = Args.size(); I < N; ++I)
- OS << ", A" << I;
- OS << ");\n";
- OS << "}\n";
- });
+ Enumerate(R, N,
+ [this, R, &N, &OS](ArrayRef<const Record *> TS, const Twine &ID) {
+ auto Args = R->getValueAsListOfDefs("Args");
+
+ OS << "bool EvalEmitter::emit" << ID << "(";
+ for (size_t I = 0, N = Args.size(); I < N; ++I)
+ OS << Args[I]->getValueAsString("Name") << " A" << I << ", ";
+ OS << "const SourceInfo &L) {\n";
+ OS << " if (!isActive()) return true;\n";
+ OS << " CurrentSource = L;\n";
+
+ OS << " return " << N;
+ PrintTypes(OS, TS);
+ OS << "(S, OpPC";
+ for (size_t I = 0, N = Args.size(); I < N; ++I)
+ OS << ", A" << I;
+ OS << ");\n";
+ OS << "}\n";
+ });
OS << "#endif\n";
}
-void ClangOpcodesEmitter::PrintTypes(raw_ostream &OS, ArrayRef<Record *> Types) {
+void ClangOpcodesEmitter::PrintTypes(raw_ostream &OS,
+ ArrayRef<const Record *> Types) {
if (Types.empty())
return;
OS << "<";
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
index da5d1fdc2eae..1c3b7e4398a8 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
@@ -324,6 +324,18 @@ public:
void emit() override;
};
+// OpenCL builtin header generator. This class processes the same TableGen
+// input as BuiltinNameEmitter, but generates a .h file that contains a
+// prototype for each builtin function described in the .td input.
+class OpenCLBuiltinHeaderEmitter : public OpenCLBuiltinFileEmitterBase {
+public:
+ OpenCLBuiltinHeaderEmitter(RecordKeeper &Records, raw_ostream &OS)
+ : OpenCLBuiltinFileEmitterBase(Records, OS) {}
+
+ // Entrypoint to generate the header.
+ void emit() override;
+};
+
} // namespace
void BuiltinNameEmitter::Emit() {
@@ -357,7 +369,7 @@ void BuiltinNameEmitter::ExtractEnumTypes(std::vector<Record *> &Types,
raw_string_ostream SS(Output);
for (const auto *T : Types) {
- if (TypesSeen.find(T->getValueAsString("Name")) == TypesSeen.end()) {
+ if (!TypesSeen.contains(T->getValueAsString("Name"))) {
SS << " OCLT_" + T->getValueAsString("Name") << ",\n";
// Save the type names in the same order as their enum value. Note that
// the Record can be a VectorType or something else, only the name is
@@ -498,7 +510,7 @@ void BuiltinNameEmitter::GetOverloads() {
std::vector<Record *> Builtins = Records.getAllDerivedDefinitions("Builtin");
for (const auto *B : Builtins) {
StringRef BName = B->getValueAsString("Name");
- if (FctOverloadMap.find(BName) == FctOverloadMap.end()) {
+ if (!FctOverloadMap.contains(BName)) {
FctOverloadMap.insert(std::make_pair(
BName, std::vector<std::pair<const Record *, unsigned>>{}));
}
@@ -895,10 +907,10 @@ static void OCL2Qual(Sema &S, const OpenCLTypeStruct &Ty,
for (const auto *T : Types) {
// Check this is not an image type
- if (ImageTypesMap.find(T->getValueAsString("Name")) != ImageTypesMap.end())
+ if (ImageTypesMap.contains(T->getValueAsString("Name")))
continue;
// Check we have not seen this Type
- if (TypesSeen.find(T->getValueAsString("Name")) != TypesSeen.end())
+ if (TypesSeen.contains(T->getValueAsString("Name")))
continue;
TypesSeen.insert(std::make_pair(T->getValueAsString("Name"), true));
@@ -1083,7 +1095,7 @@ void OpenCLBuiltinFileEmitterBase::expandTypesInSignature(
// the full type name to the extension.
StringRef Ext =
Type->getValueAsDef("Extension")->getValueAsString("ExtName");
- if (!Ext.empty() && TypeExtMap.find(FullType) == TypeExtMap.end()) {
+ if (!Ext.empty() && !TypeExtMap.contains(FullType)) {
TypeExtMap.insert({FullType, Ext});
}
}
@@ -1260,11 +1272,76 @@ void OpenCLBuiltinTestEmitter::emit() {
}
}
+void OpenCLBuiltinHeaderEmitter::emit() {
+ emitSourceFileHeader("OpenCL Builtin declarations", OS);
+
+ emitExtensionSetup();
+
+ OS << R"(
+#define __ovld __attribute__((overloadable))
+#define __conv __attribute__((convergent))
+#define __purefn __attribute__((pure))
+#define __cnfn __attribute__((const))
+
+)";
+
+ // Iterate over all builtins; sort to follow order of definition in .td file.
+ std::vector<Record *> Builtins = Records.getAllDerivedDefinitions("Builtin");
+ llvm::sort(Builtins, LessRecord());
+
+ for (const auto *B : Builtins) {
+ StringRef Name = B->getValueAsString("Name");
+
+ std::string OptionalExtensionEndif = emitExtensionGuard(B);
+ std::string OptionalVersionEndif = emitVersionGuard(B);
+
+ SmallVector<SmallVector<std::string, 2>, 4> FTypes;
+ expandTypesInSignature(B->getValueAsListOfDefs("Signature"), FTypes);
+
+ for (const auto &Signature : FTypes) {
+ StringRef OptionalTypeExtEndif = emitTypeExtensionGuards(Signature);
+
+ // Emit function declaration.
+ OS << Signature[0] << " __ovld ";
+ if (B->getValueAsBit("IsConst"))
+ OS << "__cnfn ";
+ if (B->getValueAsBit("IsPure"))
+ OS << "__purefn ";
+ if (B->getValueAsBit("IsConv"))
+ OS << "__conv ";
+
+ OS << Name << "(";
+ if (Signature.size() > 1) {
+ for (unsigned I = 1; I < Signature.size(); I++) {
+ if (I != 1)
+ OS << ", ";
+ OS << Signature[I];
+ }
+ }
+ OS << ");\n";
+
+ OS << OptionalTypeExtEndif;
+ }
+
+ OS << OptionalVersionEndif;
+ OS << OptionalExtensionEndif;
+ }
+
+ OS << "\n// Disable any extensions we may have enabled previously.\n"
+ "#pragma OPENCL EXTENSION all : disable\n";
+}
+
void clang::EmitClangOpenCLBuiltins(RecordKeeper &Records, raw_ostream &OS) {
BuiltinNameEmitter NameChecker(Records, OS);
NameChecker.Emit();
}
+void clang::EmitClangOpenCLBuiltinHeader(RecordKeeper &Records,
+ raw_ostream &OS) {
+ OpenCLBuiltinHeaderEmitter HeaderFileGenerator(Records, OS);
+ HeaderFileGenerator.emit();
+}
+
void clang::EmitClangOpenCLBuiltinTests(RecordKeeper &Records,
raw_ostream &OS) {
OpenCLBuiltinTestEmitter TestFileGenerator(Records, OS);
diff --git a/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp
index 08f2344f5d0a..936724b9ce38 100644
--- a/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp
@@ -389,7 +389,7 @@ public:
Mods = getNextModifiers(Proto, Pos);
}
- for (auto Type : Types) {
+ for (const auto &Type : Types) {
// If this builtin takes an immediate argument, we need to #define it rather
// than use a standard declaration, so that SemaChecking can range check
// the immediate passed by the user.
@@ -2086,12 +2086,13 @@ void NeonEmitter::genOverloadTypeCheckCode(raw_ostream &OS,
std::string Name = Def->getName();
// Omit type checking for the pointer arguments of vld1_lane, vld1_dup,
- // and vst1_lane intrinsics. Using a pointer to the vector element
- // type with one of those operations causes codegen to select an aligned
- // load/store instruction. If you want an unaligned operation,
- // the pointer argument needs to have less alignment than element type,
- // so just accept any pointer type.
- if (Name == "vld1_lane" || Name == "vld1_dup" || Name == "vst1_lane") {
+ // vst1_lane, vldap1_lane, and vstl1_lane intrinsics. Using a pointer to
+ // the vector element type with one of those operations causes codegen to
+ // select an aligned load/store instruction. If you want an unaligned
+ // operation, the pointer argument needs to have less alignment than element
+ // type, so just accept any pointer type.
+ if (Name == "vld1_lane" || Name == "vld1_dup" || Name == "vst1_lane" ||
+ Name == "vldap1_lane" || Name == "vstl1_lane") {
PtrArgNum = -1;
HasConstPtr = false;
}
diff --git a/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
index 6926bbdf8d0f..2db48f3fa4af 100644
--- a/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -57,9 +57,6 @@ struct SemaRecord {
// Suffix of overloaded intrinsic name.
SmallVector<PrototypeDescriptor> OverloadedSuffix;
- // BitMask for supported policies.
- uint16_t PolicyBitMask;
-
// Number of field, large than 1 if it's segment load/store.
unsigned NF;
@@ -68,6 +65,8 @@ struct SemaRecord {
bool HasMaskedOffOperand :1;
bool HasTailPolicy : 1;
bool HasMaskPolicy : 1;
+ bool HasFRMRoundModeOp : 1;
+ bool IsTuple : 1;
uint8_t UnMaskedPolicyScheme : 2;
uint8_t MaskedPolicyScheme : 2;
};
@@ -158,6 +157,12 @@ static BasicType ParseBasicType(char c) {
}
}
+static VectorTypeModifier getTupleVTM(unsigned NF) {
+ assert(2 <= NF && NF <= 8 && "2 <= NF <= 8");
+ return static_cast<VectorTypeModifier>(
+ static_cast<uint8_t>(VectorTypeModifier::Tuple2) + (NF - 2));
+}
+
void emitCodeGenSwitchBody(const RVVIntrinsic *RVVI, raw_ostream &OS) {
if (!RVVI->getIRName().empty())
OS << " ID = Intrinsic::riscv_" + RVVI->getIRName() + ";\n";
@@ -366,34 +371,39 @@ void RVVEmitter::createHeader(raw_ostream &OS) {
TypeModifier::UnsignedInteger));
printType(*UT);
}
+ for (int NF = 2; NF <= 8; ++NF) {
+ auto TupleT = TypeCache.computeType(
+ BT, Log2LMUL,
+ PrototypeDescriptor(BaseTypeModifier::Vector, getTupleVTM(NF),
+ TypeModifier::SignedInteger));
+ auto TupleUT = TypeCache.computeType(
+ BT, Log2LMUL,
+ PrototypeDescriptor(BaseTypeModifier::Vector, getTupleVTM(NF),
+ TypeModifier::UnsignedInteger));
+ if (TupleT)
+ printType(*TupleT);
+ if (TupleUT)
+ printType(*TupleUT);
+ }
}
}
- OS << "#if defined(__riscv_zvfh)\n";
- for (int Log2LMUL : Log2LMULs) {
- auto T = TypeCache.computeType(BasicType::Float16, Log2LMUL,
- PrototypeDescriptor::Vector);
- if (T)
- printType(*T);
- }
- OS << "#endif\n";
-
- OS << "#if (__riscv_v_elen_fp >= 32)\n";
- for (int Log2LMUL : Log2LMULs) {
- auto T = TypeCache.computeType(BasicType::Float32, Log2LMUL,
- PrototypeDescriptor::Vector);
- if (T)
- printType(*T);
- }
- OS << "#endif\n";
- OS << "#if (__riscv_v_elen_fp >= 64)\n";
- for (int Log2LMUL : Log2LMULs) {
- auto T = TypeCache.computeType(BasicType::Float64, Log2LMUL,
- PrototypeDescriptor::Vector);
- if (T)
- printType(*T);
+ for (BasicType BT :
+ {BasicType::Float16, BasicType::Float32, BasicType::Float64}) {
+ for (int Log2LMUL : Log2LMULs) {
+ auto T = TypeCache.computeType(BT, Log2LMUL, PrototypeDescriptor::Vector);
+ if (T)
+ printType(*T);
+ for (int NF = 2; NF <= 8; ++NF) {
+ auto TupleT = TypeCache.computeType(
+ BT, Log2LMUL,
+ PrototypeDescriptor(BaseTypeModifier::Vector, getTupleVTM(NF),
+ TypeModifier::Float));
+ if (TupleT)
+ printType(*TupleT);
+ }
+ }
}
- OS << "#endif\n\n";
OS << "#define __riscv_v_intrinsic_overloading 1\n";
@@ -520,6 +530,8 @@ void RVVEmitter::createRVVIntrinsics(
StringRef IRName = R->getValueAsString("IRName");
StringRef MaskedIRName = R->getValueAsString("MaskedIRName");
unsigned NF = R->getValueAsInt("NF");
+ bool IsTuple = R->getValueAsBit("IsTuple");
+ bool HasFRMRoundModeOp = R->getValueAsBit("HasFRMRoundModeOp");
const Policy DefaultPolicy;
SmallVector<Policy> SupportedUnMaskedPolicies =
@@ -540,10 +552,12 @@ void RVVEmitter::createRVVIntrinsics(
auto Prototype = RVVIntrinsic::computeBuiltinTypes(
BasicPrototype, /*IsMasked=*/false,
/*HasMaskedOffOperand=*/false, HasVL, NF, UnMaskedPolicyScheme,
- DefaultPolicy);
- auto MaskedPrototype = RVVIntrinsic::computeBuiltinTypes(
- BasicPrototype, /*IsMasked=*/true, HasMaskedOffOperand, HasVL, NF,
- MaskedPolicyScheme, DefaultPolicy);
+ DefaultPolicy, IsTuple);
+ llvm::SmallVector<PrototypeDescriptor> MaskedPrototype;
+ if (HasMasked)
+ MaskedPrototype = RVVIntrinsic::computeBuiltinTypes(
+ BasicPrototype, /*IsMasked=*/true, HasMaskedOffOperand, HasVL, NF,
+ MaskedPolicyScheme, DefaultPolicy, IsTuple);
// Create Intrinsics for each type and LMUL.
for (char I : TypeRange) {
@@ -565,14 +579,14 @@ void RVVEmitter::createRVVIntrinsics(
/*IsMasked=*/false, /*HasMaskedOffOperand=*/false, HasVL,
UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
ManualCodegen, *Types, IntrinsicTypes, RequiredFeatures, NF,
- DefaultPolicy));
+ DefaultPolicy, HasFRMRoundModeOp));
if (UnMaskedPolicyScheme != PolicyScheme::SchemeNone)
for (auto P : SupportedUnMaskedPolicies) {
SmallVector<PrototypeDescriptor> PolicyPrototype =
RVVIntrinsic::computeBuiltinTypes(
BasicPrototype, /*IsMasked=*/false,
/*HasMaskedOffOperand=*/false, HasVL, NF,
- UnMaskedPolicyScheme, P);
+ UnMaskedPolicyScheme, P, IsTuple);
std::optional<RVVTypes> PolicyTypes =
TypeCache.computeTypes(BT, Log2LMUL, NF, PolicyPrototype);
Out.push_back(std::make_unique<RVVIntrinsic>(
@@ -580,7 +594,7 @@ void RVVEmitter::createRVVIntrinsics(
/*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL,
UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures,
- NF, P));
+ NF, P, HasFRMRoundModeOp));
}
if (!HasMasked)
continue;
@@ -591,14 +605,15 @@ void RVVEmitter::createRVVIntrinsics(
Name, SuffixStr, OverloadedName, OverloadedSuffixStr, MaskedIRName,
/*IsMasked=*/true, HasMaskedOffOperand, HasVL, MaskedPolicyScheme,
SupportOverloading, HasBuiltinAlias, ManualCodegen, *MaskTypes,
- IntrinsicTypes, RequiredFeatures, NF, DefaultPolicy));
+ IntrinsicTypes, RequiredFeatures, NF, DefaultPolicy,
+ HasFRMRoundModeOp));
if (MaskedPolicyScheme == PolicyScheme::SchemeNone)
continue;
for (auto P : SupportedMaskedPolicies) {
SmallVector<PrototypeDescriptor> PolicyPrototype =
RVVIntrinsic::computeBuiltinTypes(
BasicPrototype, /*IsMasked=*/true, HasMaskedOffOperand, HasVL,
- NF, MaskedPolicyScheme, P);
+ NF, MaskedPolicyScheme, P, IsTuple);
std::optional<RVVTypes> PolicyTypes =
TypeCache.computeTypes(BT, Log2LMUL, NF, PolicyPrototype);
Out.push_back(std::make_unique<RVVIntrinsic>(
@@ -606,7 +621,7 @@ void RVVEmitter::createRVVIntrinsics(
MaskedIRName, /*IsMasked=*/true, HasMaskedOffOperand, HasVL,
MaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures, NF,
- P));
+ P, HasFRMRoundModeOp));
}
} // End for Log2LMULList
} // End for TypeRange
@@ -640,7 +655,7 @@ void RVVEmitter::createRVVIntrinsics(
for (auto RequiredFeature : RequiredFeatures) {
RVVRequire RequireExt = StringSwitch<RVVRequire>(RequiredFeature)
.Case("RV64", RVV_REQ_RV64)
- .Case("FullMultiply", RVV_REQ_FullMultiply)
+ .Case("Xsfvcp", RVV_REQ_Xsfvcp)
.Default(RVV_REQ_None);
assert(RequireExt != RVV_REQ_None && "Unrecognized required feature?");
SR.RequiredExtensions |= RequireExt;
@@ -657,6 +672,8 @@ void RVVEmitter::createRVVIntrinsics(
SR.Prototype = std::move(BasicPrototype);
SR.Suffix = parsePrototypes(SuffixProto);
SR.OverloadedSuffix = parsePrototypes(OverloadedSuffixProto);
+ SR.IsTuple = IsTuple;
+ SR.HasFRMRoundModeOp = HasFRMRoundModeOp;
SemaRecords->push_back(SR);
}
@@ -698,6 +715,8 @@ void RVVEmitter::createRVVIntrinsicRecords(std::vector<RVVIntrinsicRecord> &Out,
R.HasMaskPolicy = SR.HasMaskPolicy;
R.UnMaskedPolicyScheme = SR.UnMaskedPolicyScheme;
R.MaskedPolicyScheme = SR.MaskedPolicyScheme;
+ R.IsTuple = SR.IsTuple;
+ R.HasFRMRoundModeOp = SR.HasFRMRoundModeOp;
assert(R.PrototypeIndex !=
static_cast<uint16_t>(SemaSignatureTable::INVALID_INDEX));
diff --git a/contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp
index bca61e703020..dbf5122fdf22 100644
--- a/contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp
@@ -66,7 +66,8 @@ public:
class SVEType {
TypeSpec TS;
bool Float, Signed, Immediate, Void, Constant, Pointer, BFloat;
- bool DefaultType, IsScalable, Predicate, PredicatePattern, PrefetchOp;
+ bool DefaultType, IsScalable, Predicate, PredicatePattern, PrefetchOp,
+ Svcount;
unsigned Bitwidth, ElementBitwidth, NumVectors;
public:
@@ -76,7 +77,8 @@ public:
: TS(TS), Float(false), Signed(true), Immediate(false), Void(false),
Constant(false), Pointer(false), BFloat(false), DefaultType(false),
IsScalable(true), Predicate(false), PredicatePattern(false),
- PrefetchOp(false), Bitwidth(128), ElementBitwidth(~0U), NumVectors(1) {
+ PrefetchOp(false), Svcount(false), Bitwidth(128), ElementBitwidth(~0U),
+ NumVectors(1) {
if (!TS.empty())
applyTypespec();
applyModifier(CharMod);
@@ -95,13 +97,16 @@ public:
bool isFloat() const { return Float && !BFloat; }
bool isBFloat() const { return BFloat && !Float; }
bool isFloatingPoint() const { return Float || BFloat; }
- bool isInteger() const { return !isFloatingPoint() && !Predicate; }
+ bool isInteger() const {
+ return !isFloatingPoint() && !Predicate && !Svcount;
+ }
bool isScalarPredicate() const {
return !isFloatingPoint() && Predicate && NumVectors == 0;
}
bool isPredicateVector() const { return Predicate; }
bool isPredicatePattern() const { return PredicatePattern; }
bool isPrefetchOp() const { return PrefetchOp; }
+ bool isSvcount() const { return Svcount; }
bool isConstant() const { return Constant; }
unsigned getElementSizeInBits() const { return ElementBitwidth; }
unsigned getNumVectors() const { return NumVectors; }
@@ -203,6 +208,9 @@ public:
/// ClassS, so will add type suffixes such as _u32/_s32.
std::string getMangledName() const { return mangleName(ClassS); }
+ /// As above, but mangles the LLVM name instead.
+ std::string getMangledLLVMName() const { return mangleLLVMName(); }
+
/// Returns true if the intrinsic is overloaded, in that it should also generate
/// a short form without the type-specifiers, e.g. 'svld1(..)' instead of
/// 'svld1_u32(..)'.
@@ -228,11 +236,12 @@ public:
}
/// Emits the intrinsic declaration to the ostream.
- void emitIntrinsic(raw_ostream &OS) const;
+ void emitIntrinsic(raw_ostream &OS, SVEEmitter &Emitter) const;
private:
std::string getMergeSuffix() const { return MergeSuffix; }
std::string mangleName(ClassKind LocalCK) const;
+ std::string mangleLLVMName() const;
std::string replaceTemplatedArgs(std::string Name, TypeSpec TS,
std::string Proto) const;
};
@@ -298,7 +307,8 @@ public:
auto It = FlagTypes.find(MaskName);
if (It != FlagTypes.end()) {
uint64_t Mask = It->getValue();
- unsigned Shift = llvm::countTrailingZeros(Mask);
+ unsigned Shift = llvm::countr_zero(Mask);
+ assert(Shift < 64 && "Mask value produced an invalid shift value");
return (V << Shift) & Mask;
}
llvm_unreachable("Unsupported flag");
@@ -346,8 +356,21 @@ public:
/// Create the SVETypeFlags used in CGBuiltins
void createTypeFlags(raw_ostream &o);
+ /// Emit arm_sme.h.
+ void createSMEHeader(raw_ostream &o);
+
+ /// Emit all the SME __builtin prototypes and code needed by Sema.
+ void createSMEBuiltins(raw_ostream &o);
+
+ /// Emit all the information needed to map builtin -> LLVM IR intrinsic.
+ void createSMECodeGenMap(raw_ostream &o);
+
+ /// Emit all the range checks for the immediates.
+ void createSMERangeChecks(raw_ostream &o);
+
/// Create intrinsic and add it to \p Out
- void createIntrinsic(Record *R, SmallVectorImpl<std::unique_ptr<Intrinsic>> &Out);
+ void createIntrinsic(Record *R,
+ SmallVectorImpl<std::unique_ptr<Intrinsic>> &Out);
};
} // end anonymous namespace
@@ -365,6 +388,9 @@ std::string SVEType::builtin_str() const {
if (isScalarPredicate())
return "b";
+ if (isSvcount())
+ return "Qa";
+
if (isVoidPointer())
S += "v";
else if (!isFloatingPoint())
@@ -428,13 +454,15 @@ std::string SVEType::str() const {
if (Void)
S += "void";
else {
- if (isScalableVector())
+ if (isScalableVector() || isSvcount())
S += "sv";
if (!Signed && !isFloatingPoint())
S += "u";
if (Float)
S += "float";
+ else if (isSvcount())
+ S += "count";
else if (isScalarPredicate() || isPredicateVector())
S += "bool";
else if (isBFloat())
@@ -442,7 +470,7 @@ std::string SVEType::str() const {
else
S += "int";
- if (!isScalarPredicate() && !isPredicateVector())
+ if (!isScalarPredicate() && !isPredicateVector() && !isSvcount())
S += utostr(ElementBitwidth);
if (!isScalableVector() && isVector())
S += "x" + utostr(getNumElements());
@@ -462,6 +490,9 @@ std::string SVEType::str() const {
void SVEType::applyTypespec() {
for (char I : TS) {
switch (I) {
+ case 'Q':
+ Svcount = true;
+ break;
case 'P':
Predicate = true;
break;
@@ -480,6 +511,9 @@ void SVEType::applyTypespec() {
case 'l':
ElementBitwidth = 64;
break;
+ case 'q':
+ ElementBitwidth = 128;
+ break;
case 'h':
Float = true;
ElementBitwidth = 16;
@@ -553,6 +587,7 @@ void SVEType::applyModifier(char Mod) {
Float = false;
BFloat = false;
Predicate = true;
+ Svcount = false;
Bitwidth = 16;
ElementBitwidth = 1;
break;
@@ -592,18 +627,21 @@ void SVEType::applyModifier(char Mod) {
break;
case 'u':
Predicate = false;
+ Svcount = false;
Signed = false;
Float = false;
BFloat = false;
break;
case 'x':
Predicate = false;
+ Svcount = false;
Signed = true;
Float = false;
BFloat = false;
break;
case 'i':
Predicate = false;
+ Svcount = false;
Float = false;
BFloat = false;
ElementBitwidth = Bitwidth = 64;
@@ -613,6 +651,7 @@ void SVEType::applyModifier(char Mod) {
break;
case 'I':
Predicate = false;
+ Svcount = false;
Float = false;
BFloat = false;
ElementBitwidth = Bitwidth = 32;
@@ -623,6 +662,7 @@ void SVEType::applyModifier(char Mod) {
break;
case 'J':
Predicate = false;
+ Svcount = false;
Float = false;
BFloat = false;
ElementBitwidth = Bitwidth = 32;
@@ -633,6 +673,7 @@ void SVEType::applyModifier(char Mod) {
break;
case 'k':
Predicate = false;
+ Svcount = false;
Signed = true;
Float = false;
BFloat = false;
@@ -641,6 +682,7 @@ void SVEType::applyModifier(char Mod) {
break;
case 'l':
Predicate = false;
+ Svcount = false;
Signed = true;
Float = false;
BFloat = false;
@@ -649,6 +691,7 @@ void SVEType::applyModifier(char Mod) {
break;
case 'm':
Predicate = false;
+ Svcount = false;
Signed = false;
Float = false;
BFloat = false;
@@ -657,6 +700,7 @@ void SVEType::applyModifier(char Mod) {
break;
case 'n':
Predicate = false;
+ Svcount = false;
Signed = false;
Float = false;
BFloat = false;
@@ -695,17 +739,20 @@ void SVEType::applyModifier(char Mod) {
break;
case 'O':
Predicate = false;
+ Svcount = false;
Float = true;
ElementBitwidth = 16;
break;
case 'M':
Predicate = false;
+ Svcount = false;
Float = true;
BFloat = false;
ElementBitwidth = 32;
break;
case 'N':
Predicate = false;
+ Svcount = false;
Float = true;
ElementBitwidth = 64;
break;
@@ -757,6 +804,11 @@ void SVEType::applyModifier(char Mod) {
NumVectors = 0;
Signed = true;
break;
+ case '%':
+ Pointer = true;
+ Void = true;
+ NumVectors = 0;
+ break;
case 'A':
Pointer = true;
ElementBitwidth = Bitwidth = 8;
@@ -799,6 +851,14 @@ void SVEType::applyModifier(char Mod) {
NumVectors = 0;
Signed = false;
break;
+ case '}':
+ Predicate = false;
+ Signed = true;
+ Svcount = true;
+ NumVectors = 0;
+ Float = false;
+ BFloat = false;
+ break;
default:
llvm_unreachable("Unhandled character!");
}
@@ -879,6 +939,8 @@ std::string Intrinsic::replaceTemplatedArgs(std::string Name, TypeSpec TS,
std::string TypeCode;
if (T.isInteger())
TypeCode = T.isSigned() ? 's' : 'u';
+ else if (T.isSvcount())
+ TypeCode = 'c';
else if (T.isPredicateVector())
TypeCode = 'b';
else if (T.isBFloat())
@@ -891,6 +953,13 @@ std::string Intrinsic::replaceTemplatedArgs(std::string Name, TypeSpec TS,
return Ret;
}
+std::string Intrinsic::mangleLLVMName() const {
+ std::string S = getLLVMName();
+
+ // Replace all {d} like expressions with e.g. 'u32'
+ return replaceTemplatedArgs(S, getBaseTypeSpec(), getProto());
+}
+
std::string Intrinsic::mangleName(ClassKind LocalCK) const {
std::string S = getName();
@@ -918,15 +987,29 @@ std::string Intrinsic::mangleName(ClassKind LocalCK) const {
getMergeSuffix();
}
-void Intrinsic::emitIntrinsic(raw_ostream &OS) const {
+void Intrinsic::emitIntrinsic(raw_ostream &OS, SVEEmitter &Emitter) const {
bool IsOverloaded = getClassKind() == ClassG && getProto().size() > 1;
std::string FullName = mangleName(ClassS);
std::string ProtoName = mangleName(getClassKind());
+ std::string SMEAttrs = "";
+
+ if (Flags & Emitter.getEnumValueForFlag("IsStreaming"))
+ SMEAttrs += ", arm_streaming";
+ if (Flags & Emitter.getEnumValueForFlag("IsStreamingCompatible"))
+ SMEAttrs += ", arm_streaming_compatible";
+ if (Flags & Emitter.getEnumValueForFlag("IsSharedZA"))
+ SMEAttrs += ", arm_shared_za";
+ if (Flags & Emitter.getEnumValueForFlag("IsPreservesZA"))
+ SMEAttrs += ", arm_preserves_za";
OS << (IsOverloaded ? "__aio " : "__ai ")
<< "__attribute__((__clang_arm_builtin_alias("
- << "__builtin_sve_" << FullName << ")))\n";
+ << (SMEAttrs.empty() ? "__builtin_sve_" : "__builtin_sme_")
+ << FullName << ")";
+ if (!SMEAttrs.empty())
+ OS << SMEAttrs;
+ OS << "))\n";
OS << getTypes()[0].str() << " " << ProtoName << "(";
for (unsigned I = 0; I < getTypes().size() - 1; ++I) {
@@ -959,7 +1042,7 @@ uint64_t SVEEmitter::encodeTypeFlags(const SVEType &T) {
return encodeEltType("EltTyBFloat16");
}
- if (T.isPredicateVector()) {
+ if (T.isPredicateVector() || T.isSvcount()) {
switch (T.getElementSizeInBits()) {
case 8:
return encodeEltType("EltTyBool8");
@@ -983,6 +1066,8 @@ uint64_t SVEEmitter::encodeTypeFlags(const SVEType &T) {
return encodeEltType("EltTyInt32");
case 64:
return encodeEltType("EltTyInt64");
+ case 128:
+ return encodeEltType("EltTyInt128");
default:
llvm_unreachable("Unhandled integer element bitwidth!");
}
@@ -1139,12 +1224,16 @@ void SVEEmitter::createHeader(raw_ostream &OS) {
OS << "typedef __clang_svfloat16x4_t svfloat16x4_t;\n";
OS << "typedef __clang_svfloat32x4_t svfloat32x4_t;\n";
OS << "typedef __clang_svfloat64x4_t svfloat64x4_t;\n";
- OS << "typedef __SVBool_t svbool_t;\n\n";
+ OS << "typedef __SVBool_t svbool_t;\n";
+ OS << "typedef __clang_svboolx2_t svboolx2_t;\n";
+ OS << "typedef __clang_svboolx4_t svboolx4_t;\n\n";
OS << "typedef __clang_svbfloat16x2_t svbfloat16x2_t;\n";
OS << "typedef __clang_svbfloat16x3_t svbfloat16x3_t;\n";
OS << "typedef __clang_svbfloat16x4_t svbfloat16x4_t;\n";
+ OS << "typedef __SVCount_t svcount_t;\n\n";
+
OS << "enum svpattern\n";
OS << "{\n";
OS << " SV_POW2 = 0,\n";
@@ -1225,7 +1314,7 @@ void SVEEmitter::createHeader(raw_ostream &OS) {
// Actually emit the intrinsic declarations.
for (auto &I : Defs)
- I->emitIntrinsic(OS);
+ I->emitIntrinsic(OS, *this);
OS << "#define svcvtnt_bf16_x svcvtnt_bf16_m\n";
OS << "#define svcvtnt_bf16_f32_x svcvtnt_bf16_f32_m\n";
@@ -1300,7 +1389,7 @@ void SVEEmitter::createCodeGenMap(raw_ostream &OS) {
uint64_t Flags = Def->getFlags();
auto FlagString = std::to_string(Flags);
- std::string LLVMName = Def->getLLVMName();
+ std::string LLVMName = Def->getMangledLLVMName();
std::string Builtin = Def->getMangledName();
if (!LLVMName.empty())
OS << "SVEMAP1(" << Builtin << ", " << LLVMName << ", " << FlagString
@@ -1374,6 +1463,167 @@ void SVEEmitter::createTypeFlags(raw_ostream &OS) {
OS << "#endif\n\n";
}
+void SVEEmitter::createSMEHeader(raw_ostream &OS) {
+ OS << "/*===---- arm_sme_draft_spec_subject_to_change.h - ARM SME intrinsics "
+ "------===\n"
+ " *\n"
+ " *\n"
+ " * Part of the LLVM Project, under the Apache License v2.0 with LLVM "
+ "Exceptions.\n"
+ " * See https://llvm.org/LICENSE.txt for license information.\n"
+ " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n"
+ " *\n"
+ " *===-----------------------------------------------------------------"
+ "------===\n"
+ " */\n\n";
+
+ OS << "#ifndef __ARM_SME_H\n";
+ OS << "#define __ARM_SME_H\n\n";
+
+ OS << "#if !defined(__LITTLE_ENDIAN__)\n";
+ OS << "#error \"Big endian is currently not supported for arm_sme_draft_spec_subject_to_change.h\"\n";
+ OS << "#endif\n";
+
+ OS << "#include <arm_sve.h> \n\n";
+
+ OS << "/* Function attributes */\n";
+ OS << "#define __ai static __inline__ __attribute__((__always_inline__, "
+ "__nodebug__))\n\n";
+ OS << "#define __aio static __inline__ __attribute__((__always_inline__, "
+ "__nodebug__, __overloadable__))\n\n";
+
+ OS << "#ifdef __cplusplus\n";
+ OS << "extern \"C\" {\n";
+ OS << "#endif\n\n";
+
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ for (auto *R : RV)
+ createIntrinsic(R, Defs);
+
+ // Sort intrinsics in header file by following order/priority similar to SVE:
+ // - Architectural guard
+ // - Class (is intrinsic overloaded or not)
+ // - Intrinsic name
+ std::stable_sort(Defs.begin(), Defs.end(),
+ [](const std::unique_ptr<Intrinsic> &A,
+ const std::unique_ptr<Intrinsic> &B) {
+ auto ToTuple = [](const std::unique_ptr<Intrinsic> &I) {
+ return std::make_tuple(I->getGuard(),
+ (unsigned)I->getClassKind(),
+ I->getName());
+ };
+ return ToTuple(A) < ToTuple(B);
+ });
+
+ // Actually emit the intrinsic declaration.
+ for (auto &I : Defs) {
+ I->emitIntrinsic(OS, *this);
+ }
+
+ OS << "#ifdef __cplusplus\n";
+ OS << "} // extern \"C\"\n";
+ OS << "#endif\n\n";
+ OS << "#undef __ai\n\n";
+ OS << "#endif /* __ARM_SME_H */\n";
+}
+
+void SVEEmitter::createSMEBuiltins(raw_ostream &OS) {
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ for (auto *R : RV) {
+ createIntrinsic(R, Defs);
+ }
+
+ // The mappings must be sorted based on BuiltinID.
+ llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A,
+ const std::unique_ptr<Intrinsic> &B) {
+ return A->getMangledName() < B->getMangledName();
+ });
+
+ OS << "#ifdef GET_SME_BUILTINS\n";
+ for (auto &Def : Defs) {
+ // Only create BUILTINs for non-overloaded intrinsics, as overloaded
+ // declarations only live in the header file.
+ if (Def->getClassKind() != ClassG)
+ OS << "TARGET_BUILTIN(__builtin_sme_" << Def->getMangledName() << ", \""
+ << Def->getBuiltinTypeStr() << "\", \"n\", \"" << Def->getGuard()
+ << "\")\n";
+ }
+
+ OS << "#endif\n\n";
+}
+
+void SVEEmitter::createSMECodeGenMap(raw_ostream &OS) {
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ for (auto *R : RV) {
+ createIntrinsic(R, Defs);
+ }
+
+ // The mappings must be sorted based on BuiltinID.
+ llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A,
+ const std::unique_ptr<Intrinsic> &B) {
+ return A->getMangledName() < B->getMangledName();
+ });
+
+ OS << "#ifdef GET_SME_LLVM_INTRINSIC_MAP\n";
+ for (auto &Def : Defs) {
+ // Builtins only exist for non-overloaded intrinsics, overloaded
+ // declarations only live in the header file.
+ if (Def->getClassKind() == ClassG)
+ continue;
+
+ uint64_t Flags = Def->getFlags();
+ auto FlagString = std::to_string(Flags);
+
+ std::string LLVMName = Def->getLLVMName();
+ std::string Builtin = Def->getMangledName();
+ if (!LLVMName.empty())
+ OS << "SMEMAP1(" << Builtin << ", " << LLVMName << ", " << FlagString
+ << "),\n";
+ else
+ OS << "SMEMAP2(" << Builtin << ", " << FlagString << "),\n";
+ }
+ OS << "#endif\n\n";
+}
+
+void SVEEmitter::createSMERangeChecks(raw_ostream &OS) {
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ for (auto *R : RV) {
+ createIntrinsic(R, Defs);
+ }
+
+ // The mappings must be sorted based on BuiltinID.
+ llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A,
+ const std::unique_ptr<Intrinsic> &B) {
+ return A->getMangledName() < B->getMangledName();
+ });
+
+
+ OS << "#ifdef GET_SME_IMMEDIATE_CHECK\n";
+
+ // Ensure these are only emitted once.
+ std::set<std::string> Emitted;
+
+ for (auto &Def : Defs) {
+ if (Emitted.find(Def->getMangledName()) != Emitted.end() ||
+ Def->getImmChecks().empty())
+ continue;
+
+ OS << "case SME::BI__builtin_sme_" << Def->getMangledName() << ":\n";
+ for (auto &Check : Def->getImmChecks())
+ OS << "ImmChecks.push_back(std::make_tuple(" << Check.getArg() << ", "
+ << Check.getKind() << ", " << Check.getElementSizeInBits() << "));\n";
+ OS << " break;\n";
+
+ Emitted.insert(Def->getMangledName());
+ }
+
+ OS << "#endif\n\n";
+}
+
namespace clang {
void EmitSveHeader(RecordKeeper &Records, raw_ostream &OS) {
SVEEmitter(Records).createHeader(OS);
@@ -1395,4 +1645,19 @@ void EmitSveTypeFlags(RecordKeeper &Records, raw_ostream &OS) {
SVEEmitter(Records).createTypeFlags(OS);
}
+void EmitSmeHeader(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createSMEHeader(OS);
+}
+
+void EmitSmeBuiltins(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createSMEBuiltins(OS);
+}
+
+void EmitSmeBuiltinCG(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createSMECodeGenMap(OS);
+}
+
+void EmitSmeRangeChecks(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createSMERangeChecks(OS);
+}
} // End namespace clang
diff --git a/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp b/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp
index 6864ba2040ef..38215abd9d9b 100644
--- a/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp
@@ -35,6 +35,7 @@ enum ActionType {
GenClangAttrSubjectMatchRuleList,
GenClangAttrPCHRead,
GenClangAttrPCHWrite,
+ GenClangAttrTokenKinds,
GenClangAttrHasAttributeImpl,
GenClangAttrSpellingListIndex,
GenClangAttrASTVisitor,
@@ -65,6 +66,7 @@ enum ActionType {
GenClangCommentCommandInfo,
GenClangCommentCommandList,
GenClangOpenCLBuiltins,
+ GenClangOpenCLBuiltinHeader,
GenClangOpenCLBuiltinTests,
GenArmNeon,
GenArmFP16,
@@ -81,6 +83,10 @@ enum ActionType {
GenArmSveBuiltinCG,
GenArmSveTypeFlags,
GenArmSveRangeChecks,
+ GenArmSmeHeader,
+ GenArmSmeBuiltins,
+ GenArmSmeBuiltinCG,
+ GenArmSmeRangeChecks,
GenArmCdeHeader,
GenArmCdeBuiltinDef,
GenArmCdeBuiltinSema,
@@ -90,6 +96,9 @@ enum ActionType {
GenRISCVVectorBuiltins,
GenRISCVVectorBuiltinCG,
GenRISCVVectorBuiltinSema,
+ GenRISCVSiFiveVectorBuiltins,
+ GenRISCVSiFiveVectorBuiltinCG,
+ GenRISCVSiFiveVectorBuiltinSema,
GenAttrDocs,
GenDiagDocs,
GenOptDocs,
@@ -127,6 +136,8 @@ cl::opt<ActionType> Action(
"Generate clang PCH attribute reader"),
clEnumValN(GenClangAttrPCHWrite, "gen-clang-attr-pch-write",
"Generate clang PCH attribute writer"),
+ clEnumValN(GenClangAttrTokenKinds, "gen-clang-attr-token-kinds",
+ "Generate a list of attribute-related clang tokens"),
clEnumValN(GenClangAttrHasAttributeImpl,
"gen-clang-attr-has-attribute-impl",
"Generate a clang attribute spelling list"),
@@ -200,6 +211,9 @@ cl::opt<ActionType> Action(
"documentation comments"),
clEnumValN(GenClangOpenCLBuiltins, "gen-clang-opencl-builtins",
"Generate OpenCL builtin declaration handlers"),
+ clEnumValN(GenClangOpenCLBuiltinHeader,
+ "gen-clang-opencl-builtin-header",
+ "Generate OpenCL builtin header"),
clEnumValN(GenClangOpenCLBuiltinTests, "gen-clang-opencl-builtin-tests",
"Generate OpenCL builtin declaration tests"),
clEnumValN(GenArmNeon, "gen-arm-neon", "Generate arm_neon.h for clang"),
@@ -219,6 +233,14 @@ cl::opt<ActionType> Action(
"Generate arm_sve_typeflags.inc for clang"),
clEnumValN(GenArmSveRangeChecks, "gen-arm-sve-sema-rangechecks",
"Generate arm_sve_sema_rangechecks.inc for clang"),
+ clEnumValN(GenArmSmeHeader, "gen-arm-sme-header",
+ "Generate arm_sme.h for clang"),
+ clEnumValN(GenArmSmeBuiltins, "gen-arm-sme-builtins",
+ "Generate arm_sme_builtins.inc for clang"),
+ clEnumValN(GenArmSmeBuiltinCG, "gen-arm-sme-builtin-codegen",
+ "Generate arm_sme_builtin_cg_map.inc for clang"),
+ clEnumValN(GenArmSmeRangeChecks, "gen-arm-sme-sema-rangechecks",
+ "Generate arm_sme_sema_rangechecks.inc for clang"),
clEnumValN(GenArmMveHeader, "gen-arm-mve-header",
"Generate arm_mve.h for clang"),
clEnumValN(GenArmMveBuiltinDef, "gen-arm-mve-builtin-def",
@@ -247,6 +269,12 @@ cl::opt<ActionType> Action(
"Generate riscv_vector_builtin_cg.inc for clang"),
clEnumValN(GenRISCVVectorBuiltinSema, "gen-riscv-vector-builtin-sema",
"Generate riscv_vector_builtin_sema.inc for clang"),
+ clEnumValN(GenRISCVSiFiveVectorBuiltins, "gen-riscv-sifive-vector-builtins",
+ "Generate riscv_sifive_vector_builtins.inc for clang"),
+ clEnumValN(GenRISCVSiFiveVectorBuiltinCG, "gen-riscv-sifive-vector-builtin-codegen",
+ "Generate riscv_sifive_vector_builtin_cg.inc for clang"),
+ clEnumValN(GenRISCVSiFiveVectorBuiltinSema, "gen-riscv-sifive-vector-builtin-sema",
+ "Generate riscv_sifive_vector_builtin_sema.inc for clang"),
clEnumValN(GenAttrDocs, "gen-attr-docs",
"Generate attribute documentation"),
clEnumValN(GenDiagDocs, "gen-diag-docs",
@@ -299,6 +327,9 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenClangAttrPCHWrite:
EmitClangAttrPCHWrite(Records, OS);
break;
+ case GenClangAttrTokenKinds:
+ EmitClangAttrTokenKinds(Records, OS);
+ break;
case GenClangAttrHasAttributeImpl:
EmitClangAttrHasAttrImpl(Records, OS);
break;
@@ -384,6 +415,9 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenClangOpenCLBuiltins:
EmitClangOpenCLBuiltins(Records, OS);
break;
+ case GenClangOpenCLBuiltinHeader:
+ EmitClangOpenCLBuiltinHeader(Records, OS);
+ break;
case GenClangOpenCLBuiltinTests:
EmitClangOpenCLBuiltinTests(Records, OS);
break;
@@ -438,6 +472,18 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenArmSveRangeChecks:
EmitSveRangeChecks(Records, OS);
break;
+ case GenArmSmeHeader:
+ EmitSmeHeader(Records, OS);
+ break;
+ case GenArmSmeBuiltins:
+ EmitSmeBuiltins(Records, OS);
+ break;
+ case GenArmSmeBuiltinCG:
+ EmitSmeBuiltinCG(Records, OS);
+ break;
+ case GenArmSmeRangeChecks:
+ EmitSmeRangeChecks(Records, OS);
+ break;
case GenArmCdeHeader:
EmitCdeHeader(Records, OS);
break;
@@ -465,6 +511,15 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenRISCVVectorBuiltinSema:
EmitRVVBuiltinSema(Records, OS);
break;
+ case GenRISCVSiFiveVectorBuiltins:
+ EmitRVVBuiltins(Records, OS);
+ break;
+ case GenRISCVSiFiveVectorBuiltinCG:
+ EmitRVVBuiltinCG(Records, OS);
+ break;
+ case GenRISCVSiFiveVectorBuiltinSema:
+ EmitRVVBuiltinSema(Records, OS);
+ break;
case GenAttrDocs:
EmitClangAttrDocs(Records, OS);
break;
diff --git a/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h b/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h
index 2ba857f66f50..8265a531a98f 100644
--- a/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h
+++ b/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h
@@ -43,6 +43,8 @@ void EmitClangAttrSubjectMatchRuleList(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS);
void EmitClangAttrPCHRead(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitClangAttrPCHWrite(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitClangAttrTokenKinds(llvm::RecordKeeper &Records,
+ llvm::raw_ostream &OS);
void EmitClangAttrHasAttrImpl(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS);
void EmitClangAttrSpellingListIndex(llvm::RecordKeeper &Records,
@@ -101,6 +103,11 @@ void EmitSveBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitSveTypeFlags(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitSveRangeChecks(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSmeHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSmeBuiltins(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSmeBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSmeRangeChecks(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+
void EmitMveHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitMveBuiltinDef(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitMveBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
@@ -124,6 +131,8 @@ void EmitClangOptDocs(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitClangOpenCLBuiltins(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS);
+void EmitClangOpenCLBuiltinHeader(llvm::RecordKeeper &Records,
+ llvm::raw_ostream &OS);
void EmitClangOpenCLBuiltinTests(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS);